summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorNathan Binkert <nate@binkert.org>2009-05-11 10:38:43 -0700
committerNathan Binkert <nate@binkert.org>2009-05-11 10:38:43 -0700
commit2f30950143cc70bc42a3c8a4111d7cf8198ec881 (patch)
tree708f6c22edb3c6feb31dd82866c26623a5329580 /src
parentc70241810d4e4f523f173c1646b008dc40faad8e (diff)
downloadgem5-2f30950143cc70bc42a3c8a4111d7cf8198ec881.tar.xz
ruby: Import ruby and slicc from GEMS
We eventually plan to replace the m5 cache hierarchy with the GEMS hierarchy, but for now we will make both live alongside each other.
Diffstat (limited to 'src')
-rw-r--r--src/mem/gems_common/Allocator.hh83
-rw-r--r--src/mem/gems_common/Map.hh186
-rw-r--r--src/mem/gems_common/PrioHeap.hh249
-rw-r--r--src/mem/gems_common/RefCnt.hh162
-rw-r--r--src/mem/gems_common/RefCnt_tester.cc78
-rw-r--r--src/mem/gems_common/RefCountable.hh59
-rw-r--r--src/mem/gems_common/Vector.hh334
-rwxr-xr-xsrc/mem/gems_common/calc_host.sh38
-rw-r--r--src/mem/gems_common/ioutil/attrlex.ll229
-rw-r--r--src/mem/gems_common/ioutil/attrparse.yy232
-rw-r--r--src/mem/gems_common/ioutil/confio.cc456
-rw-r--r--src/mem/gems_common/ioutil/confio.hh192
-rw-r--r--src/mem/gems_common/ioutil/embedtext.py54
-rw-r--r--src/mem/gems_common/ioutil/initvar.cc626
-rw-r--r--src/mem/gems_common/ioutil/initvar.hh181
-rw-r--r--src/mem/gems_common/ioutil/vardecl.hh75
-rw-r--r--src/mem/gems_common/std-includes.hh51
-rw-r--r--src/mem/gems_common/util.cc109
-rw-r--r--src/mem/gems_common/util.hh68
-rw-r--r--src/mem/protocol/LogTM.sm83
-rw-r--r--src/mem/protocol/MESI_CMP_directory-L1cache.sm867
-rw-r--r--src/mem/protocol/MESI_CMP_directory-L2cache.sm1036
-rw-r--r--src/mem/protocol/MESI_CMP_directory-mem.sm166
-rw-r--r--src/mem/protocol/MESI_CMP_directory-msg.sm112
-rw-r--r--src/mem/protocol/MESI_CMP_directory.slicc5
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm1800
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm2123
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory-mem.sm166
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory-msg.sm153
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory.slicc7
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm250
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory_m.slicc7
-rw-r--r--src/mem/protocol/MESI_SCMP_bankdirectory-L1cache.sm894
-rw-r--r--src/mem/protocol/MESI_SCMP_bankdirectory-L2cache.sm1052
-rw-r--r--src/mem/protocol/MESI_SCMP_bankdirectory-mem.sm166
-rw-r--r--src/mem/protocol/MESI_SCMP_bankdirectory-msg.sm112
-rw-r--r--src/mem/protocol/MESI_SCMP_bankdirectory.slicc5
-rw-r--r--src/mem/protocol/MESI_SCMP_bankdirectory_m-mem.sm250
-rw-r--r--src/mem/protocol/MESI_SCMP_bankdirectory_m.slicc5
-rw-r--r--src/mem/protocol/MI_example-cache.sm369
-rw-r--r--src/mem/protocol/MI_example-dir.sm257
-rw-r--r--src/mem/protocol/MI_example-msg.sm92
-rw-r--r--src/mem/protocol/MI_example.slicc4
-rw-r--r--src/mem/protocol/MOESI_CMP_directory-L1cache.sm1153
-rw-r--r--src/mem/protocol/MOESI_CMP_directory-L2cache.sm2569
-rw-r--r--src/mem/protocol/MOESI_CMP_directory-dir.sm573
-rw-r--r--src/mem/protocol/MOESI_CMP_directory-msg.sm126
-rw-r--r--src/mem/protocol/MOESI_CMP_directory-perfectDir.sm573
-rw-r--r--src/mem/protocol/MOESI_CMP_directory.slicc5
-rw-r--r--src/mem/protocol/MOESI_CMP_directory_m-dir.sm652
-rw-r--r--src/mem/protocol/MOESI_CMP_directory_m.slicc5
-rw-r--r--src/mem/protocol/MOESI_CMP_token-L1cache.sm2041
-rw-r--r--src/mem/protocol/MOESI_CMP_token-L2cache.sm1424
-rw-r--r--src/mem/protocol/MOESI_CMP_token-dir.sm435
-rw-r--r--src/mem/protocol/MOESI_CMP_token-msg.sm123
-rw-r--r--src/mem/protocol/MOESI_CMP_token.slicc5
-rw-r--r--src/mem/protocol/MOESI_SMP_directory-cache.sm981
-rw-r--r--src/mem/protocol/MOESI_SMP_directory-dir.sm495
-rw-r--r--src/mem/protocol/MOESI_SMP_directory-msg.sm89
-rw-r--r--src/mem/protocol/MOESI_SMP_directory.slicc4
-rw-r--r--src/mem/protocol/MOESI_SMP_token-cache.sm1734
-rw-r--r--src/mem/protocol/MOESI_SMP_token-dir.sm405
-rw-r--r--src/mem/protocol/MOESI_SMP_token-msg.sm61
-rw-r--r--src/mem/protocol/MOESI_SMP_token.slicc4
-rw-r--r--src/mem/protocol/MOSI_SMP_bcast-cache.sm1000
-rw-r--r--src/mem/protocol/MOSI_SMP_bcast-dir.sm267
-rw-r--r--src/mem/protocol/MOSI_SMP_bcast-msg.sm79
-rw-r--r--src/mem/protocol/MOSI_SMP_bcast.slicc4
-rw-r--r--src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm921
-rw-r--r--src/mem/protocol/MOSI_SMP_bcast_1level.slicc4
-rw-r--r--src/mem/protocol/MOSI_SMP_bcast_m-dir.sm345
-rw-r--r--src/mem/protocol/MOSI_SMP_bcast_m.slicc4
-rw-r--r--src/mem/protocol/MOSI_SMP_directory_1level-cache.sm838
-rw-r--r--src/mem/protocol/MOSI_SMP_directory_1level-dir.sm333
-rw-r--r--src/mem/protocol/MOSI_SMP_directory_1level-msg.sm74
-rw-r--r--src/mem/protocol/MOSI_SMP_directory_1level.slicc4
-rw-r--r--src/mem/protocol/MSI_MOSI_CMP_directory-L1cache.sm799
-rw-r--r--src/mem/protocol/MSI_MOSI_CMP_directory-L2cache.sm2191
-rw-r--r--src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm497
-rw-r--r--src/mem/protocol/MSI_MOSI_CMP_directory-msg.sm115
-rw-r--r--src/mem/protocol/MSI_MOSI_CMP_directory.slicc8
-rw-r--r--src/mem/protocol/RubySlicc_ComponentMapping.sm62
-rw-r--r--src/mem/protocol/RubySlicc_Defines.sm34
-rw-r--r--src/mem/protocol/RubySlicc_Exports.sm339
-rw-r--r--src/mem/protocol/RubySlicc_MemControl.sm67
-rw-r--r--src/mem/protocol/RubySlicc_Profiler.sm64
-rw-r--r--src/mem/protocol/RubySlicc_Types.sm168
-rw-r--r--src/mem/protocol/RubySlicc_Util.sm61
-rw-r--r--src/mem/protocol/RubySlicc_interfaces.slicc7
-rw-r--r--src/mem/protocol/doc/MSI_MOSI_CMP_directory_2level-protocol-description.txt49
-rw-r--r--src/mem/protocol/doc/MSI_dir_L1_MOSI_dir_L2_SNUCA_CMP-protocol-description.txt49
-rw-r--r--src/mem/protocol/standard_1level_SMP-protocol.sm39
-rw-r--r--src/mem/protocol/standard_CMP-protocol.sm36
-rw-r--r--src/mem/protocol/standard_SMP-protocol.sm39
-rw-r--r--src/mem/ruby/Decomissioning_note10
-rw-r--r--src/mem/ruby/FakeSimicsDataTypes.hh63
-rw-r--r--src/mem/ruby/README.debugging104
-rw-r--r--src/mem/ruby/buffers/MessageBuffer.cc363
-rw-r--r--src/mem/ruby/buffers/MessageBuffer.hh156
-rw-r--r--src/mem/ruby/buffers/MessageBufferNode.cc48
-rw-r--r--src/mem/ruby/buffers/MessageBufferNode.hh88
-rw-r--r--src/mem/ruby/common/Address.cc68
-rw-r--r--src/mem/ruby/common/Address.hh255
-rw-r--r--src/mem/ruby/common/BigSet.cc249
-rw-r--r--src/mem/ruby/common/BigSet.hh125
-rw-r--r--src/mem/ruby/common/Consumer.hh89
-rw-r--r--src/mem/ruby/common/DataBlock.cc91
-rw-r--r--src/mem/ruby/common/DataBlock.hh82
-rw-r--r--src/mem/ruby/common/Debug.cc369
-rw-r--r--src/mem/ruby/common/Debug.def17
-rw-r--r--src/mem/ruby/common/Debug.hh291
-rw-r--r--src/mem/ruby/common/Driver.cc39
-rw-r--r--src/mem/ruby/common/Driver.hh98
-rw-r--r--src/mem/ruby/common/Global.cc35
-rw-r--r--src/mem/ruby/common/Global.hh110
-rw-r--r--src/mem/ruby/common/Histogram.cc185
-rw-r--r--src/mem/ruby/common/Histogram.hh104
-rw-r--r--src/mem/ruby/common/Message.cc34
-rw-r--r--src/mem/ruby/common/NetDest.cc259
-rw-r--r--src/mem/ruby/common/NetDest.hh145
-rw-r--r--src/mem/ruby/common/OptBigSet.cc576
-rw-r--r--src/mem/ruby/common/OptBigSet.hh202
-rw-r--r--src/mem/ruby/common/Set.cc231
-rw-r--r--src/mem/ruby/common/Set.hh149
-rw-r--r--src/mem/ruby/common/SubBlock.cc81
-rw-r--r--src/mem/ruby/common/SubBlock.hh105
-rw-r--r--src/mem/ruby/config/RubyConfig.cc193
-rw-r--r--src/mem/ruby/config/RubyConfig.hh157
-rw-r--r--src/mem/ruby/config/config.include323
-rw-r--r--src/mem/ruby/config/rubyconfig.defaults466
-rw-r--r--src/mem/ruby/config/tester.defaults60
-rw-r--r--src/mem/ruby/eventqueue/EventQueue.cc120
-rw-r--r--src/mem/ruby/eventqueue/EventQueue.hh118
-rw-r--r--src/mem/ruby/eventqueue/EventQueueNode.cc47
-rw-r--r--src/mem/ruby/eventqueue/EventQueueNode.hh98
-rw-r--r--src/mem/ruby/eventqueue/EventQueue_tester.cc89
-rw-r--r--src/mem/ruby/init.cc307
-rw-r--r--src/mem/ruby/init.hh56
-rw-r--r--src/mem/ruby/interfaces/OpalInterface.cc446
-rw-r--r--src/mem/ruby/interfaces/OpalInterface.hh214
-rw-r--r--src/mem/ruby/interfaces/mf_api.hh165
-rw-r--r--src/mem/ruby/network/Network.hh148
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh17
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.cc349
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh142
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/InputUnit_d.cc95
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/InputUnit_d.hh172
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/NetworkHeader.hh54
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.cc351
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.hh96
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.cc103
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.hh91
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.cc46
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.hh86
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.cc111
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.hh95
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/Router_d.cc167
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/Router_d.hh101
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/RoutingUnit_d.cc93
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/RoutingUnit_d.hh61
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.cc230
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.hh82
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/Switch_d.cc98
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/Switch_d.hh73
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.cc271
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.hh86
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/VirtualChannel_d.cc92
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/VirtualChannel_d.hh119
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/flitBuffer_d.cc88
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/flitBuffer_d.hh87
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/flit_d.cc134
-rw-r--r--src/mem/ruby/network/garnet-fixed-pipeline/flit_d.hh166
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/FlexibleConsumer.hh50
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.cc307
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh117
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/InVcState.cc75
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/InVcState.hh61
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh123
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.cc306
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.hh98
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.cc147
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.hh88
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/OutVcState.cc60
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/OutVcState.hh55
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/Router.cc389
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/Router.hh98
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/VCarbiter.cc47
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/VCarbiter.hh56
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/flit.cc113
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/flit.hh97
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/flitBuffer.cc104
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/flitBuffer.hh78
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/netconfig.defaults8
-rw-r--r--src/mem/ruby/network/orion/NetworkPower.cc430
-rw-r--r--src/mem/ruby/network/orion/NetworkPower.hh32
-rw-r--r--src/mem/ruby/network/orion/SIM_port.hh172
-rw-r--r--src/mem/ruby/network/orion/SIM_power.hh384
-rw-r--r--src/mem/ruby/network/orion/SIM_power_test.hh285
-rw-r--r--src/mem/ruby/network/orion/parm_technology.hh474
-rw-r--r--src/mem/ruby/network/orion/power_arbiter.cc392
-rw-r--r--src/mem/ruby/network/orion/power_arbiter.hh90
-rw-r--r--src/mem/ruby/network/orion/power_array.cc2158
-rw-r--r--src/mem/ruby/network/orion/power_array.hh394
-rw-r--r--src/mem/ruby/network/orion/power_bus.cc215
-rw-r--r--src/mem/ruby/network/orion/power_bus.hh64
-rw-r--r--src/mem/ruby/network/orion/power_crossbar.cc365
-rw-r--r--src/mem/ruby/network/orion/power_crossbar.hh81
-rw-r--r--src/mem/ruby/network/orion/power_ll.cc270
-rw-r--r--src/mem/ruby/network/orion/power_ll.hh53
-rw-r--r--src/mem/ruby/network/orion/power_router_init.cc260
-rw-r--r--src/mem/ruby/network/orion/power_router_init.hh126
-rw-r--r--src/mem/ruby/network/orion/power_static.cc46
-rw-r--r--src/mem/ruby/network/orion/power_static.hh39
-rw-r--r--src/mem/ruby/network/orion/power_utils.cc157
-rw-r--r--src/mem/ruby/network/orion/power_utils.hh35
-rw-r--r--src/mem/ruby/network/simple/Network_Files/GarnetFileMaker.py45
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-16.txt78
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-4.txt56
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-8.txt61
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-1_L2Banks-16_Memories-16.txt190
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-16.txt90
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-4.txt78
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-32_Memories-4.txt123
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-4_Memories-16.txt78
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-4_Memories-4.txt66
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-1_Memories-1.txt10
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-256_Memories-1.txt780
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-32_Memories-1.txt107
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-64_Memories-1.txt204
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-2_ProcsPerChip-1_L2Banks-2_Memories-2.txt15
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-2_ProcsPerChip-2_L2Banks-2_Memories-2.txt15
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-32_ProcsPerChip-32_L2Banks-32_Memories-16.txt148
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-32_ProcsPerChip-32_L2Banks-32_Memories-4.txt126
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-4_ProcsPerChip-1_L2Banks-4_Memories-4.txt28
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-4_ProcsPerChip-4_L2Banks-4_Memories-4.txt24
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-7_ProcsPerChip-7_L2Banks-7_Memories-7.txt139
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-1_L2Banks-8_Memories-8.txt66
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-4_L2Banks-8_Memories-8.txt46
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-8_L2Banks-256_Memories-8.txt412
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-8_L2Banks-8_Memories-8.txt44
-rw-r--r--src/mem/ruby/network/simple/Network_Files/NetworkFileMaker.py44
-rw-r--r--src/mem/ruby/network/simple/Network_Files/TLC_Procs-8_ProcsPerChip-8_L2Banks-256_Memories-8.txt367
-rw-r--r--src/mem/ruby/network/simple/Network_Files/TOKEN_CMP_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-16.txt74
-rw-r--r--src/mem/ruby/network/simple/Network_Files/TOKEN_CMP_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-16.txt101
-rw-r--r--src/mem/ruby/network/simple/PerfectSwitch.cc319
-rw-r--r--src/mem/ruby/network/simple/PerfectSwitch.hh118
-rw-r--r--src/mem/ruby/network/simple/SimpleNetwork.cc257
-rw-r--r--src/mem/ruby/network/simple/SimpleNetwork.hh157
-rw-r--r--src/mem/ruby/network/simple/Switch.cc205
-rw-r--r--src/mem/ruby/network/simple/Switch.hh105
-rw-r--r--src/mem/ruby/network/simple/Throttle.cc291
-rw-r--r--src/mem/ruby/network/simple/Throttle.hh124
-rw-r--r--src/mem/ruby/network/simple/Topology.cc801
-rw-r--r--src/mem/ruby/network/simple/Topology.hh126
-rw-r--r--src/mem/ruby/profiler/AccessTraceForAddress.cc126
-rw-r--r--src/mem/ruby/profiler/AccessTraceForAddress.hh104
-rw-r--r--src/mem/ruby/profiler/AddressProfiler.cc310
-rw-r--r--src/mem/ruby/profiler/AddressProfiler.hh109
-rw-r--r--src/mem/ruby/profiler/CacheProfiler.cc151
-rw-r--r--src/mem/ruby/profiler/CacheProfiler.hh100
-rw-r--r--src/mem/ruby/profiler/Profiler.cc2294
-rw-r--r--src/mem/ruby/profiler/Profiler.hh449
-rw-r--r--src/mem/ruby/profiler/StoreTrace.cc158
-rw-r--r--src/mem/ruby/profiler/StoreTrace.hh109
-rw-r--r--src/mem/ruby/profiler/XactProfiler.cc263
-rw-r--r--src/mem/ruby/profiler/XactProfiler.hh125
-rw-r--r--src/mem/ruby/recorder/CacheRecorder.cc75
-rw-r--r--src/mem/ruby/recorder/CacheRecorder.hh87
-rw-r--r--src/mem/ruby/recorder/TraceRecord.cc132
-rw-r--r--src/mem/ruby/recorder/TraceRecord.hh101
-rw-r--r--src/mem/ruby/recorder/Tracer.cc126
-rw-r--r--src/mem/ruby/recorder/Tracer.hh94
-rw-r--r--src/mem/ruby/simics/commands.cc867
-rw-r--r--src/mem/ruby/simics/commands.hh106
-rw-r--r--src/mem/ruby/simics/interface.cc935
-rw-r--r--src/mem/ruby/simics/interface.hh152
-rw-r--r--src/mem/ruby/simics/simics_api_dummy.c105
-rw-r--r--src/mem/ruby/slicc_interface/AbstractCacheEntry.cc45
-rw-r--r--src/mem/ruby/slicc_interface/AbstractCacheEntry.hh81
-rw-r--r--src/mem/ruby/slicc_interface/AbstractChip.cc47
-rw-r--r--src/mem/ruby/slicc_interface/AbstractChip.hh126
-rw-r--r--src/mem/ruby/slicc_interface/AbstractProtocol.hh60
-rw-r--r--src/mem/ruby/slicc_interface/Message.hh91
-rw-r--r--src/mem/ruby/slicc_interface/NetworkMessage.hh115
-rw-r--r--src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh425
-rw-r--r--src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.cc161
-rw-r--r--src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh73
-rw-r--r--src/mem/ruby/slicc_interface/RubySlicc_Util.hh219
-rw-r--r--src/mem/ruby/slicc_interface/RubySlicc_includes.hh9
-rw-r--r--src/mem/ruby/system/AbstractBloomFilter.hh72
-rw-r--r--src/mem/ruby/system/AbstractMemOrCache.hh42
-rw-r--r--src/mem/ruby/system/AbstractReplacementPolicy.hh62
-rw-r--r--src/mem/ruby/system/BlockBloomFilter.cc147
-rw-r--r--src/mem/ruby/system/BlockBloomFilter.hh83
-rw-r--r--src/mem/ruby/system/BulkBloomFilter.cc233
-rw-r--r--src/mem/ruby/system/BulkBloomFilter.hh88
-rw-r--r--src/mem/ruby/system/CacheMemory.hh559
-rw-r--r--src/mem/ruby/system/DirectoryMemory.cc175
-rw-r--r--src/mem/ruby/system/DirectoryMemory.hh91
-rw-r--r--src/mem/ruby/system/GenericBloomFilter.cc154
-rw-r--r--src/mem/ruby/system/GenericBloomFilter.hh96
-rw-r--r--src/mem/ruby/system/H3BloomFilter.cc210
-rw-r--r--src/mem/ruby/system/H3BloomFilter.hh1259
-rw-r--r--src/mem/ruby/system/LRUPolicy.hh65
-rw-r--r--src/mem/ruby/system/LSB_CountingBloomFilter.cc141
-rw-r--r--src/mem/ruby/system/LSB_CountingBloomFilter.hh83
-rw-r--r--src/mem/ruby/system/MachineID.hh89
-rw-r--r--src/mem/ruby/system/MemoryControl.cc632
-rw-r--r--src/mem/ruby/system/MemoryControl.hh176
-rw-r--r--src/mem/ruby/system/MemoryNode.cc37
-rw-r--r--src/mem/ruby/system/MemoryNode.hh95
-rw-r--r--src/mem/ruby/system/MultiBitSelBloomFilter.cc191
-rw-r--r--src/mem/ruby/system/MultiBitSelBloomFilter.hh98
-rw-r--r--src/mem/ruby/system/MultiGrainBloomFilter.cc172
-rw-r--r--src/mem/ruby/system/MultiGrainBloomFilter.hh89
-rw-r--r--src/mem/ruby/system/NodeID.hh50
-rw-r--r--src/mem/ruby/system/NodePersistentTable.cc194
-rw-r--r--src/mem/ruby/system/NodePersistentTable.hh99
-rw-r--r--src/mem/ruby/system/NonCountingBloomFilter.cc145
-rw-r--r--src/mem/ruby/system/NonCountingBloomFilter.hh89
-rw-r--r--src/mem/ruby/system/PerfectCacheMemory.hh239
-rw-r--r--src/mem/ruby/system/PersistentArbiter.cc165
-rw-r--r--src/mem/ruby/system/PersistentArbiter.hh108
-rw-r--r--src/mem/ruby/system/PersistentTable.cc195
-rw-r--r--src/mem/ruby/system/PersistentTable.hh99
-rw-r--r--src/mem/ruby/system/PseudoLRUPolicy.hh110
-rw-r--r--src/mem/ruby/system/Sequencer.cc1161
-rw-r--r--src/mem/ruby/system/Sequencer.hh170
-rw-r--r--src/mem/ruby/system/StoreBuffer.cc300
-rw-r--r--src/mem/ruby/system/StoreBuffer.hh120
-rw-r--r--src/mem/ruby/system/StoreCache.cc178
-rw-r--r--src/mem/ruby/system/StoreCache.hh85
-rw-r--r--src/mem/ruby/system/System.cc269
-rw-r--r--src/mem/ruby/system/System.hh137
-rw-r--r--src/mem/ruby/system/TBETable.hh165
-rw-r--r--src/mem/ruby/system/TimerTable.cc129
-rw-r--r--src/mem/ruby/system/TimerTable.hh98
-rw-r--r--src/mem/ruby/tester/BarrierGenerator.cc333
-rw-r--r--src/mem/ruby/tester/BarrierGenerator.hh138
-rw-r--r--src/mem/ruby/tester/Check.cc251
-rw-r--r--src/mem/ruby/tester/Check.hh107
-rw-r--r--src/mem/ruby/tester/CheckTable.cc128
-rw-r--r--src/mem/ruby/tester/CheckTable.hh93
-rw-r--r--src/mem/ruby/tester/DetermGETXGenerator.cc151
-rw-r--r--src/mem/ruby/tester/DetermGETXGenerator.hh104
-rw-r--r--src/mem/ruby/tester/DetermInvGenerator.cc202
-rw-r--r--src/mem/ruby/tester/DetermInvGenerator.hh109
-rw-r--r--src/mem/ruby/tester/DetermSeriesGETSGenerator.cc149
-rw-r--r--src/mem/ruby/tester/DetermSeriesGETSGenerator.hh106
-rw-r--r--src/mem/ruby/tester/DeterministicDriver.cc282
-rw-r--r--src/mem/ruby/tester/DeterministicDriver.hh125
-rw-r--r--src/mem/ruby/tester/Instruction.cc51
-rw-r--r--src/mem/ruby/tester/Instruction.hh57
-rw-r--r--src/mem/ruby/tester/RaceyDriver.cc139
-rw-r--r--src/mem/ruby/tester/RaceyDriver.hh112
-rw-r--r--src/mem/ruby/tester/RequestGenerator.cc196
-rw-r--r--src/mem/ruby/tester/RequestGenerator.hh102
-rw-r--r--src/mem/ruby/tester/SpecifiedGenerator.cc48
-rw-r--r--src/mem/ruby/tester/SpecifiedGenerator.hh69
-rw-r--r--src/mem/ruby/tester/SyntheticDriver.cc296
-rw-r--r--src/mem/ruby/tester/SyntheticDriver.hh118
-rw-r--r--src/mem/ruby/tester/Tester.cc116
-rw-r--r--src/mem/ruby/tester/Tester.hh93
-rw-r--r--src/mem/ruby/tester/XactAbortRequestGenerator.cc403
-rw-r--r--src/mem/ruby/tester/XactAbortRequestGenerator.hh122
-rw-r--r--src/mem/ruby/tester/XactRequestGenerator.cc637
-rw-r--r--src/mem/ruby/tester/XactRequestGenerator.hh134
-rw-r--r--src/mem/ruby/tester/main.cc51
-rw-r--r--src/mem/ruby/tester/main.hh42
-rw-r--r--src/mem/ruby/tester/test_framework.cc431
-rw-r--r--src/mem/ruby/tester/test_framework.hh46
-rw-r--r--src/mem/slicc/README114
-rw-r--r--src/mem/slicc/ast/AST.cc39
-rw-r--r--src/mem/slicc/ast/AST.hh94
-rw-r--r--src/mem/slicc/ast/ASTs.hh90
-rw-r--r--src/mem/slicc/ast/ActionDeclAST.cc96
-rw-r--r--src/mem/slicc/ast/ActionDeclAST.hh85
-rw-r--r--src/mem/slicc/ast/AssignStatementAST.cc76
-rw-r--r--src/mem/slicc/ast/AssignStatementAST.hh85
-rw-r--r--src/mem/slicc/ast/CheckAllocateStatementAST.cc72
-rw-r--r--src/mem/slicc/ast/CheckAllocateStatementAST.hh82
-rw-r--r--src/mem/slicc/ast/CheckStopSlotsStatementAST.cc115
-rw-r--r--src/mem/slicc/ast/CheckStopSlotsStatementAST.hh85
-rw-r--r--src/mem/slicc/ast/ChipComponentAccessAST.cc244
-rw-r--r--src/mem/slicc/ast/ChipComponentAccessAST.hh101
-rw-r--r--src/mem/slicc/ast/CopyHeadStatementAST.cc85
-rw-r--r--src/mem/slicc/ast/CopyHeadStatementAST.hh87
-rw-r--r--src/mem/slicc/ast/DeclAST.cc39
-rw-r--r--src/mem/slicc/ast/DeclAST.hh85
-rw-r--r--src/mem/slicc/ast/DeclListAST.cc86
-rw-r--r--src/mem/slicc/ast/DeclListAST.hh84
-rw-r--r--src/mem/slicc/ast/EnqueueStatementAST.cc104
-rw-r--r--src/mem/slicc/ast/EnqueueStatementAST.hh93
-rw-r--r--src/mem/slicc/ast/EnumDeclAST.cc98
-rw-r--r--src/mem/slicc/ast/EnumDeclAST.hh86
-rw-r--r--src/mem/slicc/ast/EnumExprAST.cc76
-rw-r--r--src/mem/slicc/ast/EnumExprAST.hh85
-rw-r--r--src/mem/slicc/ast/ExprAST.cc39
-rw-r--r--src/mem/slicc/ast/ExprAST.hh84
-rw-r--r--src/mem/slicc/ast/ExprStatementAST.cc73
-rw-r--r--src/mem/slicc/ast/ExprStatementAST.hh83
-rw-r--r--src/mem/slicc/ast/FormalParamAST.cc61
-rw-r--r--src/mem/slicc/ast/FormalParamAST.hh84
-rw-r--r--src/mem/slicc/ast/FuncCallExprAST.cc224
-rw-r--r--src/mem/slicc/ast/FuncCallExprAST.hh89
-rw-r--r--src/mem/slicc/ast/FuncDeclAST.cc111
-rw-r--r--src/mem/slicc/ast/FuncDeclAST.hh90
-rw-r--r--src/mem/slicc/ast/IfStatementAST.cc98
-rw-r--r--src/mem/slicc/ast/IfStatementAST.hh89
-rw-r--r--src/mem/slicc/ast/InPortDeclAST.cc149
-rw-r--r--src/mem/slicc/ast/InPortDeclAST.hh91
-rw-r--r--src/mem/slicc/ast/InfixOperatorExprAST.cc121
-rw-r--r--src/mem/slicc/ast/InfixOperatorExprAST.hh85
-rw-r--r--src/mem/slicc/ast/LiteralExprAST.cc55
-rw-r--r--src/mem/slicc/ast/LiteralExprAST.hh83
-rw-r--r--src/mem/slicc/ast/Location.cc83
-rw-r--r--src/mem/slicc/ast/Location.hh93
-rw-r--r--src/mem/slicc/ast/MachineAST.cc96
-rw-r--r--src/mem/slicc/ast/MachineAST.hh88
-rw-r--r--src/mem/slicc/ast/MemberExprAST.cc72
-rw-r--r--src/mem/slicc/ast/MemberExprAST.hh83
-rw-r--r--src/mem/slicc/ast/MethodCallExprAST.cc150
-rw-r--r--src/mem/slicc/ast/MethodCallExprAST.hh93
-rw-r--r--src/mem/slicc/ast/ObjDeclAST.cc148
-rw-r--r--src/mem/slicc/ast/ObjDeclAST.hh86
-rw-r--r--src/mem/slicc/ast/OutPortDeclAST.cc79
-rw-r--r--src/mem/slicc/ast/OutPortDeclAST.hh89
-rw-r--r--src/mem/slicc/ast/PairAST.cc72
-rw-r--r--src/mem/slicc/ast/PairAST.hh86
-rw-r--r--src/mem/slicc/ast/PairListAST.cc49
-rw-r--r--src/mem/slicc/ast/PairListAST.hh82
-rw-r--r--src/mem/slicc/ast/PeekStatementAST.cc115
-rw-r--r--src/mem/slicc/ast/PeekStatementAST.hh91
-rw-r--r--src/mem/slicc/ast/ReturnStatementAST.cc79
-rw-r--r--src/mem/slicc/ast/ReturnStatementAST.hh83
-rw-r--r--src/mem/slicc/ast/StatementAST.cc60
-rw-r--r--src/mem/slicc/ast/StatementAST.hh88
-rw-r--r--src/mem/slicc/ast/StatementListAST.cc86
-rw-r--r--src/mem/slicc/ast/StatementListAST.hh85
-rw-r--r--src/mem/slicc/ast/TransitionDeclAST.cc89
-rw-r--r--src/mem/slicc/ast/TransitionDeclAST.hh89
-rw-r--r--src/mem/slicc/ast/TypeAST.cc67
-rw-r--r--src/mem/slicc/ast/TypeAST.hh83
-rw-r--r--src/mem/slicc/ast/TypeDeclAST.cc86
-rw-r--r--src/mem/slicc/ast/TypeDeclAST.hh86
-rw-r--r--src/mem/slicc/ast/TypeFieldAST.cc44
-rw-r--r--src/mem/slicc/ast/TypeFieldAST.hh83
-rw-r--r--src/mem/slicc/ast/TypeFieldEnumAST.cc82
-rw-r--r--src/mem/slicc/ast/TypeFieldEnumAST.hh86
-rw-r--r--src/mem/slicc/ast/TypeFieldMemberAST.cc84
-rw-r--r--src/mem/slicc/ast/TypeFieldMemberAST.hh91
-rw-r--r--src/mem/slicc/ast/TypeFieldMethodAST.cc81
-rw-r--r--src/mem/slicc/ast/TypeFieldMethodAST.hh87
-rw-r--r--src/mem/slicc/ast/VarExprAST.cc76
-rw-r--r--src/mem/slicc/ast/VarExprAST.hh86
-rw-r--r--src/mem/slicc/doc/SLICC_V03.txt307
-rw-r--r--src/mem/slicc/doc/tutorial.tex574
-rw-r--r--src/mem/slicc/generator/fileio.cc66
-rw-r--r--src/mem/slicc/generator/fileio.hh46
-rw-r--r--src/mem/slicc/generator/html_gen.cc125
-rw-r--r--src/mem/slicc/generator/html_gen.hh49
-rw-r--r--src/mem/slicc/generator/mif_gen.cc1718
-rw-r--r--src/mem/slicc/generator/mif_gen.hh45
-rw-r--r--src/mem/slicc/main.cc246
-rw-r--r--src/mem/slicc/main.hh48
-rw-r--r--src/mem/slicc/parser/lexer.ll118
-rw-r--r--src/mem/slicc/parser/parser.yy352
-rw-r--r--src/mem/slicc/slicc_global.hh127
-rw-r--r--src/mem/slicc/symbols/Action.hh52
-rw-r--r--src/mem/slicc/symbols/Event.hh45
-rw-r--r--src/mem/slicc/symbols/Func.cc144
-rw-r--r--src/mem/slicc/symbols/Func.hh96
-rw-r--r--src/mem/slicc/symbols/State.hh45
-rw-r--r--src/mem/slicc/symbols/StateMachine.cc993
-rw-r--r--src/mem/slicc/symbols/StateMachine.hh141
-rw-r--r--src/mem/slicc/symbols/Symbol.cc72
-rw-r--r--src/mem/slicc/symbols/Symbol.hh100
-rw-r--r--src/mem/slicc/symbols/SymbolTable.cc934
-rw-r--r--src/mem/slicc/symbols/SymbolTable.hh121
-rw-r--r--src/mem/slicc/symbols/Transition.cc173
-rw-r--r--src/mem/slicc/symbols/Transition.hh120
-rw-r--r--src/mem/slicc/symbols/Type.cc746
-rw-r--r--src/mem/slicc/symbols/Type.hh154
-rw-r--r--src/mem/slicc/symbols/Var.cc57
-rw-r--r--src/mem/slicc/symbols/Var.hh98
485 files changed, 101056 insertions, 0 deletions
diff --git a/src/mem/gems_common/Allocator.hh b/src/mem/gems_common/Allocator.hh
new file mode 100644
index 000000000..109696601
--- /dev/null
+++ b/src/mem/gems_common/Allocator.hh
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * $Id$
+ */
+
+#ifndef ALLOCATOR_H
+#define ALLOCATOR_H
+
+#include "Vector.hh"
+
+template <class TYPE>
+class Allocator {
+public:
+ // Constructor: the free pool starts empty and the allocation counter at zero
+ Allocator() { m_counter = 0; }
+
+ // Destructor: deletes only objects parked in the pool; pointers still held by callers are not freed here
+ ~Allocator() { for(int i=0; i<m_pool_vec.size(); i++) { delete m_pool_vec[i]; }}
+
+ // allocate() hands out a recycled (or new) object copy-assigned from obj; deallocate() returns it to the pool
+ TYPE* allocate(const TYPE& obj);
+ void deallocate(TYPE* obj_ptr);
+private:
+ // Copying is disallowed: declared private and left undefined (pre-C++11 idiom for "= delete")
+ Allocator(const Allocator& obj);
+ Allocator& operator=(const Allocator& obj);
+
+ // Private Methods (none)
+
+ // Data Members (m_ prefix): m_pool_vec is a LIFO free list; m_counter counts allocate() calls (used only for debug output)
+ Vector<TYPE*> m_pool_vec;
+ int m_counter;
+};
+
+template <class TYPE>
+inline
+TYPE* Allocator<TYPE>::allocate(const TYPE& obj)
+{
+ m_counter++;
+ DEBUG_EXPR(ALLOCATOR_COMP, LowPrio, m_counter);
+ TYPE* new_obj_ptr;
+
+ // If the free pool is empty, grow it by one default-constructed object
+ if (m_pool_vec.size() == 0) {
+ // TYPE must therefore be default-constructible (and copy-assignable, below)
+ m_pool_vec.insertAtBottom(new TYPE);
+ }
+
+ // Pop the last entry -- the pool is used as a LIFO stack
+ int size = m_pool_vec.size();
+ new_obj_ptr = m_pool_vec[size-1];
+ m_pool_vec.setSize(size-1);
+
+ // Overwrite the recycled object's state with a copy of obj before handing it out
+ *new_obj_ptr = obj;
+ return new_obj_ptr;
+}
+
+template <class TYPE>
+inline
+void Allocator<TYPE>::deallocate(TYPE* obj)
+{
+ // Return the object to the free pool for reuse; it is NOT destroyed here -- its stale state persists until allocate() overwrites it
+ m_pool_vec.insertAtBottom(obj);
+}
+
+#endif //ALLOCATOR_H
diff --git a/src/mem/gems_common/Map.hh b/src/mem/gems_common/Map.hh
new file mode 100644
index 000000000..1ecd13d64
--- /dev/null
+++ b/src/mem/gems_common/Map.hh
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#ifndef MAP_H
+#define MAP_H
+
+#include "Vector.hh"
+
+// Hash support for the non-standard __gnu_cxx::hash_map used by Map
+// below.  NOTE(review): __gnu_cxx::hash is a GNU libstdc++ extension
+// (<ext/hash_map>); this header will not build with other standard
+// libraries -- confirm the build always uses g++.
+namespace __gnu_cxx {
+  // Hash a std::string by delegating to the extension's C-string hash.
+  template <> struct hash <std::string>
+  {
+    size_t operator()(const string& s) const { return hash<char*>()(s.c_str()); }
+  };
+}
+
+typedef unsigned long long uint64;
+//hack for uint64 hashes...
+namespace __gnu_cxx {
+  // Identity hash for 64-bit keys (truncates to size_t on 32-bit hosts).
+  template <> struct hash <uint64>
+  {
+    size_t operator()(const uint64 & s) const { return (size_t) s; }
+  };
+}
+
+// Map: a thin wrapper around __gnu_cxx::hash_map that returns GEMS
+// Vector objects from keys()/values() and adds asserting accessors.
+// KEY_TYPE must have a hash<> specialization (see above).
+template <class KEY_TYPE, class VALUE_TYPE>
+class Map
+{
+public:
+  Map() { /* empty */ }
+  ~Map() { /* empty */ }
+
+  void add(const KEY_TYPE& key, const VALUE_TYPE& value);  // insert or overwrite
+  bool exist(const KEY_TYPE& key) const;                   // true if key present
+  int size() const { return m_map.size(); }
+  // Remove key; asserts that it is present.
+  void erase(const KEY_TYPE& key) { assert(exist(key)); m_map.erase(key); }
+  // All keys / all values, in unspecified hash order.
+  Vector<KEY_TYPE> keys() const;
+  Vector<VALUE_TYPE> values() const;
+  // Call delete on every key / every value; pointer types only.
+  void deleteKeys();
+  void deleteValues();
+  // Reference to the value under key; asserts that it is present.
+  VALUE_TYPE& lookup(const KEY_TYPE& key) const;
+  void clear() { m_map.clear(); }
+  void print(ostream& out) const;
+
+  // Synonyms
+  void remove(const KEY_TYPE& key) { erase(key); }
+  void deallocate(const KEY_TYPE& key) { erase(key); }
+  // Insert a default-constructed value under key.
+  void allocate(const KEY_TYPE& key) { add(key, VALUE_TYPE()); }
+  void insert(const KEY_TYPE& key, const VALUE_TYPE& value) { add(key, value); }
+
+  // Use default copy constructor and assignment operator
+private:
+  // Data members
+
+  // m_map is declared mutable because some methods from the STL "map"
+  // class that should be const are not.  Thus we define this as
+  // mutable so we can still have conceptually const accessors.
+  mutable __gnu_cxx::hash_map<KEY_TYPE, VALUE_TYPE> m_map;
+};
+
+template <class KEY_TYPE, class VALUE_TYPE>
+ostream& operator<<(ostream& out, const Map<KEY_TYPE, VALUE_TYPE>& map);
+
+// *********************
+
+template <class KEY_TYPE, class VALUE_TYPE>
+void Map<KEY_TYPE, VALUE_TYPE>::add(const KEY_TYPE& key, const VALUE_TYPE& value)
+{
+ // Update or add a new key/value pair
+ m_map[key] = value;
+}
+
+template <class KEY_TYPE, class VALUE_TYPE>
+bool Map<KEY_TYPE, VALUE_TYPE>::exist(const KEY_TYPE& key) const
+{
+ return (m_map.count(key) != 0);
+}
+
+template <class KEY_TYPE, class VALUE_TYPE>
+VALUE_TYPE& Map<KEY_TYPE, VALUE_TYPE>::lookup(const KEY_TYPE& key) const
+{
+ assert(exist(key));
+ return m_map[key];
+}
+
+template <class KEY_TYPE, class VALUE_TYPE>
+Vector<KEY_TYPE> Map<KEY_TYPE, VALUE_TYPE>::keys() const
+{
+ Vector<KEY_TYPE> keys;
+ typename hash_map<KEY_TYPE, VALUE_TYPE>::const_iterator iter;
+ for (iter = m_map.begin(); iter != m_map.end(); iter++) {
+ keys.insertAtBottom((*iter).first);
+ }
+ return keys;
+}
+
+template <class KEY_TYPE, class VALUE_TYPE>
+Vector<VALUE_TYPE> Map<KEY_TYPE, VALUE_TYPE>::values() const
+{
+ Vector<VALUE_TYPE> values;
+ typename hash_map<KEY_TYPE, VALUE_TYPE>::const_iterator iter;
+ pair<KEY_TYPE, VALUE_TYPE> p;
+
+ for (iter = m_map.begin(); iter != m_map.end(); iter++) {
+ p = *iter;
+ values.insertAtBottom(p.second);
+ }
+ return values;
+}
+
+template <class KEY_TYPE, class VALUE_TYPE>
+void Map<KEY_TYPE, VALUE_TYPE>::deleteKeys()
+{
+ typename hash_map<KEY_TYPE, VALUE_TYPE>::const_iterator iter;
+ pair<KEY_TYPE, VALUE_TYPE> p;
+
+ for (iter = m_map.begin(); iter != m_map.end(); iter++) {
+ p = *iter;
+ delete p.first;
+ }
+}
+
+template <class KEY_TYPE, class VALUE_TYPE>
+void Map<KEY_TYPE, VALUE_TYPE>::deleteValues()
+{
+ typename hash_map<KEY_TYPE, VALUE_TYPE>::const_iterator iter;
+ pair<KEY_TYPE, VALUE_TYPE> p;
+
+ for (iter = m_map.begin(); iter != m_map.end(); iter++) {
+ p = *iter;
+ delete p.second;
+ }
+}
+
+template <class KEY_TYPE, class VALUE_TYPE>
+void Map<KEY_TYPE, VALUE_TYPE>::print(ostream& out) const
+{
+ typename hash_map<KEY_TYPE, VALUE_TYPE>::const_iterator iter;
+ pair<KEY_TYPE, VALUE_TYPE> p;
+
+ out << "[";
+ for (iter = m_map.begin(); iter != m_map.end(); iter++) {
+ // unparse each basic block
+ p = *iter;
+ out << " " << p.first << "=" << p.second;
+ }
+ out << " ]";
+}
+
+template <class KEY_TYPE, class VALUE_TYPE>
+ostream& operator<<(ostream& out, const Map<KEY_TYPE, VALUE_TYPE>& map)
+{
+ map.print(out);
+ return out;
+}
+
+#endif //MAP_H
diff --git a/src/mem/gems_common/PrioHeap.hh b/src/mem/gems_common/PrioHeap.hh
new file mode 100644
index 000000000..d549f0944
--- /dev/null
+++ b/src/mem/gems_common/PrioHeap.hh
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PRIOHEAP_H
+#define PRIOHEAP_H
+
+#include "Vector.hh"
+
+// Index into the 1-based implicit binary heap array (slot 0 is unused).
+typedef unsigned int HeapIndex;
+
+// PrioHeap: a min-heap over TYPE, ordered by the external comparison
+// node_less_then_eq(a, b).  Live elements occupy m_heap[1..m_current_size];
+// slot 0 is padding so parent/child index arithmetic stays simple.
+template <class TYPE>
+class PrioHeap {
+public:
+  // Constructors
+  PrioHeap() { init(); }
+
+  // Destructor
+  //~PrioHeap();
+
+  // Public Methods
+  void init() { m_current_size = 0; }
+  int size() const { return m_current_size; }
+  void insert(const TYPE& key);             // add an element, O(log n)
+  const TYPE& peekMin() const;              // smallest element, no removal
+  const TYPE& peekElement(int index) const; // raw access to heap slot 'index'
+  TYPE extractMin();                        // remove and return the smallest
+  void print(ostream& out) const;
+private:
+  // Private Methods
+  bool verifyHeap() const;                  // check heap property (whole heap)
+  bool verifyHeap(HeapIndex index) const;   // ...for the subtree at 'index'
+  void heapify();                           // sift the root down after extract
+
+  // Private copy constructor and assignment operator
+  PrioHeap(const PrioHeap& obj);
+  PrioHeap<TYPE>& operator=(const PrioHeap& obj);
+
+  // Data Members (m_ prefix)
+  Vector<TYPE> m_heap;      // heap storage; element 0 is unused padding
+  HeapIndex m_current_size; // number of live elements (indices 1..size)
+};
+
+// Output operator declaration
+template <class TYPE>
+ostream& operator<<(ostream& out, const PrioHeap<TYPE>& obj);
+
+// ******************* Helper Functions *******************
+inline
+HeapIndex get_parent(HeapIndex i)
+{
+ // return (i/2);
+ return (i>>1);
+}
+
+inline
+HeapIndex get_right(HeapIndex i)
+{
+ // return (2*i) + 1;
+ return (i<<1) | 1;
+}
+
+inline
+HeapIndex get_left(HeapIndex i)
+{
+ // return (2*i);
+ return (i<<1);
+}
+
+template <class TYPE>
+void prio_heap_swap(TYPE& n1, TYPE& n2)
+{
+ TYPE temp = n1;
+ n1 = n2;
+ n2 = temp;
+}
+
+// ******************* Definitions *******************
+
+template <class TYPE>
+void PrioHeap<TYPE>::insert(const TYPE& key)
+{
+ int i;
+ // grow the vector size
+ m_current_size++;
+ m_heap.setSize(m_current_size+1);
+
+ if(m_current_size == 1){ // HACK: need to initialize index 0 to avoid purify UMCs
+ m_heap[0] = key;
+ }
+
+ i = m_current_size;
+ while ((i > 1) && (node_less_then_eq(key, m_heap[get_parent(i)]))) {
+ m_heap[i] = m_heap[get_parent(i)];
+ i = get_parent(i);
+ }
+ m_heap[i] = key;
+ // assert(verifyHeap());
+}
+
+template <class TYPE>
+const TYPE& PrioHeap<TYPE>::peekMin() const
+{
+ assert(size() > 0);
+ return m_heap[1]; // 1, not 0, is the first element
+}
+
+template <class TYPE>
+const TYPE& PrioHeap<TYPE>::peekElement(int index) const
+{
+ assert(size() > 0);
+ return m_heap[index];
+}
+
+template <class TYPE>
+TYPE PrioHeap<TYPE>::extractMin()
+{
+ // TYPE temp;
+ assert(size() > 0);
+ TYPE temp = m_heap[1]; // 1, not 0, is the first element
+ m_heap[1] = m_heap[m_current_size];
+ m_current_size--;
+ heapify();
+ return temp;
+}
+
+template <class TYPE>
+bool PrioHeap<TYPE>::verifyHeap() const
+{
+ return verifyHeap(1);
+}
+
+template <class TYPE>
+bool PrioHeap<TYPE>::verifyHeap(HeapIndex index) const
+{
+ // Recursively verify that each node is <= its parent
+ if(index > m_current_size) {
+ return true;
+ } else if (index == 1) {
+ return
+ verifyHeap(get_right(index)) &&
+ verifyHeap(get_left(index));
+ } else if (node_less_then_eq(m_heap[get_parent(index)], m_heap[index])) {
+ return
+ verifyHeap(get_right(index)) &&
+ verifyHeap(get_left(index));
+ } else {
+ // Heap property violation
+ return false;
+ }
+}
+
+template <class TYPE>
+void PrioHeap<TYPE>::heapify()
+{
+ HeapIndex current_node = 1;
+ HeapIndex left, right, smallest;
+ // HeapIndex size = m_current_size;
+
+ while(true) {
+ left = get_left(current_node);
+ right = get_right(current_node);
+
+ // Find the smallest of the current node and children
+ if (left <= m_current_size && node_less_then_eq(m_heap[left], m_heap[current_node])) {
+ smallest = left;
+ } else {
+ smallest = current_node;
+ }
+
+ if (right <= m_current_size && node_less_then_eq(m_heap[right], m_heap[smallest])) {
+ smallest = right;
+ }
+
+ // Check to see if we are done
+ if (smallest == current_node) {
+ // We are done
+ break;
+ } else {
+ // Not done, heapify on the smallest child
+ prio_heap_swap(m_heap[current_node], m_heap[smallest]);
+ current_node = smallest;
+ }
+ }
+ // assert(verifyHeap());
+}
+
+template <class TYPE>
+void PrioHeap<TYPE>::print(ostream& out) const
+{
+ Vector<TYPE> copyHeap(m_heap);
+
+ // sort copyHeap (inefficient, but will not be done often)
+
+ for(HeapIndex i=0;i<m_current_size; i++){
+ for(HeapIndex j=0; j< m_current_size; j++){
+ if(copyHeap[i].m_time < copyHeap[j].m_time){
+ prio_heap_swap(copyHeap[i], copyHeap[j]);
+ }
+ }
+ }
+
+ out << "[PrioHeap: ";
+
+ for(HeapIndex i=1; i<= m_current_size; i++){
+ out << copyHeap[i];
+
+ if(i != m_current_size-1){
+ out << ",";
+ }
+ out << " ";
+ }
+ out << "]";
+}
+
+// Output operator definition
+template <class TYPE>
+ostream& operator<<(ostream& out, const PrioHeap<TYPE>& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //PRIOHEAP_H
diff --git a/src/mem/gems_common/RefCnt.hh b/src/mem/gems_common/RefCnt.hh
new file mode 100644
index 000000000..fc1ddbae9
--- /dev/null
+++ b/src/mem/gems_common/RefCnt.hh
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef REFCNT_H
+#define REFCNT_H
+
+// RefCnt<TYPE>: an intrusive reference-counting handle.  TYPE must
+// provide clone(), setRefCnt(), incRefCnt(), decRefCnt(), getRefCnt(),
+// destroy(), and print() (see RefCountable.hh).  Constructing from a
+// TYPE clones it; copies of the handle share the clone, and the last
+// handle to release it calls destroy().
+template <class TYPE>
+class RefCnt {
+public:
+  // Constructors
+  RefCnt();
+  // NOTE(review): not 'explicit', so a TYPE silently converts to a
+  // RefCnt<TYPE> (cloning it) -- confirm callers rely on this.
+  RefCnt(const TYPE& data);
+
+  // Destructor
+  ~RefCnt();
+
+  // Public Methods
+  const TYPE* ref() const { return m_data_ptr; }
+  TYPE* ref() { return m_data_ptr; }
+  // Returns a mutable pointer even from a const handle, bypassing
+  // const-correctness; existing callers appear to depend on it.
+  TYPE* mod_ref() const { return m_data_ptr; }
+  void freeRef();
+  void print(ostream& out) const;
+
+  // Public copy constructor and assignment operator
+  RefCnt(const RefCnt& obj);
+  RefCnt& operator=(const RefCnt& obj);
+
+private:
+  // Private Methods
+
+  // Data Members (m_ prefix)
+  TYPE* m_data_ptr;  // shared object, or NULL for an empty handle
+  // int* m_count_ptr; // Not used yet
+};
+
+// Output operator declaration
+template <class TYPE>
+inline
+ostream& operator<<(ostream& out, const RefCnt<TYPE>& obj);
+
+// ******************* Definitions *******************
+
+// Constructors
+template <class TYPE>
+inline
+RefCnt<TYPE>::RefCnt()
+{
+ m_data_ptr = NULL;
+}
+
+template <class TYPE>
+inline
+RefCnt<TYPE>::RefCnt(const TYPE& data)
+{
+ m_data_ptr = data.clone();
+ m_data_ptr->setRefCnt(1);
+}
+
+template <class TYPE>
+inline
+RefCnt<TYPE>::~RefCnt()
+{
+ freeRef();
+}
+
+template <class TYPE>
+inline
+void RefCnt<TYPE>::freeRef()
+{
+ if (m_data_ptr != NULL) {
+ m_data_ptr->decRefCnt();
+ if (m_data_ptr->getRefCnt() == 0) {
+ m_data_ptr->destroy();
+ }
+ m_data_ptr = NULL;
+ }
+}
+
+template <class TYPE>
+inline
+void RefCnt<TYPE>::print(ostream& out) const
+{
+ if (m_data_ptr == NULL) {
+ out << "[RefCnt: Null]";
+ } else {
+ out << "[RefCnt: ";
+ m_data_ptr->print(out);
+ out << "]";
+ }
+}
+
+// Copy constructor
+template <class TYPE>
+inline
+RefCnt<TYPE>::RefCnt(const RefCnt<TYPE>& obj)
+{
+ // m_data_ptr = obj.m_data_ptr->clone();
+ m_data_ptr = obj.m_data_ptr;
+
+ // Increment the reference count
+ if (m_data_ptr != NULL) {
+ m_data_ptr->incRefCnt();
+ }
+}
+
+// Assignment operator
+template <class TYPE>
+inline
+RefCnt<TYPE>& RefCnt<TYPE>::operator=(const RefCnt<TYPE>& obj)
+{
+ if (this == &obj) {
+ // If this is the case, do nothing
+ // assert(false);
+ } else {
+ freeRef();
+ m_data_ptr = obj.m_data_ptr;
+ if (m_data_ptr != NULL) {
+ m_data_ptr->incRefCnt();
+ }
+ }
+ return *this;
+}
+
+
+// Output operator definition
+template <class TYPE>
+inline
+ostream& operator<<(ostream& out, const RefCnt<TYPE>& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+
+
+#endif //REFCNT_H
diff --git a/src/mem/gems_common/RefCnt_tester.cc b/src/mem/gems_common/RefCnt_tester.cc
new file mode 100644
index 000000000..574f8fe3c
--- /dev/null
+++ b/src/mem/gems_common/RefCnt_tester.cc
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Code used to test the RefCnt class
+ */
+
+#include "RefCnt.hh"
+#include "RefCountable.hh"
+
+class Foo : public RefCountable {
+public:
+ int m_data;
+ Foo* clone() const;
+private:
+};
+
+Foo* Foo::clone() const
+{
+ Foo* temp_ptr;
+ temp_ptr = new Foo;
+ *temp_ptr = *this;
+ cout << "Cloned!" << endl;
+ return temp_ptr;
+}
+
+void bar(RefCnt<Foo> f)
+{
+ cout << f.ref()->m_data << endl;
+}
+
+Foo f2;
+
+int main()
+{
+ Foo f;
+ f.m_data = 2;
+
+ {
+ RefCnt<Foo> a(f);
+
+ f.m_data = 3;
+ cout << a.ref()->m_data << endl;
+ cout << f.m_data << endl;
+ f2 = f;
+ }
+
+ bar(f2);
+
+ return 0;
+}
+
+
diff --git a/src/mem/gems_common/RefCountable.hh b/src/mem/gems_common/RefCountable.hh
new file mode 100644
index 000000000..88aba07e6
--- /dev/null
+++ b/src/mem/gems_common/RefCountable.hh
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Virtual base class for things that can be reference counted
+ */
+
+#ifndef REFCOUNTABLE_H
+#define REFCOUNTABLE_H
+
+#include "RefCnt.hh"
+
// Mix-in base for objects managed by RefCnt<T>.  It only stores the
// reference count; the RefCnt handle class is responsible for
// adjusting it and destroying the object when it reaches zero.
class RefCountable {
public:
  // Public Methods

  // New objects start with no references.
  RefCountable() { m_refcnt = 0; }

  // These are used by the RefCnt class to hold the reference count
  // for the object.  They really should be private and accessed by a
  // friend class, but making a template class a friend is awkward.
  void incRefCnt() { ++m_refcnt; }
  void decRefCnt() { --m_refcnt; }
  int getRefCnt() const { return m_refcnt; }
  void setRefCnt(int cnt) { m_refcnt = cnt; }
private:
  // Private Methods

  // Data Members (m_ prefix)
  int m_refcnt; // number of RefCnt handles currently sharing this object
};
+
+#endif //REFCOUNTABLE_H
diff --git a/src/mem/gems_common/Vector.hh b/src/mem/gems_common/Vector.hh
new file mode 100644
index 000000000..744dc698c
--- /dev/null
+++ b/src/mem/gems_common/Vector.hh
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Description: The Vector class is a generic container which acts
+ * much like an array. The Vector class handles dynamic sizing and
+ * resizing as well as performing bounds checking on each access. An
+ * "insertAtBottom" operation is supported to allow adding elements to
+ * the Vector much like you would add new elements to a linked list or
+ * queue.
+ */
+
+#ifndef VECTOR_H
+#define VECTOR_H
+
+#include "std-includes.hh"
+
// Generic bounds-checked dynamic array (see the file comment above).
// Live elements occupy m_vec[0..m_size-1]; m_max_size slots are allocated.
template <class TYPE>
class Vector
{
public:
  Vector();
  explicit Vector(int initial_size); // Construct with an initial max size
  ~Vector();
  const TYPE& ref(int index) const; // Get an element of the vector
  TYPE& ref(int index); // Get an element of the vector
  void clear(); // remove all elements and release the storage
  void sortVector(); // sort all elements using the < operator
  int size() const { return m_size; }
  void setSize(int new_size); // Increase size, reallocates memory as needed
  void expand(int num) { setSize(m_size+num); } // Increase size by num
  void increaseSize(int new_size, const TYPE& reset); // grow; new slots set to 'reset'
  void insertAtTop(const TYPE& element); // prepend one element
  // FIXME - WARNING: insertAtTop is currently O(n) and needs to be fixed
  void insertAtBottom(const TYPE& element); // append one element
  TYPE sum() const; // Uses the += operator to sum all the elements of the vector
  void deletePointers(); // Walks the Vector calling delete on all
                         // elements and sets them to NULL; can only
                         // be used when TYPE is a pointer type.
  void removeFromTop(int num); // removes the first 'num' elements
  void print(std::ostream& out) const;


  // Array Reference operator overloading
  const TYPE& operator[](int index) const { return ref(index); }
  TYPE& operator[](int index) { return ref(index); }

  // Public copy constructor and assignment operator
  Vector(const Vector& vec);
  Vector<TYPE>& operator=(const Vector& vec);
private:

  void grow(int new_max_size); // Expands vector to new_max_size

  // Data members
  TYPE* m_vec;      // Array to hold the elements
  int m_size;       // Number of elements in use
  int m_max_size;   // Size of allocated array
};

template <class TYPE>
std::ostream& operator<<(std::ostream& out, const Vector<TYPE>& vec);

// *********************

// Construct an empty vector with no backing storage.
template <class TYPE>
Vector<TYPE>::Vector()
{
  m_size = 0;
  m_max_size = 0;
  m_vec = NULL;
}

// Construct an empty vector with capacity for 'initial_size' elements.
template <class TYPE>
Vector<TYPE>::Vector(int initial_size)
{
  m_size = 0;
  m_max_size = initial_size;
  m_vec = NULL;
  grow(initial_size);
}

template <class TYPE>
Vector<TYPE>::~Vector()
{
  delete [] m_vec;
}

// Bounds-checked read access.
template <class TYPE>
const TYPE& Vector<TYPE>::ref(int index) const
{
#ifndef NO_VECTOR_BOUNDS_CHECKS
  assert(m_size != 0);
  assert(index < m_size);
  assert(index >= 0);
#endif
  return m_vec[index];
}

// Bounds-checked write access.
template <class TYPE>
TYPE& Vector<TYPE>::ref(int index)
{
#ifndef NO_VECTOR_BOUNDS_CHECKS
  assert(m_size != 0);
  assert(index < m_size);
  assert(index >= 0);
#endif
  return m_vec[index];
}


// Set the logical size; the allocation at least doubles when it must
// grow.  FIXME - this should also be able to shrink the allocation.
template <class TYPE>
void Vector<TYPE>::setSize(int new_size)
{
  if (new_size > m_max_size) {
    grow(std::max((m_max_size+1)*2, new_size));
  }
  m_size = new_size;
#ifndef NO_VECTOR_BOUNDS_CHECKS
  assert(m_size <= m_max_size);
  assert(m_size >= 0);
#endif
}

// Grow to 'new_size' and initialize every newly exposed slot to 'reset'.
template <class TYPE>
inline
void Vector<TYPE>::increaseSize(int new_size, const TYPE& reset)
{
  assert(new_size >= m_size);
  // Match setSize()'s growth test; the original grew on '>=' and so
  // reallocated needlessly when new_size == m_max_size.
  if (new_size > m_max_size) {
    grow(std::max((m_max_size+1)*2, new_size));
  }
  int old_size = m_size;
  m_size = new_size;
  for (int j = old_size; j < m_size; j++) {
    ref(j) = reset;
  }

#ifndef NO_VECTOR_BOUNDS_CHECKS
  assert(m_size <= m_max_size);
  assert(m_size >= 0);
#endif
}

// Remove all elements and release the backing storage.
template <class TYPE>
inline
void Vector<TYPE>::clear()
{
  m_size = 0;
  m_max_size = 0;
  delete [] m_vec;
  m_vec = NULL;
}

// Sort ascending with operator<.  Guarded so an empty vector is a
// no-op; the original indexed m_vec[0] unconditionally, which is
// undefined behavior when m_vec is NULL.
template <class TYPE>
inline
void Vector<TYPE>::sortVector()
{
  if (m_size > 1) {
    std::sort(m_vec, m_vec + m_size);
  }
}

// Prepend 'element', shifting everything down one slot -- O(n).
template <class TYPE>
inline
void Vector<TYPE>::insertAtTop(const TYPE& element)
{
  setSize(m_size+1);
  for (int i = m_size-1; i >= 1; i--) {
    ref(i) = ref(i-1);
  }
  ref(0) = element;
}

// Remove the first 'num' elements (clamped to the current size).
// Negative counts are ignored; the original read out of bounds for them.
template <class TYPE>
inline
void Vector<TYPE>::removeFromTop(int num)
{
  if (num <= 0) {
    return;
  }
  if (num > m_size) {
    num = m_size;
  }
  for (int i = 0; i < m_size - num; i++) {
    m_vec[i] = m_vec[i+num];
  }
  m_size = m_size - num;
}

// Append 'element' at the end.
template <class TYPE>
void Vector<TYPE>::insertAtBottom(const TYPE& element)
{
  setSize(m_size+1);
  ref(m_size-1) = element;
}

// Sum all elements with +=; requires a non-empty vector.
template <class TYPE>
TYPE Vector<TYPE>::sum() const
{
  assert(m_size > 0);
  TYPE sum = ref(0);
  for(int i=1; i<m_size; i++) {
    sum += ref(i);
  }
  return sum;
}

// delete every element and NULL it; TYPE must be a pointer type.
template <class TYPE>
void Vector<TYPE>::deletePointers()
{
  assert(m_size >= 0);
  for(int i=0; i<m_size; i++) {
    // FIXME this function should be a non-member function; as a member
    // it prevents template instantiation for non-pointer types.
    delete ref(i);
    ref(i) = NULL;
  }
}

// Print as "[ e0 e1 ... ]" and flush.
template <class TYPE>
void Vector<TYPE>::print(std::ostream& out) const
{
  out << "[ ";
  for(int i=0; i<m_size; i++) {
    if (i != 0) {
      out << " ";
    }
    out << ref(i);
  }
  out << " ]";
  out << std::flush;
}

// Copy constructor: deep element-by-element copy.
template <class TYPE>
Vector<TYPE>::Vector(const Vector& vec)
{
  m_size = vec.m_size;
  m_max_size = vec.m_max_size;
  if (m_max_size != 0) {
    // operator new[] throws std::bad_alloc on failure, so no NULL check
    // is needed here (the original asserted the result was non-NULL).
    m_vec = new TYPE[m_max_size];
  } else {
    m_vec = NULL;
  }

  for(int i=0; i<m_size; i++) {
    m_vec[i] = vec.m_vec[i]; // Element copy
  }
}

// Assignment: release the old storage, then deep-copy 'vec'.
// Self-assignment is a no-op.
template <class TYPE>
Vector<TYPE>& Vector<TYPE>::operator=(const Vector& vec)
{
  if (this != &vec) {
    // Free the old memory
    delete [] m_vec;

    // Setup the new memory
    m_size = vec.m_size;
    m_max_size = vec.m_max_size;

    if (m_max_size != 0) {
      m_vec = new TYPE[m_max_size];
    } else {
      m_vec = NULL;
    }

    for(int i=0; i<m_size; i++) {
      m_vec[i] = vec.m_vec[i]; // Element copy
    }
  }
  return *this;
}

// Reallocate to exactly 'new_max_size' slots, preserving the first
// m_size elements.
template <class TYPE>
void Vector<TYPE>::grow(int new_max_size)
{
  TYPE* temp_vec;
  m_max_size = new_max_size;
  if (new_max_size != 0) {
    temp_vec = new TYPE[new_max_size];
  } else {
    temp_vec = NULL;
  }

  // Copy the elements of the array
  for(int i=0; i<m_size; i++) {
    temp_vec[i] = m_vec[i]; // Element copy
  }
  delete [] m_vec;
  m_vec = temp_vec;
}

template <class TYPE>
std::ostream& operator<<(std::ostream& out, const Vector<TYPE>& vec)
{
  vec.print(out);
  return out;
}
+
+#endif //VECTOR_H
diff --git a/src/mem/gems_common/calc_host.sh b/src/mem/gems_common/calc_host.sh
new file mode 100755
index 000000000..f7a6116c1
--- /dev/null
+++ b/src/mem/gems_common/calc_host.sh
@@ -0,0 +1,38 @@
+#!/bin/csh -f
+
+# Print a host identifier of the form "<arch>-<os>" (e.g. x86-linux),
+# built from `uname -m` and `uname -s`.
+
+set OS=`uname -s`
+set HOST_ARCH=`uname -m`
+
+# Map the kernel name onto the OS half of the identifier; unrecognized
+# systems fall through with spaces replaced by dashes.
+switch ($OS)
+  case Linux:
+    set OS_PART=linux
+    breaksw
+  case SunOS:
+    set OS_PART=sol8-64
+    breaksw
+  case OSF1:
+    set OS_PART=tru64-gcc
+    breaksw
+  default:
+    set OS_PART=`echo $OS | sed 's/ /-/g'`
+endsw
+
+# Map the machine hardware name onto the architecture half; same
+# dash-substitution fallback for unrecognized hardware.
+switch ($HOST_ARCH)
+  case i586:
+    set ARCH=x86
+    breaksw
+  case i686:
+    set ARCH=x86
+    breaksw
+  case x86_64:
+    set ARCH=amd64
+    breaksw
+  case sun4u:
+    set ARCH=v9
+    breaksw
+  default:
+    set ARCH=`echo $HOST_ARCH | sed 's/ /-/g'`
+endsw
+
+echo $ARCH-$OS_PART
+
diff --git a/src/mem/gems_common/ioutil/attrlex.ll b/src/mem/gems_common/ioutil/attrlex.ll
new file mode 100644
index 000000000..293350b23
--- /dev/null
+++ b/src/mem/gems_common/ioutil/attrlex.ll
@@ -0,0 +1,229 @@
+/*
+ Copyright (C) 1999-2005 by Mark D. Hill and David A. Wood for the
+ Wisconsin Multifacet Project. Contact: gems@cs.wisc.edu
+ http://www.cs.wisc.edu/gems/
+
+ --------------------------------------------------------------------
+
+ This file is a component of the Multifacet GEMS (General
+ Execution-driven Multiprocessor Simulator) software toolset
+ originally developed at the University of Wisconsin-Madison.
+
+ Ruby was originally developed primarily by Milo Martin and Daniel
+ Sorin with contributions from Ross Dickson, Carl Mauer, and Manoj
+ Plakal.
+
+ SLICC was originally developed by Milo Martin with substantial
+ contributions from Daniel Sorin.
+
+ Opal was originally developed by Carl Mauer based upon code by
+ Craig Zilles.
+
+ Substantial further development of Multifacet GEMS at the
+ University of Wisconsin was performed by Alaa Alameldeen, Brad
+ Beckmann, Ross Dickson, Pacia Harper, Milo Martin, Michael Marty,
+ Carl Mauer, Kevin Moore, Manoj Plakal, Daniel Sorin, Min Xu, and
+ Luke Yen.
+
+ --------------------------------------------------------------------
+
+ If your use of this software contributes to a published paper, we
+ request that you (1) cite our summary paper that appears on our
+ website (http://www.cs.wisc.edu/gems/) and (2) e-mail a citation
+ for your published paper to gems@cs.wisc.edu.
+
+ If you redistribute derivatives of this software, we request that
+ you notify us and either (1) ask people to register with us at our
+ website (http://www.cs.wisc.edu/gems/) or (2) collect registration
+ information and periodically send it to us.
+
+ --------------------------------------------------------------------
+
+ Multifacet GEMS is free software; you can redistribute it and/or
+ modify it under the terms of version 2 of the GNU General Public
+ License as published by the Free Software Foundation.
+
+ Multifacet GEMS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the Multifacet GEMS; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA
+
+ The GNU General Public License is contained in the file LICENSE.
+
+### END HEADER ###
+*/
+
+
+%option noyywrap
+
+ALPHADIGIT [^\:\,\(\)\n\t\(\) \0\#]
+HEXDIGIT [0-9a-fA-Fx]
+NEWLINE [\n]
+WHITESPACE [ \t]
+
+%{
+
+#ifdef IS_RUBY
+#include "Global.hh"
+#endif
+
+using namespace std;
+#include <string>
+#include <map>
+#include <stdlib.h>
+
+// Maurice
+// extern "C" {
+// #include "simics/api.h"
+// };
+
+#include "FakeSimicsDataTypes.hh"
+
+// CM: simics 1.6.5 API redefines fwrite, much to my chagrin
+#undef fwrite
+#undef printf
+#include "attrparse.h"
+
+#define MAX_INCLUDE_DEPTH 10
+
+/** global result of parsing file */
+extern attr_value_t g_attr_map;
+
+extern int atparse(void);
+
+static int linenum=1; /* the current line number */
+static int colnum=1; /* the current column number */
+static YY_BUFFER_STATE include_stack[MAX_INCLUDE_DEPTH];
+static int include_stack_ptr = 0;
+static char g_relative_include_path[256];
+
+
+// forward declaration of aterror
+void aterror(const char *msg);
+%}
+
+%x SLASHCOMMENT INCLUDE
+
+%%
+
+%{ /* PATTERNS FOR STRING TOKENS */
+%}
+
+"//".*[\n] { linenum++; colnum=1; } /* C++ style comments */
+
+\#include { colnum+=yyleng; BEGIN(INCLUDE); }
+<INCLUDE>{WHITESPACE}* { colnum+=yyleng; }
+<INCLUDE>[^ \t\n]+ {
+ // '#include <file>' handling: push the current buffer onto the
+ // include stack, then try opening the name literally, then relative
+ // to g_relative_include_path, then under its config/ subdirectory.
+ // should really be FILEIO_MAX_FILENAME or MAX_NAME
+ char str[256];
+ if ( include_stack_ptr >= MAX_INCLUDE_DEPTH )
+ {
+ ERROR_OUT( "Includes nested too deeply" );
+ exit( 1 );
+ }
+ include_stack[include_stack_ptr++] = YY_CURRENT_BUFFER;
+
+ yyin = fopen( yytext, "r" );
+ if ( ! yyin ) {
+ sprintf( str, "%s%s", g_relative_include_path, yytext );
+ yyin = fopen( str, "r" );
+ }
+ if ( ! yyin ) {
+ sprintf( str, "%s%s%s", g_relative_include_path, "config/", yytext );
+ yyin = fopen( str, "r" );
+ }
+ if ( ! yyin ) {
+ ERROR_OUT("unable to open included file: %s or %s\n", yytext, str);
+ aterror("file open error.\n");
+ }
+ yy_switch_to_buffer(yy_create_buffer( yyin, YY_BUF_SIZE ));
+ BEGIN(INITIAL);
+ }
+<<EOF>> {
+ // End of an included file: pop the include stack and resume the
+ // enclosing buffer; terminate the scan when the stack is empty.
+ if ( --include_stack_ptr < 0 ) {
+ yyterminate();
+ } else {
+ yy_delete_buffer( YY_CURRENT_BUFFER );
+ fclose(yyin);
+ yy_switch_to_buffer(include_stack[include_stack_ptr] );
+ }
+ }
+
+\( { colnum+=yyleng; return (LPAREN); }
+\) { colnum+=yyleng; return (RPAREN); }
+\: { colnum+=yyleng; return (':'); }
+\, { colnum+=yyleng; return (','); }
+{HEXDIGIT}+ {
+ // INTEGER token: strtoull with base 0 auto-detects "0x" hex vs decimal.
+ // The attr_value_t is heap-allocated; the parser takes ownership.
+ colnum+=yyleng;
+ attr_value_t *val = (attr_value_t *)
+ malloc( sizeof(attr_value_t) );
+ memset( val, 0, sizeof(attr_value_t) );
+ atlval.attrval = val;
+ val->kind = Sim_Val_Integer;
+ val->u.integer = strtoull( yytext, NULL, 0 );
+ return (INTEGER); }
+{ALPHADIGIT}+ {
+ // STRING token: any run of characters not in the punctuation set;
+ // the strdup'ed text is owned by the returned attr_value_t.
+ colnum+=yyleng;
+ attr_value_t *val = (attr_value_t *)
+ malloc( sizeof(attr_value_t) );
+ memset( val, 0, sizeof(attr_value_t) );
+ atlval.attrval = val;
+ val->kind = Sim_Val_String;
+ val->u.string = strdup(yytext);
+ return (STRING); }
+
+%{ /* OTHER PATTERNS */
+%}
+
+{WHITESPACE}+ {colnum += yyleng;}
+{NEWLINE} {linenum++; colnum = 1;}
+
+%%
+
+// Reset scanner state (line/column counters, include stack) so the lexer
+// can be safely reused for multiple parses in one process.
+extern "C" void parseInitialize( void )
+{
+ // since no global variables are set in simics, we must do it manually
+ // this is also necessary now that the parser can be used more than once.
+ // (it is used to parse the defaults, and can be used after that)
+ linenum = 1;
+ colnum = 1;
+ include_stack_ptr = 0;
+}
+
+// Parse an already-open configuration file; 'relative_include_path' is
+// remembered for resolving nested #include directives. On success
+// (return 0) the parsed attribute tree is copied into *myTable.
+extern "C" int parseAttrFile( FILE *inputFile, const char *relative_include_path, attr_value_t *myTable )
+{
+ parseInitialize();
+ // NOTE(review): strncpy(…, 255) leaves the buffer unterminated when the
+ // path is >= 255 chars -- assumes shorter paths in practice; confirm.
+ strncpy( g_relative_include_path, relative_include_path, 255 );
+
+ int result;
+ yyin = inputFile;
+ YY_BUFFER_STATE scan_state = yy_create_buffer( yyin, YY_BUF_SIZE );
+ yy_switch_to_buffer( scan_state );
+ result = atparse();
+ *myTable = g_attr_map;
+ yy_delete_buffer( scan_state );
+ return (result);
+}
+
+// Parse a configuration held in an in-memory NUL-terminated string via
+// flex's yy_scan_string(). Returns atparse()'s result; on success the
+// parsed tree lands in *myTable (a copy of the global g_attr_map).
+extern "C" int parseAttrString( const char *str, attr_value_t *myTable )
+{
+ parseInitialize();
+
+ int result;
+ YY_BUFFER_STATE scan_state = yy_scan_string( str );
+ result = atparse();
+ *myTable = g_attr_map;
+ yy_delete_buffer( scan_state );
+ return (result);
+}
+
+// Parser/lexer error callback: reports the current line:column position.
+// NOTE(review): no separator before %s, so output reads "config file<msg>".
+extern void aterror(const char *msg)
+{
+ ERROR_OUT("%d:%d: ERROR while parsing config file%s\n", linenum, colnum, msg );
+}
+
diff --git a/src/mem/gems_common/ioutil/attrparse.yy b/src/mem/gems_common/ioutil/attrparse.yy
new file mode 100644
index 000000000..8a0190e06
--- /dev/null
+++ b/src/mem/gems_common/ioutil/attrparse.yy
@@ -0,0 +1,232 @@
+/*
+ Copyright (C) 1999-2005 by Mark D. Hill and David A. Wood for the
+ Wisconsin Multifacet Project. Contact: gems@cs.wisc.edu
+ http://www.cs.wisc.edu/gems/
+
+ --------------------------------------------------------------------
+
+ This file is a component of the Multifacet GEMS (General
+ Execution-driven Multiprocessor Simulator) software toolset
+ originally developed at the University of Wisconsin-Madison.
+
+ Ruby was originally developed primarily by Milo Martin and Daniel
+ Sorin with contributions from Ross Dickson, Carl Mauer, and Manoj
+ Plakal.
+
+ SLICC was originally developed by Milo Martin with substantial
+ contributions from Daniel Sorin.
+
+ Opal was originally developed by Carl Mauer based upon code by
+ Craig Zilles.
+
+ Substantial further development of Multifacet GEMS at the
+ University of Wisconsin was performed by Alaa Alameldeen, Brad
+ Beckmann, Ross Dickson, Pacia Harper, Milo Martin, Michael Marty,
+ Carl Mauer, Kevin Moore, Manoj Plakal, Daniel Sorin, Min Xu, and
+ Luke Yen.
+
+ --------------------------------------------------------------------
+
+ If your use of this software contributes to a published paper, we
+ request that you (1) cite our summary paper that appears on our
+ website (http://www.cs.wisc.edu/gems/) and (2) e-mail a citation
+ for your published paper to gems@cs.wisc.edu.
+
+ If you redistribute derivatives of this software, we request that
+ you notify us and either (1) ask people to register with us at our
+ website (http://www.cs.wisc.edu/gems/) or (2) collect registration
+ information and periodically send it to us.
+
+ --------------------------------------------------------------------
+
+ Multifacet GEMS is free software; you can redistribute it and/or
+ modify it under the terms of version 2 of the GNU General Public
+ License as published by the Free Software Foundation.
+
+ Multifacet GEMS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the Multifacet GEMS; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA
+
+ The GNU General Public License is contained in the file LICENSE.
+
+### END HEADER ###
+*/
+
+
+
+%{
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+#ifdef IS_RUBY
+#include "Global.hh"
+#endif
+
+using namespace std;
+#include <string>
+#include <map>
+#include <stdlib.h>
+
+// Maurice
+// extern "C" {
+// #include "simics/api.h"
+// };
+
+#include "FakeSimicsDataTypes.hh"
+
+#include "confio.hh"
+
+// CM FIX: if I wasn't working on a paper: I'd re-write the grammar to
+// be left (or right) recursive, which ever is more efficient
+// This only affects extremely large cache sizes / BP sizes on read in.
+#define YYMAXDEPTH 100000
+
+extern char* attext;
+
+extern void aterror(const char *);
+extern int atlex();
+attr_value_t g_attr_map;
+
+extern void fprintAttr( FILE *fp, attr_value_t attr );
+
+%}
+
+/*------------------------------------------------------------------------*/
+/* Union declarations */
+/*------------------------------------------------------------------------*/
+// The types of the tokens and nonterminals
+%union {
+ attr_value_t *attrval;
+};
+
+%token <attrval> STRING INTEGER
+%token MY_END LPAREN RPAREN
+
+%type <attrval> confmapping confpair attributes attrlist
+%%
+
+/* Top-level rule: copy the finished mapping into the global result and
+ release the temporary list node built while reducing 'confmapping'. */
+conffile : confmapping
+{
+ g_attr_map = *($1);
+ free( $1 ); /* fixed: the call was missing its ';', a C syntax error in the generated parser */
+}
+
+/* A sequence of name:value pairs accumulated into one Sim_Val_List.
+ Each reduction copies the old vector and appends the new pair, so
+ building an n-entry map is O(n^2) copies -- acceptable at config size. */
+confmapping : confmapping confpair
+{
+ attr_value_t *newattr = mallocAttribute(1);
+ newattr->kind = Sim_Val_List;
+ if ( $1 == NULL ) {
+ // First pair: adopt $2's heap node directly as the 1-element vector.
+ newattr->u.list.size = 1;
+ newattr->u.list.vector = $2;
+ } else {
+ // add the latest mapping to the return mapping
+ uint32 newsize = $1->u.list.size + 1;
+ attr_value_t *vector = mallocAttribute( newsize );
+ newattr->u.list.size = newsize;
+ newattr->u.list.vector = vector;
+ for (uint32 i = 0; i < newsize - 1; i++) {
+ vector[i] = $1->u.list.vector[i];
+ }
+ vector[newsize - 1] = *($2);
+ free( $1->u.list.vector );
+ free( $1 );
+ free( $2 );
+ }
+ $$ = newattr;
+}
+ | // nothing
+{
+ $$ = NULL;
+}
+
+/* One "name: value" entry, encoded as a 2-element list [name, value].
+ The lexer heap-allocated $1/$3; their payloads are shallow-copied into
+ the new vector, then the now-empty wrapper nodes are freed. */
+confpair : STRING ':' attributes
+{
+ attr_value_t *newattr = mallocAttribute(1);
+ newattr->kind = Sim_Val_List;
+ newattr->u.list.size = 2;
+ newattr->u.list.vector = mallocAttribute(2);
+ newattr->u.list.vector[0] = *($1);
+ newattr->u.list.vector[1] = *($3);
+ free( $1 );
+ free( $3 );
+ $$ = newattr;
+}
+
+/* A value: a scalar (INTEGER/STRING pass through unchanged), or a
+ parenthesized list. The list case flattens the CONF_ATTR_SINGLE /
+ CONF_ATTR_PAIR cons-cell chain built by 'attrlist' into one flat
+ Sim_Val_List vector. */
+attributes : INTEGER
+{
+ $$ = $1;
+}
+ | STRING
+{
+ $$ = $1;
+}
+ | LPAREN attrlist RPAREN
+{
+ attr_value_t *newattr = mallocAttribute(1);
+ newattr->kind = Sim_Val_List;
+ if ( $2->kind != CONF_ATTR_SINGLE &&
+ $2->kind != CONF_ATTR_PAIR ) {
+ // Not a cons cell: wrap the lone attribute as a 1-element list.
+ newattr->u.list.size = 1;
+ newattr->u.list.vector = $2;
+ } else {
+ // Walk the chain: SINGLE terminates it, PAIR holds [head, rest].
+ newattr->u.list.size = $2->u.list.size;
+ newattr->u.list.vector = mallocAttribute(newattr->u.list.size);
+ attr_value_t *curattr = $2;
+ uint32 i = 0;
+ while ( i < newattr->u.list.size ) {
+ if (curattr->kind == CONF_ATTR_SINGLE) {
+ newattr->u.list.vector[i] = curattr->u.list.vector[0];
+ i++;
+
+ curattr = NULL;
+ } else if (curattr->kind == CONF_ATTR_PAIR) {
+ newattr->u.list.vector[i] = curattr->u.list.vector[0];
+ i++;
+ if ( i < newattr->u.list.size )
+ curattr = &(curattr->u.list.vector[1]);
+ else
+ curattr = NULL;
+
+ } else {
+ ERROR_OUT("error: unknown kind in pair: %d\n", curattr->kind);
+ ASSERT(0);
+ }
+ }
+ // FIX memory leak: around 600 KB
+ // freeAttribute( $2 ); // with gcc-3.4 this free call tries to free memory from the stack
+ }
+ $$ = newattr;
+}
+
+/* Comma-separated list, built right-recursively as a cons chain:
+ CONF_ATTR_SINGLE is the terminator, CONF_ATTR_PAIR is [head | rest].
+ Note: a PAIR's vector always has 2 slots, while u.list.size records the
+ total chain length -- 'attributes' above relies on this encoding when
+ flattening, so size != allocated-slots is intentional here. */
+attrlist : attributes
+{
+ attr_value_t *newattr = mallocAttribute(1);
+ newattr->kind = CONF_ATTR_SINGLE;
+ newattr->u.list.size = 1;
+ newattr->u.list.vector = $1;
+ $$ = newattr;
+}
+ | attributes ',' attrlist
+{
+ // allocate the pair ( x , y ) attribute
+ attr_value_t *newattr = mallocAttribute(1);
+ int newsize = $3->u.list.size + 1;
+ attr_value_t *vector = mallocAttribute(2);
+ newattr->kind = CONF_ATTR_PAIR;
+ newattr->u.list.size = newsize;
+ newattr->u.list.vector = vector;
+
+ // assign the LH attribute
+ vector[0] = *($1);
+ vector[1] = *($3);
+ free( $1 );
+ free( $3 );
+ $$ = newattr;
+}
diff --git a/src/mem/gems_common/ioutil/confio.cc b/src/mem/gems_common/ioutil/confio.cc
new file mode 100644
index 000000000..68d44197a
--- /dev/null
+++ b/src/mem/gems_common/ioutil/confio.cc
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * saves configuration information for later runs
+ */
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+#ifdef IS_OPAL
+#include "hfa.hh"
+#endif
+
+#ifdef IS_RUBY
+#include "Global.hh"
+#define SIM_HALT ASSERT(0)
+#endif
+
+#ifdef IS_TOURMALINE
+#include "Tourmaline_Global.hh"
+#endif
+
+using namespace std;
+#include <string>
+#include <map>
+#include <stdlib.h>
+
+// Maurice
+// extern "C" {
+// #include "global.hh"
+// #include "simics/api.hh"
+//
+// #ifdef SIMICS22X
+// #include "sparc_api.hh"
+// #endif
+// #ifdef SIMICS30
+// #ifdef SPARC
+// #include "sparc.hh"
+// #else
+// #include "x86.hh"
+// #endif
+// #endif
+// };
+
+#include "FakeSimicsDataTypes.hh"
+
+#include "confio.hh"
+
+/*------------------------------------------------------------------------*/
+/* Macro declarations */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Variable declarations */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Forward declarations */
+/*------------------------------------------------------------------------*/
+
+// C++ Template: explicit instantiation
+template class map<string, confnode_t *>;
+
+// These functions are defined in parser/attrlex.l
+extern "C" int parseAttrFile( FILE *inputFile, const char *relative_include_path, attr_value_t *myTable );
+extern "C" int parseAttrString( const char *str, attr_value_t *myTable );
+
+/*------------------------------------------------------------------------*/
+/* Constructor(s) / destructor */
+/*------------------------------------------------------------------------*/
+
+//**************************************************************************
+// Constructor: starts with an empty attribute table and verbosity off.
+confio_t::confio_t( )
+{
+ m_verbose = false;
+}
+
+//**************************************************************************
+// Destructor: frees every confnode_t in the table. free() matches the
+// malloc() in register_attribute(); the map's own nodes clean themselves up.
+confio_t::~confio_t( )
+{
+ ConfTable::iterator iter;
+
+ for ( iter = m_table.begin(); iter != m_table.end(); iter++ ) {
+ confnode_t *cfnode = (*iter).second;
+ free( cfnode );
+ }
+}
+
+/*------------------------------------------------------------------------*/
+/* Public methods */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Accessor(s) / mutator(s) */
+/*------------------------------------------------------------------------*/
+
+//**************************************************************************
+// Register a named attribute with its getter/setter callbacks and their
+// opaque data pointers. Duplicate names are rejected with a warning (the
+// first registration wins). Always returns 0.
+int confio_t::register_attribute( const char *name,
+ get_confio_t get_attr, void *get_attr_data,
+ set_confio_t set_attr, void *set_attr_data )
+{
+ confnode_t *newnode;
+ if ( m_table.find(name) == m_table.end() ) {
+ if (m_verbose)
+ DEBUG_OUT(" registering checkpoint attribute: \"%s\"\n", name);
+
+ // add a new entry to the table
+ // NOTE(review): malloc result is not checked before dereference.
+ newnode = (confnode_t *) malloc( sizeof( confnode_t ) );
+ newnode->get_attr = get_attr;
+ newnode->set_attr = set_attr;
+ newnode->set_attr_data = set_attr_data;
+ newnode->get_attr_data = get_attr_data;
+ newnode->attr_is_set = false;
+ string key(name);
+ m_table[key] = newnode;
+ } else {
+ ERROR_OUT(" warning: confio: adding existing conf node: %s\n", name);
+ }
+ return 0;
+}
+
+//**************************************************************************
+// Print one attribute to 'fp' in the config-file syntax the parser reads
+// back (integers in hex, lists parenthesized and comma-separated).
+void fprintAttr( FILE *fp, attr_value_t attr )
+{
+ switch (attr.kind) {
+ case Sim_Val_Invalid:
+ fprintf(fp, "invalid");
+ break;
+
+ case Sim_Val_String:
+ fprintf(fp, "%s", attr.u.string);
+ break;
+
+ case Sim_Val_Integer:
+ fprintf(fp, "0x%llx", attr.u.integer);
+ break;
+
+ case Sim_Val_Floating:
+ // NOTE(review): prints u.integer, i.e. the raw bit pattern of the
+ // float member -- presumably intended for exact round-tripping; confirm.
+ fprintf(fp, "0x%llx", attr.u.integer);
+ break;
+
+ case Sim_Val_List:
+ fprintf(fp, "(");
+ for (uint32 i = 0; i < attr.u.list.size; i++) {
+ fprintAttr(fp, attr.u.list.vector[i]);
+ if (i != attr.u.list.size -1) {
+ fprintf(fp, ", ");
+ }
+ }
+ fprintf(fp, ")");
+ break;
+
+ default:
+ ERROR_OUT("fprintAttr: unknown/unimplemented attribute %d\n", attr.kind);
+ }
+}
+
+//**************************************************************************
+// Recursively free everything owned *under* attr (strings, list vectors).
+// Does not free attr itself -- the caller owns that allocation.
+void freeAttribute( attr_value_t *attr )
+{
+ switch (attr->kind) {
+ case Sim_Val_Invalid:
+ break;
+
+ case Sim_Val_String:
+ free( (char *) attr->u.string );
+ break;
+
+ case Sim_Val_Integer:
+ break;
+
+ case Sim_Val_Floating:
+ break;
+
+ case Sim_Val_List:
+ for (uint32 i = 0; i < attr->u.list.size; i++) {
+ freeAttribute( &(attr->u.list.vector[i]) );
+ }
+ free( attr->u.list.vector );
+ break;
+
+ default:
+ ERROR_OUT("freeAttr: unknown/unimplemented attribute %d\n", attr->kind);
+ }
+}
+
+/**
+ * Allocates, and initializes a attribute value.
+ * @param number The number of values to allocate.
+ * @return A pointer to the newly allocated structure.
+ */
+//**************************************************************************
+attr_value_t *mallocAttribute( uint32 number )
+{
+ attr_value_t *newattr = (attr_value_t *) malloc( number *
+ sizeof(attr_value_t) );
+ if ( newattr == NULL ) {
+ // Out of memory is treated as fatal for the whole simulator.
+ ERROR_OUT( "confio: mallocAttribute: out of memory\n" );
+ exit(1);
+ }
+ // Zero-fill so every slot starts as Sim_Val_Invalid with null pointers.
+ memset( newattr, 0, number*sizeof(attr_value_t) );
+ return (newattr);
+}
+
+
+//**************************************************************************
+// Debug dump of a parsed name/value map: prints "name: value" per entry.
+// Silently returns on any shape mismatch (non-list, or entries that are
+// not [name, value] pairs).
+void fprintMap( FILE *fp, attr_value_t attr )
+{
+ attr_value_t name;
+ attr_value_t value;
+
+ if (attr.kind != Sim_Val_List)
+ return;
+
+ // NOTE(review): signed i vs. unsigned u.list.size comparison.
+ for (int i = 0; i < attr.u.list.size; i++) {
+
+ if (attr.u.list.vector[i].kind != Sim_Val_List ||
+ attr.u.list.vector[i].u.list.size != 2)
+ return;
+
+ name = attr.u.list.vector[i].u.list.vector[0];
+ value = attr.u.list.vector[i].u.list.vector[1];
+ fprintf( fp, " %s: ", name.u.string);
+ fprintAttr( fp, value );
+ fprintf( fp, "\n");
+ }
+}
+
+/**
+ * write a configuration file: e.g. save state
+ */
+//**************************************************************************
+// Save current state: query every registered attribute via its getter and
+// write "name: value" lines. NULL filename writes to stdout. Returns 0 on
+// success, -1 if the output file cannot be opened.
+int confio_t::writeConfiguration( const char *outputFilename )
+{
+ FILE *fp;
+ ConfTable::iterator iter;
+ confnode_t *cfnode;
+ attr_value_t attr;
+
+ memset( &attr, 0, sizeof(attr_value_t) );
+ if ( outputFilename == NULL ) {
+ fp = stdout;
+ } else {
+ fp = fopen( outputFilename, "w" );
+ if ( fp == NULL ) {
+ ERROR_OUT("error: writeConfiguration: unable to open file %s\n",
+ outputFilename );
+ return (-1);
+ }
+ }
+
+ for (iter = m_table.begin(); iter != m_table.end(); iter++) {
+ fprintf( fp, " %s: ", (*iter).first.c_str() );
+ cfnode = (*iter).second;
+ attr = (*(cfnode->get_attr))( cfnode->get_attr_data, NULL );
+ fprintAttr( fp, attr );
+ fprintf(fp, "\n");
+ }
+
+ if ( outputFilename != NULL ) {
+ // wrote to a file: now close it! (never close stdout)
+ fclose( fp );
+ }
+ return 0;
+}
+
+/**
+ * read state from an existing configuration file
+ */
+//**************************************************************************
+// Restore state from a config file. The file is searched for literally,
+// then under relativeIncludePath, then under its config/ subdirectory
+// (mirroring the lexer's #include search order). Returns the parser's
+// result code, or -1 if no candidate file could be opened.
+int confio_t::readConfiguration( const char *inputFilename,
+ const char *relativeIncludePath )
+{
+ // parse the input stream
+ FILE *fp;
+ // NOTE(review): fixed 256-byte buffer filled via sprintf -- assumes
+ // path + filename stay short; no overflow guard.
+ char relativeFilename[256];
+
+ fp = fopen( inputFilename, "r" );
+ if ( fp == NULL ) {
+ sprintf( relativeFilename, "%s%s", relativeIncludePath, inputFilename );
+ fp = fopen( relativeFilename, "r" );
+ }
+ if ( fp == NULL ) {
+ sprintf( relativeFilename, "%s%s%s", relativeIncludePath, "config/",
+ inputFilename );
+ fp = fopen( relativeFilename, "r" );
+ }
+ if ( fp == NULL ) {
+ ERROR_OUT("error: readConfiguration: unable to open file %s or %s\n",
+ inputFilename, relativeFilename);
+ return (-1);
+ }
+
+ attr_value_t *myattr = mallocAttribute(1);
+#ifdef MODINIT_VERBOSE
+ DEBUG_OUT("confio_t() parsing conf file\n");
+#endif
+ int rc = parseAttrFile( fp, relativeIncludePath, myattr );
+#ifdef MODINIT_VERBOSE
+ DEBUG_OUT("confio_t() parse completed\n");
+#endif
+ if ( rc == 0 ) {
+ // Push parsed values into registered attributes, then free the tree.
+ applyConfiguration( myattr );
+ freeAttribute( myattr );
+ free(myattr);
+ }
+
+ fclose( fp );
+#ifdef MODINIT_VERBOSE
+ DEBUG_OUT("confio_t() completed\n");
+#endif
+ return (rc);
+}
+
+/**
+ * read state from a configuration string
+ */
+//**************************************************************************
+// Restore state from an in-memory configuration string. Returns the
+// parser's result code, or -1 for a NULL buffer.
+int confio_t::readConfigurationString( const char *inputBuffer )
+{
+ if ( inputBuffer == NULL ) {
+ ERROR_OUT( "error: readConfiguration: NULL inputBuffer\n" );
+ return (-1);
+ }
+
+ attr_value_t *myattr = mallocAttribute(1);
+#ifdef MODINIT_VERBOSE
+ DEBUG_OUT("confio_t() parsing conf string\n");
+#endif
+
+ int rc = parseAttrString( inputBuffer, myattr );
+#ifdef MODINIT_VERBOSE
+ DEBUG_OUT("confio_t() parse completed\n");
+#endif
+ if ( rc == 0 ) {
+ // Push parsed values into registered attributes, then free the tree.
+ applyConfiguration( myattr );
+ freeAttribute( myattr );
+ free(myattr);
+ }
+ return (rc);
+}
+
+//**************************************************************************
+// Warn (non-fatally) about every registered attribute whose setter was
+// never invoked by applyConfiguration().
+void confio_t::checkInitialization( void )
+{
+ ConfTable::iterator iter;
+ confnode_t *cfnode;
+
+ for (iter = m_table.begin(); iter != m_table.end(); iter++) {
+ cfnode = (*iter).second;
+ if ( !cfnode->attr_is_set ) {
+ DEBUG_OUT(" warning: %s is not set in configuration file.\n", (*iter).first.c_str() );
+ }
+ }
+}
+
+/*------------------------------------------------------------------------*/
+/* Private methods */
+/*------------------------------------------------------------------------*/
+
+//**************************************************************************
+// Walk a parsed [name, value] list and invoke the registered setter for
+// each known name, marking it as set. Unknown names and malformed entries
+// are reported and skipped. Returns 0, or -1 if attr is not a usable list.
+int confio_t::applyConfiguration( attr_value_t *attr )
+{
+ confnode_t *cfnode;
+ attr_value_t name;
+ attr_value_t value;
+ set_error_t seterr;
+
+#ifdef MODINIT_VERBOSE
+ DEBUG_OUT("confio_t() data in memory\n");
+ fprintMap( stdout, *attr );
+#endif
+
+ // apply the configuration to the m_table
+ if (attr->kind != Sim_Val_List ||
+ attr->u.list.size <= 0) {
+ ERROR_OUT("readconfiguration: internal error #1\n");
+ return -1;
+ }
+
+ for (int i = 0; i < attr->u.list.size; i++) {
+
+ if (attr->u.list.vector[i].kind != Sim_Val_List ||
+ attr->u.list.vector[i].u.list.size != 2) {
+ ERROR_OUT("readconfiguration: illegal configuration kind:%d size:%lld\n",
+ attr->u.list.vector[i].kind,
+ attr->u.list.vector[i].u.list.size);
+ continue;
+ }
+
+ name = attr->u.list.vector[i].u.list.vector[0];
+ value = attr->u.list.vector[i].u.list.vector[1];
+ string newstr((char *) name.u.string);
+ if ( m_table.find(newstr) != m_table.end()) {
+
+ // set the value found in the configuration
+ cfnode = m_table[newstr];
+ seterr = (*cfnode->set_attr)( cfnode->set_attr_data, NULL,
+ &(value) );
+ if ( seterr == Sim_Set_Ok ) {
+ cfnode->attr_is_set = true;
+ if (m_verbose)
+ DEBUG_OUT("configuration set for: %s\n", name.u.string);
+ } else {
+ ERROR_OUT("error: \"%s\" unable to set value: %d\n",
+ name.u.string, (int) seterr);
+ }
+ } else {
+ ERROR_OUT("error: \"%s\" not found. unable to set value.\n",
+ name.u.string);
+ }
+ }
+ return 0;
+}
+
+/*------------------------------------------------------------------------*/
+/* Static methods */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Global functions */
+/*------------------------------------------------------------------------*/
+
+
+/** [Memo].
+ * [Internal Documentation]
+ */
+//**************************************************************************
+
diff --git a/src/mem/gems_common/ioutil/confio.hh b/src/mem/gems_common/ioutil/confio.hh
new file mode 100644
index 000000000..143c4da8b
--- /dev/null
+++ b/src/mem/gems_common/ioutil/confio.hh
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CONFIO_H_
+#define _CONFIO_H_
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+#include "FakeSimicsDataTypes.hh"
+
+/*------------------------------------------------------------------------*/
+/* Macro declarations */
+/*------------------------------------------------------------------------*/
+
+/// constant for attribute parsing: a (x) single value
+const attr_kind_t CONF_ATTR_SINGLE = (attr_kind_t) (Sim_Val_Object + 1);
+/// constant for attribute parsing: a (x,y) pair of values
+const attr_kind_t CONF_ATTR_PAIR = (attr_kind_t) (Sim_Val_Object + 2);
+
+/*------------------------------------------------------------------------*/
+/* Class declaration(s) */
+/*------------------------------------------------------------------------*/
+
+/*
+ * Functions for modifying the micro-architectural configuation of
+ * a class.
+ */
+
+/// function for getting the configuration value
+typedef attr_value_t (*get_confio_t)( void *ptr, void *obj );
+/// function for setting the configuration value
+typedef set_error_t (*set_confio_t)( void *ptr, void *obj,
+ attr_value_t *value );
+
+/// a struture containing the functional callbacks for each conf node
+typedef struct confnode {
+ get_confio_t get_attr;
+ set_confio_t set_attr;
+ void *set_attr_data;
+ void *get_attr_data;
+ bool attr_is_set;
+} confnode_t;
+
+/// a mapping from a string to a configuration structure
+typedef map<string, confnode_t *> ConfTable;
+
+/**
+* Configuration state saving: allows the user to save the micro-architectural
+* state in a text file for later runs. This file is also used to set
+* globals during simulation.
+*
+* @author cmauer
+* @version $Id$
+*/
+class confio_t {
+
+public:
+
+
+ /**
+ * @name Constructor(s) / destructor
+ */
+ //@{
+
+ /**
+ * Constructor: creates object
+ */
+ confio_t();
+
+ /**
+ * Destructor: frees object.
+ */
+ ~confio_t();
+ //@}
+
+ /**
+ * @name Methods
+ */
+ //@{
+ //@}
+
+ /**
+ * @name Accessor(s) / mutator(s)
+ */
+ //@{
+ /**
+ * register a configuration variable with the configuration manager.
+ * @param name The key under which the attribute is registered
+ * @param get_attr A function to get the attribute value
+ * @param get_attr_data Void pointer, available to get_attr
+ * @param set_attr A function to set the attribute value
+ * @param set_attr_data Void pointer, available to set_attr
+ * @return 0 always; a duplicate name only prints a warning
+ */
+ int register_attribute( const char *name,
+ get_confio_t get_attr, void *get_attr_data,
+ set_confio_t set_attr, void *set_attr_data );
+
+ /**
+ * Set verbosity of the configuration
+ * @param verbose True causes more info to be printed out, False doesn't
+ */
+ void setVerbosity( bool verbose ) {
+ m_verbose = verbose;
+ }
+
+ /**
+ * write a configuration file: e.g. save state
+ */
+ int writeConfiguration( const char *outputFilename );
+
+ /**
+ * read state from an existing configuration file
+ * @param inputFilename The file to read
+ * @param relativeIncludePath The path to search on 'include' statements
+ */
+ int readConfiguration( const char *inputFilename,
+ const char *relativeIncludePath );
+
+ /**
+ * read state from a string
+ */
+ int readConfigurationString( const char *inputBuffer );
+
+ /**
+ * check that each registered configuration is set (reports a warning if
+ * they are not.)
+ */
+ void checkInitialization( void );
+ //@}
+
+private:
+ /**
+ * Apply an attribute list to the configuration table
+ */
+ int applyConfiguration( attr_value_t *attr );
+
+ /// configuration table: contains a map from a string -> conf node
+ ConfTable m_table;
+
+ /// if false, nothing is printed under normal operation
+ bool m_verbose;
+};
+
+/*------------------------------------------------------------------------*/
+/* Global variables */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Global functions */
+/*------------------------------------------------------------------------*/
+
+/**
+ * Allocates an array of attributes.
+ */
+attr_value_t *mallocAttribute( uint32 number );
+
+/**
+ * Walks an attribute tree, freeing all memory under attr. Does not free
+ * attr itself.
+ */
+void freeAttribute( attr_value_t *attr );
+
+#endif /* _CONFIO_H_ */
+
+
diff --git a/src/mem/gems_common/ioutil/embedtext.py b/src/mem/gems_common/ioutil/embedtext.py
new file mode 100644
index 000000000..64e1c97f3
--- /dev/null
+++ b/src/mem/gems_common/ioutil/embedtext.py
@@ -0,0 +1,54 @@
+
+import sys
+
+#---------------------------------------------------------------------------
+
+class embedText:
+ """
+ embedText converts a text file into a file that can be embedded in C
+ using an #include statement, that defines a \"const char *\" pointing
+ to the same text.
+
+ This is useful to embed scripts and configuration files in object files.
+ """
+ def __init__(self, filename):
+ self.filename = filename
+ self.escape = [ "\'", "\"", "\\", "\?" ]
+
+ def write(self, outputfile, varname):
+ # reads the text file in, line by line, converting it to a C string
+ fin = open( self.filename, 'r' )
+ fout= open( outputfile, 'w' )
+ fout.write("static const char *%s =\n" % varname);
+ l = " "
+ while l != "":
+ l = fin.readline()
+
+ # add escape sequences for the characters in escape
+ fout.write("\"")
+ for char in l:
+ if char == "\n":
+ break
+ if char in self.escape:
+ fout.write( "\\" )
+ fout.write( char )
+ else:
+ fout.write( char )
+ fout.write("\\n\"\n");
+ fout.write(";\n");
+ fin.close()
+ fout.close()
+
+#---------------------------------------------------------------------------
+
+# command-line driver: embed <input-file> into <output-file> as a C
+# string variable named <varname>
+if __name__ == "__main__":
+    if len(sys.argv) != 4:
+        # echo the (wrong) argument count before the usage line
+        print len(sys.argv)
+        print "usage:", sys.argv[0], " input-file output-file varname"
+        sys.exit(1)
+    inputfile = sys.argv[1]
+    outputfile = sys.argv[2]
+    varname = sys.argv[3]
+    print "generating embedded text file: %s from %s\n" % (outputfile, inputfile)
+    inc = embedText( inputfile )
+    inc.write( outputfile, varname )
diff --git a/src/mem/gems_common/ioutil/initvar.cc b/src/mem/gems_common/ioutil/initvar.cc
new file mode 100644
index 000000000..9cccdf64b
--- /dev/null
+++ b/src/mem/gems_common/ioutil/initvar.cc
@@ -0,0 +1,626 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file has been modified by Kevin Moore and Dan Nussbaum of the
+ Scalable Systems Research Group at Sun Microsystems Laboratories
+ (http://research.sun.com/scalable/) to support the Adaptive
+ Transactional Memory Test Platform (ATMTP).
+
+ Please send email to atmtp-interest@sun.com with feedback, questions, or
+ to request future announcements about ATMTP.
+
+ ----------------------------------------------------------------------
+
+ File modification date: 2008-02-23
+
+ ----------------------------------------------------------------------
+*/
+
+/*
+ * FileName: initvar.C
+ * Synopsis: implementation of global variable initialization in simics
+ * Author: cmauer
+ * Version: $Id$
+ */
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+using namespace std;
+#include <string>
+#include <map>
+#include <stdlib.h>
+
+// Maurice
+// extern "C" {
+// #include "global.hh"
+// #include "simics/api.hh"
+// #ifdef SIMICS22X
+// #include "configuration_api.hh"
+// #endif
+// #ifdef SIMICS30
+// #include "configuration.hh"
+// #endif
+// };
+
+#include "FakeSimicsDataTypes.hh"
+
+#ifdef IS_OPAL
+#include "hfatypes.hh"
+#include "debugio.hh"
+#endif
+
+#ifdef IS_RUBY
+#include "Global.hh"
+#endif
+
+#ifdef IS_TOURMALINE
+#include "Tourmaline_Global.hh"
+#endif
+
+#include "confio.hh"
+#include "initvar.hh"
+
+/*------------------------------------------------------------------------*/
+/* Macro declarations */
+/*------------------------------------------------------------------------*/
+
+#define CONFIG_VAR_FILENAME "config.include"
+
+/*------------------------------------------------------------------------*/
+/* Variable declarations */
+/*------------------------------------------------------------------------*/
+
+// define global "constants" using centralized file
+#define PARAM( NAME ) \
+ int32 NAME;
+#define PARAM_UINT( NAME ) \
+ uint32 NAME;
+#define PARAM_ULONG( NAME ) \
+ uint64 NAME;
+#define PARAM_BOOL( NAME ) \
+ bool NAME;
+#define PARAM_DOUBLE( NAME ) \
+ double NAME;
+#define PARAM_STRING( NAME ) \
+ char *NAME;
+#define PARAM_ARRAY( PTYPE, NAME, ARRAY_SIZE ) \
+ PTYPE NAME[ARRAY_SIZE];
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+
+/** global initvar object */
+initvar_t *initvar_t::m_inst = NULL;
+
+/*------------------------------------------------------------------------*/
+/* Forward declarations */
+/*------------------------------------------------------------------------*/
+
+static attr_value_t initvar_get_attr( void *ptr, void *obj );
+static set_error_t initvar_set_attr( void *ptr, void *obj,
+ attr_value_t *value );
+
+/*------------------------------------------------------------------------*/
+/* Constructor(s) / destructor */
+/*------------------------------------------------------------------------*/
+
+//**************************************************************************
+// Constructor: copies the identifying strings, records the callback
+// pointers, publishes this object as the singleton instance, and seeds
+// the configuration variables from the embedded default string.
+initvar_t::initvar_t( const char *name, const char *relativeIncludePath,
+                      const char *initializingString,
+                      void (*allocate_fn)(void),
+                      void (*my_generate_fn)(void),
+                      get_attr_t my_get_attr, set_attr_t my_set_attr )
+{
+  m_is_init = false;
+  // strdup() replaces the original malloc(strlen+2)+strcpy pairs:
+  // same observable result, no hand-computed buffer sizes to get wrong
+  m_name = strdup( name );
+  m_rel_include_path = strdup( relativeIncludePath );
+  m_config_filename = NULL;
+  m_allocate_f = allocate_fn;
+  m_generate_values_f = my_generate_fn;
+  m_my_get_attr = my_get_attr;
+  m_my_set_attr = my_set_attr;
+
+  // last instance constructed wins: this class behaves as a singleton
+  initvar_t::m_inst = this;
+  init_config_reader( initializingString );
+}
+
+//**************************************************************************
+// Destructor: frees every PARAM_STRING parameter value, the identifying
+// strings, the config reader, and the remembered config-file name.
+initvar_t::~initvar_t( )
+{
+  // X-macro expansion over the parameter list: only PARAM_STRING
+  // variables own heap memory (strdup'ed), so all other kinds expand
+  // to nothing here
+#define PARAM( NAME )
+#define PARAM_UINT( NAME )
+#define PARAM_ULONG( NAME )
+#define PARAM_BOOL( NAME )
+#define PARAM_DOUBLE( NAME )
+#define PARAM_STRING( NAME ) \
+  if (NAME != NULL) { \
+    free( NAME ); \
+    NAME = NULL; \
+  }
+#define PARAM_ARRAY( PTYPE, NAME, ARRAY_SIZE )
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+  if (m_name) {
+    free( m_name );
+  }
+  if (m_rel_include_path) {
+    free( m_rel_include_path );
+  }
+  if (m_config_reader) {
+    delete m_config_reader;
+  }
+  if (m_config_filename) {
+    // allocated by strdup() in read_config(), so it must be released
+    // with free() -- the original 'delete' on a malloc'ed buffer is
+    // undefined behavior
+    free( m_config_filename );
+  }
+}
+
+//**************************************************************************
+// Builds the confio_t reader, registers every configuration variable
+// with it (so the reader can get/set each one by name), then seeds the
+// variables from the embedded default configuration string and runs
+// the generate-values callback to refresh derived parameters.
+void initvar_t::init_config_reader( const char *initString )
+{
+  int rc;
+  const char *name;
+
+  m_config_reader = new confio_t();
+  m_config_reader->setVerbosity( false );
+
+  // Initialize the config reader object to identify each parameter.
+  // The stringized parameter name doubles as the opaque cookie passed
+  // back to the get/set callbacks; integer-like kinds reuse PARAM.
+#define PARAM_UINT PARAM
+#define PARAM_ULONG PARAM
+#define PARAM_BOOL PARAM
+#define PARAM_DOUBLE PARAM
+#define PARAM( NAME ) \
+  name = #NAME; \
+  rc = m_config_reader->register_attribute( name, \
+                                            initvar_get_attr, (void *) name, \
+                                            initvar_set_attr, (void *) name );
+#define PARAM_STRING( NAME ) \
+  NAME = NULL; \
+  name = #NAME; \
+  rc = m_config_reader->register_attribute( name, \
+                                            initvar_get_attr, (void *) name, \
+                                            initvar_set_attr, (void *) name );
+#define PARAM_ARRAY( PTYPE, NAME, ARRAY_SIZE ) \
+  name = #NAME; \
+  rc = m_config_reader->register_attribute( name, \
+                                            initvar_get_attr, (void *) name, \
+                                            initvar_set_attr, (void *) name );
+
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+
+  // read the default configuration from the embedded text file
+  rc = m_config_reader->readConfigurationString( initString );
+  (*m_generate_values_f)();
+}
+
+/*------------------------------------------------------------------------*/
+/* Public methods */
+/*------------------------------------------------------------------------*/
+
+//**************************************************************************
+// Explicitly runs the allocation sequence: refresh the derived
+// parameter values, invoke the user-supplied allocate callback, and
+// mark this object initialized.  A second call is rejected with an
+// error message.
+void initvar_t::allocate( void )
+{
+  if ( !confirm_init() ) {
+    (*m_generate_values_f)();
+    (*m_allocate_f)();
+    m_is_init = true;
+    return;
+  }
+  DEBUG_OUT("error: %s initvar::allocate() called twice\n", m_name);
+}
+
+//**************************************************************************
+// Delegates to the config reader, which reports a warning for any
+// registered parameter that was never assigned a value.
+void initvar_t::checkInitialization( void )
+{
+  m_config_reader->checkInitialization();
+}
+
+//**************************************************************************
+// Forwards a get-attribute request to the session-provided callback.
+// Until the object has been initialized, getting any attribute is an
+// error: complain and return an attr_value_t of kind Sim_Val_Invalid.
+attr_value_t initvar_t::dispatch_get( void *id, void *obj,
+                                      attr_value_t *idx )
+{
+  const char *command = (const char *) id;
+  if ( confirm_init() ) {
+    return ((*m_my_get_attr)(id, obj, idx));
+  }
+
+  DEBUG_OUT("error: %s is uninitialized. unable to get \'%s\'\n", m_name, command);
+  DEBUG_OUT("     : you must initialize %s with a configuration file first.\n", m_name);
+  DEBUG_OUT("     : use the command \'%s0.init\'\n", m_name);
+
+  attr_value_t invalid;
+  invalid.kind = Sim_Val_Invalid;
+  invalid.u.integer = 0;
+  return invalid;
+}
+
+
+//**************************************************************************
+// Handles the built-in commands ("init", "readparam", "saveparam",
+// "param") directly; any other attribute is forwarded to the
+// session-provided set callback, but only once the object has been
+// initialized.  Returns a Sim_Set_* status code.
+set_error_t initvar_t::dispatch_set( void *id, void *obj,
+                                     attr_value_t *val, attr_value_t *idx )
+{
+  const char *command = (const char *) id;
+
+  // DEBUG_OUT("set attribute: %s\n", command);
+  if (!strcmp(command, "init")) {
+    // "init" with an empty string allocates with the defaults;
+    // otherwise the named configuration file is read first
+    if (val->kind == Sim_Val_String) {
+      if (!strcmp( val->u.string, "" )) {
+        // update generated values, then allocate
+        allocate();
+      } else {
+        read_config( val->u.string );
+        allocate();
+      }
+      return Sim_Set_Ok;
+    } else {
+      return Sim_Set_Need_String;
+    }
+  } else if (!strcmp(command, "readparam")) {
+    // read a configuration file without allocating
+    if (val->kind == Sim_Val_String) {
+      read_config( val->u.string );
+      return Sim_Set_Ok;
+    } else {
+      return Sim_Set_Need_String;
+    }
+  } else if (!strcmp(command, "saveparam")) {
+    // dump the current parameter values to the named file
+    if (val->kind == Sim_Val_String) {
+      FILE *fp = fopen( val->u.string, "w" );
+      if (fp == NULL) {
+        ERROR_OUT("error: unable to open file: %s\n", val->u.string);
+        return Sim_Set_Illegal_Value;
+      }
+      list_param( fp );
+      // fp is known non-NULL here: no need to re-test before closing
+      fclose( fp );
+      return Sim_Set_Ok;
+    } else {
+      ERROR_OUT("error: saveparam given wrong type.\n");
+      return Sim_Set_Illegal_Value;
+    }
+  } else if (!strcmp(command, "param")) {
+    // "param" with an integer lists all parameters; with a
+    // (name, value) two-element list it sets a single parameter
+    if (val->kind == Sim_Val_Integer) {
+      list_param( stdout );
+      return Sim_Set_Ok;
+    } else if ( val->kind == Sim_Val_List &&
+                val->u.list.size == 2 &&
+                val->u.list.vector[0].kind == Sim_Val_String ) {
+      return (set_param( val->u.list.vector[0].u.string,
+                         &val->u.list.vector[1] ));
+    } else {
+      DEBUG_OUT("error: set parameter given wrong type.\n");
+      return Sim_Set_Illegal_Value;
+    }
+  }
+
+  if ( !confirm_init() ) {
+    // pass the cast 'command' (not the raw void *id) to the %s
+    // conversion: handing a void * to a char * varargs slot is
+    // type-incorrect
+    DEBUG_OUT("error: %s is uninitialized. unable to set \'%s\'\n", m_name, command);
+    DEBUG_OUT("     : you must initialize %s with a configuration file first.\n", m_name);
+    DEBUG_OUT("     : use the command \'%s0.init\'\n", m_name);
+    return Sim_Set_Illegal_Value;
+  }
+
+  return (*m_my_set_attr)( id, obj, val, idx );
+}
+
+/*------------------------------------------------------------------------*/
+/* Accessor(s) / mutator(s) */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Private methods */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Static methods */
+/*------------------------------------------------------------------------*/
+
+//**************************************************************************
+// Callback invoked by the config reader: returns the current value of
+// the parameter whose stringized name is passed as 'ptr'.  The kind of
+// the returned attr_value_t mirrors the PARAM_* declaration of the
+// variable; an unknown name yields Sim_Val_Invalid.
+static attr_value_t initvar_get_attr( void *ptr, void *obj )
+{
+  const char *name = (const char *) ptr;
+  attr_value_t ret;
+  memset( &ret, 0, sizeof(attr_value_t) );
+
+  // X-macro expansion: one strcmp test per configuration variable
+#define PARAM_UINT PARAM
+#define PARAM_ULONG PARAM
+#define PARAM_BOOL PARAM
+#define PARAM( NAME ) \
+  if (!strcmp(name, #NAME)) { \
+    ret.kind = Sim_Val_Integer; \
+    ret.u.integer = NAME; \
+    return (ret); \
+  }
+#define PARAM_DOUBLE( NAME ) \
+  if (!strcmp(name, #NAME)) { \
+    ret.kind = Sim_Val_Floating; \
+    ret.u.floating = NAME; \
+    return (ret); \
+  }
+#define PARAM_STRING( NAME ) \
+  if (!strcmp(name, #NAME)) { \
+    ret.kind = Sim_Val_String; \
+    ret.u.string = NAME; \
+    return (ret); \
+  }
+#define PARAM_ARRAY( PTYPE, NAME, ARRAY_SIZE ) \
+  if (!strcmp(name, #NAME)) { \
+    ret.kind = Sim_Val_List; \
+    ret.u.list.size = ARRAY_SIZE; \
+    ret.u.list.vector = mallocAttribute( ARRAY_SIZE ); \
+    for (int i = 0; i < ARRAY_SIZE; i++) { \
+      ret.u.list.vector[i].u.integer = NAME[i]; \
+    } \
+    return (ret); \
+  }
+
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+
+  DEBUG_OUT("error: %s not found.\n", name);
+  ret.kind = Sim_Val_Invalid;
+  return (ret);
+}
+
+//***************************************************************************
+// Callback invoked by the config reader: assigns a new value to the
+// parameter whose stringized name is passed as 'ptr', after verifying
+// that the supplied attr_value_t has the kind the PARAM_* declaration
+// expects.  Returns Sim_Set_Ok on success or a Sim_Set_Need_* /
+// Sim_Set_Illegal_Value code on a kind mismatch or unknown name.
+static set_error_t initvar_set_attr( void *ptr, void *obj,
+                                     attr_value_t *value )
+{
+  const char *name = (const char *) ptr;
+
+  // X-macro expansion: one strcmp test per configuration variable.
+  // Note that bools and doubles arrive encoded as strings.
+#define PARAM_UINT PARAM
+#define PARAM_ULONG PARAM
+#define PARAM( NAME ) \
+  if (!strcmp(name, #NAME)) { \
+    if ( value->kind != Sim_Val_Integer ) { \
+      ERROR_OUT("error: %s is not an integer\n", name );\
+      return Sim_Set_Need_Integer; \
+    } \
+    NAME = value->u.integer; \
+    return Sim_Set_Ok; \
+  }
+#define PARAM_BOOL( NAME ) \
+  if (!strcmp(name, #NAME)) { \
+    if ( value->kind != Sim_Val_String ) { \
+      ERROR_OUT("error: %s is not an bool string\n", name );\
+      return Sim_Set_Need_String; \
+    } \
+    if (!strcmp(value->u.string, "true")) { \
+      NAME = true; \
+    } else if (!strcmp(value->u.string, "false")) { \
+      NAME = false; \
+    } else { \
+      ERROR_OUT("error: value %s for %s is not an bool string (set to false)\n", value->u.string, name );\
+      NAME = false; \
+    } \
+    return Sim_Set_Ok; \
+  }
+#define PARAM_DOUBLE( NAME ) \
+  if (!strcmp(name, #NAME)) { \
+    if ( value->kind != Sim_Val_String ) { \
+      ERROR_OUT("error: %s is not a float\n", name );\
+      return Sim_Set_Need_Floating; \
+    } \
+    NAME = atof( value->u.string ); \
+    return Sim_Set_Ok; \
+  }
+#define PARAM_STRING( NAME ) \
+  if (!strcmp(name, #NAME)) { \
+    if ( value->kind != Sim_Val_String ) { \
+      ERROR_OUT("error: %s is not an string\n", name ); \
+      return Sim_Set_Need_String; \
+    } \
+    if (NAME != NULL) { \
+      free( NAME ); \
+    } \
+    NAME = strdup( value->u.string ); \
+    return Sim_Set_Ok; \
+  }
+#define PARAM_ARRAY( PTYPE, NAME, ARRAY_SIZE ) \
+  if (!strcmp(name, #NAME)) { \
+    if ( value->kind != Sim_Val_List ) { \
+      ERROR_OUT("error: %s is not an list\n", name ); \
+      return Sim_Set_Need_List; \
+    } \
+    if ( value->u.list.size != ARRAY_SIZE ) { \
+      ERROR_OUT("error: %s has %lld elements (should be %d)\n", name, value->u.list.size, ARRAY_SIZE); \
+      return Sim_Set_Illegal_Value; \
+    } \
+    for (int i = 0; i < ARRAY_SIZE; i++) { \
+      NAME[i] = value->u.list.vector[i].u.integer; \
+    } \
+    return Sim_Set_Ok; \
+  }
+
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+
+  ERROR_OUT("error: %s not a parameter\n", name);
+  return Sim_Set_Illegal_Value;
+}
+
+//***************************************************************************
+// Reads 'parameterFile', remembering its name for get_config_name(),
+// and re-runs the generate-values callback so derived parameters stay
+// consistent.  A failed read is fatal and exits the process.
+void initvar_t::read_config( const char *parameterFile )
+{
+  DEBUG_OUT("read configuration: %s\n", parameterFile );
+  // release the previously remembered file name (strdup allocates with
+  // malloc); otherwise repeated "readparam" commands leak the buffer
+  if (m_config_filename != NULL) {
+    free( m_config_filename );
+  }
+  m_config_filename = strdup( parameterFile );
+  int rc = m_config_reader->readConfiguration( parameterFile,
+                                               m_rel_include_path );
+  if ( rc < 0 ) {
+    ERROR_OUT("fatal error in read configuration: unable to continue.\n");
+    exit(1);
+  }
+  // update generated values
+  (*m_generate_values_f)();
+}
+
+/** sets one of the parameters */
+//**************************************************************************
+set_error_t initvar_t::set_param( const char *name, attr_value_t *value )
+{
+  // [dann 2007-04-04] ATMTP HACK: these parameters may be changed even
+  // after initialization.  This works because ruby's
+  // m_generate_values_f() does nothing -- more particularly, nothing
+  // that depends on any of these parameters is pre-calculated anywhere.
+  static const char *const late_settable[] = {
+    "REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH",
+    "PROFILE_EXCEPTIONS",
+    "PROFILE_XACT",
+    "ATMTP_DEBUG_LEVEL",
+    "ATMTP_ENABLED",
+    NULL
+  };
+
+  bool is_late_settable = false;
+  for ( int i = 0; late_settable[i] != NULL; i++ ) {
+    if ( strcmp( name, late_settable[i] ) == 0 ) {
+      is_late_settable = true;
+      break;
+    }
+  }
+
+  // everything else may only be set before initialization
+  if ( !is_late_settable && confirm_init() ) {
+    DEBUG_OUT("error: %s is already initialized.\n", m_name);
+    DEBUG_OUT("     : setting parameters after initialization is unsupported\n");
+    return (Sim_Set_Illegal_Value);
+  }
+
+  set_error_t result = initvar_set_attr( (void *) name, NULL, value );
+  (*m_generate_values_f)();
+  return (result);
+}
+
+/** print out a list of valid parameters */
+//**************************************************************************
+// Prints every configuration parameter as "name: value", one per line,
+// to 'fp' (stdout when fp is NULL).  PARAM_STRING values that are
+// still NULL are skipped; arrays print as "(v0, v1, ...)".
+void initvar_t::list_param( FILE *fp )
+{
+  if (!fp)
+    fp = stdout;
+
+  // X-macro expansion: one fprintf per configuration variable
+#define PARAM( NAME ) \
+  fprintf( fp, "%-44.44s: %26d\n", #NAME, NAME );
+#define PARAM_UINT( NAME ) \
+  fprintf( fp, "%-44.44s: %26u\n", #NAME, NAME );
+#define PARAM_ULONG( NAME ) \
+  fprintf( fp, "%-44.44s: %26llu\n", #NAME, NAME );
+#define PARAM_BOOL( NAME ) \
+  if (NAME == true) { \
+    fprintf( fp, "%-44.44s: %26.26s\n", #NAME, "true" ); \
+  } else { \
+    fprintf( fp, "%-44.44s: %26.26s\n", #NAME, "false" );\
+  }
+#define PARAM_DOUBLE( NAME ) \
+  fprintf( fp, "%-44.44s: %26f\n", #NAME, NAME );
+#define PARAM_STRING( NAME ) \
+  if ( NAME != NULL ) { \
+    fprintf( fp, "%-44.44s: %26.26s\n", #NAME, NAME ); \
+  }
+#define PARAM_ARRAY( PTYPE, NAME, ARRAY_SIZE ) \
+  fprintf( fp, "%-44.44s: (", #NAME ); \
+  for (int i = 0; i < ARRAY_SIZE; i++) { \
+    if ( i != 0 ) { \
+      fprintf( fp, ", " ); \
+    } \
+    fprintf( fp, "%d", NAME[i] ); \
+  } \
+  fprintf( fp, ")\n" );
+
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+}
+
+//**************************************************************************
+// Returns the name of the last configuration file read, or the literal
+// string "default" when no file has been read yet.
+const char *initvar_t::get_config_name( void )
+{
+  return (m_config_filename == NULL) ? "default" : m_config_filename;
+}
+
+/*------------------------------------------------------------------------*/
+/* Global functions */
+/*------------------------------------------------------------------------*/
+
+//**************************************************************************
+// Free-function trampoline: forwards a get-attribute request to the
+// singleton initvar_t instance.
+attr_value_t initvar_dispatch_get( void *id, void *obj,
+                                   attr_value_t *idx )
+{
+  return initvar_t::m_inst->dispatch_get( id, obj, idx );
+}
+
+//**************************************************************************
+// Free-function trampoline: forwards a set-attribute request to the
+// singleton initvar_t instance.
+set_error_t initvar_dispatch_set( void *id, void *obj,
+                                  attr_value_t *val, attr_value_t *idx )
+{
+  return initvar_t::m_inst->dispatch_set( id, obj, val, idx );
+}
diff --git a/src/mem/gems_common/ioutil/initvar.hh b/src/mem/gems_common/ioutil/initvar.hh
new file mode 100644
index 000000000..8dea8dfc1
--- /dev/null
+++ b/src/mem/gems_common/ioutil/initvar.hh
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _INCLUDE_H_
+#define _INCLUDE_H_
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Macro declarations */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Class declaration(s) */
+/*------------------------------------------------------------------------*/
+
+/**
+* This class deals with initializing the global variables in the object,
+* setting the varibles (from the command line), printing the configuration,
+* and saving it to a file.
+*
+* Before including this file, you must define the variable CONFIG_VAR_FILENAME
+* to define which variables are to be used.
+*
+* @see confio_t
+* @author cmauer
+* @version $Id$
+*/
+class initvar_t {
+public:
+  /**
+   * @name Constructor(s) / destructor
+   */
+  //@{
+
+  /**
+   * Constructor: creates object
+   * @param name The name of this object
+   * @param relativeIncludePath The relative path to config files
+   * @param initializingString A string (with value pairs) for initialization
+   * @param allocate_fn A ptr to the allocate function
+   * @param my_generate_fn A ptr to the generate values function
+   * @param my_get_attr A ptr to the get attribute function
+   * @param my_set_attr A ptr to the set attribute function
+   */
+  initvar_t( const char *name, const char *relativeIncludePath,
+             const char *initializingString,
+             void (*allocate_fn)(void),
+             void (*my_generate_fn)(void),
+             get_attr_t my_get_attr, set_attr_t my_set_attr );
+
+  /**
+   * Destructor: frees object.
+   */
+  ~initvar_t();
+  //@}
+
+  /**
+   * @name Methods
+   */
+  //@{
+  /// calls the allocation routine explicitly (used by the tester)
+  void allocate( void );
+
+  /// checks to see if all vars have been initialized
+  void checkInitialization( void );
+
+  /// list all parameters: to a file (or stdout if file is NULL)
+  void list_param( FILE *fp );
+
+  /// returns the name of the last config file to be read ("default" is none)
+  const char *get_config_name( void );
+
+  /// calls through to the get_attr function, if object is initialized
+  attr_value_t dispatch_get( void *id, void *obj,
+                             attr_value_t *idx );
+
+  /** adds initialization attributes, calls through to the set_attr function,
+   * if object is initialized.
+   */
+  set_error_t dispatch_set( void *id, void *obj,
+                            attr_value_t *val, attr_value_t *idx );
+  //@}
+  /// (single) instance of the init var object
+  /// NOTE(review): the constructor overwrites this, so the last
+  /// instance constructed wins -- presumably one instance per process;
+  /// confirm with callers
+  static initvar_t *m_inst;
+
+protected:
+  ///returns true if the variables are initialized
+  bool confirm_init( void ) {
+    return m_is_init;
+  }
+
+  ///read a configuration file
+  void read_config( const char *parameterFile );
+
+  /// set a parameter to be a particular value
+  set_error_t set_param( const char *name, attr_value_t *value );
+
+  /// initializes the configuration reader
+  void init_config_reader( const char *initString );
+
+  /// bool value (true if initialized)
+  bool m_is_init;
+
+  /// configuration reader
+  confio_t *m_config_reader;
+
+  /// a pointer to a string (corresponding to this objects name)
+  char *m_name;
+
+  /// a pointer to a string (representing the last config file read)
+  char *m_config_filename;
+
+  /// the relative include path to the configuration files
+  char *m_rel_include_path;
+
+  /// a pointer to the allocation function
+  void (*m_allocate_f)(void);
+
+  /// a pointer to the generate values function
+  void (*m_generate_values_f)(void);
+
+  /// a pointer to the session get function
+  get_attr_t m_my_get_attr;
+  /// a pointer to the session set function
+  set_attr_t m_my_set_attr;
+};
+
+
+/*------------------------------------------------------------------------*/
+/* Global variables */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Global functions */
+/*------------------------------------------------------------------------*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+///provides a dispatch mechanism that catches a few commands to get variables
+attr_value_t initvar_dispatch_get( void *id, void *obj,
+ attr_value_t *idx );
+
+///provides a dispatch mechanism that catches a few commands to set variables
+set_error_t initvar_dispatch_set( void *id, void *obj,
+ attr_value_t *val, attr_value_t *idx );
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /* _INCLUDE_H_ */
diff --git a/src/mem/gems_common/ioutil/vardecl.hh b/src/mem/gems_common/ioutil/vardecl.hh
new file mode 100644
index 000000000..21bc62d02
--- /dev/null
+++ b/src/mem/gems_common/ioutil/vardecl.hh
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VARDECL_H_
+#define _VARDECL_H_
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Macro declarations */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Class declaration(s) */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Global variables */
+/*------------------------------------------------------------------------*/
+
+// X-macro expansion: CONFIG_VAR_FILENAME lists one PARAM_* entry per
+// configuration variable; here each entry expands to an 'extern'
+// declaration (the matching definitions are emitted in initvar.cc).
+#define PARAM( NAME ) \
+   extern int32 NAME;
+#define PARAM_UINT( NAME ) \
+   extern uint32 NAME;
+#define PARAM_ULONG( NAME ) \
+   extern uint64 NAME;
+#define PARAM_BOOL( NAME ) \
+   extern bool NAME;
+#define PARAM_DOUBLE( NAME ) \
+   extern double NAME;
+#define PARAM_STRING( NAME ) \
+   extern char *NAME;
+#define PARAM_ARRAY( PTYPE, NAME, ARRAY_SIZE ) \
+   extern PTYPE NAME[ARRAY_SIZE];
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+
+/*------------------------------------------------------------------------*/
+/* Global functions */
+/*------------------------------------------------------------------------*/
+
+#endif /* _VARDECL_H_ */
diff --git a/src/mem/gems_common/std-includes.hh b/src/mem/gems_common/std-includes.hh
new file mode 100644
index 000000000..619214f1d
--- /dev/null
+++ b/src/mem/gems_common/std-includes.hh
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef INCLUDES_H
+#define INCLUDES_H
+
+#include <cstring>
+#include <iomanip>
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <string>
+#include <ext/hash_map>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <cassert>
+
+using namespace std;
+using namespace __gnu_cxx;
+
+#endif //INCLUDES_H
diff --git a/src/mem/gems_common/util.cc b/src/mem/gems_common/util.cc
new file mode 100644
index 000000000..c5b8f22b5
--- /dev/null
+++ b/src/mem/gems_common/util.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "assert.h"
+#include "util.hh"
+
+// Split a string into a head and tail strings on the specified
+// character. Return the head and the string passed in is modified by
+// removing the head, leaving just the tail.
+
+// Returns the portion of 'str' before the first occurrence of
+// 'split_character'; 'str' itself is replaced by the remainder after
+// the separator.  If the separator never appears, the whole string is
+// returned as the head and 'str' becomes empty.
+string string_split(string& str, char split_character)
+{
+  string head = "";
+  string tail = "";
+
+  uint counter = 0;  // 'uint' presumably comes from a system header via std-includes.hh — TODO confirm
+  while(counter < str.size()) {
+    if (str[counter] == split_character) {
+      counter++;  // skip past the separator itself
+      break;
+    } else {
+      head += str[counter];
+    }
+    counter++;
+  }
+
+  // Everything after the separator becomes the tail.
+  while(counter < str.size()) {
+    tail += str[counter];
+    counter++;
+  }
+  str = tail;  // modify the caller's string in place
+  return head;
+}
+
+// Renders a bool as the literal text "true" or "false".
+string bool_to_string(bool value)
+{
+  if (value) {
+    return "true";
+  } else {
+    return "false";
+  }
+}
+
+// Formats 'n' in decimal.  When 'zero_fill' is set the result is
+// padded on the left with '0' characters up to 'width' characters
+// (values wider than 'width' are printed in full, per setw semantics).
+string int_to_string(int n, bool zero_fill, int width)
+{
+  ostringstream sstr;
+  if(zero_fill) {
+    sstr << setw(width) << setfill('0') << n;
+  } else {
+    sstr << n;
+  }
+  string str = sstr.str();
+  return str;
+}
+
+// Parses the leading floating-point number in 'str' via stream
+// extraction.  NOTE(review): if extraction fails, 'ret' is returned
+// uninitialized — callers must pass parseable input.
+float string_to_float(string& str)
+{
+  stringstream sstr(str);
+  float ret;
+  sstr >> ret;
+  return ret;
+}
+
+// Log functions
+// Returns floor(log2(n)), computed by counting right shifts.
+// Asserts n > 0.
+int log_int(long long n)
+{
+  assert(n > 0);
+  int counter = 0;
+  while (n >= 2) {
+    counter++;
+    n = n>>(long long)(1);
+  }
+  return counter;
+}
+
+// True iff n is an exact power of two.
+// NOTE(review): log_int asserts n > 0, so calling this with n <= 0
+// trips that assertion rather than returning false — confirm callers.
+bool is_power_of_2(long long n)
+{
+  return (n == ((long long)(1) << log_int(n)));
+}
+
+
diff --git a/src/mem/gems_common/util.hh b/src/mem/gems_common/util.hh
new file mode 100644
index 000000000..d9e9fec3e
--- /dev/null
+++ b/src/mem/gems_common/util.hh
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#ifndef UTIL_H
+#define UTIL_H
+
+#include "std-includes.hh"
+
+string string_split(string& str, char split_character);
+string bool_to_string(bool value);
+string int_to_string(int n, bool zero_fill = false, int width = 0);
+float string_to_float(string& str);
+int log_int(long long n);
+bool is_power_of_2(long long n);
+
+// Min and Max functions (since they are extern inline, they are as
+// fast as macros)
+
+// Returns the larger of n1 and n2.
+// NOTE(review): std-includes.hh does 'using namespace std', so this
+// global max() can collide/overload against std::max — verify callers.
+extern inline
+int max(int n1, int n2)
+{
+  if (n1 > n2) {
+    return n1;
+  } else {
+    return n2;
+  }
+}
+
+// Returns the smaller of n1 and n2 (see the std::min collision note on max()).
+extern inline
+int min(int n1, int n2)
+{
+  if (n1 < n2) {
+    return n1;
+  } else {
+    return n2;
+  }
+}
+
+#endif //UTIL_H
diff --git a/src/mem/protocol/LogTM.sm b/src/mem/protocol/LogTM.sm
new file mode 100644
index 000000000..02c6656ac
--- /dev/null
+++ b/src/mem/protocol/LogTM.sm
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file has been modified by Kevin Moore and Dan Nussbaum of the
+ Scalable Systems Research Group at Sun Microsystems Laboratories
+ (http://research.sun.com/scalable/) to support the Adaptive
+ Transactional Memory Test Platform (ATMTP).
+
+ Please send email to atmtp-interest@sun.com with feedback, questions, or
+ to request future announcements about ATMTP.
+
+ ----------------------------------------------------------------------
+
+ File modification date: 2008-02-23
+
+ ----------------------------------------------------------------------
+*/
+
+// SLICC declaration of the external C++ PartialAddressFilter: a
+// Bloom-filter-style structure tracking transactional read/write sets.
+external_type(PartialAddressFilter, desc="Bloom filter for tracking transaction locks."){
+  bool isRead(Address);   // address possibly present in the read set
+  bool isWrite(Address);  // address possibly present in the write set
+
+  void addEntry(Address, bool);  // insert address; bool presumably selects read vs. write set — TODO confirm in the C++ impl
+  void clear();                  // reset the filter to empty
+}
+
+// SLICC declaration of the external transactional-memory manager
+// (LogTM/ATMTP support).  Signatures only — semantics live in the C++
+// implementation; int parameters are presumably thread/context ids and
+// uint64 parameters timestamps — TODO confirm against the implementation.
+external_type(TransactionInterfaceManager) {
+  bool shouldNackLoad(Address, uint64, MachineID);
+  bool shouldNackStore(Address, uint64, MachineID);
+  bool checkReadWriteSignatures(Address);
+  bool checkWriteSignatures(Address);
+
+  void notifySendNack(Address, uint64, MachineID);
+  void notifyReceiveNack(int, Address, uint64, uint64, MachineID);
+  void notifyReceiveNackFinal(int, Address);
+
+  uint64 getTimestamp(int);
+  uint64 getOldestTimestamp();
+
+  bool existGlobalLoadConflict(int, Address);
+  bool existGlobalStoreConflict(int, Address);
+
+  void profileTransactionMiss(int, bool);
+
+  void xactReplacement(Address);
+
+  /* DEPRECATED */
+  bool existLoadConflict(Address);
+  bool existStoreConflict(Address);
+  bool isInReadFilterSummary(Address);
+  bool isInWriteFilterSummary(Address);
+  bool isTokenOwner(int);
+  void setAbortFlag(int, Address);
+  void setEnemyProcessor(int, MachineID);
+  bool isRemoteOlder(uint64);
+
+}
diff --git a/src/mem/protocol/MESI_CMP_directory-L1cache.sm b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
new file mode 100644
index 000000000..8f2096666
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
@@ -0,0 +1,867 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MSI_MOSI_CMP_directory-L1cache.sm 1.10 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
+ *
+ */
+
+
+machine(L1Cache, "MSI Directory L1 Cache CMP") {
+
+ // NODE L1 CACHE
+ // From this node's L1 cache TO the network
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
+ // a local L1 -> this L2 bank
+ MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
+ MessageBuffer unblockFromL1Cache, network="To", virtual_network="4", ordered="false";
+
+
+ // To this node's L1 cache FROM the network
+ // a L2 bank -> this L1
+ MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false";
+ // a L2 bank -> this L1
+ MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";
+
+ // STATES
+  // Stable MESI states (NP/I/S/E/M) plus transient states for
+  // in-flight fetches (IS/IM/SM/IS_I) and writebacks (M_I/E_I).
+  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+    // Base states
+    NP, desc="Not present in either cache";
+    I, desc="a L1 cache entry Idle";
+    S, desc="a L1 cache entry Shared";
+    E, desc="a L1 cache entry Exclusive";
+    M, desc="a L1 cache entry Modified", format="!b";
+
+    // Transient States
+    IS, desc="L1 idle, issued GETS, have not seen response yet";
+    IM, desc="L1 idle, issued GETX, have not seen response yet";
+    // NOTE(review): SM is entered from S with an upgrade in flight;
+    // its desc string looks copy-pasted from IM.
+    SM, desc="L1 idle, issued GETX, have not seen response yet";
+    IS_I, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
+
+    M_I, desc="L1 replacing, waiting for ACK";
+    E_I, desc="L1 replacing, waiting for ACK";
+
+  }
+
+ // EVENTS
+  // Events that drive the L1 state machine: CPU-side requests,
+  // L2-forwarded requests, and network responses.
+  enumeration(Event, desc="Cache events") {
+    // L1 events
+    Load, desc="Load request from the home processor";
+    Ifetch, desc="I-fetch request from the home processor";
+    Store, desc="Store request from the home processor";
+
+    Inv, desc="Invalidate request from L2 bank";
+
+    // internal generated request
+    L1_Replacement, desc="L1 Replacement", format="!r";
+
+    // other requests
+    Fwd_GETX, desc="GETX from other processor";
+    Fwd_GETS, desc="GETS from other processor";
+    Fwd_GET_INSTR, desc="GET_INSTR from other processor";
+
+    Data, desc="Data for processor";
+    Data_Exclusive, desc="Data for processor";
+    DataS_fromL1, desc="data for GETS request, need to unblock directory";
+    Data_all_Acks, desc="Data for processor, all acks";
+
+    Ack, desc="Ack for processor";
+    Ack_all, desc="Last ack for processor";
+
+    WB_Ack, desc="Ack for replacement";
+  }
+
+ // TYPES
+
+ // CacheEntry
+  // Per-line L1 cache entry: coherence state, data, and dirty bit.
+  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
+    State CacheState, desc="cache state";
+    DataBlock DataBlk, desc="data for the block";
+    bool Dirty, default="false", desc="data is dirty";
+  }
+
+ // TBE fields
+  // Transaction Buffer Entry: per-address bookkeeping for an
+  // outstanding miss or writeback (state, buffered data, ack count).
+  structure(TBE, desc="...") {
+    Address Address, desc="Physical address for this TBE";
+    State TBEState, desc="Transient state";
+    DataBlock DataBlk, desc="Buffer for the data block";
+    bool Dirty, default="false", desc="data is dirty";
+    bool isPrefetch, desc="Set if this was caused by a prefetch";
+    int pendingAcks, default="0", desc="number of pending acks";
+  }
+
+  // External C++ cache array: tag lookup, allocation, and permission
+  // management for L1 I/D arrays.
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);      // true if a way is free in the set
+    Address cacheProbe(Address);   // victim address if the set is full
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+  }
+
+  // External C++ table of outstanding-transaction entries, keyed by address.
+  external_type(TBETable) {
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+  // Machine-scope structures: the TBE table, split I/D L1 arrays
+  // (template_hack/constructor_hack splice generated C++ template and
+  // constructor arguments), the CPU-side mandatory queue, and the
+  // sequencer used to signal load/store completion to the core.
+  TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
+
+  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+
+  MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
+
+  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+
+  int cache_state_to_int(State state);
+
+ // inclusive cache returns L1 entries only
+  // Returns the entry for 'addr' from whichever L1 array holds it
+  // (D-cache preferred; caches are exclusive of each other).
+  Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
+    if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory[addr];
+    } else {
+      return L1IcacheMemory[addr];
+    }
+  }
+
+  // Updates the access permission on whichever L1 array holds 'addr';
+  // errors out if the block is in neither array.
+  void changeL1Permission(Address addr, AccessPermission permission) {
+    if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory.changePermission(addr, permission);
+    } else if(L1IcacheMemory.isTagPresent(addr)) {
+      return L1IcacheMemory.changePermission(addr, permission);
+    } else {
+      error("cannot change permission, L1 block not present");
+    }
+  }
+
+  // True if 'addr' is present in either the L1 D-cache or I-cache.
+  bool isL1CacheTagPresent(Address addr) {
+    return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+  }
+
+  // Current state of 'addr': a pending TBE's transient state wins over
+  // the cached state; untracked blocks are NP.  A block must never be
+  // in both L1 arrays at once (debug dump before the assert fires).
+  State getState(Address addr) {
+    if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
+      DEBUG_EXPR(id);
+      DEBUG_EXPR(addr);
+    }
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+    if(L1_TBEs.isPresent(addr)) {
+      return L1_TBEs[addr].TBEState;
+    } else if (isL1CacheTagPresent(addr)) {
+      return getL1CacheEntry(addr).CacheState;
+    }
+    return State:NP;
+  }
+
+
+  // Records the new state in the TBE (if any) and the cache entry (if
+  // present), and derives the access permission from the stable state;
+  // all transient states map to Busy.
+  void setState(Address addr, State state) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+    // MUST CHANGE
+    if(L1_TBEs.isPresent(addr)) {
+      L1_TBEs[addr].TBEState := state;
+    }
+
+    if (isL1CacheTagPresent(addr)) {
+      getL1CacheEntry(addr).CacheState := state;
+
+      // Set permission
+      if (state == State:I) {
+        changeL1Permission(addr, AccessPermission:Invalid);
+      } else if (state == State:S || state == State:E) {
+        changeL1Permission(addr, AccessPermission:Read_Only);
+      } else if (state == State:M) {
+        changeL1Permission(addr, AccessPermission:Read_Write);
+      } else {
+        changeL1Permission(addr, AccessPermission:Busy);
+      }
+    }
+  }
+
+  // Maps a CPU request type to the triggering event; atomics are
+  // treated as stores.
+  Event mandatory_request_type_to_event(CacheRequestType type) {
+    if (type == CacheRequestType:LD) {
+      return Event:Load;
+    } else if (type == CacheRequestType:IFETCH) {
+      return Event:Ifetch;
+    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+      return Event:Store;
+    } else {
+      error("Invalid CacheRequestType");
+    }
+  }
+
+
+  // Outgoing ports: requests, responses, and unblocks from this L1
+  // onto their respective virtual networks.
+  out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
+  out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
+  out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
+
+  // Response IntraChip L1 Network - response msg to this L1 cache.
+  // Classifies incoming responses: DATA from a peer L1 during IS/IS_I
+  // needs a directory unblock (DataS_fromL1); otherwise the pending-ack
+  // count in the TBE decides between the "_all" and partial events.
+  in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
+    if (responseIntraChipL1Network_in.isReady()) {
+      peek(responseIntraChipL1Network_in, ResponseMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+          trigger(Event:Data_Exclusive, in_msg.Address);
+        } else if(in_msg.Type == CoherenceResponseType:DATA) {
+          if ( (getState(in_msg.Address) == State:IS || getState(in_msg.Address) == State:IS_I) &&
+               machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache ) {
+
+            trigger(Event:DataS_fromL1, in_msg.Address);
+
+          } else if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
+            trigger(Event:Data_all_Acks, in_msg.Address);
+          } else {
+            trigger(Event:Data, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceResponseType:ACK) {
+          if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
+            trigger(Event:Ack_all, in_msg.Address);
+          } else {
+            trigger(Event:Ack, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
+          trigger(Event:WB_Ack, in_msg.Address);
+        } else {
+          error("Invalid L1 response type");
+        }
+      }
+    }
+  }
+
+  // Requests forwarded TO this L1 from the shared L2 (invalidations and
+  // forwarded GETX/GETS/GET_INSTR from other processors).
+  in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
+    if(requestIntraChipL1Network_in.isReady()) {
+      peek(requestIntraChipL1Network_in, RequestMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        if (in_msg.Type == CoherenceRequestType:INV) {
+          trigger(Event:Inv, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
+          // upgrade transforms to GETX due to race
+          trigger(Event:Fwd_GETX, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GETS) {
+          trigger(Event:Fwd_GETS, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+          trigger(Event:Fwd_GET_INSTR, in_msg.Address);
+        } else {
+          error("Invalid forwarded request type");
+        }
+      }
+    }
+  }
+
+  // Mandatory queue between the node's CPU and its L1 caches.
+  // Routes each CPU request to the right L1 array, first evicting the
+  // block from the wrong array (I vs. D) if present, and triggering a
+  // replacement when the target set is full.
+  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+    if (mandatoryQueue_in.isReady()) {
+      peek(mandatoryQueue_in, CacheMsg) {
+
+        // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+        if (in_msg.Type == CacheRequestType:IFETCH) {
+          // ** INSTRUCTION ACCESS ***
+
+          // Check to see if it is in the OTHER L1
+          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, put the request on the queue to the shared L2
+            trigger(Event:L1_Replacement, in_msg.Address);
+          }
+          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 asks the L2 for it.
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
+              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+            } else {
+              // No room in the L1, so we need to make room in the L1
+              trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+            }
+          }
+        } else {
+          // *** DATA ACCESS ***
+
+          // Check to see if it is in the OTHER L1
+          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, put the request on the queue to the shared L2
+            trigger(Event:L1_Replacement, in_msg.Address);
+          }
+          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 asks the L2 for it
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 doesn't have the line, but we have space for it in the L1 let's see if the L2 has it
+              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+            } else {
+              // No room in the L1, so we need to make room in the L1
+              trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+            }
+          }
+        }
+      }
+    }
+  }
+
+ // ACTIONS
+  // Send a GETS for 'address' to the owning L2 bank, carrying the
+  // CPU request's prefetch/access-mode hints.
+  action(a_issueGETS, "a", desc="Issue GETS") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETS;
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        DEBUG_EXPR(address);
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.Prefetch := in_msg.Prefetch;
+        out_msg.AccessMode := in_msg.AccessMode;
+      }
+    }
+  }
+
+  // GET_INSTR variant of a_issueGETS, used for I-cache fetches.
+  action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GET_INSTR;
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        DEBUG_EXPR(address);
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.Prefetch := in_msg.Prefetch;
+        out_msg.AccessMode := in_msg.AccessMode;
+      }
+    }
+  }
+
+
+  // Send a GETX (exclusive ownership request) to the owning L2 bank.
+  action(b_issueGETX, "b", desc="Issue GETX") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETX;
+        out_msg.Requestor := machineID;
+        DEBUG_EXPR(machineID);
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        DEBUG_EXPR(address);
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.Prefetch := in_msg.Prefetch;
+        out_msg.AccessMode := in_msg.AccessMode;
+      }
+    }
+  }
+
+  // Send an UPGRADE (S -> M ownership request) to the owning L2 bank.
+  // NOTE(review): desc string says "Issue GETX" — looks copy-pasted.
+  action(c_issueUPGRADE, "c", desc="Issue GETX") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:UPGRADE;
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        DEBUG_EXPR(address);
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.Prefetch := in_msg.Prefetch;
+        out_msg.AccessMode := in_msg.AccessMode;
+      }
+    }
+  }
+
+  // Forward the cached data block to the requestor named in the
+  // incoming forwarded request.
+  action(d_sendDataToRequestor, "d", desc="send data to requestor") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+        out_msg.Dirty := getL1CacheEntry(address).Dirty;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  // Send the cached data block to the owning L2 bank (M downgrade path).
+  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+  }
+
+  // Like d_sendDataToRequestor, but sources the data from the TBE
+  // (block already deallocated from the cache during replacement).
+  action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.DataBlk := L1_TBEs[address].DataBlk;
+        out_msg.Dirty := L1_TBEs[address].Dirty;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  // Like d2_sendDataToL2, but sources the data from the TBE.
+  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := L1_TBEs[address].DataBlk;
+      out_msg.Dirty := L1_TBEs[address].Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+  }
+
+  // Acknowledge an invalidation back to the requestor (L2 or peer L1).
+  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+      }
+    }
+  }
+
+  // Write the cached data block back to the owning L2 bank
+  // (Writeback_Data sizing, unlike d2_sendDataToL2's Response_Data).
+  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Writeback_Data;
+    }
+  }
+
+  // Like f_sendDataToL2, but sources the writeback data from the TBE.
+  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := L1_TBEs[address].DataBlk;
+      out_msg.Dirty := L1_TBEs[address].Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Writeback_Data;
+    }
+  }
+
+  // Send an invalidation ACK (AckCount=1) to the requestor.
+  // NOTE(review): the desc string "send data to the L2 cache" is
+  // misleading — no data is sent here.
+  action(fi_sendInvAck, "fi", desc="send data to the L2 cache") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+        out_msg.AckCount := 1;
+      }
+    }
+  }
+
+
+  // Issue a PUTX writeback request to the owning L2 bank; message size
+  // depends on whether the line is dirty (data vs. control only).
+  // NOTE(review): desc string is the generic "send data to the L2 cache".
+  action(g_issuePUTX, "g", desc="send data to the L2 cache") {
+    enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTX;
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Requestor:= machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      if (getL1CacheEntry(address).Dirty) {
+        out_msg.MessageSize := MessageSizeType:Writeback_Data;
+      } else {
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  // Tell the L2 the transaction completed so it can unblock the line.
+  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:UNBLOCK;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Control;
+    }
+  }
+
+  // Unblock variant telling the L2 this L1 now holds the line exclusively.
+  action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Control;
+    }
+  }
+
+
+
+  // Complete a load: hand the data block back to the sequencer.
+  action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
+    DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
+    sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
+  }
+
+  // Complete a store: let the sequencer write into the block, then
+  // mark the line dirty.
+  action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
+    DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
+    sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
+    getL1CacheEntry(address).Dirty := true;
+  }
+
+  // Allocate a TBE for 'address', snapshotting the cache entry's data
+  // and dirty bit for use after the line is deallocated.
+  action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
+    check_allocate(L1_TBEs);
+    L1_TBEs.allocate(address);
+    L1_TBEs[address].isPrefetch := false;
+    L1_TBEs[address].Dirty := getL1CacheEntry(address).Dirty;
+    L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
+  }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+ action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
+ profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
+ }
+
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+ profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ L1_TBEs.deallocate(address);
+ }
+
+ // Install data (and its dirty status) from an incoming response into the
+ // L1 cache entry.
+ action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
+ peek(responseIntraChipL1Network_in, ResponseMsg) {
+ getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
+ getL1CacheEntry(address).Dirty := in_msg.Dirty;
+ }
+ }
+
+ // Fold a message's AckCount into the pending-ack counter.  Note the L2's
+ // data responses carry a *negative* AckCount (0 - #sharers), so subtracting
+ // it here raises pendingAcks; individual invalidate acks carry +1 and
+ // lower it.
+ action(q_updateAckCount, "q", desc="Update ack count") {
+ peek(responseIntraChipL1Network_in, ResponseMsg) {
+ L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
+ APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+ APPEND_TRANSITION_COMMENT(" p: ");
+ APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
+ }
+ }
+
+ // Intentionally empty: leave the triggering message queued.
+ action(z_stall, "z", desc="Stall") {
+ }
+
+ // The block lives in exactly one of the two L1 arrays; free whichever
+ // holds it (D-cache checked first).
+ action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L1DcacheMemory.deallocate(address);
+ } else {
+ L1IcacheMemory.deallocate(address);
+ }
+ }
+
+ // Idempotent allocate: only claim a D-cache frame if the tag is absent.
+ action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
+ if (L1DcacheMemory.isTagPresent(address) == false) {
+ L1DcacheMemory.allocate(address);
+ }
+ }
+
+ // Idempotent allocate for the I-cache.
+ action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
+ if (L1IcacheMemory.isTagPresent(address) == false) {
+ L1IcacheMemory.allocate(address);
+ }
+ }
+
+ // Recycle = move the head message to the tail so it is retried later,
+ // without blocking messages behind it indefinitely.
+ action(zz_recycleRequestQueue, "zz", desc="recycle L1 request queue") {
+ requestIntraChipL1Network_in.recycle();
+ }
+
+ action(z_recycleMandatoryQueue, "\z", desc="recycle L1 request queue") {
+ mandatoryQueue_in.recycle();
+ }
+
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/Replacement/WriteBack from transient states:
+ // the line is in flux, so recycle the processor request and retry it once
+ // the in-flight transaction settles.
+ transition({IS, IM, IS_I, M_I, E_I, SM}, {Load, Ifetch, Store, L1_Replacement}) {
+ z_recycleMandatoryQueue;
+ }
+
+ // Transitions from Idle
+ // Replacing a block we don't hold just frees the (stale) frame.
+ transition({NP,I}, L1_Replacement) {
+ ff_deallocateL1CacheBlock;
+ }
+
+ // Load miss: allocate frame + TBE, issue GETS, wait for data in IS.
+ transition({NP,I}, Load, IS) {
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ k_popMandatoryQueue;
+ }
+
+ // Instruction-fetch miss: same as Load but fills the I-cache.
+ transition({NP,I}, Ifetch, IS) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ ai_issueGETINSTR;
+ k_popMandatoryQueue;
+ }
+
+ // Store miss: issue GETX, wait for data + acks in IM.
+ transition({NP,I}, Store, IM) {
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ k_popMandatoryQueue;
+ }
+
+ // Invalidate for a block we don't hold: still must ack so the requestor's
+ // ack count works out.
+ transition({NP, I}, Inv) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Shared
+ transition(S, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ // Store to a shared line: upgrade in place, collect acks in SM.
+ transition(S, Store, SM) {
+ i_allocateTBE;
+ c_issueUPGRADE;
+ k_popMandatoryQueue;
+ }
+
+ // Shared replacement is silent (no writeback, L2 copy is valid).
+ transition(S, L1_Replacement, I) {
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition(S, Inv, I) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Exclusive
+
+ transition(E, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ // Silent E->M upgrade on a store: we already hold the only copy.
+ transition(E, Store, M) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(E, L1_Replacement, M_I) {
+ // silent E replacement??
+ i_allocateTBE;
+ g_issuePUTX; // send data, but hold in case forwarded request
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition(E, Inv, I) {
+ // don't send data
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Another L1 wants exclusive access: supply data directly, go invalid.
+ transition(E, Fwd_GETX, I) {
+ d_sendDataToRequestor;
+ l_popRequestQueue;
+ }
+
+ // Another L1 wants a shared copy: supply the requestor AND refresh the L2
+ // (L2 copy is assumed stale while a block is exclusive in an L1).
+ transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+ d_sendDataToRequestor;
+ d2_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ // Writeback: data goes to L2 via PUTX; TBE keeps a copy so we can still
+ // respond to requests forwarded before the L2 saw the PUTX.
+ transition(M, L1_Replacement, M_I) {
+ i_allocateTBE;
+ g_issuePUTX; // send data, but hold in case forwarded request
+ ff_deallocateL1CacheBlock;
+ }
+
+ // Writeback acknowledged: transaction done.
+ transition(M_I, WB_Ack, I) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Invalidated while modified: dirty data must reach the L2.
+ transition(M, Inv, I) {
+ f_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ // Invalidated during writeback: respond from the TBE (cache frame is gone).
+ transition(M_I, Inv, I) {
+ ft_sendDataToL2_fromTBE;
+ s_deallocateTBE;
+ l_popRequestQueue;
+ }
+
+ transition(M, Fwd_GETX, I) {
+ d_sendDataToRequestor;
+ l_popRequestQueue;
+ }
+
+ transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+ d_sendDataToRequestor;
+ d2_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ // Forwarded requests racing our writeback are satisfied from the TBE.
+ transition(M_I, Fwd_GETX, I) {
+ dt_sendDataToRequestor_fromTBE;
+ s_deallocateTBE;
+ l_popRequestQueue;
+ }
+
+ transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, I) {
+ dt_sendDataToRequestor_fromTBE;
+ d2t_sendDataToL2_fromTBE;
+ s_deallocateTBE;
+ l_popRequestQueue;
+ }
+
+ // Transitions from IS
+ // Invalidated while the load is still in flight: remember it (IS_I) so the
+ // eventual data fill completes the load but leaves the line invalid.
+ transition({IS, IS_I}, Inv, IS_I) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ transition(IS, Data_all_Acks, S) {
+ u_writeDataToL1Cache;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Same fill, but the racing Inv means we end Invalid, not Shared.
+ transition(IS_I, Data_all_Acks, I) {
+ u_writeDataToL1Cache;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+
+ // Data supplied by a peer L1 (not the L2): must unblock the L2 ourselves.
+ transition(IS, DataS_fromL1, S) {
+ u_writeDataToL1Cache;
+ j_sendUnblock;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IS_I, DataS_fromL1, I) {
+ u_writeDataToL1Cache;
+ j_sendUnblock;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // directory is blocked when sending exclusive data
+ transition(IS_I, Data_Exclusive, E) {
+ u_writeDataToL1Cache;
+ h_load_hit;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IS, Data_Exclusive, E) {
+ u_writeDataToL1Cache;
+ h_load_hit;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from IM
+ // An Inv seen in IM/SM targets the *old* copy; ack it and (for SM) fall
+ // back to IM since our shared copy is now gone.
+ transition({IM, SM}, Inv, IM) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Data arrived but acks are still outstanding; count them down in SM.
+ transition(IM, Data, SM) {
+ u_writeDataToL1Cache;
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Data_all_Acks, M) {
+ u_writeDataToL1Cache;
+ hh_store_hit;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from SM
+ transition({SM, IM}, Ack) {
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ // Last invalidate ack received: the store can complete.
+ transition(SM, Ack_all, M) {
+ jj_sendExclusiveUnblock;
+ hh_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+}
+
+
+
diff --git a/src/mem/protocol/MESI_CMP_directory-L2cache.sm b/src/mem/protocol/MESI_CMP_directory-L2cache.sm
new file mode 100644
index 000000000..43c37e832
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_directory-L2cache.sm
@@ -0,0 +1,1036 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
+ *
+ */
+
+machine(L2Cache, "MOSI Directory L2 Cache CMP") {
+
+ // L2 BANK QUEUES
+ // From local bank of L2 cache TO the network
+ // Note the virtual-network assignments: requests to the directory ride
+ // vnet 2, requests to L1s ride vnet 1, and all responses share vnet 3.
+ MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="2", ordered="false";  // this L2 bank -> Memory
+ MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false";  // this L2 bank -> a local L1
+ MessageBuffer responseFromL2Cache, network="To", virtual_network="3", ordered="false";  // this L2 bank -> a local L1 || Memory
+
+ // FROM the network to this local bank of L2 cache
+ // Unblocks arrive on their own vnet (4) so they cannot deadlock behind
+ // responses.
+ MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false";  // a local L1 -> this L2 bank
+ MessageBuffer responseToL2Cache, network="From", virtual_network="3", ordered="false";  // a local L1 || Memory -> this L2 bank
+ MessageBuffer unblockToL2Cache, network="From", virtual_network="4", ordered="false";  // a local L1 || Memory -> this L2 bank
+
+ // STATES
+ // Stable states are NP/SS/M/MT; "*_I" states are replacement transients,
+ // "I*"/"IS*"/"IM" are fetch transients, and "*B" states block the address
+ // while an L1 transaction is outstanding.
+ enumeration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
+ // Base states
+ NP, desc="Not present in either cache";
+ SS, desc="L2 cache entry Shared, also present in one or more L1s";
+ M, desc="L2 cache entry Modified, not present in any L1s", format="!b";
+ MT, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";
+
+ // L2 replacement
+ M_I, desc="L2 cache replacing, have all acks, sent dirty data to memory, waiting for ACK from memory";
+ MT_I, desc="L2 cache replacing, getting data from exclusive";
+ MCT_I, desc="L2 cache replacing, clean in L2, getting data or ack from exclusive";
+ I_I, desc="L2 replacing clean data, need to inv sharers and then drop data";
+ S_I, desc="L2 replacing dirty data, collecting acks from L1s";
+
+ // Transient States for fetching data from memory
+ ISS, desc="L2 idle, got single L1_GETS, issued memory fetch, have not seen response yet";
+ IS, desc="L2 idle, got L1_GET_INSTR or multiple L1_GETS, issued memory fetch, have not seen response yet";
+ IM, desc="L2 idle, got L1_GETX, issued memory fetch, have not seen response(s) yet";
+
+ // Blocking states
+ SS_MB, desc="Blocked for L1_GETX from SS";
+ MT_MB, desc="Blocked for L1_GETX from MT";
+ M_MB, desc="Blocked for L1_GETX from M";
+
+ MT_IIB, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
+ MT_IB, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
+ MT_SB, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";
+
+ }
+
+ // EVENTS
+ // Fixed desc typos ("maped" -> "mapped"), corrected L1_UPGRADE's desc
+ // (it previously duplicated L1_GETX's text), and disambiguated Ack_all
+ // from Ack (Ack_all fires when the last pending ack arrives; see the
+ // response in_port).  These desc strings feed SLICC's generated protocol
+ // documentation and debug output.
+ enumeration(Event, desc="L2 Cache events") {
+ // L2 events
+
+ // events initiated by the local L1s
+ L1_GET_INSTR, desc="a L1I GET INSTR request for a block mapped to us";
+ L1_GETS, desc="a L1D GETS request for a block mapped to us";
+ L1_GETX, desc="a L1D GETX request for a block mapped to us";
+ L1_UPGRADE, desc="a L1D UPGRADE request for a block mapped to us";
+
+ L1_PUTX, desc="L1 replacing data";
+ L1_PUTX_old, desc="L1 replacing data, but no longer sharer";
+
+ // requests this L2 cannot satisfy locally and forwards to the exclusive L1
+ Fwd_L1_GETX, desc="L1 did not have data, so we supply";
+ Fwd_L1_GETS, desc="L1 did not have data, so we supply";
+ Fwd_L1_GET_INSTR, desc="L1 did not have data, so we supply";
+
+ // events initiated by this L2
+ L2_Replacement, desc="L2 Replacement", format="!r";
+ L2_Replacement_clean, desc="L2 Replacement, but data is clean", format="!r";
+
+ // events from memory controller
+ Mem_Data, desc="data from memory", format="!r";
+ Mem_Ack, desc="ack from memory", format="!r";
+
+ // M->S data writeback
+ WB_Data, desc="data from L1";
+ WB_Data_clean, desc="clean data from L1";
+ Ack, desc="writeback ack";
+ Ack_all, desc="last pending writeback ack";
+
+ Unblock, desc="Unblock from L1 requestor";
+ Unblock_Cancel, desc="Unblock from L1 requestor (FOR XACT MEMORY)";
+ Exclusive_Unblock, desc="Unblock from L1 requestor";
+
+ }
+
+ // TYPES
+
+ // CacheEntry
+ // Per-block L2 state: coherence state, the set of on-chip L1 sharers,
+ // the exclusive L1 owner (valid only in MT-family states), and the data.
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ NetDest Sharers, desc="tracks the L1 shares on-chip";
+ MachineID Exclusive, desc="Exclusive holder of block";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, default="false", desc="data is dirty";
+ }
+
+ // TBE fields
+ // Transaction buffer entry for an in-flight miss or replacement; holds a
+ // data snapshot plus the requestors to respond to when memory answers.
+ structure(TBE, desc="...") {
+ Address Address, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool Dirty, default="false", desc="Data is Dirty";
+
+ NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
+ MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
+ bool isPrefetch, desc="Set if this was caused by a prefetch";
+
+ int pendingAcks, desc="number of pending acks for invalidates during writeback";
+ }
+
+ // Interface to the C++ cache-array implementation.
+ external_type(CacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ void setMRU(Address);
+ }
+
+ // Interface to the C++ TBE-table implementation.
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
+
+ CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
+
+ // inclusive cache, returns L2 entries only
+ Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
+ return L2cacheMemory[addr];
+ }
+
+ // Update the access permission for addr; no-op if the tag is not present.
+ void changeL2Permission(Address addr, AccessPermission permission) {
+ if (L2cacheMemory.isTagPresent(addr)) {
+ return L2cacheMemory.changePermission(addr, permission);
+ }
+ }
+
+ string getCoherenceRequestTypeStr(CoherenceRequestType type) {
+ return CoherenceRequestType_to_string(type);
+ }
+
+ bool isL2CacheTagPresent(Address addr) {
+ return (L2cacheMemory.isTagPresent(addr));
+ }
+
+ // True when `requestor` is the sole remaining sharer (it must be a sharer).
+ bool isOneSharerLeft(Address addr, MachineID requestor) {
+ assert(L2cacheMemory[addr].Sharers.isElement(requestor));
+ return (L2cacheMemory[addr].Sharers.count() == 1);
+ }
+
+ // Safe to call for absent tags (returns false rather than faulting).
+ bool isSharer(Address addr, MachineID requestor) {
+ if (L2cacheMemory.isTagPresent(addr)) {
+ return L2cacheMemory[addr].Sharers.isElement(requestor);
+ } else {
+ return false;
+ }
+ }
+
+ // Record an L1 as a sharer; asserts the L1 really maps to this L2 bank.
+ void addSharer(Address addr, MachineID requestor) {
+ DEBUG_EXPR(machineID);
+ DEBUG_EXPR(requestor);
+ DEBUG_EXPR(addr);
+ assert(map_L1CacheMachId_to_L2Cache(addr, requestor) == machineID);
+ L2cacheMemory[addr].Sharers.add(requestor);
+ }
+
+ // TBE state takes precedence over the cache entry's state; absent -> NP.
+ State getState(Address addr) {
+ if(L2_TBEs.isPresent(addr)) {
+ return L2_TBEs[addr].TBEState;
+ } else if (isL2CacheTagPresent(addr)) {
+ return getL2CacheEntry(addr).CacheState;
+ }
+ return State:NP;
+ }
+
+ string getStateStr(Address addr) {
+ return L2Cache_State_to_string(getState(addr));
+ }
+
+ // when is this called
+ // Update both the TBE and cache-entry copies of the state, and derive the
+ // access permission from the new stable state (anything non-stable is Busy).
+ void setState(Address addr, State state) {
+
+ // MUST CHANGE
+ if (L2_TBEs.isPresent(addr)) {
+ L2_TBEs[addr].TBEState := state;
+ }
+
+ if (isL2CacheTagPresent(addr)) {
+ getL2CacheEntry(addr).CacheState := state;
+
+ // Set permission
+ if (state == State:SS ) {
+ changeL2Permission(addr, AccessPermission:Read_Only);
+ } else if (state == State:M) {
+ changeL2Permission(addr, AccessPermission:Read_Write);
+ } else if (state == State:MT) {
+ // L2 copy is assumed stale while an L1 holds the block exclusively
+ changeL2Permission(addr, AccessPermission:Stale);
+ } else {
+ changeL2Permission(addr, AccessPermission:Busy);
+ }
+ }
+ }
+
+ // Map an incoming L1 request to a protocol event.  UPGRADE only counts as
+ // L1_UPGRADE if the requestor is still a sharer (otherwise its copy was
+ // invalidated in flight and the request is treated as a full GETX); PUTX
+ // from a non-sharer is similarly downgraded to L1_PUTX_old.
+ Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) {
+ if(type == CoherenceRequestType:GETS) {
+ return Event:L1_GETS;
+ } else if(type == CoherenceRequestType:GET_INSTR) {
+ return Event:L1_GET_INSTR;
+ } else if (type == CoherenceRequestType:GETX) {
+ return Event:L1_GETX;
+ } else if (type == CoherenceRequestType:UPGRADE) {
+ if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).Sharers.isElement(requestor) ) {
+ return Event:L1_UPGRADE;
+ } else {
+ return Event:L1_GETX;
+ }
+ } else if (type == CoherenceRequestType:PUTX) {
+ if (isSharer(addr, requestor)) {
+ return Event:L1_PUTX;
+ } else {
+ return Event:L1_PUTX_old;
+ }
+ } else {
+ DEBUG_EXPR(addr);
+ DEBUG_EXPR(type);
+ error("Invalid L1 forwarded request type");
+ }
+ }
+
+ // ** OUT_PORTS **
+ // Each out_port binds a message type to one of the "To network" buffers
+ // declared above.
+ out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
+ out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache);
+ out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);
+
+
+ // Response IntraChip L2 Network - response msg to this particular L2 bank
+ // Classifies responses by sender: local L1s produce writeback data or
+ // invalidate acks; everything else is from the memory/directory side.
+ in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
+ if (responseIntraChipL2Network_in.isReady()) {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ // test whether it's from a local L1 or an off chip source
+ assert(in_msg.Destination.isElement(machineID));
+ if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
+ if(in_msg.Type == CoherenceResponseType:DATA) {
+ if (in_msg.Dirty) {
+ trigger(Event:WB_Data, in_msg.Address);
+ } else {
+ trigger(Event:WB_Data_clean, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ // Ack_all fires when this ack brings the pending count to zero.
+ if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
+ trigger(Event:Ack_all, in_msg.Address);
+ } else {
+ trigger(Event:Ack, in_msg.Address);
+ }
+ } else {
+ error("unknown message type");
+ }
+
+ } else { // external message
+ if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+ trigger(Event:Mem_Data, in_msg.Address);  // L2 now has data and all off-chip acks
+ } else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
+ trigger(Event:Mem_Ack, in_msg.Address);  // L2 now has data and all off-chip acks
+ } else {
+ error("unknown message type");
+ }
+ }
+ }
+ }  // if not ready, do nothing
+ }
+
+ // L1 Request
+ // If the block is present (or a frame is free) the request is handled
+ // directly; otherwise a victim chosen by cacheProbe() must be replaced
+ // first, and the triggered event targets the *victim's* address.
+ in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
+ if(L1RequestIntraChipL2Network_in.isReady()) {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.Requestor);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(in_msg.Destination);
+ assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
+ assert(in_msg.Destination.isElement(machineID));
+ if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+ // The L2 contains the block, so proceed with handling the request
+ trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
+ } else {
+ if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+ // L2 doesn't have the line, but we have space for it in the L2
+ trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
+ } else {
+ // No room in the L2, so we need to make room before handling the request
+ // (dirty victims need a memory writeback; clean ones can be dropped)
+ if (L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Dirty ) {
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Unblock messages from L1 requestors; EXCLUSIVE_UNBLOCK additionally
+ // tells us the sender is now the exclusive owner.
+ in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
+ if(L1unblockNetwork_in.isReady()) {
+ peek(L1unblockNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
+ trigger(Event:Exclusive_Unblock, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+ trigger(Event:Unblock, in_msg.Address);
+ } else {
+ error("unknown unblock message");
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+
+ // Issue a GETS to the directory/memory for a miss at this L2.
+ action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ // The exclusive L1 holds the valid copy: relay the original request
+ // (type and requestor preserved) to it.
+ action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(L2cacheMemory[address].Exclusive);
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+ // Replacement writeback: push the L2 cache entry's data to memory.
+ action(c_exclusiveReplacement, "c", desc="Send data to memory") {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ // As above, but source the data from the TBE (cache entry already gone).
+ action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.DataBlk := L2_TBEs[address].DataBlk;
+ out_msg.Dirty := L2_TBEs[address].Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+
+ // Supply data for a GETX-style request.  AckCount is encoded negatively:
+ // the requestor must collect one ack per current sharer, so we send
+ // -(#sharers), adjusted by +1 when the requestor is itself a sharer
+ // (it never acks itself).  (desc typo "reqeustor" fixed in these three
+ // actions; code unchanged.)
+ action(d_sendDataToRequestor, "d", desc="Send data from cache to requestor") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+
+ out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
+ if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
+ out_msg.AckCount := out_msg.AckCount + 1;
+ }
+ }
+ }
+ }
+
+ // Same as d_sendDataToRequestor but grants the block exclusively.
+ action(dd_sendExclusiveDataToRequestor, "dd", desc="Send data from cache to requestor") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+
+ out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
+ if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
+ out_msg.AckCount := out_msg.AckCount + 1;
+ }
+ }
+ }
+ }
+
+ // Shared-data grant: no invalidations needed, so AckCount is zero.
+ action(ds_sendSharedDataToRequestor, "ds", desc="Send data from cache to requestor") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.AckCount := 0;
+ }
+ }
+ }
+
+ // Memory data arrived: multicast it to every L1 recorded in the TBE's
+ // GetS set (shared grant).
+ action(e_sendDataToGetSRequestors, "e", desc="Send data from cache to all GetS IDs") {
+ assert(L2_TBEs[address].L1_GetS_IDs.count() > 0);
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination := L2_TBEs[address].L1_GetS_IDs;  // internal nodes
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ // Exclusive grant for a lone GETS requestor (assert enforces exactly one).
+ action(ex_sendExclusiveDataToGetSRequestors, "ex", desc="Send data from cache to all GetS IDs") {
+ assert(L2_TBEs[address].L1_GetS_IDs.count() == 1);
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.Destination := L2_TBEs[address].L1_GetS_IDs;  // internal nodes
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+
+ // Deliver data to the single L1 recorded as the outstanding GETX requestor.
+ action(ee_sendDataToGetXRequestor, "ee", desc="Send data from cache to GetX ID") {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ DEBUG_EXPR(out_msg.Address);
+ DEBUG_EXPR(out_msg.Destination);
+ DEBUG_EXPR(out_msg.DataBlk);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+
+ // Invalidate all L1 sharers because this L2 bank is evicting the block.
+ // The INV names this L2 as requestor, so acks come back to us.
+ action(f_sendInvToSharers, "f", desc="invalidate sharers for L2 replacement") {
+ enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := machineID;
+ out_msg.Destination := L2cacheMemory[address].Sharers;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+
+ // Invalidate sharers on behalf of an external requestor (who is NOT a
+ // sharer itself); acks go directly to that requestor.
+ action(fw_sendFwdInvToSharers, "fw", desc="invalidate sharers for request") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination := L2cacheMemory[address].Sharers;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+
+ // As fw_sendFwdInvToSharers, but the requestor is a sharer (UPGRADE case),
+ // so it is removed from the destination set — it must not invalidate or
+ // ack itself.
+ action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination := L2cacheMemory[address].Sharers;
+ out_msg.Destination.remove(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+ // OTHER ACTIONS
+ // Allocate a TBE and snapshot the cache entry; pendingAcks starts at the
+ // current sharer count (one invalidate ack expected per sharer).
+ action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
+ check_allocate(L2_TBEs);
+ L2_TBEs.allocate(address);
+ L2_TBEs[address].L1_GetS_IDs.clear();
+ L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
+ L2_TBEs[address].Dirty := getL2CacheEntry(address).Dirty;
+ L2_TBEs[address].pendingAcks := getL2CacheEntry(address).Sharers.count();
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
+ L2_TBEs.deallocate(address);
+ }
+
+ // dequeue_getDelayCycles() pops the head message and returns its network
+ // delay, which is charged to the given profile bucket.
+ action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
+ profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
+ }
+
+ action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
+ profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
+ }
+
+
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
+ profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
+ }
+
+
+ // Install data (and dirty bit) carried by a response into the L2 entry.
+ action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+ getL2CacheEntry(address).Dirty := in_msg.Dirty;
+ }
+ }
+
+ // Install data carried by an L1 *request* (e.g. a PUTX writeback) into the
+ // L2 entry.  The original desc incorrectly said "response queue" even
+ // though this action peeks the L1 request queue — corrected.
+ action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from request queue to cache") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+ getL2CacheEntry(address).Dirty := in_msg.Dirty;
+ }
+ }
+
+ // Fold a message's AckCount into the TBE's pending count (counts down;
+ // the transition comments record progress for the debug trace).
+ action(q_updateAck, "q", desc="update pending ack count") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
+ APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+ APPEND_TRANSITION_COMMENT(" p: ");
+ APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
+ }
+ }
+
+ // Buffer response data in the TBE (used when the cache entry is being
+ // replaced and cannot hold it).
+ action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ L2_TBEs[address].DataBlk := in_msg.DataBlk;
+ L2_TBEs[address].Dirty := in_msg.Dirty;
+ }
+ }
+
+
+ // Intentionally empty: leave the triggering message queued.
+ action(z_stall, "z", desc="Stall") {
+ }
+
+
+ // Remember this GETS requestor so it can be answered when memory responds.
+ action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].L1_GetS_IDs.add(in_msg.Requestor);
+ }
+ }
+
+ // Remember the (single) GETX requestor for the eventual data response.
+ action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].L1_GetX_ID := in_msg.Requestor;
+ }
+ }
+
+ // Touch the entry so the replacement policy sees it as recently used.
+ action(set_setMRU, "\set", desc="set the MRU entry") {
+ L2cacheMemory.setMRU(address);
+ }
+
+ // Idempotent allocate: only claim a frame if the tag is absent.
+ action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
+ if (L2cacheMemory.isTagPresent(address) == false) {
+ L2cacheMemory.allocate(address);
+ }
+ }
+
+ action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ L2cacheMemory.deallocate(address);
+ }
+
+ // Acknowledge an L1's PUTX writeback.
+ action(t_sendWBAck, "t", desc="Send writeback ACK") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:WB_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ // Ack an UPGRADE: carries -(#sharers - 1) so the upgrader's pending count
+ // comes out right once each *other* sharer acks it.
+ action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ // upgrader doesn't get ack from itself, hence the + 1
+ out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
+ }
+ }
+ }
+
+ // Profiling hook; the original GEMS call is disabled in this import.
+ action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize),  in_msg.Prefetch, L1CacheMachIDToProcessorNum(in_msg.Requestor));
+ }
+ }
+
+ // Profiling hook; the original GEMS call is disabled in this import.
+ action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ // profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
+ }
+ }
+
+
+
+  // Add the requesting L1 to this block's sharer list.
+  action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      addSharer(address, in_msg.Requestor);
+      APPEND_TRANSITION_COMMENT( getL2CacheEntry(address).Sharers );
+    }
+  }
+
+  // Same as nn_addSharer, but the sharer identity comes from an unblock message.
+  action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
+    peek(L1unblockNetwork_in, ResponseMsg) {
+      addSharer(address, in_msg.Sender);
+    }
+  }
+
+  action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      L2cacheMemory[address].Sharers.remove(in_msg.Requestor);
+    }
+  }
+
+  action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      L2cacheMemory[address].Sharers.clear();
+    }
+  }
+
+  // Make the requestor the sole (exclusive) owner; it is also recorded as the
+  // only sharer so later invalidations reach it.
+  action(mm_markExclusive, "\m", desc="set the exclusive owner") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      L2cacheMemory[address].Sharers.clear();
+      L2cacheMemory[address].Exclusive := in_msg.Requestor;
+      addSharer(address, in_msg.Requestor);
+    }
+  }
+
+  // As mm_markExclusive, but driven by an Exclusive_Unblock message.
+  action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
+    peek(L1unblockNetwork_in, ResponseMsg) {
+      L2cacheMemory[address].Sharers.clear();
+      L2cacheMemory[address].Exclusive := in_msg.Sender;
+      addSharer(address, in_msg.Sender);
+    }
+  }
+
+  // Requeue the head L1 request; used while this block is in a transient state.
+  action(zz_recycleL1RequestQueue, "zz", desc="recycle L1 request queue") {
+    L1RequestIntraChipL2Network_in.recycle();
+  }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+
+ //===============================================
+ // BASE STATE - I
+
+ // Transitions from I (Idle)
+  // An L1_PUTX arriving for a block in any of these states is stale (this L2
+  // no longer tracks that L1 as the owner); silently consume it.
+  transition({NP, IS, ISS, IM, SS, M, M_I, MT_I, MCT_I, I_I, S_I, SS_MB, M_MB, MT_IIB, MT_IB, MT_SB}, L1_PUTX) {
+    jj_popL1RequestQueue;
+  }
+
+  // A PUTX from a previous (no longer current) owner is likewise dropped.
+  transition({NP, SS, M, MT, M_I, MT_I, MCT_I, I_I, S_I, IS, ISS, IM, SS_MB, MT_MB, M_MB, MT_IIB, MT_IB, MT_SB}, L1_PUTX_old) {
+    jj_popL1RequestQueue;
+  }
+
+  // Replacements and new L1 requests cannot be serviced while the block is in
+  // a transient/blocking state; recycle them until the state settles.
+  transition({IM, IS, ISS, SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L2_Replacement, L2_Replacement_clean}) {
+    zz_recycleL1RequestQueue;
+  }
+
+  transition({SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE}) {
+    zz_recycleL1RequestQueue;
+  }
+
+
+  // NP + L1_GETS: allocate a block, record the requestor, and fetch from memory.
+  transition(NP, L1_GETS, ISS) {
+    qq_allocateL2CacheBlock;
+    ll_clearSharers;
+    nn_addSharer;
+    i_allocateTBE;
+    ss_recordGetSL1ID;
+    a_issueFetchToMemory;
+    uu_profileMiss;
+    jj_popL1RequestQueue;
+  }
+
+  // Instruction fetch miss: like a data GETS but ends in IS rather than ISS.
+  transition(NP, L1_GET_INSTR, IS) {
+    qq_allocateL2CacheBlock;
+    ll_clearSharers;
+    nn_addSharer;
+    i_allocateTBE;
+    ss_recordGetSL1ID;
+    a_issueFetchToMemory;
+    uu_profileMiss;
+    jj_popL1RequestQueue;
+  }
+
+  // Store miss: the requestor is recorded in the TBE (not as a sharer) and
+  // the line is fetched from memory.
+  transition(NP, L1_GETX, IM) {
+    qq_allocateL2CacheBlock;
+    ll_clearSharers;
+    // nn_addSharer;
+    i_allocateTBE;
+    xx_recordGetXL1ID;
+    a_issueFetchToMemory;
+    uu_profileMiss;
+    jj_popL1RequestQueue;
+  }
+
+
+ // transitions from IS/IM
+
+  // Memory data for a lone GETS requestor: hand the line out exclusively and
+  // wait for the unblock (MT_MB).
+  transition(ISS, Mem_Data, MT_MB) {
+    m_writeDataToCache;
+    ex_sendExclusiveDataToGetSRequestors;
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // Memory data for multiple readers: distribute shared copies.
+  transition(IS, Mem_Data, SS) {
+    m_writeDataToCache;
+    e_sendDataToGetSRequestors;
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // Memory data for a pending store: forward it to the GETX requestor.
+  transition(IM, Mem_Data, MT_MB) {
+    m_writeDataToCache;
+    ee_sendDataToGetXRequestor;
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // Additional read requestors while the fetch is outstanding are merged into
+  // the pending request; ISS collapses to IS (no longer a lone reader).
+  transition({IS, ISS}, {L1_GETS, L1_GET_INSTR}, IS) {
+    nn_addSharer;
+    ss_recordGetSL1ID;
+    uu_profileMiss;
+    jj_popL1RequestQueue;
+  }
+
+  // Writes cannot be merged with a pending fetch; retry later.
+  transition({IS, ISS}, L1_GETX) {
+    zz_recycleL1RequestQueue;
+  }
+
+  transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR}) {
+    zz_recycleL1RequestQueue;
+  }
+
+ // transitions from SS
+  // Shared hit: send data and add the requestor to the sharer list.
+  transition(SS, {L1_GETS, L1_GET_INSTR}) {
+    ds_sendSharedDataToRequestor;
+    nn_addSharer;
+    uu_profileMiss;
+    set_setMRU;
+    jj_popL1RequestQueue;
+  }
+
+  // Store to a shared line: send data, invalidate the other sharers, and
+  // block until the requestor unblocks.
+  transition(SS, L1_GETX, SS_MB) {
+    d_sendDataToRequestor;
+    // fw_sendFwdInvToSharers;
+    fwm_sendFwdInvToSharersMinusRequestor;
+    uu_profileMiss;
+    set_setMRU;
+    jj_popL1RequestQueue;
+  }
+
+  // Upgrade: the requestor already has data; it only needs the other sharers
+  // invalidated and an ack count.
+  transition(SS, L1_UPGRADE, SS_MB) {
+    fwm_sendFwdInvToSharersMinusRequestor;
+    ts_sendInvAckToUpgrader;
+    uu_profileMiss;
+    set_setMRU;
+    jj_popL1RequestQueue;
+  }
+
+  // Replacement of a shared line: invalidate the L1 copies first; a clean
+  // victim (I_I) needs no memory writeback, a dirty one (S_I) does.
+  transition(SS, L2_Replacement_clean, I_I) {
+    i_allocateTBE;
+    f_sendInvToSharers;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(SS, L2_Replacement, S_I) {
+    i_allocateTBE;
+    f_sendInvToSharers;
+    rr_deallocateL2CacheBlock;
+  }
+
+  // M (L2 holds the only copy): satisfy requests directly from the L2.
+  transition(M, L1_GETX, MT_MB) {
+    d_sendDataToRequestor;
+    uu_profileMiss;
+    set_setMRU;
+    jj_popL1RequestQueue;
+  }
+
+  // Instruction fetch: hand out a shared copy and track the sharer.
+  transition(M, L1_GET_INSTR, SS) {
+    d_sendDataToRequestor;
+    nn_addSharer;
+    uu_profileMiss;
+    set_setMRU;
+    jj_popL1RequestQueue;
+  }
+
+  // Data GETS with no other sharers: grant exclusivity.
+  transition(M, L1_GETS, MT_MB) {
+    dd_sendExclusiveDataToRequestor;
+    uu_profileMiss;
+    set_setMRU;
+    jj_popL1RequestQueue;
+  }
+
+  // Dirty replacement writes the line back to memory; a clean one just
+  // drops it (M_I still awaits the memory ack path).
+  transition(M, L2_Replacement, M_I) {
+    i_allocateTBE;
+    c_exclusiveReplacement;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(M, L2_Replacement_clean, M_I) {
+    rr_deallocateL2CacheBlock;
+  }
+
+
+ // transitions from MT
+
+  // MT (an L1 owns the line): forward requests to that exclusive L1.
+  transition(MT, L1_GETX, MT_MB) {
+    b_forwardRequestToExclusive;
+    uu_profileMiss;
+    set_setMRU;
+    jj_popL1RequestQueue;
+  }
+
+  // Reads must wait for both the owner's data and the requestor's unblock
+  // (MT_IIB tracks both).
+  transition(MT, {L1_GETS, L1_GET_INSTR}, MT_IIB) {
+    b_forwardRequestToExclusive;
+    uu_profileMiss;
+    set_setMRU;
+    jj_popL1RequestQueue;
+  }
+
+  // Replacing an MT line requires pulling the data back from the owning L1.
+  transition(MT, L2_Replacement, MT_I) {
+    i_allocateTBE;
+    f_sendInvToSharers;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(MT, L2_Replacement_clean, MCT_I) {
+    i_allocateTBE;
+    f_sendInvToSharers;
+    rr_deallocateL2CacheBlock;
+  }
+
+  // Owner writes the line back voluntarily: absorb the data and ack.
+  transition(MT, L1_PUTX, M) {
+    ll_clearSharers;
+    mr_writeDataToCacheFromRequest;
+    t_sendWBAck;
+    jj_popL1RequestQueue;
+  }
+
+
+ // transitions from blocking states
+  // Unblock_Cancel: the requestor aborted; fall back to the pre-request state.
+  transition(SS_MB, Unblock_Cancel, SS) {
+    k_popUnblockQueue;
+  }
+
+  transition(MT_MB, Unblock_Cancel, MT) {
+    k_popUnblockQueue;
+  }
+
+  transition(MT_IB, Unblock_Cancel, MT) {
+    k_popUnblockQueue;
+  }
+
+  // Exclusive unblock: record the new exclusive owner and leave the
+  // blocking state.
+  transition(SS_MB, Exclusive_Unblock, MT) {
+    // update actual directory
+    mmu_markExclusiveFromUnblock;
+    k_popUnblockQueue;
+  }
+
+  transition({M_MB, MT_MB}, Exclusive_Unblock, MT) {
+    // update actual directory
+    mmu_markExclusiveFromUnblock;
+    k_popUnblockQueue;
+  }
+
+  // MT_IIB waits for both the requestor's unblock and the old owner's data;
+  // either may arrive first.
+  transition(MT_IIB, Unblock, MT_IB) {
+    nnu_addSharerFromUnblock;
+    k_popUnblockQueue;
+  }
+
+  transition(MT_IIB, {WB_Data, WB_Data_clean}, MT_SB) {
+    m_writeDataToCache;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
+    m_writeDataToCache;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(MT_SB, Unblock, SS) {
+    nnu_addSharerFromUnblock;
+    k_popUnblockQueue;
+  }
+
+ // writeback states
+  // While a writeback is in flight, new L1 requests must wait.
+  transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR}) {
+    zz_recycleL1RequestQueue;
+  }
+
+  // Collect invalidation acks for a clean shared replacement.
+  transition(I_I, Ack) {
+    q_updateAck;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(I_I, Ack_all, NP) {
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // The owning L1 returned dirty data; stage it in the TBE and write it back
+  // to memory before finishing.
+  transition({MT_I, MCT_I}, WB_Data, M_I) {
+    qq_writeDataToTBE;
+    ct_exclusiveReplacementFromTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(MCT_I, WB_Data_clean, NP) {
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // L1 never changed Dirty data
+  transition(MT_I, Ack_all, M_I) {
+    ct_exclusiveReplacementFromTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // clean data that L1 exclusive never wrote
+  transition(MCT_I, Ack_all, NP) {
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // drop this because L1 will send data again
+  // the reason we don't accept is that the request virtual network may be completely backed up
+  // transition(MT_I, L1_PUTX) {
+  //  jj_popL1RequestQueue;
+  //}
+
+  // possible race between unblock and immediate replacement
+  transition(MT_MB, L1_PUTX) {
+    zz_recycleL1RequestQueue;
+  }
+
+  transition(MT_I, WB_Data_clean, NP) {
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(S_I, Ack) {
+    q_updateAck;
+    o_popIncomingResponseQueue;
+  }
+
+  // All sharers invalidated; now write the dirty line back to memory.
+  transition(S_I, Ack_all, M_I) {
+    ct_exclusiveReplacementFromTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // Memory acked the writeback; the block is gone from this L2.
+  transition(M_I, Mem_Ack, NP) {
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+}
+
diff --git a/src/mem/protocol/MESI_CMP_directory-mem.sm b/src/mem/protocol/MESI_CMP_directory-mem.sm
new file mode 100644
index 000000000..1fcd234fe
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_directory-mem.sm
@@ -0,0 +1,166 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
+ */
+
+
+// Simple memory-side directory for the MESI CMP directory protocol.  It keeps
+// no sharing state (getState always returns I); it services fetches from
+// memory and absorbs dirty writebacks.  Descriptions below are corrected: the
+// original "Token protocol" machine desc, "Owner" for the Idle state, and the
+// Fetch/Data event descs were stale copy-paste from MOESI_CMP_token-dir.sm.
+machine(Directory, "MESI Directory protocol") {
+
+  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
+  MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
+  MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
+
+  // STATES
+  enumeration(State, desc="Directory states", default="Directory_State_I") {
+    // Base states
+    I, desc="Idle";
+  }
+
+  // Events
+  enumeration(Event, desc="Directory events") {
+    Fetch, desc="A GETS or GETX arrives";
+    Data, desc="Writeback data arrives from an L2";
+  }
+
+  // TYPES
+
+  // DirectoryEntry: the only per-block state is the data itself.
+  structure(Entry, desc="...") {
+    DataBlock DataBlk, desc="data for the block";
+  }
+
+  external_type(DirectoryMemory) {
+    Entry lookup(Address);
+    bool isPresent(Address);
+  }
+
+
+  // ** OBJECTS **
+
+  DirectoryMemory directory, constructor_hack="i";
+
+  // The directory is stateless: every block is always Idle.
+  State getState(Address addr) {
+    return State:I;
+  }
+
+  void setState(Address addr, State state) {
+  }
+
+  // ** OUT_PORTS **
+  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+
+  // ** IN_PORTS **
+
+  in_port(requestNetwork_in, RequestMsg, requestToDir) {
+    if (requestNetwork_in.isReady()) {
+      peek(requestNetwork_in, RequestMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        // Read and write misses are serviced identically from memory.
+        if (in_msg.Type == CoherenceRequestType:GETS) {
+          trigger(Event:Fetch, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GETX) {
+          trigger(Event:Fetch, in_msg.Address);
+        } else {
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
+    if (responseNetwork_in.isReady()) {
+      peek(responseNetwork_in, ResponseMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+          trigger(Event:Data, in_msg.Address);
+        } else {
+          DEBUG_EXPR(in_msg.Type);
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+  // Actions
+  action(a_sendAck, "a", desc="Send ack to L2") {
+    peek(responseNetwork_in, ResponseMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Sender);
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+      }
+    }
+  }
+
+  action(d_sendData, "d", desc="Send data to requestor") {
+    peek(requestNetwork_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+        out_msg.Dirty := false;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+    requestNetwork_in.dequeue();
+  }
+
+  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
+    responseNetwork_in.dequeue();
+  }
+
+  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
+    peek(responseNetwork_in, ResponseMsg) {
+      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+      DEBUG_EXPR(in_msg.Address);
+      DEBUG_EXPR(in_msg.DataBlk);
+    }
+  }
+
+  // TRANSITIONS
+
+  // Any fetch is serviced straight from memory.
+  transition(I, Fetch) {
+    d_sendData;
+    j_popIncomingRequestQueue;
+  }
+
+  // A dirty writeback is stored to memory and acknowledged.
+  transition(I, Data) {
+    m_writeDataToMemory;
+    a_sendAck;
+    k_popIncomingResponseQueue;
+  }
+}
diff --git a/src/mem/protocol/MESI_CMP_directory-msg.sm b/src/mem/protocol/MESI_CMP_directory-msg.sm
new file mode 100644
index 000000000..c2d02b59d
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_directory-msg.sm
@@ -0,0 +1,112 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MSI_MOSI_CMP_directory-msg.sm 1.5 05/01/19 15:48:37-06:00 mikem@royal16.cs.wisc.edu $
+ *
+ */
+
+// CoherenceRequestType: request messages sent towards the L2/directory.
+enumeration(CoherenceRequestType, desc="...") {
+  GETX, desc="Get eXclusive";
+  UPGRADE, desc="UPGRADE to exclusive";
+  GETS, desc="Get Shared";
+  GET_INSTR, desc="Get Instruction";
+  INV, desc="INValidate";
+  PUTX, desc="replacement message";
+}
+
+// CoherenceResponseType: response messages (data, acks, unblocks).  The three
+// data variants originally all carried the identical desc="Data"; they are
+// differentiated here so generated documentation distinguishes them.
+enumeration(CoherenceResponseType, desc="...") {
+  MEMORY_ACK, desc="Ack from memory controller";
+  DATA, desc="Data";
+  DATA_EXCLUSIVE, desc="Exclusive data";
+  MEMORY_DATA, desc="Data from memory";
+  ACK, desc="Generic invalidate ack";
+  WB_ACK, desc="writeback ack";
+  UNBLOCK, desc="unblock";
+  EXCLUSIVE_UNBLOCK, desc="exclusive unblock";
+}
+
+// RequestMsg: coherence request payload carried on the request networks.
+structure(RequestMsg, desc="...", interface="NetworkMessage") {
+  Address Address, desc="Physical address for this request";
+  CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+  AccessModeType AccessMode, desc="user/supervisor access type";
+  MachineID Requestor , desc="What component request";
+  NetDest Destination, desc="What components receive the request, includes MachineType and num";
+  MessageSizeType MessageSize, desc="size category of the message";
+  DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
+  bool Dirty, default="false", desc="Dirty bit";
+  PrefetchBit Prefetch, desc="Is this a prefetch request";
+}
+
+// ResponseMsg: data/ack/unblock payload carried on the response networks.
+structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+  Address Address, desc="Physical address for this request";
+  CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+  MachineID Sender, desc="What component sent the data";
+  NetDest Destination, desc="Node to whom the data is sent";
+  DataBlock DataBlk, desc="Data for the cache line";
+  bool Dirty, default="false", desc="Dirty bit";
+  int AckCount, default="0", desc="number of acks in this message";
+  MessageSizeType MessageSize, desc="size category of the message";
+}
+
+/*
+GenericRequestType convertToGenericType(CoherenceRequestType type) {
+ if(type == CoherenceRequestType:PUTX) {
+ return GenericRequestType:PUTX;
+ } else if(type == CoherenceRequestType:GETS) {
+ return GenericRequestType:GETS;
+ } else if(type == CoherenceRequestType:GET_INSTR) {
+ return GenericRequestType:GET_INSTR;
+ } else if(type == CoherenceRequestType:GETX) {
+ return GenericRequestType:GETX;
+ } else if(type == CoherenceRequestType:UPGRADE) {
+ return GenericRequestType:UPGRADE;
+ } else if(type == CoherenceRequestType:PUTS) {
+ return GenericRequestType:PUTS;
+ } else if(type == CoherenceRequestType:INV) {
+ return GenericRequestType:INV;
+ } else if(type == CoherenceRequestType:INV_S) {
+ return GenericRequestType:INV_S;
+ } else if(type == CoherenceRequestType:L1_DG) {
+ return GenericRequestType:DOWNGRADE;
+ } else if(type == CoherenceRequestType:WB_ACK) {
+ return GenericRequestType:WB_ACK;
+ } else if(type == CoherenceRequestType:EXE_ACK) {
+ return GenericRequestType:EXE_ACK;
+ } else {
+ DEBUG_EXPR(type);
+ error("invalid CoherenceRequestType");
+ }
+}
+*/
+
diff --git a/src/mem/protocol/MESI_CMP_directory.slicc b/src/mem/protocol/MESI_CMP_directory.slicc
new file mode 100644
index 000000000..34303f97e
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_directory.slicc
@@ -0,0 +1,5 @@
+MESI_CMP_directory-msg.sm
+MESI_CMP_directory-L2cache.sm
+MESI_CMP_directory-L1cache.sm
+MESI_CMP_directory-mem.sm
+standard_CMP-protocol.sm
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm b/src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm
new file mode 100644
index 000000000..468cf3c1c
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm
@@ -0,0 +1,1800 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file has been modified by Kevin Moore and Dan Nussbaum of the
+ Scalable Systems Research Group at Sun Microsystems Laboratories
+ (http://research.sun.com/scalable/) to support the Adaptive
+ Transactional Memory Test Platform (ATMTP).
+
+ Please send email to atmtp-interest@sun.com with feedback, questions, or
+ to request future announcements about ATMTP.
+
+ ----------------------------------------------------------------------
+
+ File modification date: 2008-02-23
+
+ ----------------------------------------------------------------------
+*/
+
+/*
+ * $Id$
+ *
+ */
+
+
+machine(L1Cache, "MESI Directory L1 Cache CMP") {
+
+  // NODE L1 CACHE
+  // From this node's L1 cache TO the network
+  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
+  // a local L1 -> this L2 bank
+  MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
+  // unblock messages from this L1 after a transfer completes
+  MessageBuffer unblockFromL1Cache, network="To", virtual_network="4", ordered="false";
+
+
+  // To this node's L1 cache FROM the network
+  // a L2 bank -> this L1
+  MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false";
+  // a L2 bank -> this L1
+  MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";
+
+  // STATES
+  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+    // Base states
+    NP, desc="Not present in either cache";
+    I, desc="a L1 cache entry Idle";
+    S, desc="a L1 cache entry Shared";
+    E, desc="a L1 cache entry Exclusive";
+    M, desc="a L1 cache entry Modified", format="!b";
+
+    // Transient States
+    IS, desc="L1 idle, issued GETS, have not seen response yet";
+    IM, desc="L1 idle, issued GETX, have not seen response yet";
+    // NOTE(review): SM's desc below reads "L1 idle" like IM's -- looks like a
+    // copy-paste; confirm the intended wording (upgrade issued from S).
+    SM, desc="L1 idle, issued GETX, have not seen response yet";
+    IS_I, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
+    IS_S, desc="L1 idle, issued GETS, L2 sent us data but responses from filters have not arrived";
+    IS_E, desc="L1 idle, issued GETS, L2 sent us exclusive data, but responses from filters have not arrived";
+    IM_M, desc="L1 idle, issued GETX, L2 sent us data, but responses from filters have not arrived";
+
+    M_I, desc="L1 replacing, waiting for ACK";
+    E_I, desc="L1 replacing, waiting for ACK";
+
+  }
+
+  // EVENTS
+  // Descriptions corrected: "Exlusive" typo (twice) and Fwd_GETX_X, whose
+  // desc said "A GetS" although the event is a forwarded GETX conflict.
+  enumeration(Event, desc="Cache events") {
+    // L1 events
+    Load, desc="Load request from the home processor";
+    Ifetch, desc="I-fetch request from the home processor";
+    Store, desc="Store request from the home processor";
+
+    Replace, desc="lower level cache replaced this line, also need to invalidate to maintain inclusion";
+    Inv, desc="Invalidate request from L2 bank";
+    Inv_X, desc="Invalidate request from L2 bank, trans CONFLICT";
+
+    // internal generated request
+    L1_Replacement, desc="L1 Replacement", format="!r";
+    L1_Replacement_XACT, desc="L1 Replacement of trans. data", format="!r";
+
+    // other requests
+    Fwd_GETX, desc="GETX from other processor";
+    Fwd_GETS, desc="GETS from other processor";
+    Fwd_GET_INSTR, desc="GET_INSTR from other processor";
+
+    //Data, desc="Data for processor";
+    L2_Data, desc="Data for processor, from L2";
+    L2_Data_all_Acks, desc="Data for processor, from L2, all acks";
+    L2_Exclusive_Data, desc="Exclusive Data for processor, from L2";
+    L2_Exclusive_Data_all_Acks, desc="Exclusive Data for processor, from L2, all acks";
+    DataS_fromL1, desc="data for GETS request, need to unblock directory";
+    Data_all_Acks, desc="Data for processor, all acks";
+
+    Ack, desc="Ack for processor";
+    Ack_all, desc="Last ack for processor";
+
+    WB_Ack, desc="Ack for replacement";
+
+    // Transactional responses/requests
+    Nack, desc="Nack for processor";
+    Nack_all, desc="Last Nack for processor";
+    Check_Write_Filter, desc="Check the write filter";
+    Check_Read_Write_Filter, desc="Check the read and write filters";
+
+    //Fwd_GETS_T, desc="A GetS from another processor, part of a trans, but not a conflict";
+    Fwd_GETS_X, desc="A GetS from another processor, trans CONFLICT";
+    Fwd_GETX_X, desc="A GetX from another processor, trans CONFLICT";
+    Fwd_GET_INSTR_X, desc="A GetInstr from another processor, trans CONFLICT";
+  }
+
+  // TYPES
+
+  // CacheEntry: per-line L1 state.
+  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
+    State CacheState, desc="cache state";
+    DataBlock DataBlk, desc="data for the block";
+    bool Dirty, default="false", desc="data is dirty";
+  }
+
+  // TBE fields: bookkeeping for an in-flight miss or replacement.
+  structure(TBE, desc="...") {
+    Address Address, desc="Line address for this TBE";
+    Address PhysicalAddress, desc="Physical address for this TBE";
+    State TBEState, desc="Transient state";
+    DataBlock DataBlk, desc="Buffer for the data block";
+    bool Dirty, default="false", desc="data is dirty";
+    bool isPrefetch, desc="Set if this was caused by a prefetch";
+    int pendingAcks, default="0", desc="number of pending acks";
+    int ThreadID, default="0", desc="SMT thread issuing the request";
+
+    bool RemoveLastOwnerFromDir, default="false", desc="The forwarded data was being replaced";
+    MachineID LastOwnerID, desc="What component forwarded (last owned) the data"; // For debugging
+
+    // for Transactional Memory
+    uint64 Timestamp, default="0", desc="Timestamp of request";
+    bool nack, default="false", desc="has this request been nacked?";
+    NetDest Nackers, desc="The nodes which sent a NACK to us";
+  }
+
+  // External (C++-implemented) cache container interface.
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);
+    Address cacheProbe(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+  }
+
+  // External TBE table interface.
+  external_type(TBETable) {
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+  TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
+
+  // Split L1: separate instruction and data arrays.
+  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+
+
+  MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
+
+  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+  TransactionInterfaceManager xact_mgr, abstract_chip_ptr="true", constructor_hack="i";
+
+  // triggerQueue used to indicate when all acks/nacks have been received
+  MessageBuffer triggerQueue, ordered="false";
+
+  int cache_state_to_int(State state);
+
+  // inclusive cache returns L1 entries only
+  // Look up a line in whichever L1 array (D preferred, then I) holds it.
+  Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
+    if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory[addr];
+    } else {
+      return L1IcacheMemory[addr];
+    }
+  }
+
+  // Change the access permission of the L1 copy, whichever array it is in.
+  void changeL1Permission(Address addr, AccessPermission permission) {
+    if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory.changePermission(addr, permission);
+    } else if(L1IcacheMemory.isTagPresent(addr)) {
+      return L1IcacheMemory.changePermission(addr, permission);
+    } else {
+      error("cannot change permission, L1 block not present");
+    }
+  }
+
+  bool isL1CacheTagPresent(Address addr) {
+    return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+  }
+
+  // TBE state takes precedence over cache state; NP if the line is nowhere.
+  State getState(Address addr) {
+    if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
+      DEBUG_EXPR(id);
+      DEBUG_EXPR(addr);
+    }
+    // A line must never live in both the I and D arrays at once.
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+    if(L1_TBEs.isPresent(addr)) {
+      return L1_TBEs[addr].TBEState;
+    } else if (isL1CacheTagPresent(addr)) {
+      return getL1CacheEntry(addr).CacheState;
+    }
+    return State:NP;
+  }
+
+
+  // For detecting read/write conflicts on requests from remote processors
+  // (delegated to the transactional-memory manager).
+  bool shouldNackLoad(Address addr, uint64 remote_timestamp, MachineID remote_id){
+    return xact_mgr.shouldNackLoad(addr, remote_timestamp, remote_id);
+  }
+
+  bool shouldNackStore(Address addr, uint64 remote_timestamp, MachineID remote_id){
+    return xact_mgr.shouldNackStore(addr, remote_timestamp, remote_id);
+  }
+
+  // For querying read/write signatures on current processor
+  bool checkReadWriteSignatures(Address addr){
+    return xact_mgr.checkReadWriteSignatures(addr);
+  }
+
+  bool checkWriteSignatures(Address addr){
+    return xact_mgr.checkWriteSignatures(addr);
+  }
+
+  // Update the transient/stable state and derive the access permission from it.
+  void setState(Address addr, State state) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+    // MUST CHANGE
+    if(L1_TBEs.isPresent(addr)) {
+      L1_TBEs[addr].TBEState := state;
+    }
+
+    if (isL1CacheTagPresent(addr)) {
+      getL1CacheEntry(addr).CacheState := state;
+
+      // Set permission
+      if (state == State:I) {
+        changeL1Permission(addr, AccessPermission:Invalid);
+      } else if (state == State:S || state == State:E) {
+        changeL1Permission(addr, AccessPermission:Read_Only);
+      } else if (state == State:M) {
+        changeL1Permission(addr, AccessPermission:Read_Write);
+      } else {
+        // All transient states are treated as Busy.
+        changeL1Permission(addr, AccessPermission:Busy);
+      }
+    }
+  }
+
+  // Map a CPU request type to a cache event; transactional loads/stores map
+  // to the same events as ordinary ones.
+  Event mandatory_request_type_to_event(CacheRequestType type) {
+    if (type == CacheRequestType:LD) {
+      return Event:Load;
+    } else if (type == CacheRequestType:LD_XACT) {
+      return Event:Load;
+    } else if (type == CacheRequestType:IFETCH) {
+      return Event:Ifetch;
+    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+      return Event:Store;
+    } else if((type == CacheRequestType:ST_XACT) || (type == CacheRequestType:LDX_XACT) ) {
+      return Event:Store;
+    } else {
+      error("Invalid CacheRequestType");
+    }
+  }
+
+
+  // Dump every field of a CPU request message to the debug log.
+  // (Fixes the "Regquest" typo in the original debug string.)
+  void printRequest(CacheMsg in_msg){
+    DEBUG_EXPR("Request msg: ");
+    DEBUG_EXPR(machineID);
+    DEBUG_EXPR(in_msg.Address);
+    DEBUG_EXPR(in_msg.PhysicalAddress);
+    DEBUG_EXPR(in_msg.Type);
+    DEBUG_EXPR(in_msg.ProgramCounter);
+    DEBUG_EXPR(in_msg.AccessMode);
+    DEBUG_EXPR(in_msg.Size);
+    DEBUG_EXPR(in_msg.Prefetch);
+    DEBUG_EXPR(in_msg.Version);
+    DEBUG_EXPR(in_msg.LogicalAddress);
+    DEBUG_EXPR(in_msg.ThreadID);
+    DEBUG_EXPR(in_msg.Timestamp);
+    DEBUG_EXPR(in_msg.ExposedAction);
+  }
+
+  out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
+  out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
+  out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
+  out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+  // Trigger Queue: fires once all acks/nacks for a request have arrived; the
+  // TBE's nack flag decides which terminal event is raised.
+  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
+    if (triggerQueue_in.isReady()) {
+      peek(triggerQueue_in, TriggerMsg) {
+        if (in_msg.Type == TriggerType:ALL_ACKS) {
+          if (L1_TBEs[in_msg.Address].nack == true){
+            trigger(Event:Nack_all, in_msg.Address);
+          } else {
+            trigger(Event:Ack_all, in_msg.Address);
+          }
+        } else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+  // Response IntraChip L1 Network - response msg to this L1 cache
+  in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
+    if (responseIntraChipL1Network_in.isReady()) {
+      peek(responseIntraChipL1Network_in, ResponseMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        // L2 data responses carry an ack count; zero means no acks pending.
+        if(in_msg.Type == CoherenceResponseType:L2_DATA_EXCLUSIVE) {
+          if( in_msg.AckCount == 0 ){
+            trigger(Event:L2_Exclusive_Data_all_Acks, in_msg.Address);
+          }
+          else{
+            trigger(Event:L2_Exclusive_Data, in_msg.Address);
+          }
+        } else if(in_msg.Type == CoherenceResponseType:L2_DATA) {
+          if( in_msg.AckCount == 0 ){
+            trigger(Event:L2_Data_all_Acks, in_msg.Address);
+          }
+          else{
+            trigger(Event:L2_Data, in_msg.Address);
+          }
+        } else if(in_msg.Type == CoherenceResponseType:DATA) {
+          // Data forwarded by a peer L1 for a GETS needs a directory unblock.
+          if ( (getState(in_msg.Address) == State:IS || getState(in_msg.Address) == State:IS_I) &&
+               machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache ) {
+
+            trigger(Event:DataS_fromL1, in_msg.Address);
+          } else if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
+            trigger(Event:Data_all_Acks, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceResponseType:ACK) {
+          trigger(Event:Ack, in_msg.Address);
+        } else if (in_msg.Type == CoherenceResponseType:NACK) {
+          trigger(Event:Nack, in_msg.Address);
+        } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
+          trigger(Event:WB_Ack, in_msg.Address);
+        } else {
+          error("Invalid L1 response type");
+        }
+      }
+    }
+  }
+
+ // Request InterChip network - request from this L1 cache to the shared L2
+ in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
+ if(requestIntraChipL1Network_in.isReady()) {
+ peek(requestIntraChipL1Network_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceRequestType:INV) {
+ // check whether we have a inter-proc conflict
+ if(shouldNackStore(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == false){
+ trigger(Event:Inv, in_msg.Address);
+ }
+ else{
+ // there's a conflict
+ trigger(Event:Inv_X, in_msg.Address);
+ }
+ } else if(in_msg.Type == CoherenceRequestType:INV_ESCAPE) {
+ // we cannot NACK this
+ trigger(Event:Inv, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
+ // check whether we have a conflict
+ if(shouldNackStore(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
+ trigger(Event:Fwd_GETX_X, in_msg.Address);
+ }
+ else{
+ // else no conflict
+ // upgrade transforms to GETX due to race
+ trigger(Event:Fwd_GETX, in_msg.Address);
+ }
+ } else if(in_msg.Type == CoherenceRequestType:GETX_ESCAPE) {
+ // no need for filter checks
+ trigger(Event:Fwd_GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ // check whether we have a conflict
+ if(shouldNackLoad(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
+ trigger(Event:Fwd_GETS_X, in_msg.Address);
+ }
+ else{
+ // else no conflict
+ trigger(Event:Fwd_GETS, in_msg.Address);
+ }
+ } else if(in_msg.Type == CoherenceRequestType:GETS_ESCAPE) {
+ // no need for filter checks
+ trigger(Event:Fwd_GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+ if(shouldNackLoad(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
+ trigger(Event:Fwd_GET_INSTR_X, in_msg.Address);
+ }
+ else{
+ // else no conflict
+ trigger(Event:Fwd_GET_INSTR, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR_ESCAPE) {
+ // no need for filter checks
+ trigger(Event:Fwd_GET_INSTR, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:REPLACE) {
+ trigger(Event:Replace, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:CHECK_WRITE_FILTER) {
+ trigger(Event:Check_Write_Filter, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:CHECK_READ_WRITE_FILTER) {
+ trigger(Event:Check_Read_Write_Filter, in_msg.Address);
+ } else {
+ error("Invalid forwarded request type");
+ }
+ }
+ }
+ }
+
+ // Mandatory Queue betweens Node's CPU and it's L1 caches
+ in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady()) {
+ peek(mandatoryQueue_in, CacheMsg) {
+
+ // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+ if (in_msg.Type == CacheRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ // check whether block is transactional
+ if (checkReadWriteSignatures(in_msg.Address) == true){
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_Replacement_XACT, in_msg.Address);
+ }
+ else{
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_Replacement, in_msg.Address);
+ }
+ }
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ // The tag matches for the L1, so the L1 asks the L2 for it.
+ printRequest(in_msg);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+ // L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
+ printRequest(in_msg);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ // check whether block is transactional
+ if(checkReadWriteSignatures( L1IcacheMemory.cacheProbe(in_msg.Address) ) == true){
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:L1_Replacement_XACT, L1IcacheMemory.cacheProbe(in_msg.Address));
+ }
+ else{
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+ }
+ } else {
+ // *** DATA ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ // check whether block is transactional
+ if(checkReadWriteSignatures(in_msg.Address) == true){
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_Replacement_XACT, in_msg.Address);
+ }
+ else{
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_Replacement, in_msg.Address);
+ }
+ }
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ // The tag matches for the L1, so the L1 ask the L2 for it
+ printRequest(in_msg);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+ // L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
+ printRequest(in_msg);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ // check whether block is transactional
+ if(checkReadWriteSignatures( L1DcacheMemory.cacheProbe(in_msg.Address) ) == true){
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:L1_Replacement_XACT, L1DcacheMemory.cacheProbe(in_msg.Address));
+ }
+ else{
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+  // ACTIONS
+
+  // Issue a GETS (shared-read) request to the home L2 for this block.
+  // If the requesting instruction runs under an "exposed action" the
+  // escape variant is sent, which the remote side may not NACK.
+  action(a_issueGETS, "a", desc="Issue GETS") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+        if(in_msg.ExposedAction){
+          out_msg.Type := CoherenceRequestType:GETS_ESCAPE;
+        }
+        else{
+          out_msg.Type := CoherenceRequestType:GETS;
+        }
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        DEBUG_EXPR(address);
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.Prefetch := in_msg.Prefetch;
+        out_msg.AccessMode := in_msg.AccessMode;
+        // either return transactional timestamp or current time
+        out_msg.Timestamp := in_msg.Timestamp;
+        APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
+      }
+    }
+  }
+
+  // Issue a GET_INSTR (instruction-fetch) request to the home L2;
+  // same escape handling as a_issueGETS.
+  action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+        if(in_msg.ExposedAction){
+          out_msg.Type := CoherenceRequestType:GET_INSTR_ESCAPE;
+        }
+        else{
+          out_msg.Type := CoherenceRequestType:GET_INSTR;
+        }
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        DEBUG_EXPR(address);
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.Prefetch := in_msg.Prefetch;
+        out_msg.AccessMode := in_msg.AccessMode;
+        // either return transactional timestamp or current time
+        out_msg.Timestamp := in_msg.Timestamp;
+        APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
+      }
+    }
+  }
+
+
+  // Issue a GETX (exclusive) request to the home L2; same escape
+  // handling as a_issueGETS.
+  action(b_issueGETX, "b", desc="Issue GETX") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+        if(in_msg.ExposedAction){
+          out_msg.Type := CoherenceRequestType:GETX_ESCAPE;
+        }
+        else{
+          out_msg.Type := CoherenceRequestType:GETX;
+        }
+        out_msg.Requestor := machineID;
+        DEBUG_EXPR(machineID);
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        DEBUG_EXPR(address);
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.Prefetch := in_msg.Prefetch;
+        out_msg.AccessMode := in_msg.AccessMode;
+        // either return transactional timestamp or current time
+        out_msg.Timestamp := in_msg.Timestamp;
+        APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
+      }
+    }
+  }
+
+  // Issue an UPGRADE request (S -> M without data transfer) to the
+  // home L2 for this block.
+  // NOTE: desc previously said "Issue GETX" (copy/paste error).
+  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+        out_msg.Type := CoherenceRequestType:UPGRADE;
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        DEBUG_EXPR(address);
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.Prefetch := in_msg.Prefetch;
+        out_msg.AccessMode := in_msg.AccessMode;
+        // either return transactional timestamp or current time
+        out_msg.Timestamp := in_msg.Timestamp;
+        APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
+      }
+    }
+  }
+
+  /****************************BEGIN Transactional Actions*************************/
+  // send a NACK to requestor - the equivalent of a NACKed data response
+  // Note we don't have to track the ackCount here because we only send data NACKs when
+  // we are exclusive with the data. Otherwise the L2 will source the data (and set the ackCount
+  // appropriately)
+  action(e_sendNackToRequestor, "en", desc="send nack to requestor (could be L2 or L1)") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+        out_msg.Type := CoherenceResponseType:NACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+        // send oldest timestamp (or current time if no thread in transaction)
+        out_msg.Timestamp := xact_mgr.getOldestTimestamp();
+        APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
+        APPEND_TRANSITION_COMMENT(" ");
+        APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
+        APPEND_TRANSITION_COMMENT(" ");
+        APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+        APPEND_TRANSITION_COMMENT(" ");
+        APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+        // ackCount is by default 0
+      }
+      // also inform driver about sending NACK
+      xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
+    }
+  }
+
+  // send a NACK when L2 wants us to invalidate ourselves: the NACK
+  // counts as one invalidation response (AckCount := 1) but signals a
+  // transactional conflict instead of completing the invalidate.
+  // NOTE: desc previously said "send data to the L2 cache" (copy/paste
+  // error) - no data is sent here.
+  action(fi_sendInvNack, "fin", desc="send invalidate NACK to the requestor") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+        out_msg.Type := CoherenceResponseType:NACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+        // send oldest timestamp (or current time if no thread in transaction)
+        out_msg.Timestamp := xact_mgr.getOldestTimestamp();
+        out_msg.AckCount := 1;
+        APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
+        APPEND_TRANSITION_COMMENT(" ");
+        APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
+        APPEND_TRANSITION_COMMENT(" ");
+        APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+        APPEND_TRANSITION_COMMENT(" ");
+        APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+      }
+      // also inform driver about sending NACK
+      xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
+    }
+  }
+
+  // for when we want to check our Write filters.  Responds to a
+  // CHECK_WRITE_FILTER probe: NACK (plus driver notification) on a
+  // conflict, otherwise a plain ACK.  Either way AckCount := 1 so the
+  // probe counts as one collected response at the requestor.
+  action(a_checkWriteFilter, "awf", desc="Check our write filter for conflicts") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      // For correct conflict detection, should call shouldNackLoad() NOT
+      // checkWriteSignatures()
+      if(shouldNackLoad(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
+        // conflict - send a NACK
+        enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+          out_msg.Address := address;
+          out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+          out_msg.Type := CoherenceResponseType:NACK;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+          // send oldest timestamp (or current time if no thread in transaction)
+          out_msg.Timestamp := xact_mgr.getOldestTimestamp();
+          out_msg.AckCount := 1;
+          APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
+          APPEND_TRANSITION_COMMENT(" ");
+          APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
+          APPEND_TRANSITION_COMMENT(" ");
+          APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+          APPEND_TRANSITION_COMMENT(" ");
+          APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+        }
+        // also inform driver about sending NACK
+        xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
+      }
+      else{
+        // no conflict - send ACK
+        enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+          out_msg.Address := address;
+          out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+          out_msg.Type := CoherenceResponseType:ACK;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+          out_msg.AckCount := 1;
+          APPEND_TRANSITION_COMMENT(" ");
+          APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+        }
+      }
+    }
+  }
+
+  // for when we want to check our Read + Write filters.  Responds to a
+  // CHECK_READ_WRITE_FILTER probe: NACK (plus driver notification) on a
+  // conflict, otherwise a plain ACK; AckCount := 1 in both cases.
+  // NOTE: desc previously said "write filter" only (copy/paste error
+  // from a_checkWriteFilter).
+  action(a_checkReadWriteFilter, "arwf", desc="Check our read and write filters for conflicts") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      // For correct conflict detection, we should call shouldNackStore() NOT
+      // checkReadWriteSignatures()
+      if(shouldNackStore(in_msg.PhysicalAddress, in_msg.Timestamp,in_msg.Requestor ) == true){
+        // conflict - send a NACK
+        enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+          out_msg.Address := address;
+          out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+          out_msg.Type := CoherenceResponseType:NACK;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+          // send oldest timestamp (or current time if no thread in transaction)
+          out_msg.Timestamp := xact_mgr.getOldestTimestamp();
+          out_msg.AckCount := 1;
+          APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
+          APPEND_TRANSITION_COMMENT(" ");
+          APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
+          APPEND_TRANSITION_COMMENT(" ");
+          APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+          APPEND_TRANSITION_COMMENT(" ");
+          APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+        }
+        // also inform driver about sending NACK
+        xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
+      }
+      else{
+        // no conflict - send ACK
+        enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+          out_msg.Address := address;
+          out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+          out_msg.Type := CoherenceResponseType:ACK;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+          out_msg.AckCount := 1;
+          APPEND_TRANSITION_COMMENT(" ");
+          APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+        }
+      }
+    }
+  }
+
+  // Tell the transaction manager that a NACK arrived for the request
+  // tracked in this block's TBE.
+  action(r_notifyReceiveNack, "nrn", desc="Notify the driver when a nack is received"){
+    peek(responseIntraChipL1Network_in, ResponseMsg) {
+      xact_mgr.notifyReceiveNack(L1_TBEs[address].ThreadID, in_msg.PhysicalAddress, L1_TBEs[address].Timestamp, in_msg.Timestamp, in_msg.Sender);
+    }
+  }
+
+  // Used to driver to take abort or retry action
+  action(r_notifyReceiveNackFinal, "nrnf", desc="Notify the driver when the final nack is received"){
+    xact_mgr.notifyReceiveNackFinal(L1_TBEs[address].ThreadID, L1_TBEs[address].PhysicalAddress);
+  }
+
+  // this version uses physical address stored in TBE
+
+  // Record in the TBE whether the pending request was a prefetch.
+  action(x_tbeSetPrefetch, "xp", desc="Set the prefetch bit in the TBE."){
+    peek(mandatoryQueue_in, CacheMsg) {
+      if(in_msg.Prefetch == PrefetchBit:No){
+        L1_TBEs[address].isPrefetch := false;
+      }
+      else{
+        assert(in_msg.Prefetch == PrefetchBit:Yes);
+        L1_TBEs[address].isPrefetch := true;
+      }
+    }
+  }
+
+  // Copy the physical address, thread id and timestamp of the pending
+  // request from the mandatory-queue message into the TBE.
+  action(x_tbeSetPhysicalAddress, "ia", desc="Sets the physical address field of the TBE"){
+    peek(mandatoryQueue_in, CacheMsg) {
+      L1_TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
+      L1_TBEs[address].ThreadID := in_msg.ThreadID;
+      L1_TBEs[address].Timestamp := in_msg.Timestamp;
+    }
+  }
+
+  // Send unblock cancel to L2 (for nacked requests that blocked directory)
+  action(jj_sendUnblockCancel, "\jc", desc="send unblock to the L2 cache") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:UNBLOCK_CANCEL;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Control;
+      // also pass along list of NACKers
+      out_msg.Nackers := L1_TBEs[address].Nackers;
+    }
+  }
+
+  //same as ACK case, but sets the NACK flag for TBE entry.  When the
+  //pending ack count reaches zero an ALL_ACKS trigger is enqueued,
+  //which the trigger port turns into Nack_all (because nack is set).
+  action(q_updateNackCount, "qn", desc="Update ack count") {
+    peek(responseIntraChipL1Network_in, ResponseMsg) {
+      // mark this request as having been NACKed
+      L1_TBEs[address].nack := true;
+      APPEND_TRANSITION_COMMENT(" before pendingAcks: ");
+      APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
+      L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
+      L1_TBEs[address].Nackers.add(in_msg.Sender);
+      APPEND_TRANSITION_COMMENT(" ");
+      APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+      APPEND_TRANSITION_COMMENT(" ");
+      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+
+      APPEND_TRANSITION_COMMENT(" after pendingAcks: ");
+      APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
+      APPEND_TRANSITION_COMMENT(" sender: ");
+      APPEND_TRANSITION_COMMENT(in_msg.Sender);
+      if (L1_TBEs[address].pendingAcks == 0) {
+        enqueue(triggerQueue_out, TriggerMsg) {
+          out_msg.Address := address;
+          out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+          out_msg.Type := TriggerType:ALL_ACKS;
+          APPEND_TRANSITION_COMMENT(" Triggering All_Acks");
+        }
+      }
+    }
+  }
+
+  // Record that a transactional block overflowed the cache.
+  action(q_profileOverflow, "po", desc="profile the overflowed block"){
+    profileOverflow(address, machineID);
+  }
+
+  // Inform the transaction manager that a transactional block was replaced.
+  action(qq_xactReplacement, "\q", desc="replaced a transactional block"){
+    xact_mgr.xactReplacement(address);
+  }
+
+  // Append all fields of the incoming demand request to the transition
+  // trace, for debugging.
+  action(p_profileRequest, "pcc", desc="Profile request msg") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      APPEND_TRANSITION_COMMENT(" request: Timestamp: ");
+      APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
+      APPEND_TRANSITION_COMMENT(" PA: ");
+      APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+      APPEND_TRANSITION_COMMENT(" Type: ");
+      APPEND_TRANSITION_COMMENT(in_msg.Type);
+      APPEND_TRANSITION_COMMENT(" VPC: ");
+      APPEND_TRANSITION_COMMENT(in_msg.ProgramCounter);
+      APPEND_TRANSITION_COMMENT(" Mode: ");
+      APPEND_TRANSITION_COMMENT(in_msg.AccessMode);
+      APPEND_TRANSITION_COMMENT(" PF: ");
+      APPEND_TRANSITION_COMMENT(in_msg.Prefetch);
+      APPEND_TRANSITION_COMMENT(" VA: ");
+      APPEND_TRANSITION_COMMENT(in_msg.LogicalAddress);
+      APPEND_TRANSITION_COMMENT(" Thread: ");
+      APPEND_TRANSITION_COMMENT(in_msg.ThreadID);
+      APPEND_TRANSITION_COMMENT(" Exposed: ");
+      APPEND_TRANSITION_COMMENT(in_msg.ExposedAction);
+    }
+  }
+
+  /********************************END Transactional Actions************************/
+
+  // Forward the cached data block to the requesting L1/L2.
+  action(d_sendDataToRequestor, "d", desc="send data to requestor") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+        out_msg.Dirty := getL1CacheEntry(address).Dirty;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  // Send the cached data block down to the home L2 (M -> S downgrade).
+  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+  }
+
+  // Like d_sendDataToRequestor, but sources the data from the TBE
+  // (block already deallocated from the cache) and tells the directory
+  // to drop us as last owner.
+  action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.DataBlk := L1_TBEs[address].DataBlk;
+        out_msg.Dirty := L1_TBEs[address].Dirty;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+        out_msg.RemoveLastOwnerFromDir := true;
+        out_msg.LastOwnerID := machineID;
+      }
+    }
+  }
+
+  // Send TBE-held data to the home L2 (response-sized message).
+  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := L1_TBEs[address].DataBlk;
+      out_msg.Dirty := L1_TBEs[address].Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+  }
+
+  // Write back the cached data block to the home L2 (writeback-sized).
+  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Writeback_Data;
+    }
+  }
+
+  // Write back TBE-held data to the home L2 (writeback-sized).
+  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := L1_TBEs[address].DataBlk;
+      out_msg.Dirty := L1_TBEs[address].Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Writeback_Data;
+    }
+  }
+
+  // Acknowledge an invalidation: send a single ACK (AckCount := 1) to
+  // the requestor.  Counterpart of fi_sendInvNack for the no-conflict
+  // case.  NOTE: desc previously said "send data to the L2 cache"
+  // (copy/paste error) - no data is sent here.
+  action(fi_sendInvAck, "fi", desc="send invalidation ack to the requestor") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+        out_msg.AckCount := 1;
+        APPEND_TRANSITION_COMMENT(" ");
+        APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+      }
+    }
+  }
+
+
+  // Issue a PUTX writeback to the home L2; data-sized only when dirty.
+  action(g_issuePUTX, "g", desc="send data to the L2 cache") {
+    enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTX;
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Requestor:= machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      if (getL1CacheEntry(address).Dirty) {
+        out_msg.MessageSize := MessageSizeType:Writeback_Data;
+      } else {
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  // Issue a PUTS writeback to the home L2; data-sized only when dirty.
+  action(g_issuePUTS, "gs", desc="send clean data to the L2 cache") {
+    enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTS;
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Requestor:= machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      if (getL1CacheEntry(address).Dirty) {
+        out_msg.MessageSize := MessageSizeType:Writeback_Data;
+      } else {
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  // used to determine whether to set sticky-M or sticky-S state in directory (M or SS in L2):
+  // a block in our write signature goes back as PUTX (sticky-M),
+  // otherwise as PUTS (sticky-S).
+  action(g_issuePUTXorPUTS, "gxs", desc="send data to the L2 cache") {
+    enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      if(checkWriteSignatures(address) == true){
+        // we should set sticky-M
+        out_msg.Type := CoherenceRequestType:PUTX;
+      }
+      else{
+        // we should set sticky-S
+        out_msg.Type := CoherenceRequestType:PUTS;
+      }
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Requestor:= machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      if (getL1CacheEntry(address).Dirty) {
+        out_msg.MessageSize := MessageSizeType:Writeback_Data;
+      } else {
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  // Unblock the L2 after a completed (shared) request; tells the L2
+  // whether the request was transactional and whether to remove the
+  // previous owner from the directory.
+  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:UNBLOCK;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Control;
+      // inform L2 whether request was transactional
+      //out_msg.Transactional := L1_TBEs[address].Trans;
+      out_msg.Transactional := checkReadWriteSignatures(address);
+
+      out_msg.RemoveLastOwnerFromDir := L1_TBEs[address].RemoveLastOwnerFromDir;
+      out_msg.LastOwnerID := L1_TBEs[address].LastOwnerID;
+    }
+  }
+
+  // Same as j_sendUnblock, but for requests that obtained the block
+  // exclusively.
+  action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Control;
+      // inform L2 whether request was transactional
+      // out_msg.Transactional := L1_TBEs[address].Trans;
+      out_msg.Transactional := checkReadWriteSignatures(address);
+
+      out_msg.RemoveLastOwnerFromDir := L1_TBEs[address].RemoveLastOwnerFromDir;
+      out_msg.LastOwnerID := L1_TBEs[address].LastOwnerID;
+    }
+  }
+
+
+
+  // Complete a load: hand the cached data back to the sequencer.
+  action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
+    DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
+    sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
+  }
+
+  // Complete a store: hand the data to the sequencer and mark the
+  // cache entry dirty.
+  action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
+    DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
+    sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
+    getL1CacheEntry(address).Dirty := true;
+  }
+
+  // Report a transactional conflict on a load to the sequencer.
+  action(h_load_conflict, "hc", desc="Notify sequencer of conflict on load") {
+    sequencer.readConflictCallback(address);
+  }
+
+  // Report a transactional conflict on a store to the sequencer.
+  // NOTE: desc previously said "notify sequencer that store completed"
+  // (copied from hh_store_hit) - this path reports a conflict instead.
+  action(hh_store_conflict, "\hc", desc="Notify sequencer of conflict on store") {
+    sequencer.writeConflictCallback(address);
+  }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
+ check_allocate(L1_TBEs);
+ L1_TBEs.allocate(address);
+ L1_TBEs[address].isPrefetch := false;
+ L1_TBEs[address].Dirty := getL1CacheEntry(address).Dirty;
+ L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+ action(j_popTriggerQueue, "jp", desc="Pop trigger queue.") {
+ triggerQueue_in.dequeue();
+ }
+
+ action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
+ profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
+ }
+
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+ profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ L1_TBEs.deallocate(address);
+ }
+
+ action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
+ peek(responseIntraChipL1Network_in, ResponseMsg) {
+ getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
+ getL1CacheEntry(address).Dirty := in_msg.Dirty;
+ if (machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
+ L1_TBEs[address].RemoveLastOwnerFromDir := in_msg.RemoveLastOwnerFromDir;
+ L1_TBEs[address].LastOwnerID := in_msg.LastOwnerID;
+ }
+ }
+ }
+
+ action(q_updateAckCount, "q", desc="Update ack count") {
+ peek(responseIntraChipL1Network_in, ResponseMsg) {
+ APPEND_TRANSITION_COMMENT(" before pendingAcks: ");
+ APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
+ L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
+ APPEND_TRANSITION_COMMENT(" ");
+ APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+ APPEND_TRANSITION_COMMENT(" ");
+ APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+ APPEND_TRANSITION_COMMENT(" after pendingAcks: ");
+ APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
+ APPEND_TRANSITION_COMMENT(" sender: ");
+ APPEND_TRANSITION_COMMENT(in_msg.Sender);
+ if (L1_TBEs[address].pendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.Address := address;
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+ out_msg.Type := TriggerType:ALL_ACKS;
+ APPEND_TRANSITION_COMMENT(" Triggering All_Acks");
+ }
+ }
+ }
+ }
+
+ action(z_stall, "z", desc="Stall") {
+ }
+
+ action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L1DcacheMemory.deallocate(address);
+ } else {
+ L1IcacheMemory.deallocate(address);
+ }
+ }
+
+ action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
+ if (L1DcacheMemory.isTagPresent(address) == false) {
+ L1DcacheMemory.allocate(address);
+ // reset trans bit
+ }
+ }
+
+ // Allocate an I-cache entry for the line unless one already exists
+ // (idempotent, mirroring oo_allocateL1DCacheBlock for the D-cache).
+ action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
+ if (L1IcacheMemory.isTagPresent(address) == false) {
+ L1IcacheMemory.allocate(address);
+ // reset trans bit
+ }
+ }
+
+ // Move the head of the L1 request (forwarded-request) queue to the back so
+ // it is retried later instead of blocking the port.
+ action(zz_recycleRequestQueue, "zz", desc="recycle L1 request queue") {
+ requestIntraChipL1Network_in.recycle();
+ }
+
+ // Move the head of the mandatory (CPU-side) queue to the back so the
+ // processor request is retried later; used to stall Load/Ifetch/Store
+ // while the line is in a transient state.
+ // Fix: desc previously said "request queue" (copy-paste from
+ // zz_recycleRequestQueue) although this action recycles the mandatory queue.
+ action(z_recycleMandatoryQueue, "\z", desc="recycle L1 mandatory queue") {
+ mandatoryQueue_in.recycle();
+ }
+
+ // Record a demand miss against this L1, attributed via the mandatory-queue
+ // message that caused it.
+ action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ profile_L1Cache_miss(in_msg, id);
+ }
+ }
+
+ // Record a transactional miss with the transaction manager; the boolean
+ // argument distinguishes load (true) from store (false) misses.
+ action(uuu_profileTransactionLoadMiss, "\uu", desc="Profile Miss") {
+ xact_mgr.profileTransactionMiss(L1_TBEs[address].ThreadID, true);
+ }
+
+ action(uuu_profileTransactionStoreMiss, "\uuu", desc="Profile Miss") {
+ xact_mgr.profileTransactionMiss(L1_TBEs[address].ThreadID, false);
+ }
+
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // For filter responses
+ // Filter-probe events are legal in every state: they only consult the local
+ // read/write signatures and reply; they never change the cache line state.
+ transition({NP, I, S, E, M, IS, IM, SM, IS_I, IS_S, IS_E, IM_M, M_I, E_I}, Check_Write_Filter){
+ a_checkWriteFilter;
+ l_popRequestQueue;
+ }
+
+ transition({NP, I, S, E, M, IS, IM, SM, IS_I, IS_S, IS_E, IM_M, M_I, E_I}, Check_Read_Write_Filter){
+ a_checkReadWriteFilter;
+ l_popRequestQueue;
+ }
+
+ // Transitions for Load/Store/Replacement/WriteBack from transient states
+ // While a miss or writeback is outstanding, stall new CPU requests by
+ // recycling the mandatory queue rather than servicing them out of order.
+ transition({IS, IM, IS_I, IS_S, IS_E, IM_M, M_I, E_I}, {Load, Ifetch, Store, L1_Replacement, L1_Replacement_XACT}) {
+ z_recycleMandatoryQueue;
+ }
+
+ // Transitions from Idle
+ // Replacing a line we don't hold just frees the (tag-only) entry.
+ transition({NP,I}, {L1_Replacement, L1_Replacement_XACT}) {
+ ff_deallocateL1CacheBlock;
+ }
+
+ // Miss path: allocate entry + TBE, record prefetch/physical address in the
+ // TBE, then issue the appropriate request to the L2.
+ transition({NP,I}, Load, IS) {
+ p_profileRequest;
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ x_tbeSetPrefetch;
+ x_tbeSetPhysicalAddress;
+ a_issueGETS;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP,I}, Ifetch, IS) {
+ p_profileRequest;
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ x_tbeSetPhysicalAddress;
+ ai_issueGETINSTR;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP,I}, Store, IM) {
+ p_profileRequest;
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ x_tbeSetPrefetch;
+ x_tbeSetPhysicalAddress;
+ b_issueGETX;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ // Not a holder: simply acknowledge the invalidate.
+ transition({NP, I}, Inv) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Transactional invalidates to blocks in NP or I are
+ // transactional blocks that have been silently replaced
+ // FALSE POSITIVE - can't tell whether block was never in our read/write set or was replaced
+ transition({NP, I}, Inv_X) {
+ fi_sendInvNack;
+ l_popRequestQueue;
+ }
+
+ // for L2 replacements. This happens due to our silent replacements.
+ transition({NP, I}, Replace) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Shared
+ transition(S, {Load,Ifetch}) {
+ p_profileRequest;
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ // Store to a shared line: upgrade via GETX; stay out of the cache state
+ // machine's hit path until all acks/data arrive.
+ transition(S, Store, IM) {
+ p_profileRequest;
+ i_allocateTBE;
+ x_tbeSetPrefetch;
+ x_tbeSetPhysicalAddress;
+ b_issueGETX;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, L1_Replacement, I) {
+ ff_deallocateL1CacheBlock;
+ }
+
+ // Replacing transactional data: record the overflow with the transaction
+ // manager before silently dropping the clean shared copy.
+ transition(S, L1_Replacement_XACT, I) {
+ q_profileOverflow;
+ qq_xactReplacement;
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition(S, Inv, I) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // NACK transactional invalidates to preserve read isolation on this line.
+ transition(S, Inv_X) {
+ fi_sendInvNack;
+ l_popRequestQueue;
+ }
+
+ // for L2 replacements.
+ transition(S, Replace, I){
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Exclusive
+
+ transition(E, {Load, Ifetch}) {
+ p_profileRequest;
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ // Silent E->M upgrade on a store: no network traffic needed.
+ transition(E, Store, M) {
+ p_profileRequest;
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(E, L1_Replacement, M_I) {
+ // The data is clean
+ i_allocateTBE;
+ g_issuePUTX; // send data, but hold in case forwarded request
+ ff_deallocateL1CacheBlock;
+ }
+
+ // we can't go to M_I here because we need to maintain transactional read isolation on this line, and M_I allows GETS and GETXs to
+ // be serviced. For correctness we need to make sure we are marked as a transactional reader (if we never read transactionally written data back exclusively) or transactional writer
+ transition(E, L1_Replacement_XACT, E_I) {
+ q_profileOverflow;
+ qq_xactReplacement;
+ // The data is clean
+ i_allocateTBE;
+ g_issuePUTXorPUTS; // send data and hold, but do not release on forwarded requests
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition(E, Inv, I) {
+ // don't send data
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ transition(E, Inv_X){
+ fi_sendInvNack;
+ l_popRequestQueue;
+ }
+
+ // for L2 replacements
+ transition(E, Replace, I) {
+ // don't send data
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ transition(E, Fwd_GETX, I) {
+ d_sendDataToRequestor;
+ l_popRequestQueue;
+ }
+
+ // Downgrade to S on a forwarded read; L2 also needs a copy since our copy
+ // is the only valid one.
+ transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+ d_sendDataToRequestor;
+ d2_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ // If we see Fwd_GETS_X this is a FALSE POSITIVE, since we never
+ // modified this block
+ transition(E, {Fwd_GETS_X, Fwd_GETX_X, Fwd_GET_INSTR_X}){
+ // send NACK instead of data
+ e_sendNackToRequestor;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, {Load, Ifetch}) {
+ p_profileRequest;
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store) {
+ p_profileRequest;
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, L1_Replacement, M_I) {
+ i_allocateTBE;
+ g_issuePUTX; // send data, but hold in case forwarded request
+ ff_deallocateL1CacheBlock;
+ }
+
+ // in order to prevent releasing isolation of transactional data (either written to just read) we need to
+ // mark ourselves as a transactional reader (e.g. SS state in L2) or transactional writer (e.g. M state in L2). We need to transition to the same E_I
+ // state as for transactional replacements from E state, and ignore all requests.
+ transition(M, L1_Replacement_XACT, E_I) {
+ q_profileOverflow;
+ qq_xactReplacement;
+ i_allocateTBE;
+ g_issuePUTXorPUTS; // send data, but do not release on forwarded requests
+ ff_deallocateL1CacheBlock;
+ }
+
+ // Writeback complete: the TBE held the data until the L2 acknowledged.
+ transition({M_I, E_I}, WB_Ack, I) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(M, Inv, I) {
+ f_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ // for L2 replacement
+ transition(M, Replace, I) {
+ f_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ transition(M, Inv_X){
+ fi_sendInvNack;
+ l_popRequestQueue;
+ }
+
+ transition(E_I, Inv) {
+ // ack requestor's GETX, but wait for WB_Ack from L2
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // maintain isolation on M or E replacements
+ // took out M_I, since L2 transitions to M upon PUTX, and we should no longer receives invalidates
+ transition(E_I, Inv_X) {
+ fi_sendInvNack;
+ l_popRequestQueue;
+ }
+
+ // allow L2 to get data while we replace
+ transition({M_I, E_I}, Replace, I) {
+ ft_sendDataToL2_fromTBE;
+ s_deallocateTBE;
+ l_popRequestQueue;
+ }
+
+ transition(M, Fwd_GETX, I) {
+ d_sendDataToRequestor;
+ l_popRequestQueue;
+ }
+
+ transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+ d_sendDataToRequestor;
+ d2_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ // Conflicting transactional requests are NACKed to preserve isolation.
+ transition(M, {Fwd_GETX_X, Fwd_GETS_X, Fwd_GET_INSTR_X}) {
+ // send NACK instead of data
+ e_sendNackToRequestor;
+ l_popRequestQueue;
+ }
+
+ // for simplicity we ignore all other requests while we wait for L2 to receive the clean data. Otherwise we will incorrectly transfer
+ // ownership and not mark ourselves as a transactional sharer in the L2 directory
+ transition(E_I, {Fwd_GETX, Fwd_GETS, Fwd_GET_INSTR, Fwd_GETS_X, Fwd_GETX_X, Fwd_GET_INSTR_X}) {
+ // send NACK instead of data
+ e_sendNackToRequestor;
+ l_popRequestQueue;
+ }
+
+ // Data is no longer in the cache, so serve forwarded requests from the TBE.
+ transition(M_I, Fwd_GETX, I) {
+ dt_sendDataToRequestor_fromTBE;
+ s_deallocateTBE;
+ l_popRequestQueue;
+ }
+
+ transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, I) {
+ dt_sendDataToRequestor_fromTBE;
+ d2t_sendDataToL2_fromTBE;
+ s_deallocateTBE;
+ l_popRequestQueue;
+ }
+
+ // don't release isolation on forwarded conflicting requests
+ transition(M_I, {Fwd_GETS_X, Fwd_GETX_X, Fwd_GET_INSTR_X}) {
+ // send NACK instead of data
+ e_sendNackToRequestor;
+ l_popRequestQueue;
+ }
+
+ // Transitions from IS
+ // An Inv that races our GETS moves us to IS_I: the data that eventually
+ // arrives is stale for ownership purposes but still satisfies the load.
+ transition({IS, IS_I}, Inv, IS_I) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Only possible when L2 sends us data in SS state. No conflict is possible, so no need to unblock L2
+ transition(IS, L2_Data_all_Acks, S) {
+ u_writeDataToL1Cache;
+ // unblock L2 because it blocks on GETS
+ j_sendUnblock;
+ uuu_profileTransactionLoadMiss;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Made the L2 block on GETS requests, so we are guaranteed to have no races with GETX
+ // We only get into this transition if the writer had to retry his GETX request that invalidated us, and L2 went back to SS
+ transition(IS_I, L2_Data_all_Acks, S) {
+ u_writeDataToL1Cache;
+ // unblock L2 because it blocks on GETS
+ j_sendUnblock;
+ uuu_profileTransactionLoadMiss;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // for L2 replacements
+ transition({IS, IS_I}, Replace, IS_I) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // These transitions are for when L2 sends us data, because it has exclusive copy, but L1 filter responses have not arrived
+ transition({IS, IS_I}, Ack){
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition({IS, IS_I}, Nack) {
+ r_notifyReceiveNack;
+ q_updateNackCount;
+ o_popIncomingResponseQueue;
+ }
+
+ // IS_I also allowed because L2 Inv beat our GETS request, and now L2 is in NP state, ready to service our GETS.
+ transition({IS, IS_I}, L2_Data, IS_S) {
+ u_writeDataToL1Cache;
+ // This message carries the inverse of the ack count
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IS_S, Ack){
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IS_S, Nack) {
+ r_notifyReceiveNack;
+ q_updateNackCount;
+ o_popIncomingResponseQueue;
+ }
+
+ // All filter responses in and none conflicted: complete the load as Shared.
+ transition(IS_S, Ack_all, S){
+ // tell L2 we succeeded
+ j_sendUnblock;
+ uuu_profileTransactionLoadMiss;
+ h_load_hit;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // retry request from I
+ transition(IS_S, Nack_all, I){
+ ff_deallocateL1CacheBlock;
+ // This is also the final NACK
+ r_notifyReceiveNackFinal;
+ // tell L2 we failed
+ jj_sendUnblockCancel;
+ h_load_conflict;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // L2 is trying to give us exclusive data
+ // we can go to E because L2 is guaranteed to have only copy (ie no races from other L1s possible)
+ transition({IS, IS_I}, L2_Exclusive_Data, IS_E) {
+ u_writeDataToL1Cache;
+ // This message carries the inverse of the ack count
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition({IS, IS_I}, L2_Exclusive_Data_all_Acks, E){
+ u_writeDataToL1Cache;
+ // tell L2 we succeeded
+ jj_sendExclusiveUnblock;
+ uuu_profileTransactionLoadMiss;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IS_E, Ack){
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IS_E, Nack) {
+ r_notifyReceiveNack;
+ q_updateNackCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IS_E, Ack_all, E){
+ // tell L2 we succeeded
+ jj_sendExclusiveUnblock;
+ uuu_profileTransactionLoadMiss;
+ h_load_hit;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // retry request from I
+ transition(IS_E, Nack_all, I){
+ ff_deallocateL1CacheBlock;
+ // This is also the final NACK
+ r_notifyReceiveNackFinal;
+ // need to tell L2 we failed
+ jj_sendUnblockCancel;
+ h_load_conflict;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // Normal case - when L2 doesn't have exclusive line, but L1 has line.
+ // We got NACKed . Try again in state I
+ // IMPORTANT: filters are NOT checked when L2 is in SS, because nobody has modified the line.
+ // For this transition we only receive NACKs from the exclusive writer
+ transition({IS, IS_I}, Nack_all, I) {
+ // This is also the final NACK
+ r_notifyReceiveNackFinal;
+ // L2 is blocked when L1 is exclusive
+ jj_sendUnblockCancel;
+ h_load_conflict;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ transition({IS, IS_I}, Inv_X) {
+ fi_sendInvNack;
+ l_popRequestQueue;
+ }
+
+ transition(IS, DataS_fromL1, S) {
+ u_writeDataToL1Cache;
+ j_sendUnblock;
+ uuu_profileTransactionLoadMiss;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // This occurs when there is a race between our GETS and another L1's GETX, and the GETX wins
+ // The L2 is now blocked because our request was forwarded to exclusive L1 (ie MT_IIB)
+ transition(IS_I, DataS_fromL1, S) {
+ u_writeDataToL1Cache;
+ j_sendUnblock;
+ uuu_profileTransactionLoadMiss;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from IM
+ // An Inv that races our GETX drops any S copy we had: SM falls back to IM.
+ transition({IM, SM}, Inv, IM) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ transition({IM, SM}, Inv_X) {
+ fi_sendInvNack;
+ l_popRequestQueue;
+ }
+
+ // for L2 replacements
+ transition({IM, SM}, Replace, IM) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // only possible when L1 exclusive sends us the line
+ transition(IM, Data_all_Acks, M) {
+ u_writeDataToL1Cache;
+ jj_sendExclusiveUnblock;
+ uuu_profileTransactionStoreMiss;
+ hh_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // L2 is trying to give us data
+ // Don't go to SM because we do not want a S copy on failure. This might cause conflicts for older writers that
+ // nacked us.
+ transition(IM, L2_Data, IM_M) {
+ u_writeDataToL1Cache;
+ // This message carries the inverse of the ack count
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, L2_Data_all_Acks, M){
+ u_writeDataToL1Cache;
+ // tell L2 we succeeded
+ jj_sendExclusiveUnblock;
+ uuu_profileTransactionStoreMiss;
+ hh_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM_M, Ack){
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM_M, Nack) {
+ r_notifyReceiveNack;
+ q_updateNackCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM_M, Ack_all, M){
+ // tell L2 we succeeded
+ jj_sendExclusiveUnblock;
+ uuu_profileTransactionStoreMiss;
+ hh_store_hit;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // retry request from I
+ transition(IM_M, Nack_all, I){
+ ff_deallocateL1CacheBlock;
+ // This is also the final NACK
+ r_notifyReceiveNackFinal;
+ // need to tell L2 we failed
+ jj_sendUnblockCancel;
+ hh_store_conflict;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // transitions from SM
+ transition({SM, IM}, Ack) {
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ // instead of Data we receive Nacks
+ transition({SM, IM}, Nack) {
+ r_notifyReceiveNack;
+ // mark this request as being NACKed
+ q_updateNackCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(SM, Ack_all, M) {
+ jj_sendExclusiveUnblock;
+ uuu_profileTransactionStoreMiss;
+ hh_store_hit;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // retry in state S
+ transition(SM, Nack_all, S){
+ // This is the final nack
+ r_notifyReceiveNackFinal;
+ // unblock the L2
+ jj_sendUnblockCancel;
+ hh_store_conflict;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // retry in state I
+ transition(IM, Nack_all, I){
+ // This is the final NACK
+ r_notifyReceiveNackFinal;
+ // unblock the L2
+ jj_sendUnblockCancel;
+ hh_store_conflict;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+}
+
+
+
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm b/src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm
new file mode 100644
index 000000000..9085ae33f
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm
@@ -0,0 +1,2123 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
+ *
+ */
+
+machine(L2Cache, "MESI Directory L2 Cache CMP") {
+
+ // L2 BANK QUEUES
+ // From local bank of L2 cache TO the network
+ MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="2", ordered="false"; // this L2 bank -> Memory
+ MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false"; // this L2 bank -> a local L1
+ MessageBuffer responseFromL2Cache, network="To", virtual_network="3", ordered="false"; // this L2 bank -> a local L1 || Memory
+
+ // FROM the network to this local bank of L2 cache
+ MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false"; // a local L1 -> this L2 bank
+ MessageBuffer responseToL2Cache, network="From", virtual_network="3", ordered="false"; // a local L1 || Memory -> this L2 bank
+ MessageBuffer unblockToL2Cache, network="From", virtual_network="4", ordered="false"; // a local L1 || Memory -> this L2 bank
+
+ // STATES
+ enumeration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
+ // Base states
+ NP, desc="Not present in either cache";
+ SS, desc="L2 cache entry Shared, also present in one or more L1s";
+ M, desc="L2 cache entry Modified, not present in any L1s", format="!b";
+ MT, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";
+
+ // L2 replacement
+ M_I, desc="L2 cache replacing, have all acks, sent dirty data to memory, waiting for ACK from memory";
+ MT_I, desc="L2 cache replacing, getting data from exclusive";
+ MCT_I, desc="L2 cache replacing, clean in L2, getting data or ack from exclusive";
+ I_I, desc="L2 replacing clean data, need to inv sharers and then drop data";
+ S_I, desc="L2 replacing dirty data, collecting acks from L1s";
+
+ // Transient States for fetching data from memory
+ ISS, desc="L2 idle, got single L1_GETS, issued memory fetch, have not seen response yet";
+ IS, desc="L2 idle, got L1_GET_INSTR or multiple L1_GETS, issued memory fetch, have not seen response yet";
+ IM, desc="L2 idle, got L1_GETX, issued memory fetch, have not seen response(s) yet";
+
+ // Blocking states
+ SS_MB, desc="Blocked for L1_GETX from SS";
+ SS_SSB, desc="Blocked for L1_GETS from SS";
+ MT_MB, desc="Blocked for L1_GETX from MT";
+ M_MB, desc="Blocked for L1_GETX from M";
+ ISS_MB, desc="Blocked for L1_GETS or L1_GETX from NP, received Mem Data";
+ IS_SSB, desc="Blocked for L1_GET_INSTR from NP, received Mem Data";
+ M_SSB, desc="Blocked for L1_GET_INSTR from M";
+
+ MT_IIB, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
+ MT_IB, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
+ MT_SB, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";
+
+ // for resolving PUTX/PUTS races
+ PB_MT, desc="Going to MT, got data and unblock, waiting for PUT";
+ PB_SS, desc="Going to SS, got unblock, waiting for PUT";
+ PB_MT_IB, desc="Blocked from MT, got unblock, waiting for data and PUT";
+
+ }
+
+ // EVENTS
+ enumeration(Event, desc="L2 Cache events") {
+ // L2 events
+
+ // events initiated by the local L1s
+ L1_GET_INSTR, desc="a L1I GET INSTR request for a block maped to us";
+ L1_GET_INSTR_ESCAPE, desc="a L1I GET INSTR in an escape action request for a block mapped to us";
+ L1_GETS, desc="a L1D GETS request for a block maped to us";
+ L1_GETS_ESCAPE, desc="a L1D GETS in an escape action request for a block mapped to us";
+ L1_GETX, desc="a L1D GETX request for a block maped to us";
+ L1_GETX_ESCAPE, desc="a L1D GETX in an escape action request for a block mapped to us";
+ L1_UPGRADE, desc="a L1D GETX request for a block maped to us";
+
+ L1_PUTX, desc="L1 replacing data";
+ L1_PUTX_old, desc="L1 replacing data, but no longer sharer";
+ L1_PUTS, desc="L1 replacing clean data";
+ L1_PUTS_old, desc="L1 replacing clean data, but no longer sharer";
+ L1_PUT_PENDING, desc="L1 PUT msg pending (recycled)";
+
+ Fwd_L1_GETX, desc="L1 did not have data, so we supply";
+ Fwd_L1_GETS, desc="L1 did not have data, so we supply";
+ Fwd_L1_GET_INSTR, desc="L1 did not have data, so we supply";
+
+ // events initiated by this L2
+ L2_Replacement, desc="L2 Replacement", format="!r";
+ L2_Replacement_XACT, desc="L2 Replacement of trans. data", format="!r";
+ L2_Replacement_clean, desc="L2 Replacement, but data is clean", format="!r";
+ L2_Replacement_clean_XACT, desc="L2 Replacement of trans. data, but data is clean", format="!r";
+
+ // events from memory controller
+ Mem_Data, desc="data from memory", format="!r";
+ Mem_Ack, desc="ack from memory", format="!r";
+
+ // M->S data writeback
+ WB_Data, desc="data from L1";
+ WB_Data_clean, desc="clean data from L1";
+ Ack, desc="writeback ack";
+ Ack_all, desc="writeback ack";
+ // For transactional memory
+ Nack, desc="filter indicates conflict";
+ Nack_all, desc="all filters have responded, at least one conflict";
+
+ Unblock, desc="Unblock from L1 requestor";
+ Unblock_Cancel, desc="Unblock from L1 requestor (FOR XACT MEMORY)";
+ Exclusive_Unblock, desc="Unblock from L1 requestor";
+
+ Unblock_WaitPUTold, desc="Unblock from L1 requestor, last requestor was replacing so wait for PUT msg";
+ Exclusive_Unblock_WaitPUTold, desc="Unblock from L1 requestor, last requestor was replacing so wait for PUT msg";
+
+ }
+
+ // TYPES
+
+ // CacheEntry
+ // Per-line L2 directory state: sharer list, exclusive owner, data, and the
+ // LogTM transactional read/write bits.
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ NetDest Sharers, desc="tracks the L1 shares on-chip";
+ MachineID Exclusive, desc="Exclusive holder of block";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, default="false", desc="data is dirty";
+
+ bool Trans, desc="dummy bit for debugging";
+ bool Read, desc="LogTM R bit";
+ bool Write, desc="LogTM W bit";
+ bool L2Miss, desc="Was this block sourced from memory";
+ int L1PutsPending, default="0", desc="how many PUT_ are pending for this entry (being recyled)";
+ }
+
+ // TBE fields
+ // Transient-request bookkeeping while a line is in flight (miss fill,
+ // writeback, or invalidation-ack collection).
+ structure(TBE, desc="...") {
+ Address Address, desc="Line address for this TBE";
+ Address PhysicalAddress, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool Dirty, default="false", desc="Data is Dirty";
+
+ NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
+ MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
+ bool isPrefetch, desc="Set if this was caused by a prefetch";
+
+ int pendingAcks, desc="number of pending acks for invalidates during writeback";
+ bool nack, default="false", desc="has this request been NACKed?";
+ }
+
+ // Interfaces implemented in C++ (outside SLICC); only the signatures used
+ // by this machine are declared here.
+ external_type(CacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ void setMRU(Address);
+ }
+
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ // Per-bank TBE table and cache array for this L2 bank.
+ TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
+
+ CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
+
+ // inclusive cache, returns L2 entries only
+ // Caller must ensure the tag is present; indexing a missing tag is invalid.
+ Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
+ return L2cacheMemory[addr];
+ }
+
+ // Update the access permission for a line, silently ignoring lines whose
+ // tag is not present (e.g. TBE-only transient states).
+ // NOTE(review): `return` of a void-typed call in a void function — SLICC
+ // accepts this, but the `return` keyword is redundant here.
+ void changeL2Permission(Address addr, AccessPermission permission) {
+ if (L2cacheMemory.isTagPresent(addr)) {
+ return L2cacheMemory.changePermission(addr, permission);
+ }
+ }
+
+ // Human-readable name of a request type, for debug/trace output.
+ string getCoherenceRequestTypeStr(CoherenceRequestType type) {
+ return CoherenceRequestType_to_string(type);
+ }
+
+ // True if this L2 bank currently holds a tag for the line.
+ bool isL2CacheTagPresent(Address addr) {
+ return (L2cacheMemory.isTagPresent(addr));
+ }
+
+ // True if the requestor is the ONLY remaining sharer of the line.
+ // Precondition (asserted): the requestor is in the sharer list.
+ bool isOneSharerLeft(Address addr, MachineID requestor) {
+ assert(L2cacheMemory[addr].Sharers.isElement(requestor));
+ return (L2cacheMemory[addr].Sharers.count() == 1);
+ }
+
+ // Report whether the given L1 is recorded as a sharer of the line.
+ // A line with no L2 tag trivially has no sharers.
+ bool isSharer(Address addr, MachineID requestor) {
+ if (L2cacheMemory.isTagPresent(addr) == false) {
+ return false;
+ }
+ return L2cacheMemory[addr].Sharers.isElement(requestor);
+ }
+
+ // Add an L1 to the line's sharer list.
+ // The assert checks the L1 actually maps this address to this L2 bank.
+ void addSharer(Address addr, MachineID requestor) {
+ DEBUG_EXPR(machineID);
+ DEBUG_EXPR(requestor);
+ DEBUG_EXPR(addr);
+ assert(map_L1CacheMachId_to_L2Cache(addr, requestor) == machineID);
+ L2cacheMemory[addr].Sharers.add(requestor);
+ }
+
+ // Current protocol state of a line: the TBE's transient state wins over the
+ // cache entry's state; a line known to neither is NP.
+ State getState(Address addr) {
+ if(L2_TBEs.isPresent(addr)) {
+ return L2_TBEs[addr].TBEState;
+ } else if (isL2CacheTagPresent(addr)) {
+ return getL2CacheEntry(addr).CacheState;
+ }
+ return State:NP;
+ }
+
+ // Human-readable name of the line's current state, for debug/trace output.
+ string getStateStr(Address addr) {
+ return L2Cache_State_to_string(getState(addr));
+ }
+
+ // when is this called
+ // Write the new state into the TBE (if any) and the cache entry (if any),
+ // and derive the entry's access permission from the new state: SS is
+ // read-only, M is read-write, MT means the L2 copy is stale, and every
+ // other (transient) state is Busy.
+ void setState(Address addr, State state) {
+
+ // MUST CHANGE
+ if (L2_TBEs.isPresent(addr)) {
+ L2_TBEs[addr].TBEState := state;
+ }
+
+ if (isL2CacheTagPresent(addr)) {
+ getL2CacheEntry(addr).CacheState := state;
+
+ // Set permission
+ if (state == State:SS ) {
+ changeL2Permission(addr, AccessPermission:Read_Only);
+ } else if (state == State:M) {
+ changeL2Permission(addr, AccessPermission:Read_Write);
+ } else if (state == State:MT) {
+ changeL2Permission(addr, AccessPermission:Stale);
+ } else {
+ changeL2Permission(addr, AccessPermission:Busy);
+ }
+ }
+ }
+
+ // Map an incoming L1 request message to a protocol Event.
+ // CAUTION: not a pure function — for PUTX/PUTS it also decrements the
+ // entry's L1PutsPending counter, and while PUTs are pending in a base state
+ // every other request type is deferred via L1_PUT_PENDING.
+ Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) {
+ if (L2cacheMemory.isTagPresent(addr)){ /* Present */
+ if(getL2CacheEntry(addr).L1PutsPending > 0 && /* At least one PUT pending */
+ (getL2CacheEntry(addr).CacheState == State:SS || getL2CacheEntry(addr).CacheState == State:MT || getL2CacheEntry(addr).CacheState == State:M )) { /* Base state */
+
+ /* Only allow PUTX/PUTS to go on */
+ if (type != CoherenceRequestType:PUTX &&
+ type != CoherenceRequestType:PUTS) {
+ return Event:L1_PUT_PENDING; // Don't serve any req until the wb is serviced
+ }
+ }
+ }
+ if(type == CoherenceRequestType:GETS) {
+ return Event:L1_GETS;
+ } else if(type == CoherenceRequestType:GETS_ESCAPE) {
+ return Event:L1_GETS_ESCAPE;
+ } else if(type == CoherenceRequestType:GET_INSTR) {
+ return Event:L1_GET_INSTR;
+ } else if(type == CoherenceRequestType:GET_INSTR_ESCAPE) {
+ return Event:L1_GET_INSTR_ESCAPE;
+ } else if (type == CoherenceRequestType:GETX) {
+ return Event:L1_GETX;
+ } else if(type == CoherenceRequestType:GETX_ESCAPE) {
+ return Event:L1_GETX_ESCAPE;
+ } else if (type == CoherenceRequestType:UPGRADE) {
+ // An UPGRADE from a non-sharer degenerates to a plain GETX.
+ if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).Sharers.isElement(requestor) ) {
+ return Event:L1_UPGRADE;
+ } else {
+ return Event:L1_GETX;
+ }
+ } else if (type == CoherenceRequestType:PUTX) {
+ if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).L1PutsPending > 0) {
+ getL2CacheEntry(addr).L1PutsPending := getL2CacheEntry(addr).L1PutsPending - 1;
+ DEBUG_EXPR("PUTX PutSPending ");
+ DEBUG_EXPR(getL2CacheEntry(addr).L1PutsPending);
+ }
+ // A PUT from an L1 no longer in the sharer list is a stale ("old") PUT.
+ if (isSharer(addr, requestor)) {
+ return Event:L1_PUTX;
+ } else {
+ return Event:L1_PUTX_old;
+ }
+ } else if (type == CoherenceRequestType:PUTS) {
+ if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).L1PutsPending > 0) {
+ getL2CacheEntry(addr).L1PutsPending := getL2CacheEntry(addr).L1PutsPending - 1;
+ DEBUG_EXPR("PUTS PutSPending ");
+ DEBUG_EXPR(getL2CacheEntry(addr).L1PutsPending);
+ }
+ if (isSharer(addr, requestor)) {
+ return Event:L1_PUTS;
+ } else {
+ return Event:L1_PUTS_old;
+ }
+ } else {
+ DEBUG_EXPR(addr);
+ DEBUG_EXPR(type);
+ error("Invalid L1 forwarded request type");
+ }
+ }
+
+ // ** OUT_PORTS **
+ // Outgoing message buffers: requests to local L1s, requests to the
+ // directory/memory, and data/ack responses.
+
+ out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
+ out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache);
+ out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);
+
+
+ // Response IntraChip L2 Network - response msg to this particular L2 bank
+ // Classifies responses into events. L1-sourced ACK/NACK responses are
+ // counted against the TBE's pendingAcks; the final response triggers the
+ // *_all variant (Nack_all if any NACK was recorded along the way).
+ in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
+ if (responseIntraChipL2Network_in.isReady()) {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ // test wether it's from a local L1 or an off chip source
+ assert(in_msg.Destination.isElement(machineID));
+ if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
+ if(in_msg.Type == CoherenceResponseType:DATA) {
+ if (in_msg.Dirty) {
+ trigger(Event:WB_Data, in_msg.Address);
+ } else {
+ trigger(Event:WB_Data_clean, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
+ // check whether any previous responses have been NACKs
+ if(L2_TBEs[in_msg.Address].nack == false) {
+ trigger(Event:Ack_all, in_msg.Address);
+ }
+ else {
+ // at least one nack received
+ trigger(Event:Nack_all, in_msg.Address);
+ }
+ } else {
+ trigger(Event:Ack, in_msg.Address);
+ }
+ // for NACKs
+ } else if (in_msg.Type == CoherenceResponseType:NACK) {
+ if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
+ trigger(Event:Nack_all, in_msg.Address);
+ } else {
+ trigger(Event:Nack, in_msg.Address);
+ }
+ } else {
+ error("unknown message type");
+ }
+
+ } else { // external message
+ if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+ trigger(Event:Mem_Data, in_msg.Address); // L2 now has data and all off-chip acks
+ } else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
+ trigger(Event:Mem_Ack, in_msg.Address); // L2 now has data and all off-chip acks
+ } else {
+ error("unknown message type");
+ }
+ }
+ }
+ } // if not ready, do nothing
+ }
+
+ // L1 Request
+ // Requests from local L1s. If the bank has neither the line nor a free way,
+ // a victim is chosen (cacheProbe) and a replacement event is triggered on
+ // the VICTIM's address first; the original request stays queued.
+ in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
+ if(L1RequestIntraChipL2Network_in.isReady()) {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ /*
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.Requestor);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(in_msg.Destination);
+ */
+ assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
+ assert(in_msg.Destination.isElement(machineID));
+ if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+ // The L2 contains the block, so proceeded with handling the request
+ trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
+ } else {
+ if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+ // L2 does't have the line, but we have space for it in the L2
+ trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
+ } else {
+ // No room in the L2, so we need to make room before handling the request
+ // The replacement event is split four ways on (dirty, transactional).
+ if (L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Dirty ) {
+ // check whether block is transactional
+ if(L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Trans == true){
+ trigger(Event:L2_Replacement_XACT, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ else{
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ } else {
+ // check whether block is transactional
+ if(L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Trans == true){
+ trigger(Event:L2_Replacement_clean_XACT, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ else{
+ trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Unblock port: L1s signal (EXCLUSIVE_)UNBLOCK once they have accepted data.
+ // When RemoveLastOwnerFromDir is set, the previous owner must be dropped
+ // from the directory; if that owner is still listed as a sharer its PUT has
+ // not arrived yet, so the *_WaitPUTold event variants are triggered instead.
+ in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
+   if(L1unblockNetwork_in.isReady()) {
+     peek(L1unblockNetwork_in, ResponseMsg) {
+       assert(in_msg.Destination.isElement(machineID));
+       if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
+         if (in_msg.RemoveLastOwnerFromDir == true) {
+           // old owner still in the sharer list -> its PUT has not arrived yet
+           if (isSharer(in_msg.Address,in_msg.LastOwnerID)) {
+             trigger(Event:Exclusive_Unblock_WaitPUTold, in_msg.Address);
+           }
+           else { // PUT arrived, requestor already removed from dir
+             trigger(Event:Exclusive_Unblock, in_msg.Address);
+           }
+         }
+         else {
+           trigger(Event:Exclusive_Unblock, in_msg.Address);
+         }
+       } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+         if (in_msg.RemoveLastOwnerFromDir == true) {
+           // same PUT-race handling as the exclusive case above
+           if (isSharer(in_msg.Address,in_msg.LastOwnerID)) {
+             trigger(Event:Unblock_WaitPUTold, in_msg.Address);
+           }
+           else { // PUT arrived, requestor already removed from dir
+             trigger(Event:Unblock, in_msg.Address);
+           }
+         }
+         else {
+           trigger(Event:Unblock, in_msg.Address);
+         }
+       } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_CANCEL) {
+         trigger(Event:Unblock_Cancel, in_msg.Address);
+       } else {
+         error("unknown unblock message");
+       }
+     }
+   }
+ }
+
+ // ACTIONS
+
+ // Issue a GETS to the home directory to fetch the missing line from memory.
+ action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+       // addressing: the directory for this address is the destination
+       out_msg.Address := address;
+       out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+       out_msg.Requestor := machineID;
+       out_msg.Destination.add(map_Address_to_Directory(address));
+       // a plain GETS control message suffices for the fetch
+       out_msg.Type := CoherenceRequestType:GETS;
+       out_msg.MessageSize := MessageSizeType:Control;
+     }
+   }
+ }
+
+ // Forward the pending L1 request to the L1 that holds the block exclusively;
+ // the owner will respond to the original requestor (Requestor is preserved).
+ action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+       out_msg.Address := address;
+       out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+       out_msg.Type := in_msg.Type;
+       out_msg.Requestor := in_msg.Requestor;
+       out_msg.Destination.add(L2cacheMemory[address].Exclusive);
+       out_msg.MessageSize := MessageSizeType:Request_Control;
+       // also pass along timestamp
+       out_msg.Timestamp := in_msg.Timestamp;
+       APPEND_TRANSITION_COMMENT(" ");
+       APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+     }
+   }
+ }
+
+ // Write a replaced block back to the directory/memory, sourcing the
+ // payload directly from the L2 cache entry.
+ action(c_exclusiveReplacement, "c", desc="Send data to memory") {
+   enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+     out_msg.Address := address;
+     out_msg.Sender := machineID;
+     out_msg.Destination.add(map_Address_to_Directory(address));
+     out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+     out_msg.MessageSize := MessageSizeType:Response_Data;
+     // payload comes straight from the cache entry
+     out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+     out_msg.Dirty := getL2CacheEntry(address).Dirty;
+   }
+ }
+
+ // Write a replaced block back to the directory/memory, sourcing the
+ // payload from the TBE snapshot rather than the cache entry.
+ action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
+   enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+     out_msg.Address := address;
+     out_msg.Sender := machineID;
+     out_msg.Destination.add(map_Address_to_Directory(address));
+     out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+     out_msg.MessageSize := MessageSizeType:Response_Data;
+     // payload comes from the TBE copy of the block
+     out_msg.DataBlk := L2_TBEs[address].DataBlk;
+     out_msg.Dirty := L2_TBEs[address].Dirty;
+   }
+ }
+
+
+ //************Transactional memory actions **************
+ //broadcast a write filter lookup request to all L1s except for the requestor
+ // The L1s reply ACK/NACK directly to the original requestor (out_msg.Requestor
+ // is set to the requestor, not to this L2).
+ action(a_checkL1WriteFiltersExceptRequestor, "wr", desc="Broadcast a Write Filter lookup request"){
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+       out_msg.Address := address;
+       out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+       out_msg.Type := CoherenceRequestType:CHECK_WRITE_FILTER;
+       // make L1s forward responses to requestor
+       out_msg.Requestor := in_msg.Requestor;
+       out_msg.Destination := getLocalL1IDs(machineID);
+       // don't broadcast to requestor
+       out_msg.Destination.remove(in_msg.Requestor);
+       out_msg.MessageSize := MessageSizeType:Response_Control;
+       // also pass along timestamp of requestor
+       out_msg.Timestamp := in_msg.Timestamp;
+       APPEND_TRANSITION_COMMENT(" ");
+       APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+     }
+   }
+ }
+
+ //broadcast a read + write filter lookup request to all L1s except for the requestor
+ // Same shape as the write-filter broadcast above, but asks L1s to check
+ // both read and write filters; responses go straight to the requestor.
+ action(a_checkL1ReadWriteFiltersExceptRequestor, "rwr", desc="Broadcast a Read + Write Filter lookup request"){
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+       out_msg.Address := address;
+       out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+       out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
+       // make L1 forward responses to requestor
+       out_msg.Requestor := in_msg.Requestor;
+       out_msg.Destination := getLocalL1IDs(machineID);
+       // don't broadcast to requestor
+       out_msg.Destination.remove(in_msg.Requestor);
+       out_msg.MessageSize := MessageSizeType:Response_Control;
+       // also pass along timestamp of requestor
+       out_msg.Timestamp := in_msg.Timestamp;
+       APPEND_TRANSITION_COMMENT(" ");
+       APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+     }
+   }
+ }
+
+ // These are to send out filter checks to those NACKers in our sharers or exclusive ptr list
+ // Instead of a full broadcast, target only the L1s recorded as potential
+ // conflictors: the Sharers list when the L2Miss bit is set (sharers then
+ // hold the previous NACKer set - see m_copyNackersIntoSharers), or the
+ // exclusive owner otherwise.
+ action(a_checkNackerL1WriteFiltersExceptRequestor, "wrn", desc="Broadcast a Write Filter lookup request"){
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     // check if the L2 miss bit is set - if it is, send check filter requests to those in our Sharers list only
+     if(getL2CacheEntry(address).L2Miss == true){
+       // check whether we are the only sharer on the list. If so, no need to broadcast.
+       if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
+         // no filter check needed
+         APPEND_TRANSITION_COMMENT("L2 Miss: No need to check L1 write filter ");
+         APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+       }
+       else{
+         enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+           out_msg.Address := address;
+           out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+           out_msg.Type := CoherenceRequestType:CHECK_WRITE_FILTER;
+           // make L1s forward responses to requestor
+           out_msg.Requestor := in_msg.Requestor;
+           assert(getL2CacheEntry(address).Sharers.count() > 0);
+           out_msg.Destination := getL2CacheEntry(address).Sharers;
+           // don't broadcast to requestor
+           out_msg.Destination.remove(in_msg.Requestor);
+           out_msg.MessageSize := MessageSizeType:Response_Control;
+           // also pass along timestamp of requestor
+           out_msg.Timestamp := in_msg.Timestamp;
+           APPEND_TRANSITION_COMMENT(" ");
+           APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+           APPEND_TRANSITION_COMMENT(" requestor: ");
+           APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+           APPEND_TRANSITION_COMMENT(" dest: ");
+           APPEND_TRANSITION_COMMENT(out_msg.Destination);
+         }
+       }
+     }
+     else{
+       // This is a read request, so check whether we have a writer
+       if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
+         enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+           // we have a writer, and it is not us
+           out_msg.Address := address;
+           out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+           out_msg.Type := CoherenceRequestType:CHECK_WRITE_FILTER;
+           // make L1s forward responses to requestor
+           out_msg.Requestor := in_msg.Requestor;
+           out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
+           out_msg.MessageSize := MessageSizeType:Response_Control;
+           // also pass along timestamp of requestor
+           out_msg.Timestamp := in_msg.Timestamp;
+           APPEND_TRANSITION_COMMENT(" ");
+           APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+           APPEND_TRANSITION_COMMENT(" requestor: ");
+           APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+           APPEND_TRANSITION_COMMENT(" dest: ");
+           APPEND_TRANSITION_COMMENT(out_msg.Destination);
+         }
+       }
+       else{
+         // NOTE(review): this leg also covers Sharers.count() > 0; the assert
+         // below documents that case as believed unreachable - confirm.
+         APPEND_TRANSITION_COMMENT("L1 replacement: No need to check L1 write filter");
+         APPEND_TRANSITION_COMMENT(" requestor: ");
+         APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+         APPEND_TRANSITION_COMMENT(" exclusive: ");
+         APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
+         // we should not have any sharers
+         assert( getL2CacheEntry(address).Sharers.count() == 0 );
+       }
+     }
+   }
+ }
+
+ // Read+write-filter variant of the NACKer-targeted check above: queries
+ // only the recorded conflictors (Sharers when L2Miss is set, otherwise
+ // the exclusive owner) instead of broadcasting to all L1s.
+ action(a_checkNackerL1ReadWriteFiltersExceptRequestor, "wrrn", desc="Broadcast a Read + Write Filter lookup request"){
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     // check if the L2 miss bit is set - if it is, send check filter requests to those in our Sharers list only
+     if(getL2CacheEntry(address).L2Miss == true){
+       // check whether we are the only sharer on the list. If so, no need to broadcast.
+       if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
+         // no filter check needed
+         APPEND_TRANSITION_COMMENT("L2 Miss: No need to check L1 read/write filter");
+         APPEND_TRANSITION_COMMENT(" requestor: ");
+         APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+       }
+       else{
+         enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+           out_msg.Address := address;
+           out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+           out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
+           // make L1s forward responses to requestor
+           out_msg.Requestor := in_msg.Requestor;
+           assert(getL2CacheEntry(address).Sharers.count() > 0);
+           out_msg.Destination := getL2CacheEntry(address).Sharers;
+           // don't broadcast to requestor
+           out_msg.Destination.remove(in_msg.Requestor);
+           out_msg.MessageSize := MessageSizeType:Response_Control;
+           // also pass along timestamp of requestor
+           out_msg.Timestamp := in_msg.Timestamp;
+           APPEND_TRANSITION_COMMENT(" ");
+           APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+           APPEND_TRANSITION_COMMENT(" requestor: ");
+           APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+           APPEND_TRANSITION_COMMENT(" dest: ");
+           APPEND_TRANSITION_COMMENT(out_msg.Destination);
+         }
+       }
+     }
+     else{
+       // This is a write request, so check whether we have readers not including us or a writer that is not us
+       if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
+         enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+           // we have a writer, and it is not us
+           out_msg.Address := address;
+           out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+           out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
+           // make L1s forward responses to requestor
+           out_msg.Requestor := in_msg.Requestor;
+           out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
+           out_msg.MessageSize := MessageSizeType:Response_Control;
+           // also pass along timestamp of requestor
+           out_msg.Timestamp := in_msg.Timestamp;
+           APPEND_TRANSITION_COMMENT(" ");
+           APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+           APPEND_TRANSITION_COMMENT(" requestor: ");
+           APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+           APPEND_TRANSITION_COMMENT(" dest: ");
+           APPEND_TRANSITION_COMMENT(out_msg.Destination);
+         }
+       }
+       else if(getL2CacheEntry(address).Sharers.count() > 0){
+         // this should never happen - since we allow silent S replacements but we always track exclusive L1
+         // (the assert makes the branch fatal; the code below it is dead unless the assert is removed)
+         assert(false);
+         if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
+           // no filter check needed
+           APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+           APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter - we are only reader");
+         }
+         else{
+           // reader(s) exist that is not us
+           enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+             out_msg.Address := address;
+             out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+             out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
+             // make L1s forward responses to requestor
+             out_msg.Requestor := in_msg.Requestor;
+             out_msg.Destination := getL2CacheEntry(address).Sharers;
+             // don't check our own filter
+             out_msg.Destination.remove(in_msg.Requestor);
+             out_msg.MessageSize := MessageSizeType:Response_Control;
+             // also pass along timestamp of requestor
+             out_msg.Timestamp := in_msg.Timestamp;
+             APPEND_TRANSITION_COMMENT(" ");
+             APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+             APPEND_TRANSITION_COMMENT(" requestor: ");
+             APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+             APPEND_TRANSITION_COMMENT(" dest: ");
+             APPEND_TRANSITION_COMMENT(out_msg.Destination);
+           }
+         }
+       }
+       else{
+         // no sharers and we are (or there is no) exclusive owner: nothing to check
+         APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+         APPEND_TRANSITION_COMMENT(" ");
+         APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
+         APPEND_TRANSITION_COMMENT(" ");
+         APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
+         APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter");
+       }
+     }
+   }
+ }
+
+ // send data but force L1 requestor to wait for filter responses
+ // AckCount is seeded negative with the number of other L1s on the chip;
+ // the requestor presumably counts incoming filter ACK/NACKs up toward
+ // zero before using the data - confirm against the L1 controller.
+ action(f_sendDataToGetSRequestor, "f", desc="Send data from cache to reqeustor") {
+   enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+     out_msg.Address := address;
+     out_msg.PhysicalAddress := L2_TBEs[address].PhysicalAddress;
+     out_msg.Type := CoherenceResponseType:L2_DATA;
+     out_msg.Sender := machineID;
+     out_msg.Destination := L2_TBEs[address].L1_GetS_IDs;  // internal nodes
+     out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+     out_msg.Dirty := getL2CacheEntry(address).Dirty;
+     out_msg.MessageSize := MessageSizeType:Response_Data;
+
+     // wait for the filter responses from other L1s
+     out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
+   }
+ }
+
+ // send exclusive data
+ // Same as f_sendDataToGetSRequestor, but grants the line exclusively
+ // (L2_DATA_EXCLUSIVE) to the queued GetS requestors.
+ action(f_sendExclusiveDataToGetSRequestor, "fx", desc="Send data from cache to reqeustor") {
+   enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+     out_msg.Address := address;
+     out_msg.PhysicalAddress := L2_TBEs[address].PhysicalAddress;
+     out_msg.Type := CoherenceResponseType:L2_DATA_EXCLUSIVE;
+     out_msg.Sender := machineID;
+     out_msg.Destination := L2_TBEs[address].L1_GetS_IDs;  // internal nodes
+     out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+     out_msg.Dirty := getL2CacheEntry(address).Dirty;
+     out_msg.MessageSize := MessageSizeType:Response_Data;
+
+     // wait for the filter responses from other L1s
+     out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
+   }
+ }
+
+ // Send data to the single queued GetX requestor recorded in the TBE;
+ // like the GetS variants, the negative AckCount makes the requestor wait
+ // for filter responses from all other L1s.
+ action(f_sendDataToGetXRequestor, "fxx", desc="Send data from cache to reqeustor") {
+   enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+     out_msg.Address := address;
+     out_msg.PhysicalAddress := L2_TBEs[address].PhysicalAddress;
+     out_msg.Type := CoherenceResponseType:L2_DATA;
+     out_msg.Sender := machineID;
+     out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID);  // internal nodes
+     out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+     out_msg.Dirty := getL2CacheEntry(address).Dirty;
+     out_msg.MessageSize := MessageSizeType:Response_Data;
+
+     // wait for the filter responses from other L1s
+     out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
+   }
+ }
+
+ // Send L2_DATA for the peeked L1 request and compute how many filter-check
+ // responses the requestor must collect. AckCount is sent non-positive; the
+ // requestor presumably accumulates incoming ACK/NACK counts toward zero
+ // (confirm against the L1 controller). Escape-action requests bypass
+ // conflict detection, so they need no acks. A dead "broadcast to all L1s"
+ // alternative that was guarded by if(false) has been removed; only the
+ // live targeted-check logic remains.
+ action(f_sendDataToRequestor, "fd", desc="Send data from cache to reqeustor") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+       out_msg.Address := address;
+       out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+       out_msg.Type := CoherenceResponseType:L2_DATA;
+       out_msg.Sender := machineID;
+       out_msg.Destination.add(in_msg.Requestor);
+       out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+       out_msg.Dirty := getL2CacheEntry(address).Dirty;
+       out_msg.MessageSize := MessageSizeType:Response_Data;
+
+       // different ack counts for different situations
+       if(in_msg.Type == CoherenceRequestType:GET_INSTR_ESCAPE || in_msg.Type == CoherenceRequestType:GETX_ESCAPE){
+         // escape actions bypass conflict detection - no acks needed
+         out_msg.AckCount := 0;
+       }
+       else{
+         // differentiate btw read and write requests
+         if(in_msg.Type == CoherenceRequestType:GET_INSTR){
+           if(getL2CacheEntry(address).L2Miss == true){
+             // L2Miss set: Sharers holds the previous NACKer set.
+             // check whether we are the only sharer on the list. If so, no need to broadcast.
+             if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
+               // no filter check needed
+               APPEND_TRANSITION_COMMENT("We are only sharer");
+               out_msg.AckCount := 0;
+             }
+             else{
+               // wait for ACKs from the other NACKers
+               out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
+               if(isSharer(address, in_msg.Requestor)){
+                 // don't include us
+                 out_msg.AckCount := out_msg.AckCount + 1;
+               }
+               APPEND_TRANSITION_COMMENT("Nackers exist");
+             }
+           }
+           else{
+             // This is a read request, so check whether we have a writer
+             if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
+               // we have a writer and it is not us
+               out_msg.AckCount := 0 - 1;
+               APPEND_TRANSITION_COMMENT(" Writer exists ");
+               APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
+             }
+             else{
+               // we should have no sharers!
+               assert(getL2CacheEntry(address).Sharers.count() == 0);
+               assert(getL2CacheEntry(address).Exclusive == in_msg.Requestor);
+               APPEND_TRANSITION_COMMENT(" Sharers or we are writer exist, ok to read ");
+               APPEND_TRANSITION_COMMENT(" sharers: ");
+               APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
+               APPEND_TRANSITION_COMMENT(" exclusive: ");
+               APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
+               out_msg.AckCount := 0;
+             }
+           }
+         }
+         else if(in_msg.Type == CoherenceRequestType:GETX){
+           if(getL2CacheEntry(address).L2Miss == true){
+             // check whether we are the only sharer on the list. If so, no need to broadcast.
+             if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
+               // no filter check needed
+               APPEND_TRANSITION_COMMENT(" L2Miss and we are only sharer ");
+               out_msg.AckCount := 0;
+             }
+             else{
+               // nackers exist
+               out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
+               if(isSharer(address, in_msg.Requestor)){
+                 // don't include us
+                 out_msg.AckCount := out_msg.AckCount + 1;
+               }
+               APPEND_TRANSITION_COMMENT("Nackers exist");
+             }
+           }
+           else{
+             // This is a write request, so check whether we have readers not including us or a writer that is not us
+             if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
+               // we have a writer and it is not us
+               out_msg.AckCount := 0 - 1;
+               APPEND_TRANSITION_COMMENT(" Writer exists ");
+               APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
+             }
+             else if(getL2CacheEntry(address).Sharers.count() > 0){
+               // this shouldn't be possible - we always track exclusive owner, but allow silent S replacements
+               assert(false);
+               if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
+                 // no filter check needed
+                 APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter - we are only reader");
+                 out_msg.AckCount := 0;
+               }
+               else{
+                 // reader(s) exist that is not us
+                 out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
+                 if(isSharer(address, in_msg.Requestor)){
+                   // don't include us
+                   out_msg.AckCount := out_msg.AckCount + 1;
+                 }
+                 APPEND_TRANSITION_COMMENT(" Readers exist ");
+                 APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
+               }
+             }
+             else{
+               // we should always have no sharers!
+               assert(getL2CacheEntry(address).Sharers.count() == 0);
+               assert(getL2CacheEntry(address).Exclusive == in_msg.Requestor);
+               out_msg.AckCount := 0;
+               APPEND_TRANSITION_COMMENT(" sharers: ");
+               APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
+               APPEND_TRANSITION_COMMENT(" exclusive: ");
+               APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
+               APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter");
+             }
+           }
+         } // for GETX
+         else{
+           // unknown request type
+           APPEND_TRANSITION_COMMENT(in_msg.Type);
+           APPEND_TRANSITION_COMMENT(" ");
+           APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+           assert(false);
+         }
+       }
+       APPEND_TRANSITION_COMMENT(" requestor: ");
+       APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+       APPEND_TRANSITION_COMMENT(" AckCount: ");
+       APPEND_TRANSITION_COMMENT(out_msg.AckCount);
+     }
+   }
+ }
+
+ // Send L2_DATA_EXCLUSIVE for the peeked L1 request (data is granted
+ // exclusively for a GETS) and compute the filter-check AckCount exactly
+ // as f_sendDataToRequestor does for reads. GETS_ESCAPE requests bypass
+ // conflict detection and need no acks. A dead "broadcast to all L1s"
+ // alternative guarded by if(false) has been removed; only the live
+ // targeted-check logic remains.
+ action(f_sendExclusiveDataToRequestor, "fdx", desc="Send data from cache to reqeustor") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+       out_msg.Address := address;
+       out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+       out_msg.Type := CoherenceResponseType:L2_DATA_EXCLUSIVE;
+       out_msg.Sender := machineID;
+       out_msg.Destination.add(in_msg.Requestor);
+       out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+       out_msg.Dirty := getL2CacheEntry(address).Dirty;
+       out_msg.MessageSize := MessageSizeType:Response_Data;
+
+       // different ack counts depending on situation
+       // IMPORTANT: assuming data sent exclusively for GETS request
+       if(in_msg.Type == CoherenceRequestType:GETS_ESCAPE){
+         // no acks needed
+         out_msg.AckCount := 0;
+       }
+       else{
+         if(getL2CacheEntry(address).L2Miss == true){
+           // L2Miss set: Sharers holds the previous NACKer set.
+           // check whether we are the only sharer on the list. If so, no need to broadcast.
+           if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
+             // no filter check needed
+             APPEND_TRANSITION_COMMENT("We are only sharer");
+             out_msg.AckCount := 0;
+           }
+           else{
+             // wait for ACKs from the other NACKers
+             out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
+             if(isSharer(address, in_msg.Requestor)){
+               // don't include us
+               out_msg.AckCount := out_msg.AckCount + 1;
+             }
+             APPEND_TRANSITION_COMMENT("Nackers exist");
+           }
+         }
+         else{
+           // This is a read request, so check whether we have a writer
+           if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
+             // we have a writer and it is not us
+             out_msg.AckCount := 0 - 1;
+             APPEND_TRANSITION_COMMENT(" Writer exists ");
+             APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
+           }
+           else{
+             // we should always have no sharers!
+             APPEND_TRANSITION_COMMENT(address);
+             APPEND_TRANSITION_COMMENT(" requestor: ");
+             APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+             APPEND_TRANSITION_COMMENT(" sharers: ");
+             APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
+
+             DEBUG_EXPR(address);
+             DEBUG_EXPR(" requestor: ");
+             DEBUG_EXPR(in_msg.Requestor);
+             DEBUG_EXPR(" sharers: ");
+             DEBUG_EXPR(getL2CacheEntry(address).Sharers);
+
+             assert(getL2CacheEntry(address).Sharers.count() == 0);
+             assert(getL2CacheEntry(address).Exclusive == in_msg.Requestor);
+             APPEND_TRANSITION_COMMENT(" Sharers exist or we are writer, ok to read ");
+             out_msg.AckCount := 0;
+           }
+         }
+       }
+
+       APPEND_TRANSITION_COMMENT(" requestor: ");
+       APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+       APPEND_TRANSITION_COMMENT(" AckCount: ");
+       APPEND_TRANSITION_COMMENT(out_msg.AckCount);
+     }
+   }
+ }
+
+ // send an accumulated ACK to requestor when we don't care about checking filters (for escape actions)
+ // Sends one ACK message carrying the full count (all L1s minus the
+ // requestor) so the requestor's ack accounting completes immediately.
+ action(f_sendAccumulatedAckToRequestor, "faa", desc="Send ACKs to requestor") {
+   // special case: don't send ACK if uniprocessor, since we don't need it (just send data)
+   if((numberOfL1CachePerChip() - 1) > 0){
+     peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+       enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+         out_msg.Address := address;
+         out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+         out_msg.Type := CoherenceResponseType:ACK;
+         out_msg.Sender := machineID;
+         out_msg.Destination.add(in_msg.Requestor);
+         out_msg.MessageSize := MessageSizeType:Response_Control;
+         // count all L1s except requestor
+         out_msg.AckCount := numberOfL1CachePerChip() - 1;
+         APPEND_TRANSITION_COMMENT(" Total L1s: ");
+         APPEND_TRANSITION_COMMENT(numberOfL1CachePerChip());
+         APPEND_TRANSITION_COMMENT(" Total ACKS: ");
+         APPEND_TRANSITION_COMMENT(out_msg.AckCount);
+         APPEND_TRANSITION_COMMENT(" ");
+         APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+       }
+     }
+   }
+ }
+
+ // special INV used when we receive an escape action request. Sharers cannot NACK this invalidate.
+ action(fwm_sendFwdInvEscapeToSharersMinusRequestor, "fwme", desc="invalidate sharers for request, requestor is sharer") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="1") {
+       out_msg.Address := address;
+       out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+       out_msg.Type := CoherenceRequestType:INV_ESCAPE;
+       out_msg.Requestor := in_msg.Requestor;
+       // invalidate every sharer except the requestor itself
+       out_msg.Destination := L2cacheMemory[address].Sharers;
+       out_msg.Destination.remove(in_msg.Requestor);
+       out_msg.MessageSize := MessageSizeType:Request_Control;
+       //also pass along timestamp
+       out_msg.Timestamp := in_msg.Timestamp;
+     }
+   }
+ }
+
+ // Debug aid: record the requestor of the peeked L1 request in the
+ // transition trace. No protocol state is modified.
+ action(f_profileRequestor, "prq", desc="Profiles the requestor") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     APPEND_TRANSITION_COMMENT(" requestor: ");
+     APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+   }
+ }
+
+ // marks the L2 block as transactional if request was transactional
+ // Note: the flag is only ever set here, never cleared by this action.
+ action(f_markBlockTransIfTrans, "\mbt", desc="Mark an L2 block as transactional") {
+   peek(L1unblockNetwork_in, ResponseMsg) {
+     if(in_msg.Transactional == true){
+       L2cacheMemory[address].Trans := true;
+     }
+   }
+ }
+
+ // Profiling hook: report an overflowed (transactional) block at this L2.
+ action(q_profileOverflow, "po", desc="profile the overflowed block"){
+   profileOverflow(address, machineID);
+ }
+
+ // Debug aid: dump all relevant fields of the peeked L1 request into the
+ // transition trace. No protocol state is modified.
+ action(p_profileRequest, "pcc", desc="Profile request msg") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     APPEND_TRANSITION_COMMENT(" request: Timestamp: ");
+     APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
+     APPEND_TRANSITION_COMMENT(" Requestor: ");
+     APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+     APPEND_TRANSITION_COMMENT(" Dest: ");
+     APPEND_TRANSITION_COMMENT(in_msg.Destination);
+     APPEND_TRANSITION_COMMENT(" PA: ");
+     APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+     APPEND_TRANSITION_COMMENT(" Type: ");
+     APPEND_TRANSITION_COMMENT(in_msg.Type);
+     APPEND_TRANSITION_COMMENT(" Mode: ");
+     APPEND_TRANSITION_COMMENT(in_msg.AccessMode);
+     APPEND_TRANSITION_COMMENT(" PF: ");
+     APPEND_TRANSITION_COMMENT(in_msg.Prefetch);
+   }
+ }
+
+ //********************************END***************************
+
+ // Send L2_DATA to the requestor; AckCount is the negated number of sharers
+ // (excluding the requestor itself when it is a sharer) that the requestor
+ // must wait to hear from.
+ action(d_sendDataToRequestor, "d", desc="Send data from cache to reqeustor") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+       out_msg.Address := address;
+       out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+       out_msg.Type := CoherenceResponseType:L2_DATA;
+       out_msg.Sender := machineID;
+       out_msg.Destination.add(in_msg.Requestor);
+       out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+       out_msg.Dirty := getL2CacheEntry(address).Dirty;
+       out_msg.MessageSize := MessageSizeType:Response_Data;
+
+       out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
+       // don't count the requestor among the expected responders
+       if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
+         out_msg.AckCount := out_msg.AckCount + 1;
+       }
+       APPEND_TRANSITION_COMMENT(" AckCount: ");
+       APPEND_TRANSITION_COMMENT(out_msg.AckCount);
+     }
+   }
+ }
+
+ // use DATA instead of L2_DATA because L1 doesn't need to wait for acks from L1 filters in this case
+ // NOTE(review): the comment above disagrees with the code - the message
+ // type actually sent is L2_DATA (with AckCount := 0). Confirm which was
+ // intended. Also note PhysicalAddress is not set here, unlike the sibling
+ // data-sending actions - verify the receiver does not rely on it.
+ action(ds_sendSharedDataToRequestor, "ds", desc="Send data from cache to reqeustor") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+       out_msg.Address := address;
+       out_msg.Type := CoherenceResponseType:L2_DATA;
+       out_msg.Sender := machineID;
+       out_msg.Destination.add(in_msg.Requestor);
+       out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+       out_msg.Dirty := getL2CacheEntry(address).Dirty;
+       out_msg.MessageSize := MessageSizeType:Response_Data;
+       // no ACKS needed because no possible conflicts
+       out_msg.AckCount := 0;
+     }
+   }
+ }
+
+ // Tell every L1 sharer of this block to drop it before the L2 replaces it.
+ action(f_sendInvToSharers, "fsi", desc="invalidate sharers for L2 replacement") {
+   enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+     out_msg.Address := address;
+     out_msg.Requestor := machineID;
+     // target the full sharer set recorded in the directory entry
+     out_msg.Destination := L2cacheMemory[address].Sharers;
+     out_msg.Type := CoherenceRequestType:REPLACE;
+     out_msg.MessageSize := MessageSizeType:Request_Control;
+   }
+ }
+
+ // Forward an INV to all sharers except the requestor (who is itself a
+ // sharer and is upgrading). Unlike INV_ESCAPE, this INV can be NACKed.
+ action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
+       out_msg.Address := address;
+       out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+       out_msg.Type := CoherenceRequestType:INV;
+       out_msg.Requestor := in_msg.Requestor;
+       out_msg.Destination := L2cacheMemory[address].Sharers;
+       out_msg.Destination.remove(in_msg.Requestor);
+       out_msg.MessageSize := MessageSizeType:Request_Control;
+       //also pass along timestamp
+       out_msg.Timestamp := in_msg.Timestamp;
+       APPEND_TRANSITION_COMMENT(" Sharers: ");
+       APPEND_TRANSITION_COMMENT(L2cacheMemory[address].Sharers);
+     }
+   }
+ }
+
+ // OTHER ACTIONS
+ // Allocate a TBE for this address, snapshotting the cache entry's data,
+ // dirty bit, and current sharer count (used as the initial pendingAcks).
+ action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
+   check_allocate(L2_TBEs);
+   L2_TBEs.allocate(address);
+   L2_TBEs[address].L1_GetS_IDs.clear();
+   L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
+   L2_TBEs[address].Dirty := getL2CacheEntry(address).Dirty;
+   L2_TBEs[address].pendingAcks := getL2CacheEntry(address).Sharers.count();
+ }
+
+ // Copy the physical address from the peeked L1 request into the TBE.
+ action(i_setTBEPhysicalAddress, "ia", desc="Sets the physical address field of the TBE"){
+   peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+     L2_TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
+   }
+ }
+
+ // Release the TBE for this address once the transaction completes.
+ action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
+   L2_TBEs.deallocate(address);
+ }
+
+ // Consume the head of the L1 request queue, recording its network delay.
+ action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
+   profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
+ }
+
+ // Consume the head of the unblock queue, recording its network delay.
+ action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
+   profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
+ }
+
+
+ // Consume the head of the response queue, recording its network delay
+ // (virtual network 3).
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
+   profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
+ }
+
+
+ // Install data (and dirty bit) from the peeked response into the cache
+ // entry, clearing the L2Miss marker since the line is now present.
+ action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
+   peek(responseIntraChipL2Network_in, ResponseMsg) {
+     getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+     getL2CacheEntry(address).Dirty := in_msg.Dirty;
+     // reset the L2 miss bit
+     getL2CacheEntry(address).L2Miss := false;
+   }
+ }
+
+ // Sets the L2Miss bit in the L2 entry - indicates data was sourced from memory
+ action(m_markL2MissBit, "mi", desc="Set the entry's L2 Miss bit") {
+   getL2CacheEntry(address).L2Miss := true;
+ }
+
+ // Record the NACKer set from an unblock message as this entry's Sharers.
+ // Note: Sharers is cleared unconditionally, but the NACKers are only
+ // copied in when the entry is in the special L2Miss "multicast" state.
+ action(m_copyNackersIntoSharers, "mn", desc="Copy the NACKers list into our sharers list") {
+   peek(L1unblockNetwork_in, ResponseMsg) {
+     assert(in_msg.Nackers.count() > 0);
+     getL2CacheEntry(address).Sharers.clear();
+     // only need to copy into sharers list if we are in special state of "multicast" filter checks
+     if(getL2CacheEntry(address).L2Miss == true){
+       getL2CacheEntry(address).Sharers := in_msg.Nackers;
+       APPEND_TRANSITION_COMMENT(" Unblocker: ");
+       APPEND_TRANSITION_COMMENT(in_msg.Sender);
+       APPEND_TRANSITION_COMMENT(" Nackers: ");
+       APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
+     }
+   }
+ }
+
+ // Install writeback data carried on an L1 request (PUTX/PUTS) into the
+ // L2 cache entry and clear the L2Miss marker. Unlike m_writeDataToCache
+ // this peeks the L1 *request* network, not the response network, so the
+ // desc has been corrected from the copy-pasted "response queue" text.
+ action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from request queue to cache") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+ getL2CacheEntry(address).Dirty := in_msg.Dirty;
+ // reset the L2 miss bit
+ getL2CacheEntry(address).L2Miss := false;
+ }
+ }
+
+ // Fold the (possibly aggregated, possibly negative) AckCount from a
+ // response into the TBE's outstanding-ack counter.
+ action(q_updateAck, "q", desc="update pending ack count") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
+ APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+ APPEND_TRANSITION_COMMENT(" p: ");
+ APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
+ }
+ }
+
+ // For transactional memory. If received NACK instead of ACK
+ action(q_updateNack, "qn", desc="update pending ack count") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ // set flag indicating we have seen NACK
+ L2_TBEs[address].nack := true;
+ L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
+ APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+ APPEND_TRANSITION_COMMENT(" p: ");
+ APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
+ }
+ }
+
+ // Stash response data in the TBE (used while the cache entry has been
+ // deallocated, e.g. during replacement writebacks).
+ action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ L2_TBEs[address].DataBlk := in_msg.DataBlk;
+ L2_TBEs[address].Dirty := in_msg.Dirty;
+ }
+ }
+
+
+ // Intentionally empty: leaves the triggering message in place.
+ action(z_stall, "z", desc="Stall") {
+ }
+
+
+ // Remember which L1 issued the GetS so the data response can be routed.
+ action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].L1_GetS_IDs.add(in_msg.Requestor);
+ }
+ }
+
+ // Remember which L1 issued the GetX so the data response can be routed.
+ action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].L1_GetX_ID := in_msg.Requestor;
+ }
+ }
+
+ action(set_setMRU, "\set", desc="set the MRU entry") {
+ L2cacheMemory.setMRU(address);
+ }
+
+ // Allocate an L2 block only if the tag is not already present (the
+ // block may already exist from an earlier request for this line).
+ action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
+ if (L2cacheMemory.isTagPresent(address) == false) {
+ L2cacheMemory.allocate(address);
+ }
+ }
+
+ action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ L2cacheMemory.deallocate(address);
+ }
+
+ // Acknowledge an L1 writeback (PUTX/PUTS) on the response network.
+ action(t_sendWBAck, "t", desc="Send writeback ACK") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_TAG_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:WB_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ // ACK an UPGRADE request. AckCount is made negative so that the
+ // upgrader's running sum only reaches zero once one ACK per *other*
+ // sharer has arrived.
+ action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_TAG_LATENCY") {
+ out_msg.Address := address;
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ // upgrader doesn't get ack from itself, hence the + 1
+ out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
+ APPEND_TRANSITION_COMMENT(" ");
+ APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
+ }
+ }
+ }
+
+ // same as above, but send NACK instead of ACK
+ action(ts_sendInvNackToUpgrader, "tsn", desc="Send NACK to upgrader") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_TAG_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:NACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ // upgrader doesn't get ack from itself, hence the + 1
+ out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
+ }
+ }
+ }
+
+ // Record this demand miss in the global L2 miss profiler.
+ action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, L1CacheMachIDToProcessorNum(in_msg.Requestor));
+ }
+ }
+
+ // NOTE(review): the profiling call is commented out upstream, so this
+ // action is currently a no-op peek.
+ action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ // profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
+ }
+ }
+
+
+
+ action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ addSharer(address, in_msg.Requestor);
+ APPEND_TRANSITION_COMMENT( getL2CacheEntry(address).Sharers );
+ }
+ }
+
+ // Add the unblocking L1 as a sharer; if the unblock reports that the
+ // previous owner was mid-replacement, also drop that owner from the
+ // directory (its PUT will arrive later as PUT_old).
+ action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
+ peek(L1unblockNetwork_in, ResponseMsg) {
+ addSharer(address, in_msg.Sender);
+ if (in_msg.RemoveLastOwnerFromDir == true) {
+ // We do this to solve some races with PUTX
+ APPEND_TRANSITION_COMMENT("Last owner removed, it was ");
+ APPEND_TRANSITION_COMMENT(in_msg.LastOwnerID);
+ L2cacheMemory[address].Sharers.remove(in_msg.LastOwnerID);
+ assert(in_msg.LastOwnerID == L2cacheMemory[address].Exclusive);
+ }
+ }
+ }
+
+
+ action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2cacheMemory[address].Sharers.remove(in_msg.Requestor);
+ }
+ }
+
+ action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
+ L2cacheMemory[address].Sharers.clear();
+ }
+
+ // Make the unblocking L1 the exclusive owner: it becomes both the
+ // Exclusive pointer and the sole entry on the sharer list.
+ action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
+ peek(L1unblockNetwork_in, ResponseMsg) {
+ if (in_msg.RemoveLastOwnerFromDir == true) {
+ // We do this to solve some races with PUTX
+ APPEND_TRANSITION_COMMENT(" Last owner removed, it was ");
+ APPEND_TRANSITION_COMMENT(in_msg.LastOwnerID);
+ assert(in_msg.LastOwnerID == L2cacheMemory[address].Exclusive);
+ }
+ L2cacheMemory[address].Sharers.clear();
+ L2cacheMemory[address].Exclusive := in_msg.Sender;
+ addSharer(address, in_msg.Sender);
+ }
+ }
+
+ // Re-enqueue the blocked L1 request at the tail of its queue. PUT
+ // requests additionally bump L1PutsPending so that later PUTs can be
+ // classified as *_old while the directory is stale.
+ action(zz_recycleL1RequestQueue, "zz", desc="recycle L1 request queue") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:PUTX || in_msg.Type == CoherenceRequestType:PUTS) {
+ if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+ getL2CacheEntry(in_msg.Address).L1PutsPending := getL2CacheEntry(in_msg.Address).L1PutsPending + 1;
+ DEBUG_EXPR("RECYCLE PutSPending ");
+ DEBUG_EXPR(getL2CacheEntry(in_msg.Address).L1PutsPending);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(in_msg.Requestor);
+ }
+ }
+ }
+ L1RequestIntraChipL2Network_in.recycle();
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ /* Recycle while waiting for PUT */
+ transition({PB_MT, PB_MT_IB, PB_SS}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_GETS_ESCAPE, L1_GETX_ESCAPE, L1_GET_INSTR_ESCAPE, L2_Replacement, L2_Replacement_clean, L2_Replacement_XACT, L2_Replacement_clean_XACT}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // Replacements cannot proceed while a miss or unblock is outstanding.
+ transition({IM, IS, ISS, SS_MB, M_MB, ISS_MB, IS_SSB, MT_MB, MT_IIB, MT_IB, MT_SB, M_SSB, SS_SSB},
+ {L2_Replacement, L2_Replacement_clean, L2_Replacement_XACT, L2_Replacement_clean_XACT}) {
+ zz_recycleL1RequestQueue;
+ }
+
+ // New L1 requests are likewise held off while blocked on an unblock.
+ transition({SS_MB, M_MB, ISS_MB, IS_SSB, MT_MB, MT_IIB, MT_IB, MT_SB, M_SSB, SS_SSB},
+ {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_GETS_ESCAPE, L1_GETX_ESCAPE, L1_GET_INSTR_ESCAPE}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ transition({NP, SS, M, M_I, MT_I, MCT_I, I_I, S_I, ISS, IS, IM, /*SS_MB,*/ SS_SSB, /* MT_MB, M_MB, ISS_MB,*/ IS_SSB, M_SSB, /*MT_IIB, */MT_IB/*, MT_SB*/}, {L1_PUTX,L1_PUTS}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // occurs when L2 replacement raced with L1 replacement, and L2 finished its replacement first
+ transition({NP, M_I, MCT_I, I_I, S_I, IS, ISS, IM, SS, M, MT, IS_SSB, MT_IB, M_SSB, SS_SSB}, {L1_PUTX_old, L1_PUTS_old}){
+ f_profileRequestor;
+ jj_popL1RequestQueue;
+ }
+
+ // PUT from current (last) exclusive owner, that was replacing the line when it received Fwd req
+ transition(MT_I, {L1_PUTX_old, L1_PUTS_old}) {
+ f_profileRequestor;
+ jj_popL1RequestQueue;
+ }
+
+ transition({SS, M, MT}, {L1_PUT_PENDING}) { // L1_PUT_ msg pending for the block, don't accept new requests until PUT is processed
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ //===============================================
+ // BASE STATE - I
+
+ // Transitions from I (Idle)
+
+ // When L2 doesn't have block, need to send broadcast to all L1s to check appropriate filter(s)
+ transition(NP, L1_GETS, ISS) {
+ p_profileRequest;
+ f_profileRequestor;
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ // will mark as exclusive when we get unblocked with success
+ //nn_addSharer;
+ i_allocateTBE;
+ i_setTBEPhysicalAddress;
+ ss_recordGetSL1ID;
+ a_issueFetchToMemory;
+ // for correctness we need to query both read + write filters
+ a_checkL1ReadWriteFiltersExceptRequestor;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ // no need to check filters, send accumulated ACK to requestor
+ transition(NP, L1_GETS_ESCAPE, ISS) {
+ p_profileRequest;
+ f_profileRequestor;
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ // will mark as exclusive when we get unblocked with success
+ //nn_addSharer;
+ i_allocateTBE;
+ i_setTBEPhysicalAddress;
+ ss_recordGetSL1ID;
+ a_issueFetchToMemory;
+ // send accumulated ACK
+ f_sendAccumulatedAckToRequestor;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(NP, L1_GET_INSTR, IS) {
+ p_profileRequest;
+ f_profileRequestor;
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ nn_addSharer;
+ i_allocateTBE;
+ i_setTBEPhysicalAddress;
+ ss_recordGetSL1ID;
+ a_issueFetchToMemory;
+ // for correctness query the read + write filters
+ a_checkL1ReadWriteFiltersExceptRequestor;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ // no need to query filters, send accumulated ACK to requestor
+ transition(NP, L1_GET_INSTR_ESCAPE, IS) {
+ p_profileRequest;
+ f_profileRequestor;
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ nn_addSharer;
+ i_allocateTBE;
+ i_setTBEPhysicalAddress;
+ ss_recordGetSL1ID;
+ a_issueFetchToMemory;
+ // send accumulated ACK
+ f_sendAccumulatedAckToRequestor;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(NP, L1_GETX, IM) {
+ p_profileRequest;
+ f_profileRequestor;
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ // nn_addSharer;
+ i_allocateTBE;
+ i_setTBEPhysicalAddress;
+ xx_recordGetXL1ID;
+ a_issueFetchToMemory;
+ // also query the L1 write and read filters
+ a_checkL1ReadWriteFiltersExceptRequestor;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ // don't check filters
+ transition(NP, L1_GETX_ESCAPE, IM) {
+ p_profileRequest;
+ f_profileRequestor;
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ // nn_addSharer;
+ i_allocateTBE;
+ i_setTBEPhysicalAddress;
+ xx_recordGetXL1ID;
+ a_issueFetchToMemory;
+ // send accumulated ACK to requestor
+ f_sendAccumulatedAckToRequestor;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+
+ // transitions from IS/IM
+
+ // force L1s to respond success or failure
+ transition(ISS, Mem_Data, ISS_MB){
+ m_writeDataToCache;
+ m_markL2MissBit;
+ // send exclusive data but force L1 to wait for filter responses
+ f_sendExclusiveDataToGetSRequestor;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IS, Mem_Data, IS_SSB){
+ m_writeDataToCache;
+ m_markL2MissBit;
+ // send data but force L1 to wait for filter responses
+ f_sendDataToGetSRequestor;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Mem_Data, ISS_MB){
+ m_writeDataToCache;
+ m_markL2MissBit;
+ // send data but force L1 to wait for filter responses
+ f_sendDataToGetXRequestor;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // disallow grouping of requestors. There is a correctness problem if we check the wrong
+ // filters as indicated by the original requestor.
+ transition({IS, ISS}, {L1_GETX, L1_GETS, L1_GET_INSTR, L1_GETX_ESCAPE, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR, L1_GETX_ESCAPE, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // transitions from SS
+ transition(SS, {L1_GETS, L1_GET_INSTR, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}, SS_SSB) {
+ p_profileRequest;
+ f_profileRequestor;
+ ds_sendSharedDataToRequestor;
+ nn_addSharer;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ // For isolation the L1 filters might return NACKs to the requestor
+ transition(SS, L1_GETX, SS_MB) {
+ p_profileRequest;
+ f_profileRequestor;
+ d_sendDataToRequestor;
+ fwm_sendFwdInvToSharersMinusRequestor;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ // send special INV to sharers - they have to invalidate
+ transition(SS, L1_GETX_ESCAPE, SS_MB) {
+ p_profileRequest;
+ f_profileRequestor;
+ d_sendDataToRequestor;
+ fwm_sendFwdInvEscapeToSharersMinusRequestor;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ // For isolation the L1 filters might return NACKs to the requestor
+ transition(SS, L1_UPGRADE, SS_MB) {
+ f_profileRequestor;
+ fwm_sendFwdInvToSharersMinusRequestor;
+ ts_sendInvAckToUpgrader;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ // Clean replacement still invalidates sharers; _XACT variants also
+ // profile a transactional-overflow event.
+ transition(SS, L2_Replacement_clean, I_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(SS, L2_Replacement_clean_XACT, I_I) {
+ q_profileOverflow;
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(SS, L2_Replacement, S_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(SS, L2_Replacement_XACT, S_I) {
+ q_profileOverflow;
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ // Transitions from M
+
+ // send data, but force L1 to wait for filter responses
+ transition(M, L1_GETS, M_MB) {
+ p_profileRequest;
+ f_profileRequestor;
+ f_sendExclusiveDataToRequestor;
+ // selective filter checks, but need to check both read+write in case nackers put NP block into M state
+ a_checkNackerL1ReadWriteFiltersExceptRequestor;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ // don't care about filters
+ transition(M, L1_GETS_ESCAPE, M_MB) {
+ p_profileRequest;
+ f_profileRequestor;
+ f_sendExclusiveDataToRequestor;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(M, L1_GET_INSTR, M_SSB) {
+ p_profileRequest;
+ f_profileRequestor;
+ f_sendDataToRequestor;
+ // NEW - selective filter checks, but need to check both read+write in case nackers put NP block into M state
+ a_checkNackerL1ReadWriteFiltersExceptRequestor;
+ // This should always be _after_ f_sendDataToRequestor and a_checkNackerL1WriteFiltersExceptRequestor, since they
+ // explicitly look at the sharers list!
+ nn_addSharer;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ // don't care about filters
+ transition(M, L1_GET_INSTR_ESCAPE, M_SSB) {
+ p_profileRequest;
+ f_profileRequestor;
+ f_sendDataToRequestor;
+ nn_addSharer;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(M, L1_GETX, M_MB) {
+ p_profileRequest;
+ f_profileRequestor;
+ f_sendDataToRequestor;
+ // selective filter checks
+ a_checkNackerL1ReadWriteFiltersExceptRequestor;
+ // issue filter checks
+ //a_checkL1ReadWriteFiltersExceptRequestor;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ // don't care about filters
+ transition(M, L1_GETX_ESCAPE, M_MB) {
+ p_profileRequest;
+ f_profileRequestor;
+ f_sendDataToRequestor;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(M, L2_Replacement, M_I) {
+ i_allocateTBE;
+ c_exclusiveReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(M, L2_Replacement_clean, M_I) {
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(M, L2_Replacement_XACT, M_I) {
+ q_profileOverflow;
+ i_allocateTBE;
+ c_exclusiveReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(M, L2_Replacement_clean_XACT, M_I) {
+ q_profileOverflow;
+ rr_deallocateL2CacheBlock;
+ }
+
+
+ // transitions from MT
+ transition(MT, {L1_GETX, L1_GETX_ESCAPE}, MT_MB) {
+ p_profileRequest;
+ f_profileRequestor;
+ b_forwardRequestToExclusive;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+
+ transition(MT, {L1_GETS, L1_GET_INSTR, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}, MT_IIB) {
+ p_profileRequest;
+ f_profileRequestor;
+ b_forwardRequestToExclusive;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(MT, L2_Replacement, MT_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MT, L2_Replacement_clean, MCT_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MT, L2_Replacement_XACT, MT_I) {
+ q_profileOverflow;
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MT, L2_Replacement_clean_XACT, MCT_I) {
+ q_profileOverflow;
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MT, L1_PUTX, M) {
+ f_profileRequestor;
+ // this doesn't affect exclusive ptr
+ ll_clearSharers;
+ mr_writeDataToCacheFromRequest;
+ t_sendWBAck;
+ jj_popL1RequestQueue;
+ }
+
+ // This is for the case of transactional read line in E state being replaced from L1. We need to maintain isolation on this
+ // in the event of a future transactional store from another proc, so we maintain this transactional sharer on the list
+ transition(MT, L1_PUTS, SS) {
+ f_profileRequestor;
+ ll_clearSharers;
+ // maintain transactional read isolation
+ nn_addSharer;
+ mr_writeDataToCacheFromRequest;
+ t_sendWBAck;
+ jj_popL1RequestQueue;
+ }
+
+ // transitions from blocking states
+ transition(SS_MB, Unblock_Cancel, SS) {
+ k_popUnblockQueue;
+ }
+
+ transition(M_SSB, Unblock_Cancel, M) {
+ ll_clearSharers;
+ // copy NACKers list from unblock message to our sharers list
+ m_copyNackersIntoSharers;
+ k_popUnblockQueue;
+ }
+
+ transition(MT_MB, Unblock_Cancel, MT) {
+ k_popUnblockQueue;
+ }
+
+ transition(MT_IB, Unblock_Cancel, MT) {
+ k_popUnblockQueue;
+ }
+
+ transition(MT_IIB, Unblock_Cancel, MT){
+ k_popUnblockQueue;
+ }
+
+ // L2 just got the data from memory, but we have Nackers. We can let nacked block reside in M, but GETS request needs to check read+write
+ // signatures to avoid atomicity violations.
+ transition({ISS_MB, IS_SSB}, Unblock_Cancel, M){
+ //rr_deallocateL2CacheBlock;
+ // copy NACKers list from unblock message to our sharers list
+ m_copyNackersIntoSharers;
+ k_popUnblockQueue;
+ }
+
+ transition(M_MB, Unblock_Cancel, M) {
+ // copy NACKers list from unblock message to our sharers list
+ m_copyNackersIntoSharers;
+ k_popUnblockQueue;
+ }
+
+ transition(SS_MB, Exclusive_Unblock, MT) {
+ // update actual directory
+ mmu_markExclusiveFromUnblock;
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ // PUT from next exclusive surpassed its own ExclusiveUnblock
+ // Perceived as PUTX_old because the directory is outdated
+ transition(SS_MB, {L1_PUTX_old, L1_PUTS_old}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // PUT from current (old) exclusive, can't do anything with it in this state
+ // Don't know whether exclusive was replacing or not, so wait to see what Unblock says
+ transition(SS_MB, {L1_PUTX, L1_PUTS}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // Next exclusive informs that last owner was replacing the line when it received Fwd req
+ // Thus, expect a PUTX_old from previous owner
+ transition(SS_MB, Exclusive_Unblock_WaitPUTold, PB_MT) {
+ // update actual directory
+ mmu_markExclusiveFromUnblock;
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ transition(PB_MT, {L1_PUTX_old, L1_PUTS_old}, MT) { // OK, PUT_old received, go to MT
+ f_profileRequestor;
+ jj_popL1RequestQueue;
+ }
+
+ // PUT from current (next) exclusive, so recycle
+ // Expecting PUT_old, won't take in new PUT until previous PUT arrives
+ transition(PB_MT, {L1_PUTX, L1_PUTS}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // L2 blocks on GETS requests in SS state
+ transition(SS_SSB, Unblock, SS) {
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ transition({M_SSB, IS_SSB}, Unblock, SS) {
+ // we already added the sharer when we received original request
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ transition({M_MB, MT_MB, ISS_MB}, Exclusive_Unblock, MT) {
+ // update actual directory
+ mmu_markExclusiveFromUnblock;
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ transition({M_MB, MT_MB, ISS_MB}, Exclusive_Unblock_WaitPUTold, PB_MT) {
+ // update actual directory
+ mmu_markExclusiveFromUnblock;
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ // PUT from (not yet) next exclusive surpassed its own ExclusiveUnblock
+ // thus became PUTX_old (since directory is not up-to-date)
+ transition({M_MB, MT_MB, ISS_MB}, {L1_PUTX_old, L1_PUTS_old}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // PUT from current (previous) owner: recycle until unblock arrives
+ // We don't know whether replacing cache is waiting for WB_Ack or it was replacing when fwd arrived
+ transition({M_MB, MT_MB, ISS_MB}, {L1_PUTX, L1_PUTS}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // L1 requestor received data from exclusive L1, but writeback data from exclusive L1 hasn't arrived yet
+ transition(MT_IIB, Unblock, MT_IB) {
+ nnu_addSharerFromUnblock;
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ // PUT from current (previous) owner: recycle
+ // We don't know whether replacing cache is waiting for WB_Ack or it was replacing when fwd arrived
+ transition(MT_IIB, {L1_PUTX, L1_PUTS}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // Writeback data arrives last: install it and drop to shared.
+ transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
+ m_writeDataToCache;
+ o_popIncomingResponseQueue;
+ }
+
+ // PUT from (not yet) next exclusive, but unblock hasn't arrived yet, so it became PUT_old: recycle
+ transition(MT_IIB, {L1_PUTX_old, L1_PUTS_old}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ transition(MT_IIB, Unblock_WaitPUTold, PB_MT_IB) { // Now arrives Unblock, wait for PUT and WB_Data
+ nnu_addSharerFromUnblock;
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ // L1 requestor has not received data from exclusive L1, but we received writeback data from exclusive L1
+ transition(MT_IIB, {WB_Data, WB_Data_clean}, MT_SB) {
+ m_writeDataToCache;
+ o_popIncomingResponseQueue;
+ }
+
+ // PUT_old from previous owner, that was replacing when it received Fwd req
+ transition(PB_MT_IB, {L1_PUTX_old, L1_PUTS_old}, MT_IB) { // Go to MT_IB, and wait for WB_Data
+ f_profileRequestor;
+ jj_popL1RequestQueue;
+ }
+
+ transition(PB_MT_IB, {L1_PUTX, L1_PUTS}) { // Waiting for PUT_old, don't take new PUT in
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // WB_data from previous owner, we already received unblock, just wait for PUT_old to go to SS
+ transition(PB_MT_IB, {WB_Data, WB_Data_clean}, PB_SS) { // Received Unblock, now arrives WB_Data, wait for PUT
+ m_writeDataToCache;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(PB_SS, {L1_PUTX_old, L1_PUTS_old}, SS) { // Received Unblock and WB_Data, now arrives PUT, go to SS
+ f_profileRequestor;
+ jj_popL1RequestQueue;
+ }
+
+ // PUT from new exclusive owner, while waiting for PUT from previous exclusive owner: recycle
+ transition(PB_SS, {L1_PUTX, L1_PUTS}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ transition(MT_SB, Unblock, SS) {
+ nnu_addSharerFromUnblock;
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ transition(MT_SB, Unblock_WaitPUTold, PB_SS) { // Received WB_Data, now arriving Unblock, wait for PUT
+ nnu_addSharerFromUnblock;
+ // mark block as trans if needed
+ f_markBlockTransIfTrans;
+ k_popUnblockQueue;
+ }
+
+ // PUT from (not yet) new exclusive owner, before we receive Unblock from it (became PUT_old because directory is not up-to-date)
+ transition(MT_SB, {L1_PUTX_old, L1_PUTS_old}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ // PUT from current (last) exclusive owner, that was replacing the line when it received Fwd req
+ transition(MT_SB, {L1_PUTX, L1_PUTS}) {
+ kk_removeRequestSharer; // When Unblock arrives, it'll trigger Unblock, not Unblock_WaitPUTold
+ f_profileRequestor;
+ jj_popL1RequestQueue;
+ }
+
+ // writeback states
+ transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR, L1_GETX_ESCAPE, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}) {
+ f_profileRequestor;
+ zz_recycleL1RequestQueue;
+ }
+
+ transition(I_I, Ack) {
+ q_updateAck;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(I_I, Ack_all, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Dirty writeback data lands in the TBE and is pushed to memory.
+ transition({MT_I, MCT_I}, WB_Data, M_I) {
+ qq_writeDataToTBE;
+ ct_exclusiveReplacementFromTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(MCT_I, WB_Data_clean, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // L1 never changed Dirty data
+ transition(MT_I, Ack_all, M_I) {
+ ct_exclusiveReplacementFromTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // clean data that L1 exclusive never wrote
+ transition(MCT_I, Ack_all, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(MT_I, WB_Data_clean, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(S_I, Ack) {
+ q_updateAck;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(S_I, Ack_all, M_I) {
+ ct_exclusiveReplacementFromTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(M_I, Mem_Ack, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+}
+
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-mem.sm b/src/mem/protocol/MESI_CMP_filter_directory-mem.sm
new file mode 100644
index 000000000..1fcd234fe
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_filter_directory-mem.sm
@@ -0,0 +1,166 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
+ */
+
+
+// Memory directory for MESI_CMP_filter_directory: a single-state (Idle)
+// controller that services fetch requests from memory and absorbs
+// MEMORY_DATA writebacks from the L2. Desc strings corrected: this file
+// was cloned from MOESI_CMP_token-dir.sm (see $Id above) and still
+// carried that protocol's descriptions.
+machine(Directory, "Directory protocol") {
+
+ MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
+ MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
+ MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
+
+ // STATES
+ enumeration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, desc="Idle";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ Fetch, desc="A memory fetch request (GETS or GETX) arrives";
+ Data, desc="A memory data writeback (MEMORY_DATA) arrives";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+
+ // The directory keeps no per-block coherence state: every block is
+ // always reported as I.
+ State getState(Address addr) {
+ return State:I;
+ }
+
+ void setState(Address addr, State state) {
+ }
+
+ // ** OUT_PORTS **
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+
+ // ** IN_PORTS **
+
+ // GETS and GETX are serviced identically: both fetch data from memory.
+ in_port(requestNetwork_in, RequestMsg, requestToDir) {
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Fetch, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:Fetch, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(responseNetwork_in, ResponseMsg, responseToDir) {
+ if (responseNetwork_in.isReady()) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+ trigger(Event:Data, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+ // Acknowledge receipt of writeback data to the sending L2.
+ action(a_sendAck, "a", desc="Send ack to L2") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Sender);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ // Return the memory copy of the block to the requestor.
+ action(d_sendData, "d", desc="Send data to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ requestNetwork_in.dequeue();
+ }
+
+ action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
+ responseNetwork_in.dequeue();
+ }
+
+ action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ // TRANSITIONS
+
+ transition(I, Fetch) {
+ d_sendData;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(I, Data) {
+ m_writeDataToMemory;
+ a_sendAck;
+ k_popIncomingResponseQueue;
+ }
+}
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-msg.sm b/src/mem/protocol/MESI_CMP_filter_directory-msg.sm
new file mode 100644
index 000000000..a888e2450
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_filter_directory-msg.sm
@@ -0,0 +1,153 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MSI_MOSI_CMP_directory-msg.sm 1.5 05/01/19 15:48:37-06:00 mikem@royal16.cs.wisc.edu $
+ *
+ */
+
+// CoherenceRequestType
+// Request types used on the L1/L2/directory request networks. The *_ESCAPE
+// variants are issued while in a transactional-memory escape action (per
+// their desc strings); they bypass the NACK-based isolation of the base
+// request types.
+enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ GETX_ESCAPE, desc="Get eXclusive, while in escape action";
+ UPGRADE, desc="UPGRADE to exclusive";
+ GETS, desc="Get Shared";
+ GETS_ESCAPE, desc="Get Shared, while in escape action";
+ GET_INSTR, desc="Get Instruction";
+ GET_INSTR_ESCAPE, desc="Get Instruction, while in escape action";
+ INV, desc="INValidate, could be NACKed";
+ INV_ESCAPE, desc="INValidate, cannot be NACKed";
+ PUTX, desc="replacement message, for writeback to lower caches";
+ PUTS, desc="clean replacement message, for writeback to lower caches";
+ REPLACE, desc="replacement message, from lowest cache";
+ CHECK_WRITE_FILTER, desc="check write filter message";
+ CHECK_READ_WRITE_FILTER, desc="check both read and write filters message";
+}
+
+// CoherenceResponseType
+// Response types used on the response networks. Descs for DATA_EXCLUSIVE
+// and MEMORY_DATA were previously all the ambiguous string "Data"; they are
+// now distinguished so generated docs/DEBUG_EXPR output is readable.
+enumeration(CoherenceResponseType, desc="...") {
+ MEMORY_ACK, desc="Ack from memory controller";
+ DATA, desc="Data";
+ DATA_EXCLUSIVE, desc="Data, with exclusive permission";
+ L2_DATA, desc="data from L2, in shared mode";
+ L2_DATA_EXCLUSIVE, desc="data from L2, in exclusive mode";
+ MEMORY_DATA, desc="Data from memory";
+ ACK, desc="Generic invalidate ack";
+ NACK, desc="NACK used to maintain transactional isolation";
+ WB_ACK, desc="writeback ack";
+ UNBLOCK, desc="unblock";
+ EXCLUSIVE_UNBLOCK, desc="exclusive unblock";
+ UNBLOCK_CANCEL, desc="unblock when trans. request fails";
+}
+
+// RequestMsg
+// Coherence request carried on the request virtual networks.
+// Fixed the stray space before the comma on Requestor and its
+// ungrammatical desc string; field names and order are unchanged.
+structure(RequestMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Line address for this request";
+ Address PhysicalAddress, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ AccessModeType AccessMode, desc="user/supervisor access type";
+ MachineID Requestor, desc="What component made the request";
+ NetDest Destination, desc="What components receive the request, includes MachineType and num";
+ MessageSizeType MessageSize, desc="size category of the message";
+ DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
+ bool Dirty, default="false", desc="Dirty bit";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+ uint64 Timestamp, desc="TLR-like Timestamp";
+}
+
+// ResponseMsg
+// Coherence response carried on the response virtual networks.
+// Fixed LastOwnerID's desc, which was copy-pasted from Sender
+// ("What component sent the data"); field names/order are unchanged.
+structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Line address for this request";
+ Address PhysicalAddress, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID Sender, desc="What component sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ DataBlock DataBlk, desc="Data for the cache line";
+ bool Dirty, default="false", desc="Dirty bit";
+ int AckCount, default="0", desc="number of acks in this message";
+ MessageSizeType MessageSize, desc="size category of the message";
+ uint64 Timestamp, desc="TLR-like Timestamp";
+ NetDest Nackers, desc="The nodes which sent NACKs to requestor";
+ bool Transactional, desc="Whether this address was transactional";
+ bool RemoveLastOwnerFromDir, desc="To solve some races with PUTX/GETS";
+ MachineID LastOwnerID, desc="The last owner of the block, used with RemoveLastOwnerFromDir";
+}
+
+// TriggerType
+// Internal (self-queued) trigger events; currently only ack-completion.
+enumeration(TriggerType, desc="...") {
+ ALL_ACKS, desc="When all acks/nacks have been received";
+}
+
+// TriggerMsg
+// Message a controller enqueues to itself to fire a TriggerType event.
+structure(TriggerMsg, desc="...", interface="Message") {
+ Address Address, desc="Line address for this request";
+ Address PhysicalAddress, desc="Physical address for this request";
+ TriggerType Type, desc="Type of trigger";
+}
+
+/*
+ GETX, desc="Get eXclusive";
+ UPGRADE, desc="UPGRADE to exclusive";
+ GETS, desc="Get Shared";
+ GET_INSTR, desc="Get Instruction";
+ INV, desc="INValidate";
+ PUTX, desc="replacement message, for writeback to lower caches";
+ REPLACE, desc="replacement message, from lowest cache";
+ CHECK_WRITE_FILTER, desc="check write filter message";
+ CHECK_READ_WRITE_FILTER, desc="check both read and write filters message";
+*/
+
+// Map a protocol-specific CoherenceRequestType onto the protocol-independent
+// GenericRequestType used for profiling. Each *_ESCAPE variant maps to the
+// same generic type as its base request; any type without a mapping
+// (PUTS, INV_ESCAPE, filter checks) is a fatal error, as before.
+GenericRequestType convertToGenericType(CoherenceRequestType type) {
+ if (type == CoherenceRequestType:GETS || type == CoherenceRequestType:GETS_ESCAPE) {
+ return GenericRequestType:GETS;
+ } else if (type == CoherenceRequestType:GETX || type == CoherenceRequestType:GETX_ESCAPE) {
+ return GenericRequestType:GETX;
+ } else if (type == CoherenceRequestType:GET_INSTR || type == CoherenceRequestType:GET_INSTR_ESCAPE) {
+ return GenericRequestType:GET_INSTR;
+ } else if (type == CoherenceRequestType:UPGRADE) {
+ return GenericRequestType:UPGRADE;
+ } else if (type == CoherenceRequestType:PUTX) {
+ return GenericRequestType:PUTX;
+ } else if (type == CoherenceRequestType:INV) {
+ return GenericRequestType:INV;
+ } else if (type == CoherenceRequestType:REPLACE) {
+ return GenericRequestType:REPLACEMENT;
+ } else {
+ DEBUG_EXPR(type);
+ error("invalid CoherenceRequestType");
+ }
+}
+
+
diff --git a/src/mem/protocol/MESI_CMP_filter_directory.slicc b/src/mem/protocol/MESI_CMP_filter_directory.slicc
new file mode 100644
index 000000000..715da5795
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_filter_directory.slicc
@@ -0,0 +1,7 @@
+../protocols/LogTM.sm
+../protocols/MESI_CMP_filter_directory-msg.sm
+../protocols/MESI_CMP_filter_directory-L2cache.sm
+../protocols/MESI_CMP_filter_directory-L1cache.sm
+../protocols/MESI_CMP_filter_directory-mem.sm
+../protocols/standard_CMP-protocol.sm
+
diff --git a/src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm b/src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm
new file mode 100644
index 000000000..2f8818489
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm
@@ -0,0 +1,250 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
+ */
+
+// This file is copied from Yasuko Watanabe's prefetch / memory protocol
+// Copied here by aep 12/14/07
+
+
+machine(Directory, "MESI_CMP_filter_directory protocol") {
+
+ MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
+ MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
+ MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
+
+ // STATES
+ // This memory-backed directory is stateless: every block is always I.
+ // NOTE(review): desc "Owner" looks like a copy-paste; I is the idle/base
+ // state here — confirm against the generated docs before renaming.
+ enumeration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, desc="Owner";
+ }
+
+ // Events
+ // Fetch/Data arrive from the on-chip network; Memory_* arrive from the
+ // detailed DRAM model (memBuffer) when the off-chip access completes.
+ enumeration(Event, desc="Directory events") {
+ Fetch, desc="A memory fetch arrives";
+ Data, desc="writeback data arrives";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ // Per-block directory state: only the backing data, no sharer tracking.
+ structure(Entry, desc="...") {
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // C++-implemented directory storage, indexed by line address.
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // to simulate detailed DRAM
+ // Opaque C++ memory controller; declared with in/out ports so the machine
+ // can enqueue MemoryMsgs to it and receive completions from it.
+ external_type(MemoryControl, inport="yes", outport="yes") {
+
+ }
+
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+ MemoryControl memBuffer, constructor_hack="i";
+
+ // The directory has no transient states: every address is always I.
+ State getState(Address addr) {
+ return State:I;
+ }
+
+ // No state to record (see getState) — intentionally a no-op.
+ void setState(Address addr, State state) {
+ }
+
+ // True for the demand-fetch request types that read data from memory:
+ // GETS, GET_INSTR, and GETX. Everything else is not a GET.
+ bool isGETRequest(CoherenceRequestType type) {
+ if (type == CoherenceRequestType:GETX) {
+ return true;
+ } else if (type == CoherenceRequestType:GETS) {
+ return true;
+ } else if (type == CoherenceRequestType:GET_INSTR) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+
+ // ** OUT_PORTS **
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+ out_port(memQueue_out, MemoryMsg, memBuffer);
+
+ // ** IN_PORTS **
+
+ // Requests from L2 banks: only GET-class fetches are legal at the
+ // directory; anything else is a protocol error.
+ in_port(requestNetwork_in, RequestMsg, requestToDir) {
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (isGETRequest(in_msg.Type)) {
+ trigger(Event:Fetch, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Responses from L2 banks: only writeback data (MEMORY_DATA) is legal.
+ in_port(responseNetwork_in, ResponseMsg, responseToDir) {
+ if (responseNetwork_in.isReady()) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+ trigger(Event:Data, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ // Completions from the detailed DRAM model: reads become Memory_Data,
+ // writebacks become Memory_Ack.
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+
+
+ // Actions
+ // Forward the DRAM writeback completion as a MEMORY_ACK to the original
+ // requestor recorded in the MemoryMsg being peeked.
+ action(a_sendAck, "a", desc="Send ack to L2") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ // Forward the DRAM read completion as MEMORY_DATA to the original
+ // requestor; the data comes from the MemoryMsg, not the directory array.
+ action(d_sendData, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ // Memory's copy is clean by definition.
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ // Dequeue the fetch request once it has been queued to DRAM.
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ requestNetwork_in.dequeue();
+ }
+
+ // Dequeue the writeback response once it has been handled.
+ // Fixed desc: it previously said "request queue" (copy/paste from j_pop)
+ // even though this action pops the response queue.
+ action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
+ responseNetwork_in.dequeue();
+ }
+
+ // Dequeue a DRAM completion once it has been forwarded.
+ // NOTE(review): shorthand is "q", not "l" as the name suggests — confirm
+ // it does not clash with other action shorthands in trace output.
+ action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue();
+ }
+
+ // Hand a fetch to the detailed DRAM model, tagging it with the original
+ // requestor so the completion can be routed back (see d_sendData).
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.Prefetch := in_msg.Prefetch;
+ // Current directory copy travels with the request.
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ // Hand a writeback to the detailed DRAM model, tagging it with the sender
+ // so the MEMORY_ACK can be routed back (see a_sendAck).
+ action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Sender;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := in_msg.Prefetch;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ // Commit the writeback data into the directory's backing store (the
+ // functional copy); the timing writeback goes through qw/memBuffer.
+ action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ // TRANSITIONS
+
+ // Fetch: queue a DRAM read (reply happens later on Memory_Data).
+ transition(I, Fetch) {
+ //d_sendData;
+ qf_queueMemoryFetchRequest;
+ j_popIncomingRequestQueue;
+ }
+
+ // Writeback: commit functionally, queue DRAM write (ack on Memory_Ack).
+ transition(I, Data) {
+ m_writeDataToMemory;
+ //a_sendAck;
+ qw_queueMemoryWBRequest;
+ k_popIncomingResponseQueue;
+ }
+
+ // DRAM read completed: forward data to the original requestor.
+ transition(I, Memory_Data) {
+ d_sendData;
+ l_popMemQueue;
+ }
+
+ // DRAM write completed: ack the original writer.
+ transition(I, Memory_Ack) {
+ a_sendAck;
+ l_popMemQueue;
+ }
+}
diff --git a/src/mem/protocol/MESI_CMP_filter_directory_m.slicc b/src/mem/protocol/MESI_CMP_filter_directory_m.slicc
new file mode 100644
index 000000000..43c9d4019
--- /dev/null
+++ b/src/mem/protocol/MESI_CMP_filter_directory_m.slicc
@@ -0,0 +1,7 @@
+../protocols/LogTM.sm
+../protocols/MESI_CMP_filter_directory-msg.sm
+../protocols/MESI_CMP_filter_directory-L2cache.sm
+../protocols/MESI_CMP_filter_directory-L1cache.sm
+../protocols/MESI_CMP_filter_directory_m-mem.sm
+../protocols/standard_CMP-protocol.sm
+
diff --git a/src/mem/protocol/MESI_SCMP_bankdirectory-L1cache.sm b/src/mem/protocol/MESI_SCMP_bankdirectory-L1cache.sm
new file mode 100644
index 000000000..6e707a431
--- /dev/null
+++ b/src/mem/protocol/MESI_SCMP_bankdirectory-L1cache.sm
@@ -0,0 +1,894 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MSI_MOSI_CMP_directory-L1cache.sm 1.10 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
+ *
+ */
+
+
+machine(L1Cache, "MSI Directory L1 Cache CMP") {
+
+ // NODE L1 CACHE
+ // From this node's L1 cache TO the network
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
+ // a local L1 -> this L2 bank
+ MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
+ // unblock notifications L1 -> L2 (see UNBLOCK/EXCLUSIVE_UNBLOCK responses)
+ MessageBuffer unblockFromL1Cache, network="To", virtual_network="4", ordered="false";
+
+
+ // To this node's L1 cache FROM the network
+ // a L2 bank -> this L1
+ MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false";
+ // a L2 bank -> this L1; note vnet 3 carries responses in both directions
+ MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";
+
+ // STATES
+ // MESI base states plus transient states for in-flight GETS/GETX and
+ // replacements. NOTE(review): the default is I even though NP ("not
+ // present") exists — getState returns NP for untracked blocks; confirm
+ // the default is only used for generated-table initialization.
+ enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+ // Base states
+ NP, desc="Not present in either cache";
+ I, desc="a L1 cache entry Idle";
+ S, desc="a L1 cache entry Shared";
+ E, desc="a L1 cache entry Exclusive";
+ M, desc="a L1 cache entry Modified", format="!b";
+
+ // Transient States
+ IS, desc="L1 idle, issued GETS, have not seen response yet";
+ IM, desc="L1 idle, issued GETX, have not seen response yet";
+ SM, desc="L1 idle, issued GETX, have not seen response yet";
+ IS_I, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
+
+ M_I, desc="L1 replacing, waiting for ACK";
+ E_I, desc="L1 replacing, waiting for ACK";
+
+ }
+
+ // EVENTS
+ // Processor demand requests, L2-forwarded requests, and responses; the
+ // *_all variants fire when the ack count in the TBE reaches zero.
+ enumeration(Event, desc="Cache events") {
+ // L1 events
+ Load, desc="Load request from the home processor";
+ Ifetch, desc="I-fetch request from the home processor";
+ Store, desc="Store request from the home processor";
+
+ Inv, desc="Invalidate request from L2 bank";
+
+ // internal generated request
+ L1_Replacement, desc="L1 Replacement", format="!r";
+
+ // other requests
+ Fwd_GETX, desc="GETX from other processor";
+ Fwd_GETS, desc="GETS from other processor";
+ Fwd_GET_INSTR, desc="GET_INSTR from other processor";
+
+ Data, desc="Data for processor";
+ Data_Exclusive, desc="Data for processor";
+ DataS_fromL1, desc="data for GETS request, need to unblock directory";
+ Data_all_Acks, desc="Data for processor, all acks";
+
+ Ack, desc="Ack for processor";
+ Ack_all, desc="Last ack for processor";
+
+ WB_Ack, desc="Ack for replacement";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ // Per-line L1 cache state: coherence state, data, and dirty bit.
+ structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, default="false", desc="data is dirty";
+ }
+
+ // TBE fields
+ // Transaction buffer entry for an in-flight miss or replacement;
+ // pendingAcks counts outstanding invalidate acks (see Ack_all trigger).
+ structure(TBE, desc="...") {
+ Address Address, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool Dirty, default="false", desc="data is dirty";
+ bool isPrefetch, desc="Set if this was caused by a prefetch";
+ int pendingAcks, default="0", desc="number of pending acks";
+ }
+
+ // C++-implemented cache array; cacheProbe returns the victim address
+ // that would be evicted to make room for the given address.
+ external_type(CacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ }
+
+ // C++-implemented table of in-flight TBEs, keyed by line address.
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ // Per-controller objects: miss-tracking TBEs, split I/D L1 arrays,
+ // the CPU-facing mandatory queue, and the sequencer that completes
+ // processor requests. constructor_hack/template_hack wire in C++ types.
+ TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
+
+ CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+ CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+
+ MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
+
+ Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+
+ // Forward declaration; implementation is generated/elsewhere.
+ int cache_state_to_int(State state);
+
+ // inclusive cache returns L1 entries only
+ // A block lives in at most one of I/D (asserted in getState); prefer the
+ // D-cache, otherwise fall through to the I-cache.
+ Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
+ if (L1DcacheMemory.isTagPresent(addr)) {
+ return L1DcacheMemory[addr];
+ } else {
+ return L1IcacheMemory[addr];
+ }
+ }
+
+ // Update access permission on whichever L1 array holds the block;
+ // fatal error if neither does.
+ void changeL1Permission(Address addr, AccessPermission permission) {
+ if (L1DcacheMemory.isTagPresent(addr)) {
+ return L1DcacheMemory.changePermission(addr, permission);
+ } else if(L1IcacheMemory.isTagPresent(addr)) {
+ return L1IcacheMemory.changePermission(addr, permission);
+ } else {
+ error("cannot change permission, L1 block not present");
+ }
+ }
+
+ // True if the block's tag is in either the L1 D- or I-cache.
+ bool isL1CacheTagPresent(Address addr) {
+ return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+ }
+
+ // State lookup order: TBE (in-flight transient state) wins over the
+ // cache entry; untracked blocks are NP. A block must never be in both
+ // L1 arrays at once.
+ State getState(Address addr) {
+ if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(addr);
+ }
+ assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+ if(L1_TBEs.isPresent(addr)) {
+ return L1_TBEs[addr].TBEState;
+ } else if (isL1CacheTagPresent(addr)) {
+ return getL1CacheEntry(addr).CacheState;
+ }
+ return State:NP;
+ }
+
+
+ // Record the new state in the TBE (if any) and the cache entry (if any),
+ // and derive the entry's access permission from the new state: base
+ // states map to Invalid/Read_Only/Read_Write, transient states to Busy.
+ void setState(Address addr, State state) {
+ assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+ // MUST CHANGE
+ if(L1_TBEs.isPresent(addr)) {
+ L1_TBEs[addr].TBEState := state;
+ }
+
+ if (isL1CacheTagPresent(addr)) {
+ getL1CacheEntry(addr).CacheState := state;
+
+ // Set permission
+ if (state == State:I) {
+ changeL1Permission(addr, AccessPermission:Invalid);
+ } else if (state == State:S || state == State:E) {
+ changeL1Permission(addr, AccessPermission:Read_Only);
+ } else if (state == State:M) {
+ changeL1Permission(addr, AccessPermission:Read_Write);
+ } else {
+ changeL1Permission(addr, AccessPermission:Busy);
+ }
+ }
+ }
+
+ // Map a CPU request type to a cache event; atomics are treated as
+ // stores since they need write permission.
+ Event mandatory_request_type_to_event(CacheRequestType type) {
+ if (type == CacheRequestType:LD) {
+ return Event:Load;
+ } else if (type == CacheRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+ return Event:Store;
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+
+ // Classify where a (non-local-L1) hit was serviced, for profiling:
+ // another L1 counts as L1Cache_wCC (cache-to-cache transfer).
+ GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
+ if (machineIDToMachineType(sender) == MachineType:L1Cache) {
+ return GenericMachineType:L1Cache_wCC; // NOTE direct L1 hits should not call this
+ } else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
+ return GenericMachineType:L2Cache;
+ } else {
+ return ConvertMachToGenericMach(machineIDToMachineType(sender));
+ }
+ }
+
+
+
+ out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
+ out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
+ out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
+
+ // Response IntraChip L1 Network - response msg to this L1 cache
+ // Classifies incoming responses: DATA is further split by whether it came
+ // from a peer L1 during IS/IS_I (needs a directory unblock) and by
+ // whether this message's AckCount brings the TBE's pending-ack total to
+ // zero (the *_all_Acks / Ack_all events).
+ in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
+ if (responseIntraChipL1Network_in.isReady()) {
+ peek(responseIntraChipL1Network_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data_Exclusive, in_msg.Address);
+ } else if(in_msg.Type == CoherenceResponseType:DATA) {
+ if ( (getState(in_msg.Address) == State:IS || getState(in_msg.Address) == State:IS_I) &&
+ machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache ) {
+
+ trigger(Event:DataS_fromL1, in_msg.Address);
+
+ } else if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
+ trigger(Event:Data_all_Acks, in_msg.Address);
+ } else {
+ trigger(Event:Data, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
+ trigger(Event:Ack_all, in_msg.Address);
+ } else {
+ trigger(Event:Ack, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
+ trigger(Event:WB_Ack, in_msg.Address);
+ } else {
+ error("Invalid L1 response type");
+ }
+ }
+ }
+ }
+
+ // Request InterChip network - request from this L1 cache to the shared L2
+ // Forwarded requests arriving FROM the L2: invalidations and forwarded
+ // GETX/GETS/GET_INSTR from other processors.
+ in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
+ if(requestIntraChipL1Network_in.isReady()) {
+ peek(requestIntraChipL1Network_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:Inv, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
+ // upgrade transforms to GETX due to race
+ trigger(Event:Fwd_GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Fwd_GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+ trigger(Event:Fwd_GET_INSTR, in_msg.Address);
+ } else {
+ error("Invalid forwarded request type");
+ }
+ }
+ }
+ }
+
+ // Mandatory Queue betweens Node's CPU and it's L1 caches
+ // Demand requests from the processor. For each request the logic first
+ // evicts a block cached in the WRONG L1 array (I vs D), then either
+ // forwards the request (tag hit or free way) or triggers a replacement
+ // of the victim chosen by cacheProbe.
+ in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady()) {
+ peek(mandatoryQueue_in, CacheMsg) {
+
+ // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+ if (in_msg.Type == CacheRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_Replacement, in_msg.Address);
+ }
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ // The tag matches for the L1, so the L1 asks the L2 for it.
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+ // L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+ } else {
+ // *** DATA ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_Replacement, in_msg.Address);
+ }
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ // The tag matches for the L1, so the L1 ask the L2 for it
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+ // L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+ // Issue a GETS to this address's home L2 bank for a load miss.
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ // Issue a GET_INSTR to the home L2 bank for an instruction-fetch miss.
+ action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GET_INSTR;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+
+ // Issue a GETX to the home L2 bank for a store miss.
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ DEBUG_EXPR(machineID);
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ // Issue an UPGRADE to the home L2 bank for a store hit on a Shared line.
+ // Fixed desc: it previously said "Issue GETX" (copy/paste from b_issueGETX)
+ // even though this action issues an UPGRADE.
+ action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:UPGRADE;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ // Reply to a forwarded request with this L1's copy of the block.
+ action(d_sendDataToRequestor, "d", desc="send data to requestor") {
+ peek(requestIntraChipL1Network_in, RequestMsg) {
+ enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL1CacheEntry(address).Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ // Send this L1's copy to the home L2 bank on an M downgrade.
+ action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
+ enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL1CacheEntry(address).Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ // Like d_sendDataToRequestor, but the block lives in the TBE because the
+ // cache entry has already been deallocated (replacement in flight).
+ action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
+ peek(requestIntraChipL1Network_in, RequestMsg) {
+ enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := L1_TBEs[address].DataBlk;
+ out_msg.Dirty := L1_TBEs[address].Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
+ enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := L1_TBEs[address].DataBlk;
+ out_msg.Dirty := L1_TBEs[address].Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+  // Send a plain control ACK (no AckCount field set) to the requestor
+  // of the message currently being peeked.
+  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+      }
+    }
+  }
+
+  // Write the cached block back to our L2 bank as Writeback_Data (used
+  // on invalidation of a Modified block -- see transition M/Inv).
+  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Writeback_Data;
+    }
+  }
+
+  // TBE-sourced variant of f_sendDataToL2, for blocks already evicted
+  // from the cache arrays (see transition M_I/Inv).
+  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
+    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.DataBlk := L1_TBEs[address].DataBlk;
+      out_msg.Dirty := L1_TBEs[address].Dirty;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Writeback_Data;
+    }
+  }
+
+  // Acknowledge an invalidation: send a control ACK carrying
+  // AckCount = 1 back to the requestor named in the invalidate being
+  // peeked.  (desc previously said "send data to the L2 cache" -- a
+  // copy-paste error; no data is sent by this action.)
+  action(fi_sendInvAck, "fi", desc="send invalidate ack to requestor") {
+    peek(requestIntraChipL1Network_in, RequestMsg) {
+      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+        out_msg.AckCount := 1;
+      }
+    }
+  }
+
+
+  // Issue a PUTX (replacement writeback) for this block to the owning
+  // L2 bank.  Message class depends on the dirty bit: a dirty block
+  // travels as Writeback_Data, a clean one only as Writeback_Control.
+  // NOTE(review): desc says "send data"; the action actually issues a
+  // PUTX request, which carries the data block along.
+  action(g_issuePUTX, "g", desc="send data to the L2 cache") {
+    enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTX;
+      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL1CacheEntry(address).Dirty;
+      out_msg.Requestor:= machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      if (getL1CacheEntry(address).Dirty) {
+        out_msg.MessageSize := MessageSizeType:Writeback_Data;
+      } else {
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  // Tell our L2 bank the transaction for this block is complete.
+  // Fixed one-cycle latency on the unblock network.
+  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:UNBLOCK;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Control;
+    }
+  }
+
+  // As j_sendUnblock, but signals that we took the block exclusively
+  // (EXCLUSIVE_UNBLOCK) so the L2 can track the exclusive owner.
+  action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Response_Control;
+    }
+  }
+
+
+
+  // Local load hit: hand the cached data block to the sequencer.
+  action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
+    DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
+    sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
+  }
+
+  // Load completion for data that just arrived from outside this L1;
+  // passes the responder's machine type (via getNondirectHitMachType)
+  // to the sequencer callback.
+  action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
+    peek(responseIntraChipL1Network_in, ResponseMsg) {
+      sequencer.readCallback(address, getL1CacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
+    }
+  }
+
+  // Local store hit: notify the sequencer, then mark the line dirty.
+  action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
+    sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
+    getL1CacheEntry(address).Dirty := true;
+  }
+
+  // Store completion for an externally supplied block; marks the line
+  // dirty after the sequencer callback.
+  action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
+    peek(responseIntraChipL1Network_in, ResponseMsg) {
+      sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
+    }
+    getL1CacheEntry(address).Dirty := true;
+  }
+
+
+  // Allocate a TBE for this address and snapshot the current cache
+  // data/dirty state into it.
+  action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
+    check_allocate(L1_TBEs);
+    L1_TBEs.allocate(address);
+    L1_TBEs[address].isPrefetch := false;
+    L1_TBEs[address].Dirty := getL1CacheEntry(address).Dirty;
+    L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
+  }
+
+  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+    mandatoryQueue_in.dequeue();
+  }
+
+  // Pop the request queue; the dequeue delay is charged to virtual
+  // network 2 in the message-delay profile.
+  action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
+    profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
+  }
+
+  // Pop the response queue; delay charged to virtual network 3.
+  action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+    profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
+  }
+
+  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+    L1_TBEs.deallocate(address);
+  }
+
+  // Install the data (and dirty bit) from the response being peeked
+  // into the cache entry.
+  action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
+    peek(responseIntraChipL1Network_in, ResponseMsg) {
+      getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
+      getL1CacheEntry(address).Dirty := in_msg.Dirty;
+    }
+  }
+
+  // Subtract the message's AckCount from the TBE's pending-ack count
+  // (note AckCount can be negative, which raises pendingAcks) and log
+  // both values in the transition trace.
+  action(q_updateAckCount, "q", desc="Update ack count") {
+    peek(responseIntraChipL1Network_in, ResponseMsg) {
+      L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
+      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+      APPEND_TRANSITION_COMMENT(" p: ");
+      APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
+    }
+  }
+
+  // Explicit no-op: leave the triggering message queued.
+  action(z_stall, "z", desc="Stall") {
+  }
+
+  // Remove the block from whichever L1 array holds it (D-cache first,
+  // otherwise the I-cache), freeing the way for a replacement.
+  action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+    if (L1DcacheMemory.isTagPresent(address)) {
+      L1DcacheMemory.deallocate(address);
+    } else {
+      L1IcacheMemory.deallocate(address);
+    }
+  }
+
+  // Allocate a D-cache entry for this address if one is not already
+  // present (idempotent).
+  action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
+    if (L1DcacheMemory.isTagPresent(address) == false) {
+      L1DcacheMemory.allocate(address);
+    }
+  }
+
+  // I-cache counterpart of oo_allocateL1DCacheBlock.
+  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
+    if (L1IcacheMemory.isTagPresent(address) == false) {
+      L1IcacheMemory.allocate(address);
+    }
+  }
+
+  // Recycle the incoming L1 request queue so its head message is
+  // retried later.
+  action(zz_recycleRequestQueue, "zz", desc="recycle L1 request queue") {
+    requestIntraChipL1Network_in.recycle();
+  }
+
+  // Recycle the CPU-side mandatory queue so a stalled request is
+  // retried later -- used while the block is in a transient state.
+  // (desc previously said "recycle L1 request queue", copy-pasted from
+  // zz_recycleRequestQueue; this action recycles the mandatory queue.)
+  action(z_recycleMandatoryQueue, "\z", desc="recycle L1 mandatory queue") {
+    mandatoryQueue_in.recycle();
+  }
+
+
+  //*****************************************************
+  // TRANSITIONS
+  //*****************************************************
+
+  // Transitions for Load/Store/Replacement/WriteBack from transient
+  // states: simply retry later by recycling the mandatory queue.
+  transition({IS, IM, IS_I, M_I, E_I, SM}, {Load, Ifetch, Store, L1_Replacement}) {
+    z_recycleMandatoryQueue;
+  }
+
+  // Transitions from Idle
+  transition({NP,I}, L1_Replacement) {
+    ff_deallocateL1CacheBlock;
+  }
+
+  transition({NP,I}, Load, IS) {
+    oo_allocateL1DCacheBlock;
+    i_allocateTBE;
+    a_issueGETS;
+    k_popMandatoryQueue;
+  }
+
+  transition({NP,I}, Ifetch, IS) {
+    pp_allocateL1ICacheBlock;
+    i_allocateTBE;
+    ai_issueGETINSTR;
+    k_popMandatoryQueue;
+  }
+
+  transition({NP,I}, Store, IM) {
+    oo_allocateL1DCacheBlock;
+    i_allocateTBE;
+    b_issueGETX;
+    k_popMandatoryQueue;
+  }
+
+  // Invalidate while not holding the block: just ack it.
+  transition({NP, I}, Inv) {
+    fi_sendInvAck;
+    l_popRequestQueue;
+  }
+
+  // Transitions from Shared
+  transition(S, {Load,Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  // Store to a Shared block: upgrade in place rather than re-fetch.
+  transition(S, Store, SM) {
+    i_allocateTBE;
+    c_issueUPGRADE;
+    k_popMandatoryQueue;
+  }
+
+  transition(S, L1_Replacement, I) {
+    ff_deallocateL1CacheBlock;
+  }
+
+  transition(S, Inv, I) {
+    fi_sendInvAck;
+    l_popRequestQueue;
+  }
+
+  // Transitions from Exclusive
+
+  transition(E, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  // E permits a silent local upgrade to M on a store.
+  transition(E, Store, M) {
+    hh_store_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(E, L1_Replacement, M_I) {
+    // silent E replacement??
+    i_allocateTBE;
+    g_issuePUTX; // send data, but hold in case forwarded request
+    ff_deallocateL1CacheBlock;
+  }
+
+  transition(E, Inv, I) {
+    // don't send data
+    fi_sendInvAck;
+    l_popRequestQueue;
+  }
+
+  transition(E, Fwd_GETX, I) {
+    d_sendDataToRequestor;
+    l_popRequestQueue;
+  }
+
+  transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+    d_sendDataToRequestor;
+    d2_sendDataToL2;
+    l_popRequestQueue;
+  }
+
+  // Transitions from Modified
+  transition(M, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(M, Store) {
+    hh_store_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(M, L1_Replacement, M_I) {
+    i_allocateTBE;
+    g_issuePUTX; // send data, but hold in case forwarded request
+    ff_deallocateL1CacheBlock;
+  }
+
+  // Writeback acknowledged: the replacement is complete.
+  transition(M_I, WB_Ack, I) {
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(M, Inv, I) {
+    f_sendDataToL2;
+    l_popRequestQueue;
+  }
+
+  // Invalidated while our PUTX is in flight: data comes from the TBE.
+  transition(M_I, Inv, I) {
+    ft_sendDataToL2_fromTBE;
+    s_deallocateTBE;
+    l_popRequestQueue;
+  }
+
+  transition(M, Fwd_GETX, I) {
+    d_sendDataToRequestor;
+    l_popRequestQueue;
+  }
+
+  transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+    d_sendDataToRequestor;
+    d2_sendDataToL2;
+    l_popRequestQueue;
+  }
+
+  transition(M_I, Fwd_GETX, I) {
+    dt_sendDataToRequestor_fromTBE;
+    s_deallocateTBE;
+    l_popRequestQueue;
+  }
+
+  transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, I) {
+    dt_sendDataToRequestor_fromTBE;
+    d2t_sendDataToL2_fromTBE;
+    s_deallocateTBE;
+    l_popRequestQueue;
+  }
+
+  // Transitions from IS
+  // An Inv during the fetch moves us to IS_I: the incoming fill will
+  // still complete the load, but the block then ends up Invalid.
+  transition({IS, IS_I}, Inv, IS_I) {
+    fi_sendInvAck;
+    l_popRequestQueue;
+  }
+
+  transition(IS, Data_all_Acks, S) {
+    u_writeDataToL1Cache;
+    x_external_load_hit;
+    s_deallocateTBE;
+    j_sendUnblock;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(IS_I, Data_all_Acks, I) {
+    u_writeDataToL1Cache;
+    x_external_load_hit;
+    s_deallocateTBE;
+    j_sendUnblock;
+    o_popIncomingResponseQueue;
+  }
+
+
+  transition(IS, DataS_fromL1, S) {
+    u_writeDataToL1Cache;
+    j_sendUnblock;
+    x_external_load_hit;
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(IS_I, DataS_fromL1, I) {
+    u_writeDataToL1Cache;
+    j_sendUnblock;
+    x_external_load_hit;
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // directory is blocked when sending exclusive data
+  transition(IS_I, Data_Exclusive, E) {
+    u_writeDataToL1Cache;
+    x_external_load_hit;
+    jj_sendExclusiveUnblock;
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(IS, Data_Exclusive, E) {
+    u_writeDataToL1Cache;
+    x_external_load_hit;
+    jj_sendExclusiveUnblock;
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // Transitions from IM
+  // NOTE(review): an Inv in SM demotes the request to plain IM -- the
+  // Shared copy is lost, so the UPGRADE is treated as a full GETX fill
+  // (the L2 converts UPGRADE from a non-sharer into GETX).
+  transition({IM, SM}, Inv, IM) {
+    fi_sendInvAck;
+    l_popRequestQueue;
+  }
+
+  transition(IM, Data, SM) {
+    u_writeDataToL1Cache;
+    q_updateAckCount;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(IM, Data_all_Acks, M) {
+    u_writeDataToL1Cache;
+    xx_external_store_hit;
+    jj_sendExclusiveUnblock;
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+
+  // transitions from SM
+  transition({SM, IM}, Ack) {
+    q_updateAckCount;
+    o_popIncomingResponseQueue;
+  }
+
+  transition(SM, Ack_all, M) {
+    jj_sendExclusiveUnblock;
+    xx_external_store_hit;
+    s_deallocateTBE;
+    o_popIncomingResponseQueue;
+  }
+}
+
+
+
diff --git a/src/mem/protocol/MESI_SCMP_bankdirectory-L2cache.sm b/src/mem/protocol/MESI_SCMP_bankdirectory-L2cache.sm
new file mode 100644
index 000000000..0bd9c2b14
--- /dev/null
+++ b/src/mem/protocol/MESI_SCMP_bankdirectory-L2cache.sm
@@ -0,0 +1,1052 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
+ *
+ */
+
+machine(L2Cache, "MOSI Directory L2 Cache CMP") {
+
+  // L2 BANK QUEUES
+  // Note: responses share virtual network 3 in both directions.
+  // From local bank of L2 cache TO the network
+  MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="2", ordered="false";  // this L2 bank -> Memory
+  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false";  // this L2 bank -> a local L1
+  MessageBuffer responseFromL2Cache, network="To", virtual_network="3", ordered="false";  // this L2 bank -> a local L1 || Memory
+
+  // FROM the network to this local bank of L2 cache
+  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false";  // a local L1 -> this L2 bank
+  MessageBuffer responseToL2Cache, network="From", virtual_network="3", ordered="false";  // a local L1 || Memory -> this L2 bank
+  MessageBuffer unblockToL2Cache, network="From", virtual_network="4", ordered="false";  // a local L1 || Memory -> this L2 bank
+
+ // STATES
+  // Stable states are NP/SS/M/MT; all others are transient
+  // (replacement, memory fetch, or blocked-on-unblock).
+  enumeration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
+    // Base states
+    NP, desc="Not present in either cache";
+    SS, desc="L2 cache entry Shared, also present in one or more L1s";
+    M, desc="L2 cache entry Modified, not present in any L1s", format="!b";
+    MT, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";
+
+    // L2 replacement
+    M_I, desc="L2 cache replacing, have all acks, sent dirty data to memory, waiting for ACK from memory";
+    MT_I, desc="L2 cache replacing, getting data from exclusive";
+    MCT_I, desc="L2 cache replacing, clean in L2, getting data or ack from exclusive";
+    I_I, desc="L2 replacing clean data, need to inv sharers and then drop data";
+    S_I, desc="L2 replacing dirty data, collecting acks from L1s";
+
+    // Transient States for fetching data from memory
+    ISS, desc="L2 idle, got single L1_GETS, issued memory fetch, have not seen response yet";
+    IS, desc="L2 idle, got L1_GET_INSTR or multiple L1_GETS, issued memory fetch, have not seen response yet";
+    IM, desc="L2 idle, got L1_GETX, issued memory fetch, have not seen response(s) yet";
+
+    // Blocking states
+    SS_MB, desc="Blocked for L1_GETX from SS";
+    MT_MB, desc="Blocked for L1_GETX from MT";
+    M_MB, desc="Blocked for L1_GETX from M";
+
+    MT_IIB, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
+    MT_IB, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
+    MT_SB, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";
+
+  }
+
+ // EVENTS
+  // EVENTS
+  // (Fixed "maped" -> "mapped" typos, and L1_UPGRADE's desc which was
+  // copy-pasted from L1_GETX.)
+  enumeration(Event, desc="L2 Cache events") {
+    // L2 events
+
+    // events initiated by the local L1s
+    L1_GET_INSTR, desc="a L1I GET INSTR request for a block mapped to us";
+    L1_GETS, desc="a L1D GETS request for a block mapped to us";
+    L1_GETX, desc="a L1D GETX request for a block mapped to us";
+    // UPGRADE from a non-sharer is demoted to L1_GETX -- see
+    // L1Cache_request_type_to_event().
+    L1_UPGRADE, desc="a L1D UPGRADE request for a block mapped to us";
+
+    L1_PUTX, desc="L1 replacing data";
+    L1_PUTX_old, desc="L1 replacing data, but no longer sharer";
+
+    Fwd_L1_GETX, desc="L1 did not have data, so we supply";
+    Fwd_L1_GETS, desc="L1 did not have data, so we supply";
+    Fwd_L1_GET_INSTR, desc="L1 did not have data, so we supply";
+
+    // events initiated by this L2
+    L2_Replacement, desc="L2 Replacement", format="!r";
+    L2_Replacement_clean, desc="L2 Replacement, but data is clean", format="!r";
+
+    // events from memory controller
+    Mem_Data, desc="data from memory", format="!r";
+    Mem_Ack, desc="ack from memory", format="!r";
+
+    // M->S data writeback
+    WB_Data, desc="data from L1";
+    WB_Data_clean, desc="clean data from L1";
+    Ack, desc="writeback ack";
+    // Ack_all fires when the last pending ack arrives (see the
+    // response in_port).
+    Ack_all, desc="writeback ack";
+
+    Unblock, desc="Unblock from L1 requestor";
+    Unblock_Cancel, desc="Unblock from L1 requestor (FOR XACT MEMORY)";
+    Exclusive_Unblock, desc="Unblock from L1 requestor";
+
+  }
+
+ // TYPES
+
+  // CacheEntry
+  // One L2 line: coherence state, the on-chip L1 sharer set, the
+  // exclusive L1 holder (when one exists), and the data itself.
+  structure(Entry, desc="...", interface="AbstractCacheEntry") {
+    State CacheState, desc="cache state";
+    NetDest Sharers, desc="tracks the L1 shares on-chip";
+    MachineID Exclusive, desc="Exclusive holder of block";
+    DataBlock DataBlk, desc="data for the block";
+    bool Dirty, default="false", desc="data is dirty";
+  }
+
+  // TBE fields
+  // Per-address bookkeeping held while a transaction is in flight.
+  structure(TBE, desc="...") {
+    Address Address, desc="Physical address for this TBE";
+    State TBEState, desc="Transient state";
+    DataBlock DataBlk, desc="Buffer for the data block";
+    bool Dirty, default="false", desc="Data is Dirty";
+
+    NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
+    MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
+    bool isPrefetch, desc="Set if this was caused by a prefetch";
+
+    int pendingAcks, desc="number of pending acks for invalidates during writeback";
+  }
+
+  // Interface to the externally-defined cache array implementation.
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);
+    Address cacheProbe(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+    void setMRU(Address);
+  }
+
+  // Interface to the externally-defined TBE table implementation.
+  external_type(TBETable) {
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+  // Per-bank TBE table and L2 cache array instances.
+  TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
+
+  CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
+
+ // inclusive cache, returns L2 entries only
+  // Direct (by-reference) access to the L2 entry for this address.
+  Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
+    return L2cacheMemory[addr];
+  }
+
+  // Change the entry's access permission, but only when the tag is
+  // actually present (no-op otherwise).
+  void changeL2Permission(Address addr, AccessPermission permission) {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory.changePermission(addr, permission);
+    }
+  }
+
+  // Debug helper: request type as a printable string.
+  string getCoherenceRequestTypeStr(CoherenceRequestType type) {
+    return CoherenceRequestType_to_string(type);
+  }
+
+  bool isL2CacheTagPresent(Address addr) {
+    return (L2cacheMemory.isTagPresent(addr));
+  }
+
+  // True when the given requestor is the ONLY remaining sharer.  The
+  // requestor must currently be recorded as a sharer (asserted).
+  bool isOneSharerLeft(Address addr, MachineID requestor) {
+    assert(L2cacheMemory[addr].Sharers.isElement(requestor));
+    return (L2cacheMemory[addr].Sharers.count() == 1);
+  }
+
+  // Safe sharer test: returns false when the tag is not even present.
+  bool isSharer(Address addr, MachineID requestor) {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory[addr].Sharers.isElement(requestor);
+    } else {
+      return false;
+    }
+  }
+
+  // Record an L1 as a sharer; asserts the block really maps to this
+  // L2 bank.
+  void addSharer(Address addr, MachineID requestor) {
+    DEBUG_EXPR(machineID);
+    DEBUG_EXPR(requestor);
+    DEBUG_EXPR(addr);
+    assert(map_L1CacheMachId_to_L2Cache(addr, requestor) == machineID);
+    L2cacheMemory[addr].Sharers.add(requestor);
+  }
+
+  // TBE state takes precedence over the cache entry's state; NP when
+  // neither exists.
+  State getState(Address addr) {
+    if(L2_TBEs.isPresent(addr)) {
+      return L2_TBEs[addr].TBEState;
+    } else if (isL2CacheTagPresent(addr)) {
+      return getL2CacheEntry(addr).CacheState;
+    }
+    return State:NP;
+  }
+
+  string getStateStr(Address addr) {
+    return L2Cache_State_to_string(getState(addr));
+  }
+
+  // Update the state in both the TBE (if any) and the cache entry (if
+  // present), and derive the entry's access permission from the new
+  // state.
+  void setState(Address addr, State state) {
+
+    // MUST CHANGE
+    if (L2_TBEs.isPresent(addr)) {
+      L2_TBEs[addr].TBEState := state;
+    }
+
+    if (isL2CacheTagPresent(addr)) {
+      getL2CacheEntry(addr).CacheState := state;
+
+      // Set permission
+      if (state == State:SS ) {
+        changeL2Permission(addr, AccessPermission:Read_Only);
+      } else if (state == State:M) {
+        changeL2Permission(addr, AccessPermission:Read_Write);
+      } else if (state == State:MT) {
+        // MT: an L1 owns the block, so the L2 copy is assumed stale.
+        changeL2Permission(addr, AccessPermission:Stale);
+      } else {
+        changeL2Permission(addr, AccessPermission:Busy);
+      }
+    }
+  }
+
+  // Map an incoming L1 request type to the event that drives the state
+  // machine.  UPGRADE is demoted to GETX when the requestor is not (or
+  // no longer) a recorded sharer, and PUTX becomes PUTX_old when the
+  // writer has already been dropped from the sharer list.
+  Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) {
+    if(type == CoherenceRequestType:GETS) {
+      return Event:L1_GETS;
+    } else if(type == CoherenceRequestType:GET_INSTR) {
+      return Event:L1_GET_INSTR;
+    } else if (type == CoherenceRequestType:GETX) {
+      return Event:L1_GETX;
+    } else if (type == CoherenceRequestType:UPGRADE) {
+      if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).Sharers.isElement(requestor) ) {
+        return Event:L1_UPGRADE;
+      } else {
+        return Event:L1_GETX;
+      }
+    } else if (type == CoherenceRequestType:PUTX) {
+      if (isSharer(addr, requestor)) {
+        return Event:L1_PUTX;
+      } else {
+        return Event:L1_PUTX_old;
+      }
+    } else {
+      DEBUG_EXPR(addr);
+      DEBUG_EXPR(type);
+      error("Invalid L1 forwarded request type");
+    }
+  }
+
+ // ** OUT_PORTS **
+
+  out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
+  out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache);
+  out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);
+
+
+  // Response IntraChip L2 Network - response msg to this particular L2 bank
+  // Classifies each response by origin: L1 responses are writeback data
+  // (dirty or clean) or invalidation acks (Ack_all once the last
+  // pending ack is accounted for); off-chip responses are memory data
+  // or the memory's writeback ack.
+  in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
+    if (responseIntraChipL2Network_in.isReady()) {
+      peek(responseIntraChipL2Network_in, ResponseMsg) {
+        // test whether it's from a local L1 or an off-chip source
+        assert(in_msg.Destination.isElement(machineID));
+        if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
+          if(in_msg.Type == CoherenceResponseType:DATA) {
+            if (in_msg.Dirty) {
+              trigger(Event:WB_Data, in_msg.Address);
+            } else {
+              trigger(Event:WB_Data_clean, in_msg.Address);
+            }
+          } else if (in_msg.Type == CoherenceResponseType:ACK) {
+            if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
+              trigger(Event:Ack_all, in_msg.Address);
+            } else {
+              trigger(Event:Ack, in_msg.Address);
+            }
+          } else {
+            error("unknown message type");
+          }
+
+        } else { // external message
+          if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+            trigger(Event:Mem_Data, in_msg.Address);  // L2 now has data and all off-chip acks
+          } else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
+            trigger(Event:Mem_Ack, in_msg.Address);  // L2 now has data and all off-chip acks
+          } else {
+            error("unknown message type");
+          }
+        }
+      }
+    }  // if not ready, do nothing
+  }
+
+ // L1 Request
+  // L1 request port.  If the tag is present or a way is free, hand the
+  // request to the state machine; otherwise first trigger a replacement
+  // of the victim line (dirty -> L2_Replacement, clean ->
+  // L2_Replacement_clean).
+  in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
+    if(L1RequestIntraChipL2Network_in.isReady()) {
+      peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+        DEBUG_EXPR(in_msg.Address);
+        DEBUG_EXPR(id);
+        DEBUG_EXPR(getState(in_msg.Address));
+        DEBUG_EXPR(in_msg.Requestor);
+        DEBUG_EXPR(in_msg.Type);
+        DEBUG_EXPR(in_msg.Destination);
+        assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
+        assert(in_msg.Destination.isElement(machineID));
+        if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+          // The L2 contains the block, so proceed with handling the request
+          trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
+        } else {
+          if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+            // L2 doesn't have the line, but we have space for it in the L2
+            trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
+          } else {
+            // No room in the L2, so we need to make room before handling the request.
+            // Note: the replacement event is triggered on the VICTIM's
+            // address (cacheProbe), not the requested address.
+            if (L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Dirty ) {
+              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+            } else {
+              trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address));
+            }
+          }
+        }
+      }
+    }
+  }
+
+  // Unblock port: distinguishes an exclusive completion
+  // (EXCLUSIVE_UNBLOCK) from a plain UNBLOCK sent by the L1 that
+  // finished a transaction.
+  in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
+    if(L1unblockNetwork_in.isReady()) {
+      peek(L1unblockNetwork_in, ResponseMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
+          trigger(Event:Exclusive_Unblock, in_msg.Address);
+        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+          trigger(Event:Unblock, in_msg.Address);
+        } else {
+          error("unknown unblock message");
+        }
+      }
+    }
+  }
+
+ // ACTIONS
+
+  // Fetch the block from memory: GETS to the directory that this
+  // address maps to.
+  action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETS;
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_Address_to_Directory(address));
+        out_msg.MessageSize := MessageSizeType:Control;
+      }
+    }
+  }
+
+  // Forward the L1's request (type and original requestor preserved)
+  // to the L1 recorded as the exclusive holder of the block.
+  action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := in_msg.Type;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination.add(L2cacheMemory[address].Exclusive);
+        out_msg.MessageSize := MessageSizeType:Request_Control;
+      }
+    }
+  }
+
+  // Write the replaced block back to memory from the cache entry.
+  action(c_exclusiveReplacement, "c", desc="Send data to memory") {
+    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL2CacheEntry(address).Dirty;
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+  }
+
+  // As c_exclusiveReplacement, but sourced from the TBE.
+  action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
+    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.DataBlk := L2_TBEs[address].DataBlk;
+      out_msg.Dirty := L2_TBEs[address].Dirty;
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+  }
+
+
+  // Send our copy to the requestor.  AckCount is set to MINUS the
+  // number of current sharers -- the L1 subtracts AckCount from its
+  // pendingAcks, so this tells it how many invalidation acks to
+  // collect.  A requestor that is itself a sharer is excluded from the
+  // count.  (Fixed "reqeustor" typos in the desc strings.)
+  action(d_sendDataToRequestor, "d", desc="Send data from cache to requestor") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.Dirty := getL2CacheEntry(address).Dirty;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+
+        out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
+        if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
+          out_msg.AckCount := out_msg.AckCount + 1;
+        }
+      }
+    }
+  }
+
+  // Same as d_sendDataToRequestor but grants the block exclusively
+  // (DATA_EXCLUSIVE).
+  action(dd_sendExclusiveDataToRequestor, "dd", desc="Send data from cache to requestor") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.Dirty := getL2CacheEntry(address).Dirty;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+
+        out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
+        if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
+          out_msg.AckCount := out_msg.AckCount + 1;
+        }
+      }
+    }
+  }
+
+  // Shared-data variant: no invalidations are needed, so AckCount is
+  // simply zero.
+  action(ds_sendSharedDataToRequestor, "ds", desc="Send data from cache to requestor") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.Dirty := getL2CacheEntry(address).Dirty;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+        out_msg.AckCount := 0;
+      }
+    }
+  }
+
+  // Send data to every L1 recorded in the TBE's GetS set.  The Sender
+  // field is set to the directory rather than this L2 (the machineID
+  // assignment is deliberately commented out).
+  action(e_sendDataToGetSRequestors, "e", desc="Send data from cache to all GetS IDs") {
+    assert(L2_TBEs[address].L1_GetS_IDs.count() > 0);
+    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      //out_msg.Sender := machineID;
+      out_msg.Sender := map_Address_to_Directory(address);
+      out_msg.Destination := L2_TBEs[address].L1_GetS_IDs;  // internal nodes
+      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL2CacheEntry(address).Dirty;
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+  }
+
+  // Exclusive-grant variant of e_sendDataToGetSRequestors: requires
+  // exactly one waiting GetS requestor.
+  action(ex_sendExclusiveDataToGetSRequestors, "ex", desc="Send data from cache to all GetS IDs") {
+    assert(L2_TBEs[address].L1_GetS_IDs.count() == 1);
+    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+      // out_msg.Sender := machineID;
+      out_msg.Sender := map_Address_to_Directory(address);
+      out_msg.Destination := L2_TBEs[address].L1_GetS_IDs;  // internal nodes
+      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL2CacheEntry(address).Dirty;
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+  }
+
+
+  // Send data to the single L1 recorded as the pending GetX requestor.
+  action(ee_sendDataToGetXRequestor, "ee", desc="Send data from cache to GetX ID") {
+    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA;
+      // out_msg.Sender := machineID;
+      out_msg.Sender := map_Address_to_Directory(address);
+      out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID);
+      DEBUG_EXPR(out_msg.Destination);
+      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL2CacheEntry(address).Dirty;
+      DEBUG_EXPR(out_msg.Address);
+      DEBUG_EXPR(out_msg.Destination);
+      DEBUG_EXPR(out_msg.DataBlk);
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+  }
+
+
+  // Invalidate all recorded sharers on behalf of an L2 replacement;
+  // Requestor is this L2 bank, so acks come back to us.
+  action(f_sendInvToSharers, "f", desc="invalidate sharers for L2 replacement") {
+    enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:INV;
+      out_msg.Requestor := machineID;
+      out_msg.Destination := L2cacheMemory[address].Sharers;
+      out_msg.MessageSize := MessageSizeType:Request_Control;
+    }
+  }
+
+  // Invalidate all sharers on behalf of the L1 whose request is being
+  // peeked; that L1 is named as Requestor, so the invalidation acks
+  // flow directly to it.
+  action(fw_sendFwdInvToSharers, "fw", desc="invalidate sharers for request") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:INV;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination := L2cacheMemory[address].Sharers;
+        out_msg.MessageSize := MessageSizeType:Request_Control;
+      }
+    }
+  }
+
+
+  // As fw_sendFwdInvToSharers, but the requestor is itself a sharer and
+  // is removed from the destination set so it does not invalidate its
+  // own copy.
+  action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:INV;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination := L2cacheMemory[address].Sharers;
+        out_msg.Destination.remove(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Request_Control;
+      }
+    }
+  }
+
+ // OTHER ACTIONS
+  action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
+    check_allocate(L2_TBEs); // stall/assert if the TBE table is full
+    L2_TBEs.allocate(address);
+    L2_TBEs[address].L1_GetS_IDs.clear();
+    L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk; // snapshot current L2 copy into the TBE
+    L2_TBEs[address].Dirty := getL2CacheEntry(address).Dirty;
+    L2_TBEs[address].pendingAcks := getL2CacheEntry(address).Sharers.count(); // one inv-ack expected per current sharer
+  }
+
+ action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
+ L2_TBEs.deallocate(address);
+ }
+
+ action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
+ profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
+ }
+
+ action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
+ profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
+ }
+
+
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
+ profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
+ }
+
+
+ action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+ getL2CacheEntry(address).Dirty := in_msg.Dirty;
+ }
+ }
+
+  action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from L1 request queue to cache") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) { // data arrives with the L1 request (e.g. PUTX), not on the response network
+      getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+      getL2CacheEntry(address).Dirty := in_msg.Dirty;
+    }
+  }
+
+ action(q_updateAck, "q", desc="update pending ack count") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
+ APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+ APPEND_TRANSITION_COMMENT(" p: ");
+ APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
+ }
+ }
+
+ action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ L2_TBEs[address].DataBlk := in_msg.DataBlk;
+ L2_TBEs[address].Dirty := in_msg.Dirty;
+ }
+ }
+
+
+ action(z_stall, "z", desc="Stall") {
+ }
+
+
+ action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].L1_GetS_IDs.add(in_msg.Requestor);
+ }
+ }
+
+ action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].L1_GetX_ID := in_msg.Requestor;
+ }
+ }
+
+ action(set_setMRU, "\set", desc="set the MRU entry") {
+ L2cacheMemory.setMRU(address);
+ }
+
+ action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
+ if (L2cacheMemory.isTagPresent(address) == false) {
+ L2cacheMemory.allocate(address);
+ }
+ }
+
+ action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ L2cacheMemory.deallocate(address);
+ }
+
+ action(t_sendWBAck, "t", desc="Send writeback ACK") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:WB_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+  action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+        // negative count = acks the upgrader must still collect; +1 because the upgrader is itself a sharer and never acks itself (presumably the L1 sums AckCounts to zero — confirm in the L1cache controller)
+        out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
+      }
+    }
+  }
+
+ action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, L1CacheMachIDToProcessorNum(in_msg.Requestor));
+ }
+ }
+
+ action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ // profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
+ }
+ }
+
+
+
+ action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ addSharer(address, in_msg.Requestor);
+ APPEND_TRANSITION_COMMENT( getL2CacheEntry(address).Sharers );
+ }
+ }
+
+ action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
+ peek(L1unblockNetwork_in, ResponseMsg) {
+ addSharer(address, in_msg.Sender);
+ }
+ }
+
+
+ action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2cacheMemory[address].Sharers.remove(in_msg.Requestor);
+ }
+ }
+
+ action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2cacheMemory[address].Sharers.clear();
+ }
+ }
+
+ action(mm_markExclusive, "\m", desc="set the exclusive owner") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2cacheMemory[address].Sharers.clear();
+ L2cacheMemory[address].Exclusive := in_msg.Requestor;
+ addSharer(address, in_msg.Requestor);
+ }
+ }
+
+ action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
+ peek(L1unblockNetwork_in, ResponseMsg) {
+ L2cacheMemory[address].Sharers.clear();
+ L2cacheMemory[address].Exclusive := in_msg.Sender;
+ addSharer(address, in_msg.Sender);
+ }
+ }
+
+ action(zz_recycleL1RequestQueue, "zz", desc="recycle L1 request queue") {
+ L1RequestIntraChipL2Network_in.recycle();
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+
+ //===============================================
+ // BASE STATE - I
+
+ // Transitions from I (Idle)
+ transition({NP, IS, ISS, IM, SS, M, M_I, MT_I, MCT_I, I_I, S_I, SS_MB, M_MB, MT_IIB, MT_IB, MT_SB}, L1_PUTX) {
+ jj_popL1RequestQueue;
+ }
+
+ transition({NP, SS, M, MT, M_I, MT_I, MCT_I, I_I, S_I, IS, ISS, IM, SS_MB, MT_MB, M_MB, MT_IIB, MT_IB, MT_SB}, L1_PUTX_old) {
+ jj_popL1RequestQueue;
+ }
+
+ transition({IM, IS, ISS, SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L2_Replacement, L2_Replacement_clean}) {
+ zz_recycleL1RequestQueue;
+ }
+
+ transition({SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE}) {
+ zz_recycleL1RequestQueue;
+ }
+
+
+ transition(NP, L1_GETS, ISS) {
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ nn_addSharer;
+ i_allocateTBE;
+ ss_recordGetSL1ID;
+ a_issueFetchToMemory;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(NP, L1_GET_INSTR, IS) {
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ nn_addSharer;
+ i_allocateTBE;
+ ss_recordGetSL1ID;
+ a_issueFetchToMemory;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(NP, L1_GETX, IM) {
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ // nn_addSharer;
+ i_allocateTBE;
+ xx_recordGetXL1ID;
+ a_issueFetchToMemory;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+
+ // transitions from IS/IM
+
+ transition(ISS, Mem_Data, MT_MB) {
+ m_writeDataToCache;
+ ex_sendExclusiveDataToGetSRequestors;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transition(IS, Mem_Data, SS) {
+ transition(IS, Mem_Data, SS_MB) {
+ m_writeDataToCache;
+ e_sendDataToGetSRequestors;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Mem_Data, MT_MB) {
+ m_writeDataToCache;
+ ee_sendDataToGetXRequestor;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+/*
+ transition({IS, ISS}, {L1_GETS, L1_GET_INSTR}, IS) {
+ nn_addSharer;
+ ss_recordGetSL1ID;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+*/
+
+ transition({IS, ISS}, {L1_GETS, L1_GET_INSTR}) {
+ zz_recycleL1RequestQueue;
+ }
+
+ transition({IS, ISS}, L1_GETX) {
+ zz_recycleL1RequestQueue;
+ }
+
+ transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR}) {
+ zz_recycleL1RequestQueue;
+ }
+
+ // transitions from SS
+ // transition(SS, {L1_GETS, L1_GET_INSTR}) {
+ transition(SS, {L1_GETS, L1_GET_INSTR}, SS_MB) {
+ ds_sendSharedDataToRequestor;
+ nn_addSharer;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+
+ transition(SS, L1_GETX, SS_MB) {
+ d_sendDataToRequestor;
+ // fw_sendFwdInvToSharers;
+ fwm_sendFwdInvToSharersMinusRequestor;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(SS, L1_UPGRADE, SS_MB) {
+ fwm_sendFwdInvToSharersMinusRequestor;
+ ts_sendInvAckToUpgrader;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(SS, L2_Replacement_clean, I_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(SS, L2_Replacement, S_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(M, L1_GETX, MT_MB) {
+ d_sendDataToRequestor;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ // transition(M, L1_GET_INSTR, SS) {
+ transition(M, L1_GET_INSTR, SS_MB) {
+ d_sendDataToRequestor;
+ nn_addSharer;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(M, L1_GETS, MT_MB) {
+ dd_sendExclusiveDataToRequestor;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(M, L2_Replacement, M_I) {
+ i_allocateTBE;
+ c_exclusiveReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(M, L2_Replacement_clean, M_I) {
+ rr_deallocateL2CacheBlock;
+ }
+
+
+ // transitions from MT
+
+ transition(MT, L1_GETX, MT_MB) {
+ b_forwardRequestToExclusive;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+
+ transition(MT, {L1_GETS, L1_GET_INSTR}, MT_IIB) {
+ b_forwardRequestToExclusive;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(MT, L2_Replacement, MT_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MT, L2_Replacement_clean, MCT_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MT, L1_PUTX, M) {
+ ll_clearSharers;
+ mr_writeDataToCacheFromRequest;
+ t_sendWBAck;
+ jj_popL1RequestQueue;
+ }
+
+
+ // transitions from blocking states
+ transition(SS_MB, Unblock_Cancel, SS) {
+ k_popUnblockQueue;
+ }
+
+ transition(MT_MB, Unblock_Cancel, MT) {
+ k_popUnblockQueue;
+ }
+
+ transition(MT_IB, Unblock_Cancel, MT) {
+ k_popUnblockQueue;
+ }
+
+ transition(SS_MB, Exclusive_Unblock, MT) {
+ // update actual directory
+ mmu_markExclusiveFromUnblock;
+ k_popUnblockQueue;
+ }
+
+ transition(SS_MB, Unblock, SS) {
+ k_popUnblockQueue;
+ }
+
+ transition({M_MB, MT_MB}, Exclusive_Unblock, MT) {
+ // update actual directory
+ mmu_markExclusiveFromUnblock;
+ k_popUnblockQueue;
+ }
+
+ transition(MT_IIB, Unblock, MT_IB) {
+ nnu_addSharerFromUnblock;
+ k_popUnblockQueue;
+ }
+
+ transition(MT_IIB, {WB_Data, WB_Data_clean}, MT_SB) {
+ m_writeDataToCache;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
+ m_writeDataToCache;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(MT_SB, Unblock, SS) {
+ nnu_addSharerFromUnblock;
+ k_popUnblockQueue;
+ }
+
+ // writeback states
+ transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR}) {
+ zz_recycleL1RequestQueue;
+ }
+
+ transition(I_I, Ack) {
+ q_updateAck;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(I_I, Ack_all, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition({MT_I, MCT_I}, WB_Data, M_I) {
+ qq_writeDataToTBE;
+ ct_exclusiveReplacementFromTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(MCT_I, WB_Data_clean, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // L1 never changed Dirty data
+ transition(MT_I, Ack_all, M_I) {
+ ct_exclusiveReplacementFromTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // clean data that L1 exclusive never wrote
+ transition(MCT_I, Ack_all, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // drop this because L1 will send data again
+ // the reason we don't accept is that the request virtual network may be completely backed up
+ // transition(MT_I, L1_PUTX) {
+ // jj_popL1RequestQueue;
+ //}
+
+ // possible race between unblock and immediate replacement
+ transition(MT_MB, L1_PUTX) {
+ zz_recycleL1RequestQueue;
+ }
+
+ transition(MT_I, WB_Data_clean, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(S_I, Ack) {
+ q_updateAck;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(S_I, Ack_all, M_I) {
+ ct_exclusiveReplacementFromTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(M_I, Mem_Ack, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+}
+
diff --git a/src/mem/protocol/MESI_SCMP_bankdirectory-mem.sm b/src/mem/protocol/MESI_SCMP_bankdirectory-mem.sm
new file mode 100644
index 000000000..1fcd234fe
--- /dev/null
+++ b/src/mem/protocol/MESI_SCMP_bankdirectory-mem.sm
@@ -0,0 +1,166 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
+ */
+
+
+machine(Directory, "MESI_SCMP_bankdirectory protocol") {
+
+  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
+  MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
+  MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
+
+ // STATES
+  enumeration(State, desc="Directory states", default="Directory_State_I") {
+    // Base states
+    I, desc="Idle";
+  }
+
+ // Events
+  enumeration(Event, desc="Directory events") {
+    Fetch, desc="A memory fetch request (GETS or GETX) arrives";
+    Data, desc="Writeback data (MEMORY_DATA) arrives";
+  }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+
+ State getState(Address addr) {
+ return State:I;
+ }
+
+ void setState(Address addr, State state) {
+ }
+
+ // ** OUT_PORTS **
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+
+ // ** IN_PORTS **
+
+ in_port(requestNetwork_in, RequestMsg, requestToDir) {
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Fetch, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:Fetch, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(responseNetwork_in, ResponseMsg, responseToDir) {
+ if (responseNetwork_in.isReady()) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+ trigger(Event:Data, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+ action(a_sendAck, "a", desc="Send ack to L2") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Sender);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(d_sendData, "d", desc="Send data to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ requestNetwork_in.dequeue();
+ }
+
+  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
+    responseNetwork_in.dequeue();
+  }
+
+ action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ // TRANSITIONS
+
+ transition(I, Fetch) {
+ d_sendData;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(I, Data) {
+ m_writeDataToMemory;
+ a_sendAck;
+ k_popIncomingResponseQueue;
+ }
+}
diff --git a/src/mem/protocol/MESI_SCMP_bankdirectory-msg.sm b/src/mem/protocol/MESI_SCMP_bankdirectory-msg.sm
new file mode 100644
index 000000000..c2d02b59d
--- /dev/null
+++ b/src/mem/protocol/MESI_SCMP_bankdirectory-msg.sm
@@ -0,0 +1,112 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MSI_MOSI_CMP_directory-msg.sm 1.5 05/01/19 15:48:37-06:00 mikem@royal16.cs.wisc.edu $
+ *
+ */
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ UPGRADE, desc="UPGRADE to exclusive";
+ GETS, desc="Get Shared";
+ GET_INSTR, desc="Get Instruction";
+ INV, desc="INValidate";
+ PUTX, desc="replacement message";
+}
+
+// CoherenceResponseType
+enumeration(CoherenceResponseType, desc="...") {
+  MEMORY_ACK, desc="Ack from memory controller";
+  DATA, desc="Data";
+  DATA_EXCLUSIVE, desc="Data, with exclusive permission";
+  MEMORY_DATA, desc="Data from memory";
+  ACK, desc="Generic invalidate ack";
+  WB_ACK, desc="writeback ack";
+  UNBLOCK, desc="unblock";
+  EXCLUSIVE_UNBLOCK, desc="exclusive unblock";
+}
+
+// RequestMsg
+structure(RequestMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ AccessModeType AccessMode, desc="user/supervisor access type";
+ MachineID Requestor , desc="What component request";
+ NetDest Destination, desc="What components receive the request, includes MachineType and num";
+ MessageSizeType MessageSize, desc="size category of the message";
+ DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
+ bool Dirty, default="false", desc="Dirty bit";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+}
+
+// ResponseMsg
+structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID Sender, desc="What component sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ DataBlock DataBlk, desc="Data for the cache line";
+ bool Dirty, default="false", desc="Dirty bit";
+ int AckCount, default="0", desc="number of acks in this message";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+/*
+GenericRequestType convertToGenericType(CoherenceRequestType type) {
+ if(type == CoherenceRequestType:PUTX) {
+ return GenericRequestType:PUTX;
+ } else if(type == CoherenceRequestType:GETS) {
+ return GenericRequestType:GETS;
+ } else if(type == CoherenceRequestType:GET_INSTR) {
+ return GenericRequestType:GET_INSTR;
+ } else if(type == CoherenceRequestType:GETX) {
+ return GenericRequestType:GETX;
+ } else if(type == CoherenceRequestType:UPGRADE) {
+ return GenericRequestType:UPGRADE;
+ } else if(type == CoherenceRequestType:PUTS) {
+ return GenericRequestType:PUTS;
+ } else if(type == CoherenceRequestType:INV) {
+ return GenericRequestType:INV;
+ } else if(type == CoherenceRequestType:INV_S) {
+ return GenericRequestType:INV_S;
+ } else if(type == CoherenceRequestType:L1_DG) {
+ return GenericRequestType:DOWNGRADE;
+ } else if(type == CoherenceRequestType:WB_ACK) {
+ return GenericRequestType:WB_ACK;
+ } else if(type == CoherenceRequestType:EXE_ACK) {
+ return GenericRequestType:EXE_ACK;
+ } else {
+ DEBUG_EXPR(type);
+ error("invalid CoherenceRequestType");
+ }
+}
+*/
+
diff --git a/src/mem/protocol/MESI_SCMP_bankdirectory.slicc b/src/mem/protocol/MESI_SCMP_bankdirectory.slicc
new file mode 100644
index 000000000..2d07999d7
--- /dev/null
+++ b/src/mem/protocol/MESI_SCMP_bankdirectory.slicc
@@ -0,0 +1,5 @@
+MESI_SCMP_bankdirectory-msg.sm
+MESI_SCMP_bankdirectory-L2cache.sm
+MESI_SCMP_bankdirectory-L1cache.sm
+MESI_SCMP_bankdirectory-mem.sm
+standard_CMP-protocol.sm
diff --git a/src/mem/protocol/MESI_SCMP_bankdirectory_m-mem.sm b/src/mem/protocol/MESI_SCMP_bankdirectory_m-mem.sm
new file mode 100644
index 000000000..37ecb2ffa
--- /dev/null
+++ b/src/mem/protocol/MESI_SCMP_bankdirectory_m-mem.sm
@@ -0,0 +1,250 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
+ */
+
+// This file is copied from Yasuko Watanabe's prefetch / memory protocol
+// Copied here by aep 12/14/07
+
+
+machine(Directory, "MESI_SCMP_bankdirectory protocol") {
+
+ MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
+ MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
+ MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
+
+ // STATES
+  enumeration(State, desc="Directory states", default="Directory_State_I") {
+    // Base states
+    I, desc="Idle";
+  }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ Fetch, desc="A memory fetch arrives";
+ Data, desc="writeback data arrives";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // to simulate detailed DRAM
+ external_type(MemoryControl, inport="yes", outport="yes") {
+
+ }
+
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+ MemoryControl memBuffer, constructor_hack="i";
+
+ State getState(Address addr) {
+ return State:I;
+ }
+
+ void setState(Address addr, State state) {
+ }
+
+ bool isGETRequest(CoherenceRequestType type) {
+ return (type == CoherenceRequestType:GETS) ||
+ (type == CoherenceRequestType:GET_INSTR) ||
+ (type == CoherenceRequestType:GETX);
+ }
+
+
+ // ** OUT_PORTS **
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+ out_port(memQueue_out, MemoryMsg, memBuffer);
+
+ // ** IN_PORTS **
+
+ in_port(requestNetwork_in, RequestMsg, requestToDir) {
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (isGETRequest(in_msg.Type)) {
+ trigger(Event:Fetch, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(responseNetwork_in, ResponseMsg, responseToDir) {
+ if (responseNetwork_in.isReady()) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+ trigger(Event:Data, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+
+
+ // Actions
+ action(a_sendAck, "a", desc="Send ack to L2") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(d_sendData, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ requestNetwork_in.dequeue();
+ }
+
+  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
+    responseNetwork_in.dequeue();
+  }
+
+ action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue();
+ }
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Sender;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := in_msg.Prefetch;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ // TRANSITIONS
+
+ transition(I, Fetch) {
+ //d_sendData;
+ qf_queueMemoryFetchRequest;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(I, Data) {
+ m_writeDataToMemory;
+ //a_sendAck;
+ qw_queueMemoryWBRequest;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(I, Memory_Data) {
+ d_sendData;
+ l_popMemQueue;
+ }
+
+ transition(I, Memory_Ack) {
+ a_sendAck;
+ l_popMemQueue;
+ }
+}
diff --git a/src/mem/protocol/MESI_SCMP_bankdirectory_m.slicc b/src/mem/protocol/MESI_SCMP_bankdirectory_m.slicc
new file mode 100644
index 000000000..4752cea5a
--- /dev/null
+++ b/src/mem/protocol/MESI_SCMP_bankdirectory_m.slicc
@@ -0,0 +1,5 @@
+MESI_SCMP_bankdirectory-msg.sm
+MESI_SCMP_bankdirectory-L2cache.sm
+MESI_SCMP_bankdirectory-L1cache.sm
+MESI_SCMP_bankdirectory_m-mem.sm
+standard_CMP-protocol.sm
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
new file mode 100644
index 000000000..6c1cb02b6
--- /dev/null
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -0,0 +1,369 @@
+
+// Minimal MI coherence protocol, L1 cache controller.  A block is either
+// Modified (sole, dirty copy) or Invalid; there is no shared state, so every
+// load/ifetch acquires exclusive ownership just like a store.
+machine(L1Cache, "MI Example") {
+
+  // NETWORK BUFFERS
+  // Outbound: requests (GETX/PUTX) on vnet 0, data responses on vnet 1.
+  MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="true";
+  MessageBuffer responseFromCache, network="To", virtual_network="1", ordered="true";
+
+  // Inbound: directory forwards/acks on vnet 2, data responses on vnet 1.
+  MessageBuffer forwardToCache, network="From", virtual_network="2", ordered="true";
+  MessageBuffer responseToCache, network="From", virtual_network="1", ordered="true";
+
+  // STATES
+  enumeration(State, desc="Cache states") {
+    I, desc="Not Present/Invalid";
+    II, desc="Not Present/Invalid, issued PUT";
+    M, desc="Modified";
+    MI, desc="Modified, issued PUT";
+
+    IS, desc="Issued request for IFETCH/GETX";
+    IM, desc="Issued request for STORE/ATOMIC";
+  }
+
+  // EVENTS
+  enumeration(Event, desc="Cache events") {
+    // From processor
+
+    Load, desc="Load request from processor";
+    Ifetch, desc="Ifetch request from processor";
+    Store, desc="Store request from processor";
+
+    Data, desc="Data from network";
+    Fwd_GETX, desc="Forward from network";
+
+    Replacement, desc="Replace a block";
+    Writeback_Ack, desc="Ack from the directory for a writeback";
+    Writeback_Nack, desc="Nack from the directory for a writeback";
+  }
+
+  // STRUCTURE DEFINITIONS
+
+  MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
+  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+
+  // CacheEntry
+  structure(Entry, desc="...", interface="AbstractCacheEntry") {
+    State CacheState, desc="cache state";
+    bool Dirty, desc="Is the data dirty (different than memory)?";
+    DataBlock DataBlk, desc="data for the block";
+  }
+
+
+  // Interface to the C++ cache array implementation.
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);
+    Address cacheProbe(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+  }
+
+  // TBE fields
+  structure(TBE, desc="...") {
+    State TBEState, desc="Transient state";
+    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+    // NOTE(review): Trans/Logged are not referenced by any action below --
+    // they look like LogTM transactional-memory leftovers; confirm before use.
+    bool Trans, desc="Is this block part of a the current transaction?";
+    bool Logged, desc="Has this block been logged in the current transaction?";
+  }
+
+  external_type(TBETable) {
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+
+  // STRUCTURES
+
+  CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS, L1_CACHE_ASSOC, MachineType_L1Cache, int_to_string(i)+"_L1"', abstract_chip_ptr="true";
+
+  TBETable TBEs, template_hack="<L1Cache_TBE>";
+
+
+
+  // FUNCTIONS
+  // Map a processor request type to the protocol event that handles it.
+  // ATOMIC is folded into Store since MI needs exclusive ownership either way.
+  Event mandatory_request_type_to_event(CacheRequestType type) {
+    if (type == CacheRequestType:LD) {
+      return Event:Load;
+    } else if (type == CacheRequestType:IFETCH) {
+      return Event:Ifetch;
+    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+      return Event:Store;
+    } else {
+      error("Invalid CacheRequestType");
+    }
+  }
+
+
+  // TBE state takes priority over the cache state; absent from both means I.
+  State getState(Address addr) {
+
+    if (TBEs.isPresent(addr)) {
+      return TBEs[addr].TBEState;
+    }
+    else if (cacheMemory.isTagPresent(addr)) {
+      return cacheMemory[addr].CacheState;
+    }
+    else {
+      return State:I;
+    }
+  }
+
+  // Mirror the new state into whichever structures currently hold the block
+  // (possibly both, e.g. during a transient state with an allocated line).
+  void setState(Address addr, State state) {
+
+    if (TBEs.isPresent(addr)) {
+      TBEs[addr].TBEState := state;
+    }
+
+    if (cacheMemory.isTagPresent(addr)) {
+      cacheMemory[addr].CacheState := state;
+    }
+  }
+
+
+  // NETWORK PORTS
+
+  out_port(requestNetwork_out, RequestMsg, requestFromCache);
+  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
+
+  // Forwarded requests and writeback acks/nacks from the directory.
+  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
+    if (forwardRequestNetwork_in.isReady()) {
+      peek(forwardRequestNetwork_in, RequestMsg) {
+        if (in_msg.Type == CoherenceRequestType:GETX) {
+          trigger(Event:Fwd_GETX, in_msg.Address);
+        }
+        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+          trigger(Event:Writeback_Ack, in_msg.Address);
+        }
+        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
+          trigger(Event:Writeback_Nack, in_msg.Address);
+        }
+        else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+  // Data responses from the directory (or previous owner).
+  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
+    if (responseNetwork_in.isReady()) {
+      peek(responseNetwork_in, ResponseMsg) {
+        if (in_msg.Type == CoherenceResponseType:DATA) {
+          trigger(Event:Data, in_msg.Address);
+        }
+        else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+  // Mandatory Queue
+  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+    if (mandatoryQueue_in.isReady()) {
+      peek(mandatoryQueue_in, CacheMsg) {
+
+
+        // If the set is full and the block absent, trigger Replacement on the
+        // victim chosen by cacheProbe; the original request stays queued and
+        // is re-examined after the victim is evicted.
+        if (cacheMemory.isTagPresent(in_msg.Address) == false &&
+            cacheMemory.cacheAvail(in_msg.Address) == false ) {
+          // make room for the block
+          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+        }
+        else {
+          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+        }
+      }
+    }
+  }
+
+  // ACTIONS
+
+  // Request exclusive ownership (GETX) from the home directory.
+  action(a_issueRequest, "a", desc="Issue a request") {
+    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:GETX;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Control;
+    }
+  }
+
+  // Start a writeback: send PUTX with the cached data to the directory.
+  action(b_issuePUT, "b", desc="Issue a PUT request") {
+    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTX;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.DataBlk := cacheMemory[address].DataBlk;
+      out_msg.MessageSize := MessageSizeType:Data;
+    }
+  }
+
+
+  // Cache-to-cache transfer for a forwarded GETX while we still hold the line.
+  action(e_sendData, "e", desc="Send data from cache to requestor") {
+    peek(forwardRequestNetwork_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := cacheMemory[address].DataBlk;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  // Same as e_sendData, but sourced from the TBE -- used after the cache line
+  // was deallocated for a writeback (MI state) and a Fwd_GETX races with it.
+  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
+    peek(forwardRequestNetwork_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := TBEs[address].DataBlk;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+
+  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
+    if (cacheMemory.isTagPresent(address) == false) {
+      cacheMemory.allocate(address);
+    }
+  }
+
+  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
+    if (cacheMemory.isTagPresent(address) == true) {
+      cacheMemory.deallocate(address);
+    }
+  }
+
+  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
+    mandatoryQueue_in.dequeue();
+  }
+
+  action(n_popResponseQueue, "n", desc="Pop the response queue") {
+    responseNetwork_in.dequeue();
+  }
+
+  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
+    forwardRequestNetwork_in.dequeue();
+  }
+
+  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
+    DEBUG_EXPR(cacheMemory[address].DataBlk);
+    sequencer.readCallback(address, cacheMemory[address].DataBlk);
+  }
+
+  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
+    DEBUG_EXPR(cacheMemory[address].DataBlk);
+    sequencer.writeCallback(address, cacheMemory[address].DataBlk);
+  }
+
+
+  action(u_writeDataToCache, "u", desc="Write data to the cache") {
+    peek(responseNetwork_in, ResponseMsg) {
+      cacheMemory[address].DataBlk := in_msg.DataBlk;
+    }
+  }
+
+
+  action(v_allocateTBE, "v", desc="Allocate TBE") {
+    TBEs.allocate(address);
+  }
+
+
+  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
+    TBEs.deallocate(address);
+  }
+
+  // Preserve the dirty data in the TBE so it survives line deallocation
+  // during a writeback (needed by ee_sendDataFromTBE).
+  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
+    TBEs[address].DataBlk := cacheMemory[address].DataBlk;
+  }
+
+  // Leave the triggering message queued; it will be retried later.
+  action(z_stall, "z", desc="stall") {
+    // do nothing
+  }
+
+  // TRANSITIONS
+
+  // Processor requests against a block in a transient state simply stall.
+  transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
+    z_stall;
+  }
+
+  transition({IS, IM}, Fwd_GETX) {
+    z_stall;
+  }
+
+  transition(M, Store) {
+    s_store_hit;
+    m_popMandatoryQueue;
+  }
+
+  transition(M, {Load, Ifetch}) {
+    r_load_hit;
+    m_popMandatoryQueue;
+  }
+
+
+  transition(I, Store, IM) {
+    v_allocateTBE;
+    i_allocateL1CacheBlock;
+    a_issueRequest;
+    m_popMandatoryQueue;
+  }
+
+  transition(I, {Load, Ifetch}, IS) {
+    v_allocateTBE;
+    i_allocateL1CacheBlock;
+    a_issueRequest;
+    m_popMandatoryQueue;
+  }
+
+  transition(IS, Data, M) {
+    u_writeDataToCache;
+    r_load_hit;
+    w_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition(IM, Data, M) {
+    u_writeDataToCache;
+    s_store_hit;
+    w_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition(M, Fwd_GETX, I) {
+    e_sendData;
+    o_popForwardedRequestQueue;
+  }
+
+  // Note: mandatory queue is NOT popped here -- the request that forced this
+  // replacement retries once the victim is gone.
+  transition(I, Replacement) {
+    h_deallocateL1CacheBlock;
+  }
+
+  transition(M, Replacement, MI) {
+    v_allocateTBE;
+    b_issuePUT;
+    x_copyDataFromCacheToTBE;
+    h_deallocateL1CacheBlock;
+  }
+
+  transition(MI, Writeback_Ack, I) {
+    w_deallocateTBE;
+    o_popForwardedRequestQueue;
+  }
+
+  // A GETX beat our PUTX to the directory: supply data from the TBE and wait
+  // in II for the directory to nack the (now stale) writeback.
+  transition(MI, Fwd_GETX, II) {
+    ee_sendDataFromTBE;
+    o_popForwardedRequestQueue;
+  }
+
+  transition(II, Writeback_Nack, I) {
+    w_deallocateTBE;
+    o_popForwardedRequestQueue;
+  }
+}
+
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
new file mode 100644
index 000000000..311f8488b
--- /dev/null
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -0,0 +1,257 @@
+
+// MI example protocol, directory controller.  Tracks a single owner per block
+// (no sharer list is ever populated in this protocol) and serves as the home
+// for memory data.
+machine(Directory, "Directory protocol") {
+
+  MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="true";
+  // NOTE(review): responseFromDir is ordered="false" while the L1's matching
+  // responseToCache buffer is declared ordered="true" -- verify intended.
+  MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";
+
+  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="true";
+  MessageBuffer unblockToDir, network="From", virtual_network="3", ordered="true";
+
+  // STATES
+  enumeration(State, desc="Directory states", default="Directory_State_I") {
+    // Base states
+    I, desc="Invalid";
+    M, desc="Modified";
+
+    // NOTE(review): no visible transition enters MI (PUTX goes M->I directly);
+    // MI and its Unblock transition appear to be dead code here.
+    MI, desc="Blocked on a writeback";
+  }
+
+  // Events
+  enumeration(Event, desc="Directory events") {
+    GETX, desc="A GETX arrives";
+    GETS, desc="A GETS arrives";
+    PUTX, desc="A PUTX arrives";
+    PUTX_NotOwner, desc="A PUTX arrives";
+    // NOTE(review): PUTO is triggered by the in_port below but has no
+    // transition -- a PUTO arrival would abort with "Invalid transition".
+    PUTO, desc="A PUTO arrives";
+    Unblock, desc="An unblock message arrives";
+  }
+
+  // TYPES
+
+  // DirectoryEntry
+  structure(Entry, desc="...") {
+    State DirectoryState, desc="Directory state";
+    DataBlock DataBlk, desc="data for the block";
+    NetDest Sharers, desc="Sharers for this block";
+    NetDest Owner, desc="Owner of this block";
+  }
+
+  external_type(DirectoryMemory) {
+    Entry lookup(Address);
+    bool isPresent(Address);
+  }
+
+
+  // ** OBJECTS **
+
+  DirectoryMemory directory, constructor_hack="i";
+
+  // Unlike setState, no isPresent guard: directory entries are assumed to
+  // exist for every address this controller is asked about.
+  State getState(Address addr) {
+    return directory[addr].DirectoryState;
+  }
+
+  void setState(Address addr, State state) {
+    if (directory.isPresent(addr)) {
+
+      // Sanity-check the owner/sharer invariants for the base states.
+      if (state == State:I) {
+        assert(directory[addr].Owner.count() == 0);
+        assert(directory[addr].Sharers.count() == 0);
+      }
+
+      if (state == State:M) {
+        assert(directory[addr].Owner.count() == 1);
+        assert(directory[addr].Sharers.count() == 0);
+      }
+
+      directory[addr].DirectoryState := state;
+    }
+  }
+
+  // ** OUT_PORTS **
+  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
+  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+  // NOTE(review): declared "for recycling requests" but unused by any action
+  // in this file.
+  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
+
+  // ** IN_PORTS **
+
+
+  in_port(requestQueue_in, RequestMsg, requestToDir) {
+    if (requestQueue_in.isReady()) {
+      peek(requestQueue_in, RequestMsg) {
+        if (in_msg.Type == CoherenceRequestType:GETS) {
+          trigger(Event:GETS, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GETX) {
+          trigger(Event:GETX, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+          // Distinguish a legitimate writeback from a stale one (the writer
+          // already lost ownership to a racing GETX).
+          if (directory[in_msg.Address].Owner.isElement(in_msg.Requestor)) {
+            trigger(Event:PUTX, in_msg.Address);
+          } else {
+            trigger(Event:PUTX_NotOwner, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
+          trigger(Event:PUTO, in_msg.Address);
+        } else {
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+  in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
+    if (unblockNetwork_in.isReady()) {
+      peek(unblockNetwork_in, ResponseMsg) {
+        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+          trigger(Event:Unblock, in_msg.Address);
+        } else {
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+
+
+  // Actions
+
+  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:WB_ACK;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:WB_NACK;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  action(c_clearOwner, "c", desc="Clear the owner field") {
+    directory[address].Owner.clear();
+  }
+
+  // Reply with memory data.  Acks are derived from the sharer count (minus
+  // the requestor itself if it is a sharer); in this MI protocol Sharers is
+  // never populated, so Acks is effectively 0.
+  action(d_sendData, "d", desc="Send data to requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+        out_msg.Address := address;
+
+        // NOTE(review): both branches produce DATA -- the exclusive-clean
+        // variant is commented out, leaving this if/else redundant.
+        if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
+          // out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_CLEAN;
+          out_msg.Type := CoherenceResponseType:DATA;
+        } else {
+          out_msg.Type := CoherenceResponseType:DATA;
+        }
+
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+        out_msg.Dirty := false; // By definition, the block is now clean
+        out_msg.Acks := directory[address].Sharers.count();
+        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+          out_msg.Acks := out_msg.Acks - 1;
+        }
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      directory[address].Owner.clear();
+      directory[address].Owner.add(in_msg.Requestor);
+    }
+  }
+
+  // Forward the request to the current owner for a cache-to-cache transfer.
+  action(f_forwardRequest, "f", desc="Forward request to owner") {
+    peek(requestQueue_in, RequestMsg) {
+      APPEND_TRANSITION_COMMENT("Own: ");
+      APPEND_TRANSITION_COMMENT(directory[in_msg.Address].Owner);
+      APPEND_TRANSITION_COMMENT("Req: ");
+      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := in_msg.Type;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination := directory[in_msg.Address].Owner;
+        out_msg.Acks := directory[address].Sharers.count();
+        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+          out_msg.Acks := out_msg.Acks - 1;
+        }
+        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+      }
+    }
+  }
+
+
+  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+    requestQueue_in.dequeue();
+  }
+
+  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+    unblockNetwork_in.dequeue();
+  }
+
+  // Commit PUTX data to memory.  The commented-out peek shows the data used
+  // to arrive on the unblock network; here it rides on the request itself.
+  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
+    //  peek(unblockNetwork_in, ResponseMsg) {
+    peek(requestQueue_in, RequestMsg) {
+      // assert(in_msg.Dirty);
+      // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
+      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+      DEBUG_EXPR(in_msg.Address);
+      DEBUG_EXPR(in_msg.DataBlk);
+    }
+  }
+
+
+
+  // TRANSITIONS
+
+  transition(I, GETX, M) {
+    d_sendData;
+    e_ownerIsRequestor;
+    i_popIncomingRequestQueue;
+  }
+
+
+
+  // Ownership changes hands; data comes from the old owner via forwarding.
+  transition(M, GETX, M) {
+    f_forwardRequest;
+    e_ownerIsRequestor;
+    i_popIncomingRequestQueue;
+  }
+
+  // transition(M, PUTX, MI) {
+  transition(M, PUTX, I) {
+    c_clearOwner;
+    l_writeDataToMemory;
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  // Stale writeback (owner already changed): nack so the old owner can
+  // finish its II-state cleanup.
+  transition(M, PUTX_NotOwner, M) {
+    b_sendWriteBackNack;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(I, PUTX_NotOwner, I) {
+    b_sendWriteBackNack;
+    i_popIncomingRequestQueue;
+  }
+
+
+  transition(MI, Unblock, M) {
+    j_popIncomingUnblockQueue;
+  }
+
+}
diff --git a/src/mem/protocol/MI_example-msg.sm b/src/mem/protocol/MI_example-msg.sm
new file mode 100644
index 000000000..f577d60df
--- /dev/null
+++ b/src/mem/protocol/MI_example-msg.sm
@@ -0,0 +1,92 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_SMP_directory-msg.sm 1.8 05/01/19 15:48:36-06:00 mikem@royal16.cs.wisc.edu $
+ *
+ */
+
+// CoherenceRequestType: request messages a cache can send or have forwarded
+// to it (GETX/GETS/PUTX/PUTO plus writeback ack/nack and invalidation).
+enumeration(CoherenceRequestType, desc="...") {
+  GETX,      desc="Get eXclusive";
+  GETS,      desc="Get Shared";
+  PUTX,      desc="Put eXclusive";
+  PUTO,      desc="Put Owned";
+  WB_ACK,    desc="Writeback ack";
+  WB_NACK,   desc="Writeback neg. ack";
+  INV,       desc="Invalidation";
+  FWD,       desc="Generic FWD";
+}
+
+// CoherenceResponseType: response messages (acks, data variants, unblocks,
+// and writeback payloads).
+enumeration(CoherenceResponseType, desc="...") {
+  ACK,                desc="ACKnowledgment, responder doesn't have a copy";
+  DATA,               desc="Data";
+  DATA_EXCLUSIVE_CLEAN, desc="Data, no other processor has a copy, data is clean";
+  DATA_EXCLUSIVE_DIRTY, desc="Data, no other processor has a copy, data is dirty";
+  UNBLOCK,            desc="Unblock";
+  UNBLOCK_EXCLUSIVE,  desc="Unblock, we're in E/M";
+  WRITEBACK_CLEAN,    desc="Clean writeback (no data)";
+  WRITEBACK_DIRTY,    desc="Dirty writeback (contains data)";
+  WRITEBACK,          desc="Generic writeback (contains data)";
+}
+
+// TriggerType: kinds of locally-generated (self-addressed) trigger events.
+enumeration(TriggerType, desc="...") {
+  ALL_ACKS,            desc="See corresponding event";
+}
+
+// TriggerMsg: internal message a controller enqueues to itself (e.g. when the
+// last outstanding ack arrives).
+structure(TriggerMsg, desc="...", interface="Message") {
+  Address Address,             desc="Physical address for this request";
+  TriggerType Type,            desc="Type of trigger";
+}
+
+// RequestMsg: coherence requests, also reused for directory-forwarded
+// requests (hence the Acks and DataBlk fields).
+structure(RequestMsg, desc="...", interface="NetworkMessage") {
+  Address Address,             desc="Physical address for this request";
+  CoherenceRequestType Type,   desc="Type of request (GetS, GetX, PutX, etc)";
+  MachineID Requestor,            desc="Node who initiated the request";
+  NetDest Destination,             desc="Multicast destination mask";
+  int Acks,                    desc="How many acks to expect";
+  DataBlock DataBlk,           desc="data for the cache line";
+  MessageSizeType MessageSize, desc="size category of the message";
+}
+
+// ResponseMsg: coherence responses (acks/data), also reused for unblock
+// messages sent back to the directory.
+structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+  Address Address,             desc="Physical address for this request";
+  CoherenceResponseType Type,  desc="Type of response (Ack, Data, etc)";
+  MachineID Sender,            desc="Node who sent the data";
+  NetDest Destination,             desc="Node to whom the data is sent";
+  DataBlock DataBlk,           desc="data for the cache line";
+  bool Dirty,                  desc="Is the data dirty (different than memory)?";
+  int Acks,                    desc="How many acks to expect";
+  MessageSizeType MessageSize, desc="size category of the message";
+}
diff --git a/src/mem/protocol/MI_example.slicc b/src/mem/protocol/MI_example.slicc
new file mode 100644
index 000000000..cb1f80135
--- /dev/null
+++ b/src/mem/protocol/MI_example.slicc
@@ -0,0 +1,4 @@
+MI_example-msg.sm
+MI_example-cache.sm
+MI_example-dir.sm
+standard_1level_SMP-protocol.sm
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
new file mode 100644
index 000000000..a65ade10f
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -0,0 +1,1153 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+machine(L1Cache, "Directory protocol") {
+
+ // NODE L1 CACHE
+ // From this node's L1 cache TO the network
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
+ MessageBuffer foo, network="To", virtual_network="1", ordered="false";
+ // a local L1 -> this L2 bank
+ MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false";
+// MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false";
+
+
+ // To this node's L1 cache FROM the network
+ // a L2 bank -> this L1
+ MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false";
+ MessageBuffer goo, network="From", virtual_network="1", ordered="false";
+ // a L2 bank -> this L1
+ MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false";
+
+
+
+  // STATES
+  // MOESI base states plus *_W "lockout" variants (store-timeout window) and
+  // transient request/writeback states.
+  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+    // Base states
+    I, desc="Idle";
+    S, desc="Shared";
+    O, desc="Owned";
+    M, desc="Modified (dirty)";
+    M_W, desc="Modified (dirty)";
+    MM, desc="Modified (dirty and locally modified)";
+    MM_W, desc="Modified (dirty and locally modified)";
+
+    // Transient States
+    IM, "IM", desc="Issued GetX";
+    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
+    // NOTE(review): OM's short name is "SM" and SI's is "OI" -- these look
+    // like copy/paste slips in the display names (desc text is correct).
+    OM, "SM", desc="Issued GetX, received data";
+    IS, "IS", desc="Issued GetS";
+    SI, "OI", desc="Issued PutS, waiting for ack";
+    OI, "OI", desc="Issued PutO, waiting for ack";
+    MI, "MI", desc="Issued PutX, waiting for ack";
+    II, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
+  }
+
+  // EVENTS
+  enumeration(Event, desc="Cache events") {
+    // From processor
+    Load, desc="Load request from the processor";
+    Ifetch, desc="I-fetch request from the processor";
+    Store, desc="Store request from the processor";
+    L1_Replacement, desc="Replacement";
+
+    // Requests
+    Own_GETX, desc="We observe our own GetX forwarded back to us";
+    Fwd_GETX, desc="A GetX from another processor";
+    Fwd_GETS, desc="A GetS from another processor";
+    Inv, desc="Invalidations from the directory";
+
+    // Responses
+    Ack, desc="Received an ack message";
+    Data, desc="Received a data message, responder has a shared copy";
+    Exclusive_Data, desc="Received a data message";
+
+    Writeback_Ack, desc="Writeback O.K. from directory";
+    Writeback_Ack_Data, desc="Writeback O.K. from directory";
+    Writeback_Nack, desc="Writeback not O.K. from directory";
+
+    // Triggers (self-enqueued via triggerQueue)
+    All_acks, desc="Received all required data and message acks";
+
+    // Timeouts
+    Use_Timeout, desc="lockout period ended";
+  }
+
+ // TYPES
+
+  // CacheEntry: per-line state stored in the L1 I/D cache arrays.
+  structure(Entry, desc="...", interface="AbstractCacheEntry") {
+    State CacheState,        desc="cache state";
+    bool Dirty,              desc="Is the data dirty (different than memory)?";
+    DataBlock DataBlk,       desc="data for the block";
+  }
+
+  // TBE fields: per-miss bookkeeping, including an ack/data countdown for
+  // request completion (see All_acks trigger).
+  structure(TBE, desc="...") {
+    Address Address,             desc="Physical address for this TBE";
+    State TBEState,              desc="Transient state";
+    DataBlock DataBlk,           desc="data for the block, required for concurrent writebacks";
+    bool Dirty,                  desc="Is the data dirty (different than memory)?";
+    int NumPendingMsgs, default="0",     desc="Number of acks/data messages that this processor is waiting for";
+  }
+
+  // Interface to the C++ cache array implementation.
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);
+    Address cacheProbe(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+  }
+
+  // Interface to the C++ TBE (miss status holding register) table.
+  external_type(TBETable) {
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+
+ MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
+ Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+
+ TBETable TBEs, template_hack="<L1Cache_TBE>";
+ CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+ CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+ TimerTable useTimerTable;
+
+  // Return the entry from whichever L1 array holds the block; D-cache wins
+  // the lookup order (caller is expected to know the tag is present).
+  Entry getCacheEntry(Address addr), return_by_ref="yes" {
+    if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory[addr];
+    } else {
+      return L1IcacheMemory[addr];
+    }
+  }
+
+  // Apply an access-permission change to whichever L1 array holds the block.
+  void changePermission(Address addr, AccessPermission permission) {
+    if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory.changePermission(addr, permission);
+    } else {
+      return L1IcacheMemory.changePermission(addr, permission);
+    }
+  }
+
+  // True if the block's tag is present in either the L1 D- or I-cache.
+  bool isCacheTagPresent(Address addr) {
+    return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+  }
+
+  // TBE state takes priority over cache state; a block may never live in
+  // both L1 arrays at once.
+  State getState(Address addr) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+    if(TBEs.isPresent(addr)) {
+      return TBEs[addr].TBEState;
+    } else if (isCacheTagPresent(addr)) {
+      return getCacheEntry(addr).CacheState;
+    }
+    return State:I;
+  }
+
+  // Update TBE and/or cache state, run a coherence check when entering a
+  // stable readable state, and derive the line's access permission.
+  void setState(Address addr, State state) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+    if (TBEs.isPresent(addr)) {
+      TBEs[addr].TBEState := state;
+    }
+
+    if (isCacheTagPresent(addr)) {
+      // Only invoke the (expensive) sequencer coherence check on a genuine
+      // transition INTO M/MM/S/O, not on a re-assignment of the same state.
+      if ( ((getCacheEntry(addr).CacheState != State:M) && (state == State:M)) ||
+           ((getCacheEntry(addr).CacheState != State:MM) && (state == State:MM)) ||
+           ((getCacheEntry(addr).CacheState != State:S) && (state == State:S)) ||
+           ((getCacheEntry(addr).CacheState != State:O) && (state == State:O)) ) {
+
+        getCacheEntry(addr).CacheState := state;
+        sequencer.checkCoherence(addr);
+      }
+      else {
+        getCacheEntry(addr).CacheState := state;
+      }
+
+      // Set permission
+      if (state == State:MM || state == State:MM_W) {
+        changePermission(addr, AccessPermission:Read_Write);
+      } else if ((state == State:S) ||
+                 (state == State:O) ||
+                 (state == State:M) ||
+                 (state == State:M_W) ||
+                 (state == State:SM) ||
+                 (state == State:OM)) {
+        changePermission(addr, AccessPermission:Read_Only);
+      } else {
+        changePermission(addr, AccessPermission:Invalid);
+      }
+    }
+  }
+
+  // True if this cache holds the block in an exclusive (M-family) state,
+  // including the MI writeback-pending state.
+  bool isBlockExclusive(Address addr) {
+
+    if (isCacheTagPresent(addr)) {
+      if ( (getCacheEntry(addr).CacheState == State:M) || (getCacheEntry(addr).CacheState == State:MM)
+           || (getCacheEntry(addr).CacheState == State:MI) || (getCacheEntry(addr).CacheState == State:MM_W)
+         ) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  // True if this cache holds the block in a shared/owned (non-exclusive)
+  // state, including the S/O transient variants.
+  bool isBlockShared(Address addr) {
+    if (isCacheTagPresent(addr)) {
+      if ( (getCacheEntry(addr).CacheState == State:S) || (getCacheEntry(addr).CacheState == State:O)
+           || (getCacheEntry(addr).CacheState == State:SM)
+           || (getCacheEntry(addr).CacheState == State:OI)
+           || (getCacheEntry(addr).CacheState == State:SI)
+           || (getCacheEntry(addr).CacheState == State:OM)
+         ) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+
+  // Map a processor request type to a protocol event; ATOMIC is treated as
+  // a Store since it needs write permission.
+  Event mandatory_request_type_to_event(CacheRequestType type) {
+    if (type == CacheRequestType:LD) {
+      return Event:Load;
+    } else if (type == CacheRequestType:IFETCH) {
+      return Event:Ifetch;
+    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+      return Event:Store;
+    } else {
+      error("Invalid CacheRequestType");
+    }
+  }
+
+ MessageBuffer triggerQueue, ordered="true";
+
+ // ** OUT_PORTS **
+
+ out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+ out_port(foo_out, ResponseMsg, foo);
+
+ // ** IN_PORTS **
+
+  // Use Timer: fires Use_Timeout when a block's store-lockout window expires.
+  in_port(useTimerTable_in, Address, useTimerTable) {
+    if (useTimerTable_in.isReady()) {
+      trigger(Event:Use_Timeout, useTimerTable.readyAddress());
+    }
+  }
+
+
+  // Nothing is ever expected on the "goo" vnet-1 buffer; any arrival is fatal.
+  in_port(goo_in, RequestMsg, goo) {
+    if (goo_in.isReady()) {
+      peek(goo_in, RequestMsg) {
+        assert(false);
+      }
+    }
+  }
+
+  // Trigger Queue: self-enqueued events (currently only the all-acks trigger).
+  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
+    if (triggerQueue_in.isReady()) {
+      peek(triggerQueue_in, TriggerMsg) {
+        if (in_msg.Type == TriggerType:ALL_ACKS) {
+          trigger(Event:All_acks, in_msg.Address);
+        } else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+ // Nothing from the request network
+
+  // Request Network: forwarded requests, invalidations, and writeback
+  // acks/nacks from the L2/directory.  A GETX from ourselves (forwarded back
+  // by the directory) is distinguished as Own_GETX.
+  in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
+    if (requestNetwork_in.isReady()) {
+      peek(requestNetwork_in, RequestMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        DEBUG_EXPR("MRM_DEBUG: L1 received");
+        DEBUG_EXPR(in_msg.Type);
+        if (in_msg.Type == CoherenceRequestType:GETX) {
+          if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
+            trigger(Event:Own_GETX, in_msg.Address);
+          } else {
+            trigger(Event:Fwd_GETX, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceRequestType:GETS) {
+          trigger(Event:Fwd_GETS, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+          trigger(Event:Writeback_Ack, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
+          trigger(Event:Writeback_Ack_Data, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
+          trigger(Event:Writeback_Nack, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:INV) {
+          trigger(Event:Inv, in_msg.Address);
+        } else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+ // Response Network
+ in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
+ if (responseToL1Cache_in.isReady()) {
+ peek(responseToL1Cache_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Exclusive_Data, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Nothing from the unblock network
+ // Mandatory Queue betweens Node's CPU and it's L1 caches
+  // Mandatory Queue between the CPU (sequencer) and this L1. Split-cache
+  // policy: an ifetch whose block sits in the D-cache (or a data access whose
+  // block sits in the I-cache) first triggers an L1_Replacement of the
+  // wrongly-placed copy; if the target cache has no room, the victim chosen
+  // by cacheProbe() is replaced instead of servicing the request this cycle.
+  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+    if (mandatoryQueue_in.isReady()) {
+      peek(mandatoryQueue_in, CacheMsg) {
+
+        // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+        if (in_msg.Type == CacheRequestType:IFETCH) {
+          // ** INSTRUCTION ACCESS ***
+
+          // Check to see if it is in the OTHER L1
+          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, put the request on the queue to the shared L2
+            trigger(Event:L1_Replacement, in_msg.Address);
+          }
+          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 asks the L2 for it.
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
+              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+            } else {
+              // No room in the L1, so we need to make room in the L1
+              trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+            }
+          }
+        } else {
+          // *** DATA ACCESS ***
+
+          // Check to see if it is in the OTHER L1
+          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, put the request on the queue to the shared L2
+            trigger(Event:L1_Replacement, in_msg.Address);
+          }
+          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 ask the L2 for it
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
+              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+            } else {
+              // No room in the L1, so we need to make room in the L1
+              trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+            }
+          }
+        }
+      }
+    }
+  }
+
+
+ // ACTIONS
+
+  // Issue a GETS (read request) to the home L2 bank for this address,
+  // forwarding the CPU request's access-mode and prefetch hints.
+  action(a_issueGETS, "a", desc="Issue GETS") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETS;
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        out_msg.MessageSize := MessageSizeType:Request_Control;
+        out_msg.AccessMode := in_msg.AccessMode;
+        out_msg.Prefetch := in_msg.Prefetch;
+      }
+    }
+  }
+
+  // Issue a GETX (read-exclusive request) to the home L2 bank; mirrors
+  // a_issueGETS except for the request type.
+  action(b_issueGETX, "b", desc="Issue GETX") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETX;
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+        out_msg.MessageSize := MessageSizeType:Request_Control;
+        out_msg.AccessMode := in_msg.AccessMode;
+        out_msg.Prefetch := in_msg.Prefetch;
+      }
+    }
+  }
+
+  // Writeback requests to the home L2 bank. PUTX = dirty (M/MM) block,
+  // PUTO = owned block, PUTS = shared block. All are control-only; the data
+  // follows later via qq_sendWBDataFromTBEToL2 after a Writeback_Ack_Data.
+  // NOTE(review): they share the request network with GETS/GETX (the
+  // writebackNetwork_out lines are commented out) — see the deadlock FIXME
+  // at the Writeback_Nack transition.
+  action(d_issuePUTX, "d", desc="Issue PUTX") {
+    // enqueue(writebackNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+    enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTX;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Writeback_Control;
+    }
+  }
+
+  action(dd_issuePUTO, "\d", desc="Issue PUTO") {
+    // enqueue(writebackNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+    enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTO;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Writeback_Control;
+    }
+  }
+
+  action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
+    // enqueue(writebackNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+    enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTS;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.MessageSize := MessageSizeType:Writeback_Control;
+    }
+  }
+
+  // Respond with (shared) data to the forwarded requestor. If the requestor
+  // was an L2 bank, route to our home L2; otherwise send directly to the
+  // requesting L1. Dirty is deliberately sent as false: the owner keeps
+  // responsibility for the dirty data (original assignment left commented).
+  action(e_sendData, "e", desc="Send data from cache to requestor") {
+    peek(requestNetwork_in, RequestMsg) {
+      if (in_msg.RequestorMachine == MachineType:L2Cache) {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID));
+          out_msg.DataBlk := getCacheEntry(address).DataBlk;
+          // out_msg.Dirty := getCacheEntry(address).Dirty;
+          out_msg.Dirty := false;
+          out_msg.Acks := in_msg.Acks;
+          out_msg.MessageSize := MessageSizeType:Response_Data;
+        }
+        DEBUG_EXPR("Sending data to L2");
+        DEBUG_EXPR(in_msg.Address);
+      }
+      else {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.DataBlk := getCacheEntry(address).DataBlk;
+          // out_msg.Dirty := getCacheEntry(address).Dirty;
+          out_msg.Dirty := false;
+          out_msg.Acks := in_msg.Acks;
+          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+        }
+        DEBUG_EXPR("Sending data to L1");
+      }
+    }
+  }
+
+  // Ship the cached block (with its true dirty bit) to the home L2 bank.
+  // Used on paths where the L2 itself is the consumer, so no ack counting
+  // is needed.
+  action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Sender := machineID;
+      out_msg.Type := CoherenceResponseType:DATA;
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+      out_msg.Acks := 0; // irrelevant
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.Dirty := getCacheEntry(address).Dirty;
+      out_msg.DataBlk := getCacheEntry(address).DataBlk;
+    }
+  }
+
+
+  // Hand the block over exclusively (DATA_EXCLUSIVE): the requestor becomes
+  // the new owner and we keep no shared copy. Routes to the home L2 bank if
+  // the requestor was an L2, else directly to the requesting L1.
+  action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
+    peek(requestNetwork_in, RequestMsg) {
+      if (in_msg.RequestorMachine == MachineType:L2Cache) {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+          out_msg.Sender := machineID;
+          out_msg.SenderMachine := MachineType:L1Cache;
+          out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID));
+          out_msg.DataBlk := getCacheEntry(address).DataBlk;
+          out_msg.Dirty := getCacheEntry(address).Dirty;
+          out_msg.Acks := in_msg.Acks;
+          out_msg.MessageSize := MessageSizeType:Response_Data;
+        }
+        DEBUG_EXPR("Sending exclusive data to L2");
+      }
+      else {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+          out_msg.Sender := machineID;
+          out_msg.SenderMachine := MachineType:L1Cache;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.DataBlk := getCacheEntry(address).DataBlk;
+          out_msg.Dirty := getCacheEntry(address).Dirty;
+          out_msg.Acks := in_msg.Acks;
+          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+        }
+        DEBUG_EXPR("Sending exclusive data to L1");
+      }
+    }
+  }
+
+  // Acknowledge an invalidation. Acks := -1 so the collector's pending-ack
+  // counter is decremented by one per responder (see
+  // m_decrementNumberOfMessages, which subtracts in_msg.Acks).
+  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
+    peek(requestNetwork_in, RequestMsg) {
+      if (in_msg.RequestorMachine == MachineType:L1Cache) {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:ACK;
+          out_msg.Sender := machineID;
+          out_msg.SenderMachine := MachineType:L1Cache;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.Acks := 0 - 1; // -1
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+        }
+      }
+      else {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:ACK;
+          out_msg.Sender := machineID;
+          out_msg.SenderMachine := MachineType:L1Cache;
+          out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID));
+          out_msg.Acks := 0 - 1; // -1
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+        }
+      }
+    }
+  }
+
+  // Tell the home L2 bank that this miss has completed so it can unblock
+  // the directory entry for this address.
+  action(g_sendUnblock, "g", desc="Send unblock to memory") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Sender := machineID;
+      out_msg.MessageSize := MessageSizeType:Unblock_Control;
+      out_msg.Type := CoherenceResponseType:UNBLOCK;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+    }
+  }
+
+  // Like g_sendUnblock, but informs the home L2 bank that we now hold the
+  // block exclusively (UNBLOCK_EXCLUSIVE).
+  action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Sender := machineID;
+      out_msg.MessageSize := MessageSizeType:Unblock_Control;
+      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+    }
+  }
+
+  // Complete a load: return the cached data block to the sequencer.
+  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
+    DEBUG_EXPR(getCacheEntry(address).DataBlk);
+    sequencer.readCallback(address, getCacheEntry(address).DataBlk);
+  }
+
+  // Complete a store: let the sequencer write into the block, then mark it
+  // dirty (after the callback, so the written data is what gets dirtied).
+  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
+    DEBUG_EXPR(getCacheEntry(address).DataBlk);
+    sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
+    getCacheEntry(address).Dirty := true;
+  }
+
+  // Allocate a TBE for this address, snapshotting the block's data and dirty
+  // bit; the snapshot is only consumed on writeback paths.
+  action(i_allocateTBE, "i", desc="Allocate TBE") {
+    check_allocate(TBEs);
+    TBEs.allocate(address);
+    TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
+    TBEs[address].Dirty := getCacheEntry(address).Dirty;
+  }
+
+  // Queue-maintenance actions: each pops (or unsets) the input that fired
+  // the current transition.
+  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
+    triggerQueue_in.dequeue();
+  }
+
+  // NOTE(review): shorthand "\jj" is unusual (other actions use a single
+  // char after the backslash) — confirm against the profiler table output.
+  action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
+    useTimerTable.unset(address);
+  }
+
+  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+    mandatoryQueue_in.dequeue();
+  }
+
+  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
+    requestNetwork_in.dequeue();
+  }
+
+  // Subtract the incoming Acks count from the TBE's pending-message counter.
+  // Data responses carry a positive Acks (number of sharers to wait for);
+  // invalidation acks carry -1, so subtraction moves the counter toward 0
+  // in both cases.
+  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
+    peek(responseToL1Cache_in, ResponseMsg) {
+      DEBUG_EXPR("MRM_DEBUG: L1 decrementNumberOfMessages");
+      DEBUG_EXPR(id);
+      DEBUG_EXPR(in_msg.Acks);
+      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
+    }
+  }
+
+  // Same as above, but for ack counts carried on the request network
+  // (used on the Own_GETX path).
+  action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
+    peek(requestNetwork_in, RequestMsg) {
+      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
+    }
+  }
+
+  action(n_popResponseQueue, "n", desc="Pop response queue") {
+    responseToL1Cache_in.dequeue();
+  }
+
+  // When the pending counter reaches zero, self-schedule an ALL_ACKS
+  // trigger, which fires Event:All_acks on the trigger queue.
+  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+    if (TBEs[address].NumPendingMsgs == 0) {
+      enqueue(triggerQueue_out, TriggerMsg) {
+        out_msg.Address := address;
+        out_msg.Type := TriggerType:ALL_ACKS;
+      }
+    }
+  }
+
+  // Arm the use timer: the block stays in M_W/MM_W (exclusive-use window)
+  // for 50 cycles before Use_Timeout demotes it to a stable state.
+  // NOTE(review): 50 is a hard-coded tuning constant — consider a config
+  // parameter.
+  action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
+    useTimerTable.set(address, 50);
+  }
+
+
+  // Service a forwarded GETS while in a writeback-transient state: the block
+  // has been deallocated from the cache, so data comes from the TBE snapshot.
+  // Dirty is sent as false (ownership/dirty responsibility is retained, the
+  // original Dirty assignment is left commented out).
+  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
+    peek(requestNetwork_in, RequestMsg) {
+      if (in_msg.RequestorMachine == MachineType:L1Cache) {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.DataBlk := TBEs[address].DataBlk;
+          // out_msg.Dirty := TBEs[address].Dirty;
+          out_msg.Dirty := false;
+          out_msg.Acks := in_msg.Acks;
+          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+        }
+      }
+      else {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+          out_msg.DataBlk := TBEs[address].DataBlk;
+          // out_msg.Dirty := TBEs[address].Dirty;
+          out_msg.Dirty := false;
+          out_msg.Acks := in_msg.Acks;
+          out_msg.MessageSize := MessageSizeType:Response_Data;
+        }
+      }
+    }
+  }
+
+  // Service a forwarded GETX while in a writeback-transient state: hand the
+  // TBE's snapshot over exclusively, transferring the true dirty bit along
+  // with ownership.
+  action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send data from TBE to cache") {
+    peek(requestNetwork_in, RequestMsg) {
+      if (in_msg.RequestorMachine == MachineType:L1Cache) {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.DataBlk := TBEs[address].DataBlk;
+          out_msg.Dirty := TBEs[address].Dirty;
+          out_msg.Acks := in_msg.Acks;
+          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+        }
+      }
+      else {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+          out_msg.DataBlk := TBEs[address].DataBlk;
+          out_msg.Dirty := TBEs[address].Dirty;
+          out_msg.Acks := in_msg.Acks;
+          out_msg.MessageSize := MessageSizeType:Response_Data;
+        }
+      }
+    }
+  }
+
+
+  // L2 will usually request data for a writeback
+  // Ship the TBE's writeback data to the home L2 bank, tagging it as dirty
+  // or clean so the L2 knows whether it must forward the block to memory.
+  action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Sender := machineID;
+      out_msg.SenderMachine := MachineType:L1Cache;
+      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+      out_msg.Dirty := TBEs[address].Dirty;
+      if (TBEs[address].Dirty) {
+        out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
+      } else {
+        out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_DATA;
+      }
+      out_msg.DataBlk := TBEs[address].DataBlk;
+      out_msg.MessageSize := MessageSizeType:Writeback_Data;
+    }
+  }
+
+  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+    TBEs.deallocate(address);
+  }
+
+  // Install incoming data (and its dirty bit) into the cache entry.
+  action(u_writeDataToCache, "u", desc="Write data to cache") {
+    peek(responseToL1Cache_in, ResponseMsg) {
+      getCacheEntry(address).DataBlk := in_msg.DataBlk;
+      getCacheEntry(address).Dirty := in_msg.Dirty;
+
+      // Sanity check left disabled: plain DATA responses are expected clean.
+      if (in_msg.Type == CoherenceResponseType:DATA) {
+        //assert(in_msg.Dirty == false);
+      }
+    }
+
+  }
+
+  // Debug variant of u_writeDataToCache: asserts the incoming data matches
+  // what we already hold before rewriting it.
+  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
+    peek(responseToL1Cache_in, ResponseMsg) {
+      assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
+      getCacheEntry(address).DataBlk := in_msg.DataBlk;
+      getCacheEntry(address).Dirty := in_msg.Dirty;
+    }
+  }
+
+  // Free the block from whichever L1 (D first, else I) currently holds it,
+  // allowing a replacement to proceed in parallel with the fetch.
+  action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+    if (L1DcacheMemory.isTagPresent(address)) {
+      L1DcacheMemory.deallocate(address);
+    } else {
+      L1IcacheMemory.deallocate(address);
+    }
+  }
+
+  // Allocate a D-cache entry for the address unless one already exists.
+  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
+    if (L1DcacheMemory.isTagPresent(address) == false) {
+      L1DcacheMemory.allocate(address);
+    }
+  }
+
+  // Allocate an I-cache entry for the address unless one already exists.
+  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
+    if (L1IcacheMemory.isTagPresent(address) == false) {
+      L1IcacheMemory.allocate(address);
+    }
+  }
+
+
+
+  // Record a demand miss in the profiler (currently disabled at call sites).
+  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      profile_miss(in_msg, id);
+    }
+  }
+
+  // Recycle = move the head message to the back of its queue to retry later
+  // (a stall, not a drop).
+  action(z_recycleRequestQueue, "z", desc="Send the head of the request queue to the back of the queue.") {
+    requestNetwork_in.recycle();
+  }
+
+  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
+    mandatoryQueue_in.recycle();
+  }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/L2_Replacement from transient states
+  // Transitions for Load/Store/L2_Replacement from transient states
+  // Stalls: CPU requests and forwarded requests that arrive while a
+  // transaction is in flight are recycled (retried later), not serviced.
+  transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
+    zz_recycleMandatoryQueue;
+  }
+
+  transition({M_W, MM_W}, L1_Replacement) {
+    zz_recycleMandatoryQueue;
+  }
+
+  // During the exclusive-use window, defer external requests too.
+  transition({M_W, MM_W}, {Fwd_GETS, Fwd_GETX, Own_GETX, Inv}) {
+    z_recycleRequestQueue;
+  }
+
+  transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
+    zz_recycleMandatoryQueue;
+  }
+
+  // Transitions from Idle: allocate a block (D- or I-cache per request
+  // type) and a TBE, then issue GETS/GETX. Miss profiling is disabled.
+  transition(I, Load, IS) {
+    ii_allocateL1DCacheBlock;
+    i_allocateTBE;
+    a_issueGETS;
+    // uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition(I, Ifetch, IS) {
+    jj_allocateL1ICacheBlock;
+    i_allocateTBE;
+    a_issueGETS;
+    // uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition(I, Store, IM) {
+    ii_allocateL1DCacheBlock;
+    i_allocateTBE;
+    b_issueGETX;
+    // uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition(I, L1_Replacement) {
+    kk_deallocateL1CacheBlock;
+  }
+
+  // Invalidations in I still get acked so the requestor's count works out.
+  transition(I, Inv) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  // Transitions from Shared
+  transition({S, SM}, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  // Upgrade: keep the shared copy, request exclusivity.
+  transition(S, Store, SM) {
+    i_allocateTBE;
+    b_issueGETX;
+    // uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  // Replacement of a shared block still needs a PUTS handshake with the L2.
+  transition(S, L1_Replacement, SI) {
+    i_allocateTBE;
+    dd_issuePUTS;
+    kk_deallocateL1CacheBlock;
+  }
+
+  transition(S, Inv, I) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  transition(S, Fwd_GETS) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  // Transitions from Owned: O supplies data to sharers; a Fwd_GETX hands
+  // ownership away entirely.
+  transition({O, OM}, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(O, Store, OM) {
+    i_allocateTBE;
+    b_issueGETX;
+    // uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition(O, L1_Replacement, OI) {
+    i_allocateTBE;
+    dd_issuePUTO;
+    kk_deallocateL1CacheBlock;
+  }
+
+  transition(O, Fwd_GETX, I) {
+    ee_sendDataExclusive;
+    l_popForwardQueue;
+  }
+
+  transition(O, Fwd_GETS) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  // Transitions from MM (modified, store-completed): note Fwd_GETS also
+  // surrenders the block exclusively (-> I) rather than demoting to O.
+  transition({MM, MM_W}, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition({MM, MM_W}, Store) {
+    hh_store_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(MM, L1_Replacement, MI) {
+    i_allocateTBE;
+    d_issuePUTX;
+    kk_deallocateL1CacheBlock;
+  }
+
+  transition(MM, Fwd_GETX, I) {
+    ee_sendDataExclusive;
+    l_popForwardQueue;
+  }
+
+  transition(MM, Fwd_GETS, I) {
+    ee_sendDataExclusive;
+    l_popForwardQueue;
+  }
+
+  // Transitions from M (exclusive-clean): a store promotes to MM; a
+  // Fwd_GETS demotes to O (we keep an owned copy), unlike MM.
+  transition({M, M_W}, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(M, Store, MM) {
+    hh_store_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(M_W, Store, MM_W) {
+    hh_store_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(M, L1_Replacement, MI) {
+    i_allocateTBE;
+    d_issuePUTX;
+    kk_deallocateL1CacheBlock;
+  }
+
+  transition(M, Fwd_GETX, I) {
+    // e_sendData;
+    ee_sendDataExclusive;
+    l_popForwardQueue;
+  }
+
+  transition(M, Fwd_GETS, O) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  // Transitions from IM
+
+  transition(IM, Inv) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  transition(IM, Ack) {
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  // Data arrival during a GETX: become OM and keep collecting acks.
+  transition(IM, {Exclusive_Data, Data}, OM) {
+    u_writeDataToCache;
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  // Transitions from SM: an Inv during an upgrade drops us back to IM
+  // (our shared copy is gone, but the GETX is still outstanding).
+  transition(SM, Inv, IM) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  transition(SM, Ack) {
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  // Data is redundant in SM (we already hold it); verification disabled.
+  transition(SM, {Data, Exclusive_Data}, OM) {
+    // v_writeDataToCacheVerify;
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(SM, Fwd_GETS) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  // Transitions from OM: collecting invalidation acks for our own GETX.
+  // Own_GETX carries the ack count on the request network, so it uses the
+  // mm_ variant of the decrement.
+  transition(OM, Own_GETX) {
+    mm_decrementNumberOfMessages;
+    o_checkForCompletion;
+    l_popForwardQueue;
+  }
+
+
+  // A racing GETX wins: surrender the data and fall back to IM (our own
+  // GETX will be re-serviced by the new owner). OMF variant disabled.
+  // transition(OM, Fwd_GETX, OMF) {
+  transition(OM, Fwd_GETX, IM) {
+    ee_sendDataExclusive;
+    l_popForwardQueue;
+  }
+
+  transition(OM, Fwd_GETS, OM) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  //transition({OM, OMF}, Ack) {
+  transition(OM, Ack) {
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  // All acks in: perform the store, unblock the directory, and enter the
+  // timed exclusive-use window MM_W.
+  transition(OM, All_acks, MM_W) {
+    hh_store_hit;
+    gg_sendUnblockExclusive;
+    s_deallocateTBE;
+    o_scheduleUseTimeout;
+    j_popTriggerQueue;
+  }
+
+  transition(MM_W, Use_Timeout, MM) {
+    jj_unsetUseTimer;
+  }
+
+  // Transitions from IS (read miss outstanding)
+
+  transition(IS, Inv) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  transition(IS, Data, S) {
+    u_writeDataToCache;
+    m_decrementNumberOfMessages;
+    h_load_hit;
+    g_sendUnblock;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  // Exclusive data on a read miss: enter the timed exclusive window M_W
+  // so an immediately-following store can hit locally.
+  transition(IS, Exclusive_Data, M_W) {
+    u_writeDataToCache;
+    m_decrementNumberOfMessages;
+    h_load_hit;
+    gg_sendUnblockExclusive;
+    o_scheduleUseTimeout;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition(M_W, Use_Timeout, M) {
+    jj_unsetUseTimer;
+  }
+
+  // Transitions from OI/MI (writeback in flight; data lives in the TBE)
+
+  transition(MI, Fwd_GETS, OI) {
+    q_sendDataFromTBEToCache;
+    l_popForwardQueue;
+  }
+
+  transition(MI, Fwd_GETX, II) {
+    q_sendExclusiveDataFromTBEToCache;
+    l_popForwardQueue;
+  }
+
+  transition({SI, OI}, Fwd_GETS) {
+    q_sendDataFromTBEToCache;
+    l_popForwardQueue;
+  }
+
+  transition(OI, Fwd_GETX, II) {
+    q_sendExclusiveDataFromTBEToCache;
+    l_popForwardQueue;
+  }
+
+  // L2 accepted the writeback and wants the data.
+  transition({SI, OI, MI}, Writeback_Ack_Data, I) {
+    qq_sendWBDataFromTBEToL2; // always send data
+    s_deallocateTBE;
+    l_popForwardQueue;
+  }
+
+  // L2 accepted the writeback but does not need the data.
+  transition({SI, OI, MI}, Writeback_Ack, I) {
+    g_sendUnblock;
+    s_deallocateTBE;
+    l_popForwardQueue;
+  }
+
+  transition({MI, OI}, Writeback_Nack, OI) {
+    // FIXME: This might cause deadlock by re-using the writeback
+    // channel, we should handle this case differently.
+    dd_issuePUTO;
+    l_popForwardQueue;
+  }
+
+  // Transitions from II (writeback raced with an exclusive transfer; we no
+  // longer hold the data, just waiting for the writeback handshake)
+  transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
+    g_sendUnblock;
+    s_deallocateTBE;
+    l_popForwardQueue;
+  }
+
+  // Nacked in II: the block is already gone, so just finish.
+  // transition({II, SI}, Writeback_Nack, I) {
+  transition(II, Writeback_Nack, I) {
+    s_deallocateTBE;
+    l_popForwardQueue;
+  }
+
+  // Nacked in SI: retry the PUTS (see the deadlock FIXME above).
+  transition(SI, Writeback_Nack) {
+    dd_issuePUTS;
+    l_popForwardQueue;
+  }
+
+  transition(II, Inv) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  transition(SI, Inv, II) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+}
+
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
new file mode 100644
index 000000000..fa01f925c
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
@@ -0,0 +1,2569 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+machine(L2Cache, "Token protocol") { // NOTE(review): desc says "Token protocol" but the file is MOESI_CMP_directory — looks copy-pasted; flagged only, since the string feeds generated output
+
+ // L2 BANK QUEUES
+ // From local bank of L2 cache TO the network
+ MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false"; // this L2 bank -> a local L1
+ MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="1", ordered="false"; // this L2 bank -> mod-directory
+ MessageBuffer responseFromL2Cache, network="To", virtual_network="2", ordered="false"; // this L2 bank -> a local L1 || mod-directory
+
+ // FROM the network to this local bank of L2 cache
+ MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false"; // a local L1 -> this L2 bank, Lets try this???
+ MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="1", ordered="false"; // mod-directory -> this L2 bank
+ MessageBuffer responseToL2Cache, network="From", virtual_network="2", ordered="false"; // a local L1 || mod-directory -> this L2 bank
+// MessageBuffer L1WritebackToL2Cache, network="From", virtual_network="3", ordered="false";
+
+ // STATES
+ enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") { // default I; getState() returns NP for unknown addresses
+
+ // Stable states
+ NP, desc="Not Present";
+ I, desc="Invalid";
+ ILS, desc="Idle/NP, but local sharers exist";
+ ILX, desc="Idle/NP, but local exclusive exists";
+ ILO, desc="Idle/NP, but local owner exists";
+ ILOX, desc="Idle/NP, but local owner exists and chip is exclusive";
+ ILOS, desc="Idle/NP, but local owner exists and local sharers as well";
+ ILOSX, desc="Idle/NP, but local owner exists, local sharers exist, chip is exclusive ";
+ S, desc="Shared, no local sharers";
+ O, desc="Owned, no local sharers";
+ OLS, desc="Owned with local sharers";
+ OLSX, desc="Owned with local sharers, chip is exclusive";
+ SLS, desc="Shared with local sharers";
+ M, desc="Modified";
+
+ // Transient States
+
+ IFGX, desc="Blocked, forwarded global GETX to local owner/exclusive. No other on-chip invs needed";
+ IFGS, desc="Blocked, forwarded global GETS to local owner";
+ ISFGS, desc="Blocked, forwarded global GETS to local owner, local sharers exist";
+ // UNUSED
+ IFGXX, desc="Blocked, forwarded global GETX to local owner but may need acks from other sharers";
+ OFGX, desc="Blocked, forwarded global GETX to owner and got data but may need acks";
+
+ OLSF, desc="Blocked, got Fwd_GETX with local sharers, waiting for local inv acks";
+
+ // writebacks
+ ILOW, desc="local WB request, was ILO";
+ ILOXW, desc="local WB request, was ILOX";
+ ILOSW, desc="local WB request, was ILOS";
+ ILOSXW, desc="local WB request, was ILOSX";
+ SLSW, desc="local WB request, was SLS";
+ OLSW, desc="local WB request, was OLS";
+ ILSW, desc="local WB request, was ILS";
+ IW, desc="local WB request from only sharer, was ILS";
+ OW, desc="local WB request from only sharer, was OLS";
+ SW, desc="local WB request from only sharer, was SLS";
+ OXW, desc="local WB request from only sharer, was OLSX";
+ OLSXW, desc="local WB request from sharer, was OLSX";
+ ILXW, desc="local WB request, was ILX";
+
+ IFLS, desc="Blocked, forwarded local GETS to _some_ local sharer";
+ IFLO, desc="Blocked, forwarded local GETS to local owner";
+ IFLOX, desc="Blocked, forwarded local GETS to local owner but chip is exclusive";
+ IFLOXX, desc="Blocked, forwarded local GETX to local owner/exclusive, chip is exclusive";
+ IFLOSX, desc="Blocked, forwarded local GETS to local owner w/ other sharers, chip is exclusive";
+ IFLXO, desc="Blocked, forwarded local GETX to local owner with other sharers, chip is exclusive";
+
+ IGS, desc="Semi-blocked, issued local GETS to directory";
+ IGM, desc="Blocked, issued local GETX to directory. Need global acks and data";
+ IGMLS, desc="Blocked, issued local GETX to directory but may need to INV local sharers";
+ IGMO, desc="Blocked, have data for local GETX but need all acks";
+ IGMIO, desc="Blocked, issued local GETX, local owner with possible local sharer, may need to INV";
+ OGMIO, desc="Blocked, issued local GETX, was owner, may need to INV";
+ IGMIOF, desc="Blocked, issued local GETX, local owner, waiting for global acks, got Fwd_GETX";
+ IGMIOFS, desc="Blocked, issued local GETX, local owner, waiting for global acks, got Fwd_GETS";
+ OGMIOF, desc="Blocked, issued local GETX, was owner, waiting for global acks, got Fwd_GETX";
+
+ II, desc="Blocked, handling invalidations";
+ MM, desc="Blocked, was M satisfying local GETX";
+ SS, desc="Blocked, was S satisfying local GETS";
+ OO, desc="Blocked, was O satisfying local GETS";
+ OLSS, desc="Blocked, satisfying local GETS";
+ OLSXS, desc="Blocked, satisfying local GETS";
+ SLSS, desc="Blocked, satisfying local GETS";
+
+ OI, desc="Blocked, doing writeback, was O";
+ MI, desc="Blocked, doing writeback, was M";
+ MII, desc="Blocked, doing writeback, was M, got Fwd_GETX";
+ OLSI, desc="Blocked, doing writeback, was OLS";
+ ILSI, desc="Blocked, doing writeback, was OLS got Fwd_GETX"; // NOTE(review): desc says OLS; name suggests ILS — verify
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+
+ // Requests
+ L1_GETS, desc="local L1 GETS request";
+ L1_GETX, desc="local L1 GETX request";
+ L1_PUTO, desc="local owner wants to writeback";
+ L1_PUTX, desc="local exclusive wants to writeback";
+ L1_PUTS_only, desc="only local sharer wants to writeback";
+ L1_PUTS, desc="local sharer wants to writeback";
+ Fwd_GETX, desc="A GetX from another processor";
+ Fwd_GETS, desc="A GetS from another processor";
+ Own_GETX, desc="A GetX from this node";
+ Inv, desc="Invalidations from the directory";
+
+ // Responses
+ IntAck, desc="Received an ack message"; // internal ack: sender is not an L2Cache (see responseNetwork_in)
+ ExtAck, desc="Received an ack message"; // external ack: SenderMachine == L2Cache (see responseNetwork_in)
+ All_Acks, desc="Received all ack messages";
+ Data, desc="Received a data message, responder has a shared copy";
+ Data_Exclusive, desc="Received a data message";
+ L1_WBCLEANDATA, desc="Writeback from L1, with data"; // maps from WRITEBACK_CLEAN_DATA
+ L1_WBDIRTYDATA, desc="Writeback from L1, with data"; // maps from WRITEBACK_DIRTY_DATA
+
+ Writeback_Ack, desc="Writeback O.K. from directory";
+ Writeback_Nack, desc="Writeback not O.K. from directory";
+
+ Unblock, desc="Local L1 is telling L2 dir to unblock";
+ Exclusive_Unblock, desc="Local L1 is telling L2 dir to unblock"; // exclusive variant of Unblock
+
+
+ // events initiated by this L2
+ L2_Replacement, desc="L2 Replacement", format="!r";
+
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") { // L2 cache line: data plus local sharer/owner bookkeeping
+ State CacheState, desc="cache state";
+ NetDest Sharers, desc="Set of the internal processors that want the block in shared state";
+ MachineID Owner, desc="ID of the L1 cache to forward the block to once we get a response";
+ bool OwnerValid, default="false", desc="true if Owner means something";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ DataBlk DataBlk, desc="data for the block"; // sic in diff? see original — field holds the cached data
+ }
+
+
+ structure(DirEntry, desc="...") { // dataless directory record used when the block is tracked but not cached here
+ NetDest Sharers, desc="Set of the internal processors that want the block in shared state";
+ MachineID Owner, desc="ID of the L1 cache to forward the block to once we get a response";
+ bool OwnerValid, default="false", desc="true if Owner means something";
+ State DirState, desc="directory state";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") { // per-address transaction buffer for in-flight requests
+ Address Address, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ Address PC, desc="Program counter of request";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+
+ int NumExtPendingAcks, default="0", desc="Number of global acks/data messages waiting for";
+ int NumIntPendingAcks, default="0", desc="Number of global acks/data messages waiting for"; // NOTE(review): desc duplicates the Ext field; presumably *local* acks — verify
+ int Fwd_GETX_ExtAcks, default="0", desc="Number of acks that requestor will need";
+ int Local_GETX_IntAcks, default="0", desc="Number of acks that requestor will need";
+
+ NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
+ MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
+ NetDest Fwd_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
+ MachineID Fwd_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
+ }
+
+ external_type(TBETable) { // implemented in C++; declared here for SLICC type checking
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ external_type(CacheMemory) { // set-associative cache backing store (C++)
+ bool cacheAvail(Address);
+ Address cacheProbe(Address); // returns the victim address for a replacement
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ void setMRU(Address);
+ }
+
+ external_type(PerfectCacheMemory) { // unbounded tag store used for the local directory (C++)
+ void allocate(Address);
+ void deallocate(Address);
+ DirEntry lookup(Address);
+ bool isTagPresent(Address);
+ }
+
+
+ TBETable L2_TBEs, template_hack="<L2Cache_TBE>"; // outstanding-transaction table
+ CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)+"_L2"';
+ PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>"; // sharer/owner tracking for blocks not resident in this bank
+
+
+ Entry getL2CacheEntry(Address addr), return_by_ref="yes" { // WARNING: falls off the end with no return when the tag is absent — callers must guard with isTagPresent
+ if (L2cacheMemory.isTagPresent(addr)) {
+ return L2cacheMemory[addr];
+ }
+ }
+
+ void changePermission(Address addr, AccessPermission permission) { // no-op when the tag is not present
+ if (L2cacheMemory.isTagPresent(addr)) {
+ return L2cacheMemory.changePermission(addr, permission); // "return" of a void call — acts as a plain call
+ }
+ }
+
+ bool isCacheTagPresent(Address addr) { // block resident in this L2 bank?
+ return (L2cacheMemory.isTagPresent(addr) );
+ }
+
+ bool isDirTagPresent(Address addr) { // block tracked only in the local directory?
+ return (localDirectory.isTagPresent(addr) );
+ }
+
+ bool isOnlySharer(Address addr, MachineID shar_id) { // true iff shar_id is the sole recorded local sharer of addr
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false); // cache entry and dir entry are mutually exclusive
+ if (L2cacheMemory[addr].Sharers.count() > 1) {
+ return false;
+ }
+ else if (L2cacheMemory[addr].Sharers.count() == 1) {
+ if (L2cacheMemory[addr].Sharers.isElement(shar_id)) {
+ return true;
+ }
+ else {
+ return false; // something happened which should cause this PUTS to be nacked
+ }
+ // removed unreachable "return true;" — both branches above already return
+ }
+ else {
+ return false;
+ }
+ }
+ else if (localDirectory.isTagPresent(addr)){
+ if (localDirectory[addr].Sharers.count() > 1) {
+ return false;
+ }
+ else if (localDirectory[addr].Sharers.count() == 1) {
+ if (localDirectory[addr].Sharers.isElement(shar_id)) {
+ return true;
+ }
+ else {
+ return false; // something happened which should cause this PUTS to be nacked
+ }
+ }
+ else {
+ return false;
+ }
+ }
+ else {
+ // shouldn't happen unless L1 issues PUTS before unblock received
+ return false;
+ }
+ }
+
+ void copyCacheStateToDir(Address addr) { // preserve sharer/owner bookkeeping when evicting the cache entry
+ assert(localDirectory.isTagPresent(addr) == false);
+ localDirectory.allocate(addr);
+ localDirectory[addr].DirState := L2cacheMemory[addr].CacheState;
+ localDirectory[addr].Sharers := L2cacheMemory[addr].Sharers;
+ localDirectory[addr].Owner := L2cacheMemory[addr].Owner;
+ localDirectory[addr].OwnerValid := L2cacheMemory[addr].OwnerValid;
+
+ }
+
+ void copyDirToCache(Address addr) { // inverse of copyCacheStateToDir; caller deallocates the dir entry
+ L2cacheMemory[addr].Sharers := localDirectory[addr].Sharers;
+ L2cacheMemory[addr].Owner := localDirectory[addr].Owner;
+ L2cacheMemory[addr].OwnerValid := localDirectory[addr].OwnerValid;
+ }
+
+
+ void recordLocalSharerInDir(Address addr, MachineID shar_id) { // add shar_id to whichever structure currently tracks addr
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ L2cacheMemory[addr].Sharers.add(shar_id);
+ }
+ else {
+ if (localDirectory.isTagPresent(addr) == false) { // lazily allocate a fresh dir entry
+ localDirectory.allocate(addr);
+ localDirectory[addr].Sharers.clear();
+ localDirectory[addr].OwnerValid := false;
+ }
+ localDirectory[addr].Sharers.add(shar_id);
+ }
+ }
+
+ void recordNewLocalExclusiveInDir(Address addr, MachineID exc_id) { // make exc_id the sole owner; drops all sharers
+
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ L2cacheMemory[addr].Sharers.clear();
+ L2cacheMemory[addr].OwnerValid := true;
+ L2cacheMemory[addr].Owner := exc_id;
+ }
+ else {
+ if (localDirectory.isTagPresent(addr) == false) {
+ localDirectory.allocate(addr);
+ }
+ localDirectory[addr].Sharers.clear();
+ localDirectory[addr].OwnerValid := true;
+ localDirectory[addr].Owner := exc_id;
+ }
+ }
+
+
+ void removeAllLocalSharersFromDir(Address addr) { // clear sharers and owner; dir entry assumed to exist in the else branch
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ L2cacheMemory[addr].Sharers.clear();
+ L2cacheMemory[addr].OwnerValid := false;
+ }
+ else {
+ localDirectory[addr].Sharers.clear();
+ localDirectory[addr].OwnerValid := false;
+ }
+ }
+
+ void removeSharerFromDir(Address addr, MachineID sender) { // drop one sharer from whichever structure tracks addr
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ L2cacheMemory[addr].Sharers.remove(sender);
+ }
+ else {
+ localDirectory[addr].Sharers.remove(sender);
+ }
+ }
+
+ void removeOwnerFromDir(Address addr, MachineID sender) { // note: "sender" parameter is unused; only clears OwnerValid
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ L2cacheMemory[addr].OwnerValid := false;
+ }
+ else {
+ localDirectory[addr].OwnerValid := false;
+ }
+ }
+
+ bool isLocalSharer(Address addr, MachineID shar_id) { // is shar_id recorded as a sharer of addr?
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return L2cacheMemory[addr].Sharers.isElement(shar_id);
+ }
+ else {
+ return localDirectory[addr].Sharers.isElement(shar_id); // dir entry assumed present
+ }
+
+ }
+
+ NetDest getLocalSharers(Address addr) { // sharer set from cache entry or, failing that, the local directory
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return L2cacheMemory[addr].Sharers;
+ }
+ else {
+ return localDirectory[addr].Sharers;
+ }
+
+ }
+
+ MachineID getLocalOwner(Address addr) { // only meaningful when isLocalOwnerValid(addr)
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return L2cacheMemory[addr].Owner;
+ }
+ else {
+ return localDirectory[addr].Owner;
+ }
+
+ }
+
+
+ int countLocalSharers(Address addr) { // number of recorded local sharers (owner not included)
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return L2cacheMemory[addr].Sharers.count();
+ }
+ else {
+ return localDirectory[addr].Sharers.count();
+ }
+ }
+
+ bool isLocalOwnerValid(Address addr) { // does a local L1 currently own the block?
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return L2cacheMemory[addr].OwnerValid;
+ }
+ else {
+ return localDirectory[addr].OwnerValid;
+ }
+ }
+
+ int countLocalSharersExceptRequestor(Address addr, MachineID requestor) { // sharer count excluding requestor itself (ack accounting)
+ if (isCacheTagPresent(addr)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ if (L2cacheMemory[addr].Sharers.isElement(requestor)) {
+ return ( L2cacheMemory[addr].Sharers.count() - 1 );
+ }
+ else {
+ return L2cacheMemory[addr].Sharers.count();
+ }
+ }
+ else {
+ if (localDirectory[addr].Sharers.isElement(requestor)) {
+ return ( localDirectory[addr].Sharers.count() - 1 );
+ }
+ else {
+ return localDirectory[addr].Sharers.count();
+ }
+ }
+ }
+
+
+
+ State getState(Address addr) { // precedence: in-flight TBE, then cache entry, then dir entry, else NP
+
+ if (L2_TBEs.isPresent(addr)) {
+ return L2_TBEs[addr].TBEState;
+ } else if (isCacheTagPresent(addr)) {
+ return getL2CacheEntry(addr).CacheState;
+ } else if (isDirTagPresent(addr)) {
+ return localDirectory[addr].DirState;
+ } else {
+ return State:NP;
+ }
+ }
+
+ string getStateStr(Address addr) { // debug helper: human-readable state name
+ return L2Cache_State_to_string(getState(addr));
+ }
+
+ string getCoherenceRequestTypeStr(CoherenceRequestType type) { // debug helper
+ return CoherenceRequestType_to_string(type);
+ }
+
+
+ void setState(Address addr, State state) { // route the new state to TBE, cache entry, or dir entry as appropriate
+ assert((localDirectory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false); // never tracked in both
+
+ if (L2_TBEs.isPresent(addr)) {
+ L2_TBEs[addr].TBEState := state;
+ }
+
+ if (
+ (state == State:M) ||
+ (state == State:O) ||
+ (state == State:S) ||
+ (state == State:OLS) ||
+ (state == State:SLS) ||
+ (state == State:OLSX)
+ // removed duplicate "(state == State:SLS)" term — it was listed twice
+ ) {
+ assert(isCacheTagPresent(addr)); // data-holding states require a resident cache entry
+ }
+ else if (
+ (state == State:ILS) ||
+ (state == State:ILX) ||
+ (state == State:ILO) ||
+ (state == State:ILOX) ||
+ (state == State:ILOS) ||
+ (state == State:ILOSX)
+ ) {
+ // assert(isCacheTagPresent(addr) == false);
+ }
+
+
+
+ if (isCacheTagPresent(addr)) {
+ if ( ((getL2CacheEntry(addr).CacheState != State:M) && (state == State:M)) ||
+ ((getL2CacheEntry(addr).CacheState != State:S) && (state == State:S)) ||
+ ((getL2CacheEntry(addr).CacheState != State:O) && (state == State:O)) ) {
+ getL2CacheEntry(addr).CacheState := state;
+ // disable Coherence Checker for now
+ // sequencer.checkCoherence(addr);
+ }
+ else {
+ getL2CacheEntry(addr).CacheState := state;
+ }
+
+ // Set permission
+ changePermission(addr, AccessPermission:Read_Only); // NOTE(review): always Read_Only, even for State:M — verify intended
+ }
+ else if (localDirectory.isTagPresent(addr)) {
+ localDirectory[addr].DirState := state;
+ }
+
+ }
+
+
+ bool isBlockExclusive(Address addr) { // conservative check used externally (e.g. coherence checking)
+ if (isCacheTagPresent(addr)) {
+ // the list of exclusive states below is likely incomplete
+ if ( (getL2CacheEntry(addr).CacheState == State:M) ||
+ (getL2CacheEntry(addr).CacheState == State:MI) ) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool isBlockShared(Address addr) { // conservative check; transient states included deliberately
+ if (isCacheTagPresent(addr)) {
+ // the list of shared states below is likely incomplete
+ if ( (getL2CacheEntry(addr).CacheState == State:S) ||
+ (getL2CacheEntry(addr).CacheState == State:O) ||
+ (getL2CacheEntry(addr).CacheState == State:OI) ||
+ (getL2CacheEntry(addr).CacheState == State:OXW) ) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ MessageBuffer triggerQueue, ordered="true"; // internal self-queue for All_Acks triggers
+
+ out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
+ out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
+
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+
+
+ // ** IN_PORTS **
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue) { // self-triggered events (currently only ALL_ACKS)
+ if (triggerQueue_in.isReady()) {
+ peek(triggerQueue_in, TriggerMsg) {
+ if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_Acks, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+
+ // Request Network
+ in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) { // global requests/forwards from the directory
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ if (in_msg.Requestor == machineID) { // our own GETX echoed back
+ trigger(Event:Own_GETX, in_msg.Address);
+ } else {
+ trigger(Event:Fwd_GETX, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Fwd_GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:Inv, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+ trigger(Event:Writeback_Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
+ trigger(Event:Writeback_Nack, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) { // requests from this bank's local L1 caches
+ if (L1requestNetwork_in.isReady()) {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:L1_GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:L1_GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTO) {
+ trigger(Event:L1_PUTO, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ trigger(Event:L1_PUTX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTS) {
+ if (isOnlySharer(in_msg.Address, in_msg.Requestor)) { // split PUTS by whether requestor is the last sharer
+ trigger(Event:L1_PUTS_only, in_msg.Address);
+ }
+ else {
+ trigger(Event:L1_PUTS, in_msg.Address);
+ }
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+
+ // Response Network
+ in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) { // acks, data, unblocks, and L1 writeback data
+ if (responseNetwork_in.isReady()) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ if (in_msg.SenderMachine == MachineType:L2Cache) { // external ack (another L2) vs internal (local L1)
+ trigger(Event:ExtAck, in_msg.Address);
+ }
+ else {
+ trigger(Event:IntAck, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data_Exclusive, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+ trigger(Event:Unblock, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
+ trigger(Event:Exclusive_Unblock, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
+ if (L2cacheMemory.isTagPresent(in_msg.Address) == false &&
+ L2cacheMemory.cacheAvail(in_msg.Address) == false) { // no entry and no free way: evict a victim first
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ else {
+ trigger(Event:L1_WBDIRTYDATA, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
+ if (L2cacheMemory.isTagPresent(in_msg.Address) == false &&
+ L2cacheMemory.cacheAvail(in_msg.Address) == false) {
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ else {
+ trigger(Event:L1_WBCLEANDATA, in_msg.Address);
+ }
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+
+ // ACTIONS
+
+ action(a_issueGETS, "a", desc="issue local request globally") { // forward an L1 GETS up to the directory
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Requestor := machineID; // this L2 bank requests on the L1's behalf
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+ action(a_issueGETX, "\a", desc="issue local request globally") { // forward an L1 GETX up to the directory
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+ action(b_issuePUTX, "b", desc="Issue PUTX") { // start writing back an exclusive block
+ enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(b_issuePUTO, "\b", desc="Issue PUTO") { // start writing back an owned block
+ enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUTO;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ /* PUTO, but local sharers exist */
+ action(b_issuePUTO_ls, "\bb", desc="Issue PUTO") { // PUTO variant telling the dir that local sharers remain
+ enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUTO_SHARERS;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(c_sendDataFromTBEToL1GETS, "c", desc="Send data from TBE to L1 requestors in TBE") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID; // NOTE(review): SenderMachine not set here, unlike the DATA_EXCLUSIVE variants — confirm receivers ignore it for DATA
+ out_msg.Destination.addNetDest(L2_TBEs[address].L1_GetS_IDs);
+ out_msg.DataBlk := L2_TBEs[address].DataBlk;
+ // out_msg.Dirty := L2_TBEs[address].Dirty;
+ // shared data should be clean
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(L2_TBEs[address].DataBlk);
+ }
+
+ action(c_sendDataFromTBEToL1GETX, "\c", desc="Send data from TBE to L1 requestors in TBE") { // exclusive data plus ack count the L1 must collect
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID);
+ out_msg.DataBlk := L2_TBEs[address].DataBlk;
+ out_msg.Dirty := L2_TBEs[address].Dirty;
+ out_msg.Acks := L2_TBEs[address].Local_GETX_IntAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(L2_TBEs[address].DataBlk);
+ }
+
+ action(c_sendExclusiveDataFromTBEToL1GETS, "\cc", desc="Send data from TBE to L1 requestors in TBE") { // grant exclusivity even though the request was a GETS
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.addNetDest(L2_TBEs[address].L1_GetS_IDs);
+ out_msg.DataBlk := L2_TBEs[address].DataBlk;
+ out_msg.Dirty := L2_TBEs[address].Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(c_sendDataFromTBEToFwdGETX, "cc", desc="Send data from TBE to external GETX") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(L2_TBEs[address].Fwd_GetX_ID);
+ out_msg.DataBlk := L2_TBEs[address].DataBlk;
+ out_msg.Dirty := L2_TBEs[address].Dirty;
+ out_msg.Acks := L2_TBEs[address].Fwd_GETX_ExtAcks; // external acks the remote requestor must still collect
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(c_sendDataFromTBEToFwdGETS, "ccc", desc="Send data from TBE to external GETX") { // NOTE(review): desc says GETX but this services a forwarded GETS
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID; // NOTE(review): SenderMachine not set, unlike the exclusive variants — verify
+ out_msg.Destination.addNetDest(L2_TBEs[address].Fwd_GetS_IDs);
+ out_msg.DataBlk := L2_TBEs[address].DataBlk;
+ // out_msg.Dirty := L2_TBEs[address].Dirty;
+ // shared data should be clean
+ out_msg.Dirty := false;
+ out_msg.Acks := L2_TBEs[address].Fwd_GETX_ExtAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(L2_TBEs[address].DataBlk);
+ }
+
+ action(c_sendExclusiveDataFromTBEToFwdGETS, "\ccc", desc="Send data from TBE to external GETX") { // exclusive handoff to forwarded-GETS requestors
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.addNetDest(L2_TBEs[address].Fwd_GetS_IDs);
+ out_msg.DataBlk := L2_TBEs[address].DataBlk;
+ out_msg.Dirty := L2_TBEs[address].Dirty;
+ out_msg.Acks := L2_TBEs[address].Fwd_GETX_ExtAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(L2_TBEs[address].DataBlk);
+ }
+
+ action(d_sendDataToL1GETS, "d", desc="Send data directly to L1 requestor") { // L2 hit path: serve from the cache entry
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ // out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ // shared data should be clean
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data; // distinct size type marks this as an L2 hit
+ }
+ }
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+ }
+
+ action(d_sendDataToL1GETX, "\d", desc="Send data and a token from TBE to L1 requestor") { // L2 hit path; ack count comes from the TBE
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
+ out_msg.Acks := L2_TBEs[address].Local_GETX_IntAcks; // requires a live TBE for this address
+ }
+ }
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+ }
+
+ action(dd_sendDataToFwdGETX, "dd", desc="send data") { // serve a forwarded GETX straight from the cache entry
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Acks := in_msg.Acks; // pass through the ack count the directory supplied
+ }
+ }
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+ }
+
+
+ action(dd_sendDataToFwdGETS, "\dd", desc="send data") { // serve a forwarded GETS from the cache entry (stays shared)
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID; // NOTE(review): SenderMachine not set, unlike the GETX variant — verify
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ // out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ // shared data should be clean
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+ }
+
+ action(dd_sendExclusiveDataToFwdGETS, "\d\d", desc="send data") { // exclusive handoff for a forwarded GETS
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID; // NOTE(review): SenderMachine not set here, unlike other DATA_EXCLUSIVE sends — verify
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+  // Send an invalidation ack to the forwarded-GETX requestor recorded in
+  // the TBE.  Acks := 0 - 1 encodes "one ack" as -1 (SLICC has no unary
+  // minus); the requestor sums incoming Acks toward zero.
+  action(e_sendAck, "e", desc="Send ack with the tokens we've collected thus far.") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:ACK;
+      out_msg.Sender := machineID;
+      out_msg.SenderMachine := MachineType:L2Cache;
+
+      out_msg.Destination.add( L2_TBEs[address].Fwd_GetX_ID);
+      out_msg.Acks := 0 - 1;
+      out_msg.MessageSize := MessageSizeType:Response_Control;
+    }
+  }
+
+  // Same ack, but addressed to the local L1 whose request is at the head
+  // of the L1 request queue.
+  action(e_sendAckToL1Requestor, "\e", desc="Send ack with the tokens we've collected thus far.") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.Acks := 0 - 1;
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+      }
+    }
+  }
+
+  // Same ack, but addressed to the local GETX requestor recorded in the TBE
+  // (used when the request message is no longer available to peek).
+  action(e_sendAckToL1RequestorFromTBE, "eee", desc="Send ack with the tokens we've collected thus far.") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:ACK;
+      out_msg.Sender := machineID;
+      out_msg.SenderMachine := MachineType:L2Cache;
+      out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID);
+      out_msg.Acks := 0 - 1;
+      out_msg.MessageSize := MessageSizeType:Response_Control;
+    }
+  }
+
+  // Invalidate every local L1 copy: all sharers plus the local owner (if
+  // any).  NumIntPendingAcks is primed with the number of INVs sent so the
+  // IntAck handler can count them back down to zero.
+  action(ee_sendLocalInv, "\ee", desc="Send local invalidates") {
+    L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
+    DEBUG_EXPR(address);
+    DEBUG_EXPR(getLocalSharers(address));
+    DEBUG_EXPR(id);
+    DEBUG_EXPR(L2_TBEs[address].NumIntPendingAcks);
+    if (isLocalOwnerValid(address)) {
+      // owner gets an INV too, so expect one more ack
+      L2_TBEs[address].NumIntPendingAcks := L2_TBEs[address].NumIntPendingAcks + 1;
+      DEBUG_EXPR(getLocalOwner(address));
+    }
+
+    enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:INV;
+      out_msg.Requestor := machineID;
+      out_msg.RequestorMachine := MachineType:L2Cache;
+      out_msg.Destination.addNetDest(getLocalSharers(address));
+      if (isLocalOwnerValid(address))
+      {
+        out_msg.Destination.add(getLocalOwner(address));
+      }
+      out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+    }
+  }
+
+  // Invalidate local sharers only (owner untouched); no message is sent
+  // when there are no sharers.
+  action(ee_sendLocalInvSharersOnly, "\eee", desc="Send local invalidates to sharers if they exist") {
+
+    // assert(countLocalSharers(address) > 0);
+    L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
+
+    if (countLocalSharers(address) > 0) {
+      enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:INV;
+        out_msg.Requestor := machineID;
+        out_msg.RequestorMachine := MachineType:L2Cache;
+        out_msg.Destination.addNetDest(getLocalSharers(address));
+        out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+      }
+    }
+  }
+
+  // Bump the internal-ack count by one (e.g. for the owner's data reply
+  // that is counted alongside the sharer acks).
+  action(ee_addLocalIntAck, "e\ee", desc="add a local ack to wait for") {
+    L2_TBEs[address].NumIntPendingAcks := L2_TBEs[address].NumIntPendingAcks + 1;
+  }
+
+  // Invalidate all local sharers except the L1 whose request we are
+  // servicing; the INV carries that L1 as Requestor so sharers ack it
+  // directly.  No message is sent when there are no sharers at all.
+  action(ee_issueLocalInvExceptL1Requestor, "\eeee", desc="Send local invalidates to sharers if they exist") {
+    peek(L1requestNetwork_in, RequestMsg) {
+
+      // assert(countLocalSharers(address) > 0);
+      if (countLocalSharers(address) == 0) {
+        L2_TBEs[address].NumIntPendingAcks := 0;
+      }
+      else {
+
+        if (isLocalSharer(address, in_msg.Requestor)) {
+          L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address) - 1;
+        }
+        else {
+          L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
+        }
+
+        enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceRequestType:INV;
+          out_msg.Requestor := in_msg.Requestor;
+          out_msg.RequestorMachine := MachineType:L1Cache;
+          out_msg.Destination.addNetDest(getLocalSharers(address));
+          out_msg.Destination.remove(in_msg.Requestor);
+          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+        }
+      }
+    }
+  }
+
+  // As above, but the excluded requestor comes from the TBE instead of the
+  // head of the L1 request queue.
+  // NOTE(review): unlike ee_issueLocalInvExceptL1Requestor, the enqueue
+  // here is OUTSIDE the sharer-count guard, so an INV is enqueued even
+  // when there are no sharers (possibly with an empty destination set) --
+  // confirm this asymmetry is intentional.
+  action(ee_issueLocalInvExceptL1RequestorInTBE, "\eeeeee", desc="Send local invalidates to sharers if they exist") {
+    if (countLocalSharers(address) == 0) {
+      L2_TBEs[address].NumIntPendingAcks := 0;
+    }
+    else {
+      if (isLocalSharer(address, L2_TBEs[address].L1_GetX_ID)) {
+        L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address) - 1;
+      }
+      else {
+        L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
+      }
+    }
+    enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:INV;
+      out_msg.Requestor := L2_TBEs[address].L1_GetX_ID;
+      out_msg.RequestorMachine := MachineType:L1Cache;
+      out_msg.Destination.addNetDest(getLocalSharers(address));
+      out_msg.Destination.remove(L2_TBEs[address].L1_GetX_ID);
+      out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+    }
+  }
+
+
+  // Tell the global directory this transaction is complete so it can stop
+  // blocking the address (shared-acquire case).
+  action(f_sendUnblock, "f", desc="Send unblock to global directory") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:UNBLOCK;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.Sender := machineID;
+      out_msg.SenderMachine := MachineType:L2Cache;
+      out_msg.MessageSize := MessageSizeType:Unblock_Control;
+    }
+  }
+
+  // As above, but reports that the block was taken exclusively.
+  action(f_sendExclusiveUnblock, "\f", desc="Send unblock to global directory") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.Sender := machineID;
+      out_msg.SenderMachine := MachineType:L2Cache;
+      out_msg.MessageSize := MessageSizeType:Unblock_Control;
+    }
+  }
+
+
+  // Local-directory bookkeeping driven by unblock / writeback responses on
+  // the response network.
+
+  // Record the unblocking L1 as a sharer in the local directory.
+  action(g_recordLocalSharer, "g", desc="Record new local sharer from unblock message") {
+    peek(responseNetwork_in, ResponseMsg) {
+      recordLocalSharerInDir(in_msg.Address, in_msg.Sender);
+    }
+  }
+
+  // Record the unblocking L1 as the exclusive local owner.
+  action(g_recordLocalExclusive, "\g", desc="Record new local exclusive sharer from unblock message") {
+    peek(responseNetwork_in, ResponseMsg) {
+      recordNewLocalExclusiveInDir(address, in_msg.Sender);
+    }
+  }
+
+  // Drop every local sharer for this address.
+  action(gg_clearLocalSharers, "gg", desc="Clear local sharers") {
+    removeAllLocalSharersFromDir(address);
+  }
+
+  // Remove the responding L1 from the sharer list.
+  action(gg_clearSharerFromL1Response, "\gg", desc="Clear sharer from L1 response queue") {
+    peek(responseNetwork_in, ResponseMsg) {
+      removeSharerFromDir(in_msg.Address, in_msg.Sender);
+    }
+  }
+
+  // Remove the responding L1 as owner.
+  action(gg_clearOwnerFromL1Response, "g\g", desc="Clear sharer from L1 response queue") {
+    peek(responseNetwork_in, ResponseMsg) {
+      removeOwnerFromDir(in_msg.Address, in_msg.Sender);
+    }
+  }
+
+  // Record how many internal invalidation acks a local GETX requestor must
+  // collect (every local sharer except the requestor itself).
+  action(h_countLocalSharersExceptRequestor, "h", desc="counts number of acks needed for L1 GETX") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      L2_TBEs[address].Local_GETX_IntAcks := countLocalSharersExceptRequestor(address, in_msg.Requestor);
+    }
+  }
+
+  // Reset the recorded internal-ack requirement.
+  action(h_clearIntAcks, "\h", desc="clear IntAcks") {
+    L2_TBEs[address].Local_GETX_IntAcks := 0;
+  }
+
+  // Same count as h_, but the requestor is read from the TBE.
+  action(hh_countLocalSharersExceptL1GETXRequestorInTBE, "hh", desc="counts number of acks needed for L1 GETX") {
+    L2_TBEs[address].Local_GETX_IntAcks := countLocalSharersExceptRequestor(address, L2_TBEs[address].L1_GetX_ID);
+  }
+
+  // Stash the data/dirty bits from an incoming response into the TBE.
+  action(i_copyDataToTBE, "\i", desc="Copy data from response queue to TBE") {
+    peek(responseNetwork_in, ResponseMsg) {
+      L2_TBEs[address].DataBlk := in_msg.DataBlk;
+      L2_TBEs[address].Dirty := in_msg.Dirty;
+    }
+  }
+
+  // Allocate a TBE for this address, seeding it from the cache entry when
+  // one exists and zeroing the ack counters / requestor-ID sets.
+  action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
+    check_allocate(L2_TBEs);
+    L2_TBEs.allocate(address);
+    if(isCacheTagPresent(address)) {
+      L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
+      L2_TBEs[address].Dirty := getL2CacheEntry(address).Dirty;
+    }
+    L2_TBEs[address].NumIntPendingAcks := 0; // default value
+    L2_TBEs[address].NumExtPendingAcks := 0; // default value
+    L2_TBEs[address].Fwd_GetS_IDs.clear();
+    L2_TBEs[address].L1_GetS_IDs.clear();
+  }
+
+
+
+  // Forward an external (global) request to the local L1 owner so it can
+  // supply the data.  The forwarded message keeps the original request
+  // type; Acks := 0 - 1 tells the owner to expect one fewer ack (SLICC has
+  // no unary minus, hence the 0 - 1 idiom used throughout this file).
+  // Fix: the original assigned out_msg.Type := in_msg.Type twice; the
+  // redundant second assignment is removed.
+  action(j_forwardGlobalRequestToLocalOwner, "j", desc="Forward external request to local owner") {
+    peek(requestNetwork_in, RequestMsg) {
+      enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+        out_msg.Address := in_msg.Address;
+        out_msg.Type := in_msg.Type;
+        out_msg.Requestor := machineID;
+        out_msg.RequestorMachine := MachineType:L2Cache;
+        out_msg.Destination.add(getLocalOwner(in_msg.Address));
+        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+        out_msg.Acks := 0 - 1;
+      }
+    }
+  }
+
+
+  // Forward a local L1 GETS to one existing local sharer, which will
+  // supply the data peer-to-peer.
+  action(k_forwardLocalGETSToLocalSharer, "k", desc="Forward local request to local sharer/owner") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+        out_msg.Address := in_msg.Address;
+        out_msg.Type := CoherenceRequestType:GETS;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.RequestorMachine := MachineType:L1Cache;
+        // should randomize this so one node doesn't get abused more than others
+        out_msg.Destination.add(localDirectory[in_msg.Address].Sharers.smallestElement(MachineType:L1Cache));
+        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+      }
+    }
+  }
+
+  // Forward the TBE-recorded local GETX to the current local owner; Acks
+  // counts the owner's data reply plus each pending internal INV ack.
+  action(k_forwardLocalGETXToLocalOwner, "\k", desc="Forward local request to local owner") {
+    enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:GETX;
+      out_msg.Requestor := L2_TBEs[address].L1_GetX_ID;
+      out_msg.RequestorMachine := MachineType:L1Cache;
+      out_msg.Destination.add(localDirectory[address].Owner);
+      out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+      out_msg.Acks := 1 + L2_TBEs[address].Local_GETX_IntAcks;
+    }
+  }
+
+  // same as previous except that it assumes to TBE is present to get number of acks
+  // (Forwards a local GETX to the local exclusive holder; only the data
+  // reply is expected, hence Acks := 1.)
+  action(kk_forwardLocalGETXToLocalExclusive, "kk", desc="Forward local request to local owner") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+        out_msg.Address := in_msg.Address;
+        out_msg.Type := CoherenceRequestType:GETX;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.RequestorMachine := MachineType:L1Cache;
+        out_msg.Destination.add(getLocalOwner(in_msg.Address));
+        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+        out_msg.Acks := 1;
+      }
+    }
+  }
+
+  // Forward a local GETS to the local owner.
+  action(kk_forwardLocalGETSToLocalOwner, "\kk", desc="Forward local request to local owner") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+        out_msg.Address := in_msg.Address;
+        out_msg.Type := CoherenceRequestType:GETS;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.RequestorMachine := MachineType:L1Cache;
+        out_msg.Destination.add(getLocalOwner(in_msg.Address));
+        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+      }
+    }
+  }
+
+
+  // Ack an L1 writeback and ask the L1 to include the data block.
+  action(l_writebackAckNeedData, "l", desc="Send writeback ack to L1 requesting data") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+        out_msg.Address := in_msg.Address;
+        // out_msg.Type := CoherenceResponseType:WRITEBACK_SEND_DATA;
+        out_msg.Type := CoherenceRequestType:WB_ACK_DATA;
+        out_msg.Requestor := machineID;
+        out_msg.RequestorMachine := MachineType:L2Cache;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  // Ack an L1 writeback and tell the L1 it may silently drop the data.
+  action(l_writebackAckDropData, "\l", desc="Send writeback ack to L1 indicating to drop data") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+        out_msg.Address := in_msg.Address;
+        // out_msg.Type := CoherenceResponseType:WRITEBACK_ACK;
+        out_msg.Type := CoherenceRequestType:WB_ACK;
+        out_msg.Requestor := machineID;
+        out_msg.RequestorMachine := MachineType:L2Cache;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  // Refuse an L1 writeback (the L1 must retry or the writeback has raced
+  // with another transaction).
+  action(ll_writebackNack, "\ll", desc="Send writeback nack to L1") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+        out_msg.Address := in_msg.Address;
+        out_msg.Type := CoherenceRequestType:WB_NACK;
+        out_msg.Requestor := machineID;
+        out_msg.RequestorMachine := MachineType:L2Cache;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  // Consume the head of the global request queue.
+  action(m_popRequestQueue, "m", desc="Pop request queue.") {
+    requestNetwork_in.dequeue();
+  }
+
+  // Fold an internal ack into the pending count.  Internal acks arrive
+  // with Acks = -1 (see e_sendAck), so ADDING in_msg.Acks decrements the
+  // counter -- this is why Int uses '+' while the Ext variants use '-'.
+  action(m_decrementNumberOfMessagesInt, "\m", desc="Decrement the number of messages for which we're waiting") {
+    peek(responseNetwork_in, ResponseMsg) {
+      L2_TBEs[address].NumIntPendingAcks := L2_TBEs[address].NumIntPendingAcks + in_msg.Acks;
+    }
+  }
+
+  // Subtract an external response's ack count (from the response network).
+  action(m_decrementNumberOfMessagesExt, "\mmm", desc="Decrement the number of messages for which we're waiting") {
+    peek(responseNetwork_in, ResponseMsg) {
+      L2_TBEs[address].NumExtPendingAcks := L2_TBEs[address].NumExtPendingAcks - in_msg.Acks;
+    }
+  }
+
+  // Subtract an external ack count carried on the request network
+  // (e.g. our own GETX echoed back by the directory).
+  action(mm_decrementNumberOfMessagesExt, "\mm", desc="Decrement the number of messages for which we're waiting") {
+    peek(requestNetwork_in, RequestMsg) {
+      L2_TBEs[address].NumExtPendingAcks := L2_TBEs[address].NumExtPendingAcks - in_msg.Acks;
+    }
+  }
+
+  // Consume the head of the response queue.
+  action(n_popResponseQueue, "n", desc="Pop response queue") {
+    responseNetwork_in.dequeue();
+  }
+
+  // Consume the head of the internal trigger queue.
+  action(n_popTriggerQueue, "\n", desc="Pop trigger queue.") {
+    triggerQueue_in.dequeue();
+  }
+
+  // Consume the head of the local L1 request queue.
+  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
+    L1requestNetwork_in.dequeue();
+  }
+
+
+  // If all internal acks have arrived, fire an ALL_ACKS trigger to advance
+  // the state machine.
+  action(o_checkForIntCompletion, "\o", desc="Check if we have received all the messages required for completion") {
+    if (L2_TBEs[address].NumIntPendingAcks == 0) {
+      enqueue(triggerQueue_out, TriggerMsg) {
+        out_msg.Address := address;
+        out_msg.Type := TriggerType:ALL_ACKS;
+      }
+    }
+  }
+
+  // Same check for the external ack count.
+  action(o_checkForExtCompletion, "\oo", desc="Check if we have received all the messages required for completion") {
+    if (L2_TBEs[address].NumExtPendingAcks == 0) {
+      enqueue(triggerQueue_out, TriggerMsg) {
+        out_msg.Address := address;
+        out_msg.Type := TriggerType:ALL_ACKS;
+      }
+    }
+  }
+
+
+  // Write the TBE's copy back to the directory: dirty data as a full
+  // writeback, clean data as an ack (data still attached for checking --
+  // see inline note).
+  action( qq_sendDataFromTBEToMemory, "qq", desc="Send data from TBE to directory") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Sender := machineID;
+      out_msg.SenderMachine := MachineType:L2Cache;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.Dirty := L2_TBEs[address].Dirty;
+      if (L2_TBEs[address].Dirty) {
+        out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
+        out_msg.DataBlk := L2_TBEs[address].DataBlk;
+        out_msg.MessageSize := MessageSizeType:Writeback_Data;
+      } else {
+        out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_ACK;
+        // NOTE: in a real system this would not send data. We send
+        // data here only so we can check it at the memory
+        out_msg.DataBlk := L2_TBEs[address].DataBlk;
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  // Touch the cache line's replacement state (only if it is resident).
+  action( r_setMRU, "\rrr", desc="manually set the MRU bit for cache line" ) {
+    if(isCacheTagPresent(address)) {
+      L2cacheMemory.setMRU(address);
+    }
+  }
+
+  // Remember which local L1 issued the GETX we are servicing.
+  action( s_recordGetXL1ID, "ss", desc="record local GETX requestor") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      L2_TBEs[address].L1_GetX_ID := in_msg.Requestor;
+    }
+  }
+
+  // Release the transaction's TBE.
+  action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
+    L2_TBEs.deallocate(address);
+  }
+
+  // Add the local L1 GETS requestor to the set awaiting data.
+  action( s_recordGetSL1ID, "\ss", desc="record local GETS requestor") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      L2_TBEs[address].L1_GetS_IDs.add(in_msg.Requestor);
+    }
+  }
+
+  // Remember the external GETX requestor and its ack requirement.
+  action(t_recordFwdXID, "t", desc="record global GETX requestor") {
+    peek(requestNetwork_in, RequestMsg) {
+      L2_TBEs[address].Fwd_GetX_ID := in_msg.Requestor;
+      L2_TBEs[address].Fwd_GETX_ExtAcks := in_msg.Acks;
+    }
+  }
+
+  // Remember the external GETS requestor (single entry; set is reset first).
+  action(t_recordFwdSID, "\t", desc="record global GETS requestor") {
+    peek(requestNetwork_in, RequestMsg) {
+      L2_TBEs[address].Fwd_GetS_IDs.clear();
+      L2_TBEs[address].Fwd_GetS_IDs.add(in_msg.Requestor);
+    }
+  }
+
+
+  // Install response data into the cache entry; the dirty bit is sticky
+  // (set if incoming data is dirty, never cleared here).
+  action(u_writeDataToCache, "u", desc="Write data to cache") {
+    peek(responseNetwork_in, ResponseMsg) {
+      getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+      if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
+        getL2CacheEntry(address).Dirty := in_msg.Dirty;
+      }
+    }
+  }
+
+  // Allocate a cache entry for this address.
+  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
+    L2cacheMemory.allocate(address);
+  }
+
+  // Free the cache entry.
+  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+    L2cacheMemory.deallocate(address);
+  }
+
+  // Debug check: a response's data must match what the cache holds.
+  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
+    peek(responseNetwork_in, ResponseMsg) {
+      assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
+    }
+  }
+
+  // Record this demand miss in the profiler.
+  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      // AccessModeType not implemented
+      profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
+    }
+  }
+
+
+
+  // Move sharing state from the (soon to be freed) cache entry into the
+  // local directory.
+  action(y_copyCacheStateToDir, "y", desc="Copy cache state to directory state") {
+
+    assert(isCacheTagPresent(address));
+    copyCacheStateToDir(address);
+
+  }
+
+  // Inverse of the above: restore cache state and free the directory entry.
+  // NOTE(review): shorthand "/y" uses a forward slash where every other
+  // action uses backslash escapes -- confirm this is intentional.
+  action(y_copyDirToCacheAndRemove, "/y", desc="Copy dir state to cache and remove") {
+    copyDirToCache(address);
+    localDirectory.deallocate(address);
+  }
+
+  // No-op placeholder used to stall a transition.
+  action(z_stall, "z", desc="Stall") {
+  }
+
+  // Recycle actions: move the head message to the back of its queue so the
+  // machine can retry it later; the requestor/sender is appended to the
+  // transition trace for debugging.
+  action(zz_recycleL1RequestQueue, "zz", desc="Send the head of the mandatory queue to the back of the queue.") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+    }
+    L1requestNetwork_in.recycle();
+  }
+
+  action(zz_recycleRequestQueue, "\zz", desc="Send the head of the mandatory queue to the back of the queue.") {
+    peek(requestNetwork_in, RequestMsg) {
+      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+    }
+    requestNetwork_in.recycle();
+  }
+
+  action(zz_recycleResponseQueue, "\z\z", desc="Send the head of the mandatory queue to the back of the queue.") {
+    peek(responseNetwork_in, ResponseMsg) {
+      APPEND_TRANSITION_COMMENT(in_msg.Sender);
+    }
+    responseNetwork_in.recycle();
+  }
+
+
+
+  //*****************************************************
+  // TRANSITIONS
+  //*****************************************************
+
+  // While any transient state is in flight, conflicting requests are
+  // recycled (re-queued) rather than serviced.
+  transition({II, IFGX, IFGS, ISFGS, IFGXX, IFLXO, OFGX, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX, OLSXS, IGS, IGM, IGMLS, IGMO, IGMIO, OGMIO, IGMIOF, OGMIOF, MM, SS, OO, OI, MI, MII, OLSI, ILSI, SLSS, OLSS, OLSF, IGMIOFS}, {L1_PUTO, L1_PUTS, L1_PUTS_only, L1_PUTX}) {
+    zz_recycleL1RequestQueue;
+  }
+
+  transition({II, IFGX, IFGS, ISFGS, IFGXX, IFLXO, OFGX, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX, OLSXS, IGS, IGM, IGMLS, IGMO, IGMIO, OGMIO, IGMIOF, OGMIOF, MM, SS, OO, OI, MI, MII, OLSI, ILSI, SLSS, OLSS, OLSF, IGMIOFS}, {L1_GETX, L1_GETS}) {
+    zz_recycleL1RequestQueue;
+  }
+
+  transition({IFGX, IFGS, ISFGS, IFGXX, IFLXO, OFGX, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, ILXW, OW, SW, OXW, OLSXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX,OLSXS, IGS, IGM, IGMLS, IGMO, MM, SS, OO, OI, MI, MII, OLSI, ILSI, SLSS, OLSS, OLSF, IGMIOFS}, L2_Replacement) {
+    zz_recycleResponseQueue;
+  }
+
+  transition({IFGX, IFGS, ISFGS, IFGXX, IFLXO, OFGX, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX,OLSXS, MM, SS, OO, SLSS, OLSS, OLSF, IGMIOFS}, {Fwd_GETX, Fwd_GETS, Inv}) {
+    zz_recycleRequestQueue;
+  }
+
+  // Writeback races: PUTs that arrive after the protocol has already
+  // changed the block's status are nacked; PUTs from a still-valid owner
+  // are acked with a request for the data.
+
+  // must happened because we forwarded GETX to local exclusive trying to do wb
+  transition({I, M, O, ILS, ILOX, OLS, OLSX, SLS, S}, L1_PUTX) {
+    ll_writebackNack;
+    o_popL1RequestQueue;
+  }
+
+  transition({M}, {L1_PUTS, L1_PUTO} ) {
+    ll_writebackNack;
+    o_popL1RequestQueue;
+  }
+
+  transition({ILS, OLSX}, L1_PUTO){
+    ll_writebackNack;
+    o_popL1RequestQueue;
+  }
+
+  // happened if we forwarded GETS to exclusive who tried to do writeback
+  // ?? should we just Nack these instead? Could be a bugs here
+  transition(ILO, L1_PUTX, ILOW) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+  // this can happen if we forwarded a L1_GETX to exclusiver after it issued a PUTX
+  transition(ILOS, L1_PUTX, ILOSW) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILOSX, L1_PUTX, ILOSXW) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+  // must happened because we got Inv when L1 attempted PUTS
+  transition(I, L1_PUTS) {
+    ll_writebackNack;
+    o_popL1RequestQueue;
+  }
+
+  transition(I, L1_PUTO) {
+    ll_writebackNack;
+    o_popL1RequestQueue;
+  }
+
+  // FORWARDED REQUESTS
+  // External GETS/GETX forwarded by the directory while a local L1 holds
+  // the block: allocate a TBE, pull the data from the local owner, then
+  // respond on its behalf.
+
+  transition({ILO, ILX, ILOX}, Fwd_GETS, IFGS) {
+    i_allocateTBE;
+    t_recordFwdSID;
+    j_forwardGlobalRequestToLocalOwner;
+    m_popRequestQueue;
+  }
+
+  transition({ILOS, ILOSX}, Fwd_GETS, ISFGS) {
+    i_allocateTBE;
+    t_recordFwdSID;
+    j_forwardGlobalRequestToLocalOwner;
+    m_popRequestQueue;
+  }
+
+  transition(IFGS, Data, ILO) {
+    i_copyDataToTBE;
+    c_sendDataFromTBEToFwdGETS;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition(ISFGS, Data, ILOS) {
+    i_copyDataToTBE;
+    c_sendDataFromTBEToFwdGETS;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  // Owner handed over its exclusive copy: pass exclusivity on and drop
+  // all local bookkeeping.
+  transition(IFGS, Data_Exclusive, I) {
+    i_copyDataToTBE;
+    c_sendExclusiveDataFromTBEToFwdGETS;
+    gg_clearLocalSharers;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition({ILX, ILO, ILOX}, Fwd_GETX, IFGX) {
+    i_allocateTBE;
+    t_recordFwdXID;
+    j_forwardGlobalRequestToLocalOwner;
+    m_popRequestQueue;
+  }
+
+  transition(IFGX, {Data_Exclusive, Data}, I) {
+    i_copyDataToTBE;
+    c_sendDataFromTBEToFwdGETX;
+    gg_clearLocalSharers;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  // GETX with local sharers: invalidate them too and count the owner's
+  // data reply as one extra internal ack.
+  transition({ILOSX, ILOS}, Fwd_GETX, IFGXX) {
+    i_allocateTBE;
+    t_recordFwdXID;
+    j_forwardGlobalRequestToLocalOwner;
+    ee_sendLocalInvSharersOnly;
+    ee_addLocalIntAck;
+    m_popRequestQueue;
+  }
+
+  transition(IFGXX, IntAck) {
+    m_decrementNumberOfMessagesInt;
+    o_checkForIntCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(IFGXX, Data_Exclusive) {
+    i_copyDataToTBE;
+    m_decrementNumberOfMessagesInt;
+    o_checkForIntCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(IFGXX, All_Acks, I) {
+    c_sendDataFromTBEToFwdGETX;
+    gg_clearLocalSharers;
+    s_deallocateTBE;
+    n_popTriggerQueue;
+  }
+
+
+ // transition({O, OX}, Fwd_GETX, I) {
+ transition(O, Fwd_GETX, I) {
+ dd_sendDataToFwdGETX;
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ m_popRequestQueue;
+ }
+
+ transition({O, OLS}, Fwd_GETS) {
+ dd_sendDataToFwdGETS;
+ m_popRequestQueue;
+ }
+
+ // transition({OLSX, OX}, Fwd_GETS, O) {
+ transition(OLSX, Fwd_GETS, OLS) {
+ dd_sendDataToFwdGETS;
+ m_popRequestQueue;
+ }
+
+
+ transition(M, Fwd_GETX, I) {
+ dd_sendDataToFwdGETX;
+ rr_deallocateL2CacheBlock;
+ m_popRequestQueue;
+ }
+
+ // MAKE THIS THE SAME POLICY FOR NOW
+
+ // transition(M, Fwd_GETS, O) {
+ // dd_sendDataToFwdGETS;
+ // m_popRequestQueue;
+ // }
+
+ transition(M, Fwd_GETS, I) {
+ dd_sendExclusiveDataToFwdGETS;
+ rr_deallocateL2CacheBlock;
+ m_popRequestQueue;
+ }
+
+
+ transition({OLS, OLSX}, Fwd_GETX, OLSF) {
+ i_allocateTBE;
+ t_recordFwdXID;
+ ee_sendLocalInv;
+ m_popRequestQueue;
+ }
+
+ transition(OLSF, IntAck) {
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(OLSF, All_Acks, I) {
+ c_sendDataFromTBEToFwdGETX;
+ gg_clearLocalSharers;
+ s_deallocateTBE;
+ rr_deallocateL2CacheBlock;
+ n_popTriggerQueue;
+ }
+
+
+
+  // INVALIDATIONS FROM GLOBAL DIRECTORY
+  // Ack immediately when nothing is cached locally; otherwise invalidate
+  // the local L1 copies first and ack once all internal acks arrive.
+
+  transition({IGM, IGS}, Inv) {
+    t_recordFwdXID;
+    e_sendAck;
+    m_popRequestQueue;
+  }
+
+  transition({I,NP}, Inv) {
+    i_allocateTBE;
+    t_recordFwdXID;
+    e_sendAck;
+    s_deallocateTBE;
+    m_popRequestQueue;
+  }
+
+  // NEED INV for S state
+
+  transition({ILS, ILO, ILX}, Inv, II) {
+    i_allocateTBE;
+    t_recordFwdXID;
+    ee_sendLocalInv;
+    gg_clearLocalSharers;
+    m_popRequestQueue;
+  }
+
+  transition(SLS, Inv, II) {
+    i_allocateTBE;
+    t_recordFwdXID;
+    ee_sendLocalInv;
+    rr_deallocateL2CacheBlock;
+    m_popRequestQueue;
+  }
+
+  transition(II, IntAck) {
+    m_decrementNumberOfMessagesInt;
+    o_checkForIntCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(II, All_Acks, I) {
+    e_sendAck;
+    s_deallocateTBE;
+    n_popTriggerQueue;
+  }
+
+  transition(S, Inv, I) {
+    i_allocateTBE;
+    t_recordFwdXID;
+    e_sendAck;
+    s_deallocateTBE;
+    rr_deallocateL2CacheBlock;
+    m_popRequestQueue;
+  }
+
+
+ // LOCAL REQUESTS SATISFIED LOCALLY
+
+  // Local GETX on a block this L2 owns with local sharers: invalidate the
+  // other sharers, hand the data to the requestor, and push the sharing
+  // state into the local directory before freeing the cache entry.
+  // Fix: d_sendDataToL1GETX was missing its terminating ';', which is a
+  // SLICC statement-separator parse error.
+  transition(OLSX, L1_GETX, IFLOX) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    // count number of INVs needed that doesn't include requestor
+    h_countLocalSharersExceptRequestor;
+    // issue INVs to everyone except requestor
+    ee_issueLocalInvExceptL1Requestor;
+    d_sendDataToL1GETX;
+    y_copyCacheStateToDir;
+    r_setMRU;
+    rr_deallocateL2CacheBlock;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  // LOCAL REQUESTS SATISFIED LOCALLY (continued): unblock handling and
+  // peer-to-peer forwarding between local L1s.
+
+  transition(IFLOX, Exclusive_Unblock, ILX) {
+    g_recordLocalExclusive;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition(OLSX, L1_GETS, OLSXS) {
+    d_sendDataToL1GETS;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(OLSXS, Unblock, OLSX) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+  // after this, can't get Fwd_GETX
+  transition(IGMO, Own_GETX) {
+    mm_decrementNumberOfMessagesExt;
+    o_checkForExtCompletion;
+    m_popRequestQueue;
+
+  }
+
+  transition(ILX, L1_GETS, IFLOXX) {
+    kk_forwardLocalGETSToLocalOwner;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILOSX, L1_GETS, IFLOSX) {
+    kk_forwardLocalGETSToLocalOwner;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition({ILOS, ILO}, L1_GETS, IFLO) {
+    kk_forwardLocalGETSToLocalOwner;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILS, L1_GETS, IFLS) {
+    k_forwardLocalGETSToLocalSharer;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition({ILX, ILOX}, L1_GETX, IFLOXX) {
+    kk_forwardLocalGETXToLocalExclusive;
+    e_sendAckToL1Requestor;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILOX, L1_GETS, IFLOX) {
+    kk_forwardLocalGETSToLocalOwner;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(IFLOX, Unblock, ILOSX) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+  transition(IFLS, Unblock, ILS) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+  transition(IFLOXX, Unblock, ILOSX) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+  transition(IFLOSX, Unblock, ILOSX) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+  transition({IFLOSX, IFLOXX}, Exclusive_Unblock, ILX) {
+    g_recordLocalExclusive;
+    n_popResponseQueue;
+  }
+
+  transition(IFLO, Unblock, ILOS) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+  transition(ILOSX, L1_GETX, IFLXO) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    h_countLocalSharersExceptRequestor;
+    ee_issueLocalInvExceptL1Requestor;
+    k_forwardLocalGETXToLocalOwner;
+    e_sendAckToL1RequestorFromTBE;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(IFLXO, Exclusive_Unblock, ILX) {
+    g_recordLocalExclusive;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+
+
+  // LOCAL REQUESTS THAT MUST ISSUE
+  // Misses that cannot be satisfied within this chip: allocate a TBE and
+  // issue a GETS/GETX to the global directory.
+
+  transition(NP, {L1_PUTS, L1_PUTX, L1_PUTO}) {
+    ll_writebackNack;
+    o_popL1RequestQueue;
+  }
+
+  transition({NP, I}, L1_GETS, IGS) {
+    i_allocateTBE;
+    s_recordGetSL1ID;
+    a_issueGETS;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition({NP, I}, L1_GETX, IGM) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    a_issueGETX;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(S, L1_GETX, IGM) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    a_issueGETX;
+    y_copyCacheStateToDir;
+    r_setMRU;
+    rr_deallocateL2CacheBlock;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILS, L1_GETX, IGMLS) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    a_issueGETX;
+    // count number of INVs (just sharers?) needed that doesn't include requestor
+    h_countLocalSharersExceptRequestor;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  // While our own GETX is outstanding, a directory Inv triggers local
+  // invalidation before we ack.
+  transition(IGMLS, Inv) {
+    t_recordFwdXID;
+    ee_sendLocalInv;
+    m_popRequestQueue;
+  }
+
+  transition(IGMLS, IntAck) {
+    m_decrementNumberOfMessagesInt;
+    o_checkForIntCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(IGMLS, All_Acks, IGM) {
+    gg_clearLocalSharers;
+    h_clearIntAcks;
+    e_sendAck;
+    n_popTriggerQueue;
+  }
+
+  // transition(IGMLS, ExtAck, IGMO) {
+  transition(IGMLS, ExtAck) {
+    m_decrementNumberOfMessagesExt;
+    o_checkForExtCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(IGMLS, {Data, Data_Exclusive}, IGMO) {
+    ee_issueLocalInvExceptL1RequestorInTBE;
+    i_copyDataToTBE;
+    m_decrementNumberOfMessagesExt;
+    o_checkForExtCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(ILOS, L1_GETX, IGMIO) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    a_issueGETX;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  // new exclusive happened while sharer attempted writeback
+  transition(ILX, {L1_PUTS, L1_PUTS_only, L1_PUTO}) {
+    ll_writebackNack;
+    o_popL1RequestQueue;
+  }
+
+  transition(S, L1_PUTS) {
+    ll_writebackNack;
+    o_popL1RequestQueue;
+  }
+
+  // Owner with local sharers upgrading to exclusive: issue GETX globally
+  // and preserve sharing state in the local directory.
+  transition(OLS, L1_GETX, OGMIO) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    a_issueGETX;
+    h_countLocalSharersExceptRequestor;
+    // COPY DATA FROM CACHE TO TBE (happens during i_allocateTBE)
+    y_copyCacheStateToDir;
+    rr_deallocateL2CacheBlock;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  // Still owner while our GETX is outstanding: keep servicing forwarded
+  // GETS from the TBE's copy.
+  transition(OGMIO, Fwd_GETS) {
+    t_recordFwdSID;
+    c_sendDataFromTBEToFwdGETS;
+    m_popRequestQueue;
+  }
+
+  transition(ILO, L1_GETX, IGMIO) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    a_issueGETX;
+    // the following, of course, returns 0 sharers but do anyways for consistency
+    h_countLocalSharersExceptRequestor;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition({ILO, ILOX}, L1_PUTS) {
+    ll_writebackNack;
+    o_popL1RequestQueue;
+  }
+
+  // Races between our outstanding GETX and forwarded external requests.
+  transition(IGMIO, Fwd_GETX, IGMIOF) {
+    t_recordFwdXID;
+    j_forwardGlobalRequestToLocalOwner;
+    ee_sendLocalInvSharersOnly;
+    ee_addLocalIntAck;
+    m_popRequestQueue;
+  }
+
+  transition(IGMIO, Fwd_GETS, IGMIOFS) {
+    t_recordFwdSID;
+    j_forwardGlobalRequestToLocalOwner;
+    m_popRequestQueue;
+  }
+
+  transition(IGMIOFS, Data, IGMIO) {
+    i_copyDataToTBE;
+    c_sendDataFromTBEToFwdGETS;
+    n_popResponseQueue;
+  }
+
+  transition(OGMIO, Fwd_GETX, OGMIOF) {
+    t_recordFwdXID;
+    ee_sendLocalInvSharersOnly;
+    m_popRequestQueue;
+  }
+
+  transition(OGMIOF, IntAck) {
+    m_decrementNumberOfMessagesInt;
+    o_checkForIntCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(OGMIOF, All_Acks, IGM) {
+    gg_clearLocalSharers;
+    hh_countLocalSharersExceptL1GETXRequestorInTBE;
+    c_sendDataFromTBEToFwdGETX;
+    n_popTriggerQueue;
+  }
+
+  transition(IGMIOF, IntAck) {
+    m_decrementNumberOfMessagesInt;
+    o_checkForIntCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(IGMIOF, Data_Exclusive) {
+    i_copyDataToTBE;
+    m_decrementNumberOfMessagesInt;
+    o_checkForIntCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(IGMIOF, All_Acks, IGM) {
+    gg_clearLocalSharers;
+    c_sendDataFromTBEToFwdGETX;
+    n_popTriggerQueue;
+  }
+
+  transition(IGMIO, All_Acks, IGMO) {
+    hh_countLocalSharersExceptL1GETXRequestorInTBE;
+    ee_issueLocalInvExceptL1RequestorInTBE;
+    k_forwardLocalGETXToLocalOwner;
+    e_sendAckToL1RequestorFromTBE;
+    n_popTriggerQueue;
+  }
+
+  transition(OGMIO, All_Acks, IGMO) {
+    ee_issueLocalInvExceptL1RequestorInTBE;
+    c_sendDataFromTBEToL1GETX;
+    n_popTriggerQueue;
+  }
+
+  // Completion of outstanding global GETS/GETX: gather data + ext acks,
+  // deliver to the waiting L1, then unblock the directory.
+
+  transition({IGMIO, OGMIO}, Own_GETX) {
+    mm_decrementNumberOfMessagesExt;
+    o_checkForExtCompletion;
+    m_popRequestQueue;
+
+  }
+
+  transition(IGM, {Data, Data_Exclusive}, IGMO) {
+    i_copyDataToTBE;
+    m_decrementNumberOfMessagesExt;
+    o_checkForExtCompletion;
+    n_popResponseQueue;
+  }
+
+  transition({IGM, IGMIO, OGMIO}, ExtAck) {
+    m_decrementNumberOfMessagesExt;
+    o_checkForExtCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(IGMO, ExtAck) {
+    m_decrementNumberOfMessagesExt;
+    o_checkForExtCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(IGS, Data) {
+    i_copyDataToTBE;
+    m_decrementNumberOfMessagesExt;
+    c_sendDataFromTBEToL1GETS;
+    n_popResponseQueue;
+  }
+
+  transition(IGS, Data_Exclusive) {
+    i_copyDataToTBE;
+    m_decrementNumberOfMessagesExt;
+    c_sendExclusiveDataFromTBEToL1GETS;
+    n_popResponseQueue;
+  }
+
+  transition(IGS, Unblock, ILS) {
+    g_recordLocalSharer;
+    f_sendUnblock;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition(IGS, Exclusive_Unblock, ILX) {
+    g_recordLocalExclusive;
+    f_sendExclusiveUnblock;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition(IGMO, All_Acks) {
+    c_sendDataFromTBEToL1GETX;
+    n_popTriggerQueue;
+  }
+
+  transition(IGMO, Exclusive_Unblock, ILX) {
+    g_recordLocalExclusive;
+    f_sendExclusiveUnblock;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+
+  // Local L1 upgrade/read requests against lines this L2 holds (or tracks)
+  // alongside local sharers.  GETX paths allocate a TBE and go global;
+  // GETS paths are satisfied locally from the cache.
+  transition(SLS, L1_GETX, IGMLS) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    a_issueGETX;
+    // count number of INVs needed that doesn't include requestor
+    h_countLocalSharersExceptRequestor;
+    // issue INVs to everyone except requestor
+    y_copyCacheStateToDir;
+    rr_deallocateL2CacheBlock;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+
+  }
+
+  transition(SLS, L1_GETS, SLSS ) {
+    d_sendDataToL1GETS;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(SLSS, Unblock, SLS) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+
+  transition(O, L1_GETX, IGMO) {
+    i_allocateTBE;
+    s_recordGetXL1ID;
+    a_issueGETX;
+    y_copyCacheStateToDir;
+    rr_deallocateL2CacheBlock;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(OLS, L1_GETS, OLSS) {
+    d_sendDataToL1GETS;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(OLSS, Unblock, OLS) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+  // A forwarded GETX arrives after our GETX completed locally but before
+  // the L1 unblocked: satisfy it from the TBE copy.
+  transition(IGMO, Fwd_GETX, IGM) {
+    t_recordFwdXID;
+    c_sendDataFromTBEToFwdGETX;
+    m_popRequestQueue;
+
+  }
+
+  transition(IGMO, Fwd_GETS) {
+    t_recordFwdSID;
+    c_sendDataFromTBEToFwdGETS;
+    m_popRequestQueue;
+  }
+
+ // LOCAL REQUESTS SATISFIED DIRECTLY BY L2
+
+  // Local L1 GETX hits this L2 in M: supply the data via the GETX data
+  // action, migrate the cache state into the local directory, and free
+  // the L2 block.  The TBE exists only for the sharer-count bookkeeping
+  // (which must see zero sharers in M) and is released immediately.
+  transition(M, L1_GETX, MM) {
+    i_allocateTBE;
+    // should count 0 of course
+    h_countLocalSharersExceptRequestor;
+    d_sendDataToL1GETX; // fixed: was missing the ';' statement terminator
+    y_copyCacheStateToDir;
+    rr_deallocateL2CacheBlock;
+    s_deallocateTBE;
+    o_popL1RequestQueue;
+  }
+
+  // The requesting L1 confirmed exclusive ownership; record it and settle
+  // into the local-exclusive directory state.
+  transition(MM, Exclusive_Unblock, ILX) {
+    g_recordLocalExclusive;
+    n_popResponseQueue;
+  }
+
+  transition(M, L1_GETS, OO) {
+    i_allocateTBE;
+    // should count 0 of course
+    h_countLocalSharersExceptRequestor;
+    // NOTE(review): this uses the GETX data action for a GETS request.
+    // Presumably intentional — in M there are no other sharers, so the
+    // requestor may be granted the line exclusively (OO accepts both
+    // Unblock and Exclusive_Unblock below) — but confirm this is not a
+    // copy-paste of d_sendDataToL1GETS.
+    d_sendDataToL1GETX;
+    r_setMRU;
+    s_deallocateTBE;
+    o_popL1RequestQueue;
+  }
+
+  // Plain local GETS hits in S/O: send data, bump MRU, then record the
+  // new sharer when the L1's unblock arrives.
+  transition(S, L1_GETS, SS) {
+    d_sendDataToL1GETS;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(SS, Unblock, SLS) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+  transition(O, L1_GETS, OO) {
+    d_sendDataToL1GETS;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(OO, Unblock, OLS) {
+    g_recordLocalSharer;
+    n_popResponseQueue;
+  }
+
+  // The requestor took the line exclusive (per the unblock type): record
+  // exclusive ownership, fold the cache state into the local directory,
+  // and free the L2 block.
+  transition(OO, Exclusive_Unblock, ILX) {
+    g_recordLocalExclusive; // fixed: was missing the ';' statement terminator
+    y_copyCacheStateToDir;
+    rr_deallocateL2CacheBlock;
+    n_popResponseQueue;
+  }
+
+
+  // L1 WRITEBACKS
+  // PUTO/PUTS/PUTX from local L1s: ack (requesting data or dropping it),
+  // wait in a *W state for the data/unblock, then install the data in the
+  // L2 cache and fix up the local sharer/owner directory bits.
+  transition(ILO, L1_PUTO, ILOW) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILOX, L1_PUTO, ILOXW) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+
+  transition(ILOS, L1_PUTO, ILOSW) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILOSX, L1_PUTO, ILOSXW) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+
+  // hmmm...keep data or drop. Just drop for now
+  transition(ILOS, L1_PUTS_only, ILOW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILSW, Unblock, ILS) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(ILOW, Unblock, ILO) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(ILOSX, L1_PUTS_only, ILOXW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILOXW, Unblock, ILOX) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  // hmmm...keep data or drop. Just drop for now
+  transition(ILOS, L1_PUTS, ILOSW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILOSX, L1_PUTS, ILOSXW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILOSW, Unblock, ILOS) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(ILOSXW, Unblock, ILOSX) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(SLS, L1_PUTS, SLSW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(SLS, L1_PUTS_only, SW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(SW, {Unblock}, S) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(OLS, L1_PUTS, OLSW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILS, L1_PUTS, ILSW) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILS, L1_PUTS_only, IW) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+  transition(OLS, L1_PUTS_only, OW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(OLSX, L1_PUTS_only, OXW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(OLSX, L1_PUTS, OLSXW) {
+    l_writebackAckDropData;
+    o_popL1RequestQueue;
+  }
+
+  transition(OLSXW, {Unblock}, OLSX) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(OW, {Unblock}, O) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(OXW, {Unblock}, M) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(ILX, L1_PUTX, ILXW ) {
+    l_writebackAckNeedData;
+    o_popL1RequestQueue;
+  }
+
+  transition(ILXW, L1_WBDIRTYDATA, M) {
+    gg_clearLocalSharers;
+    vv_allocateL2CacheBlock;
+    y_copyDirToCacheAndRemove;
+    u_writeDataToCache;
+    n_popResponseQueue;
+  }
+
+  // clean writeback
+  transition(ILXW, L1_WBCLEANDATA, M) {
+    gg_clearLocalSharers;
+    vv_allocateL2CacheBlock;
+    y_copyDirToCacheAndRemove;
+    u_writeDataToCache;
+    n_popResponseQueue;
+  }
+
+  transition(ILXW, Unblock, ILX) {
+    // writeback canceled because L1 invalidated
+    n_popResponseQueue;
+  }
+
+  transition(ILSW, L1_WBCLEANDATA, SLS) {
+    vv_allocateL2CacheBlock;
+    y_copyDirToCacheAndRemove;
+    u_writeDataToCache;
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(IW, L1_WBCLEANDATA, S) {
+    vv_allocateL2CacheBlock;
+    y_copyDirToCacheAndRemove;
+    u_writeDataToCache;
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+
+  }
+
+  // Owner can have dirty data
+  transition(ILOW, {L1_WBCLEANDATA, L1_WBDIRTYDATA}, O) {
+    vv_allocateL2CacheBlock;
+    y_copyDirToCacheAndRemove;
+    gg_clearOwnerFromL1Response;
+    u_writeDataToCache;
+    n_popResponseQueue;
+  }
+
+  transition(ILOXW, L1_WBDIRTYDATA, M) {
+    vv_allocateL2CacheBlock;
+    y_copyDirToCacheAndRemove;
+    gg_clearOwnerFromL1Response;
+    u_writeDataToCache;
+    n_popResponseQueue;
+  }
+
+  transition(ILOXW, L1_WBCLEANDATA, M) {
+    vv_allocateL2CacheBlock;
+    y_copyDirToCacheAndRemove;
+    gg_clearOwnerFromL1Response;
+    u_writeDataToCache;
+    n_popResponseQueue;
+  }
+
+  transition(ILOSW, {L1_WBCLEANDATA, L1_WBDIRTYDATA}, OLS) {
+    vv_allocateL2CacheBlock;
+    y_copyDirToCacheAndRemove;
+    gg_clearOwnerFromL1Response;
+    u_writeDataToCache;
+    n_popResponseQueue;
+  }
+
+  transition(ILOSXW, {L1_WBCLEANDATA, L1_WBDIRTYDATA}, OLSX) {
+    vv_allocateL2CacheBlock;
+    y_copyDirToCacheAndRemove;
+    gg_clearOwnerFromL1Response;
+    u_writeDataToCache;
+    n_popResponseQueue;
+  }
+
+
+  transition(SLSW, {Unblock}, SLS) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+  transition(OLSW, {Unblock}, OLS) {
+    gg_clearSharerFromL1Response;
+    n_popResponseQueue;
+  }
+
+
+  // L2 WRITEBACKS
+  // Replacement of L2 blocks.  Clean/invalid-tracked lines just fold
+  // their state into the local directory; owned/modified lines issue a
+  // global PUTO/PUTX and ride out forwarded requests from the TBE copy
+  // until the global directory acks or nacks the writeback.
+  transition({I, S}, L2_Replacement, I) {
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(ILS, L2_Replacement) {
+    y_copyCacheStateToDir;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(ILX, L2_Replacement ) {
+    y_copyCacheStateToDir;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition({ILO, ILOS}, L2_Replacement ) {
+    y_copyCacheStateToDir;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(SLS, L2_Replacement, ILS) {
+    y_copyCacheStateToDir;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition({OLS, OLSX}, L2_Replacement, OLSI) {
+    y_copyCacheStateToDir;
+    b_issuePUTO_ls;
+    i_allocateTBE;
+    rr_deallocateL2CacheBlock;
+  }
+
+
+  transition(O, L2_Replacement, OI) {
+    b_issuePUTO;
+    i_allocateTBE;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(M, L2_Replacement, MI) {
+    b_issuePUTX;
+    i_allocateTBE;
+    rr_deallocateL2CacheBlock;
+  }
+
+  // Forwarded GETX during an owned-with-local-sharers writeback: the
+  // local sharers must be invalidated before the data can be handed over.
+  transition(OLSI, Fwd_GETX, ILSI) {
+    t_recordFwdXID;
+    ee_sendLocalInv;
+    m_popRequestQueue;
+  }
+
+  transition(ILSI, IntAck) {
+    m_decrementNumberOfMessagesInt;
+    o_checkForIntCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(ILSI, All_Acks, MII) {
+    gg_clearLocalSharers;
+    c_sendDataFromTBEToFwdGETX;
+    n_popTriggerQueue;
+  }
+
+  transition(OLSI, Fwd_GETS) {
+    t_recordFwdSID;
+    c_sendDataFromTBEToFwdGETS;
+    m_popRequestQueue;
+  }
+
+  transition({MI, OI}, Fwd_GETS, OI) {
+    t_recordFwdSID;
+    c_sendDataFromTBEToFwdGETS;
+    m_popRequestQueue;
+  }
+
+  transition({MI, OI}, Fwd_GETX, MII) {
+    t_recordFwdXID;
+    c_sendDataFromTBEToFwdGETX;
+    m_popRequestQueue;
+  }
+
+  transition({MI, OI}, Writeback_Ack, I) {
+    qq_sendDataFromTBEToMemory;
+    s_deallocateTBE;
+    m_popRequestQueue;
+  }
+
+  transition(MII, Writeback_Nack, I) {
+    s_deallocateTBE;
+    m_popRequestQueue;
+  }
+
+  // Nacked while still owner: retry the PUTO.
+  transition(OI, Writeback_Nack) {
+    b_issuePUTO;
+    m_popRequestQueue;
+  }
+
+  transition(OLSI, Writeback_Ack, ILS) {
+    qq_sendDataFromTBEToMemory;
+    s_deallocateTBE;
+    m_popRequestQueue;
+  }
+
+  // Data already went to a forwarded GETX; only an unblock is owed.
+  transition(MII, Writeback_Ack, I) {
+    f_sendUnblock;
+    s_deallocateTBE;
+    m_popRequestQueue;
+  }
+
+  transition(ILSI, Writeback_Ack, ILS) {
+    f_sendUnblock;
+    s_deallocateTBE;
+    m_popRequestQueue;
+  }
+}
+
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
new file mode 100644
index 000000000..a016836c2
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -0,0 +1,573 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+machine(Directory, "Directory protocol") {
+
+  // ** IN QUEUES **
+  // NOTE(review): foo1/goo1 occupy virtual network 0; foo1's in_port body
+  // is empty and nothing visibly enqueues to goo1, so they appear to be
+  // placeholders that keep virtual-network numbering consistent — confirm.
+  MessageBuffer foo1, network="From", virtual_network="0", ordered="false";  // a mod-L2 bank -> this Dir
+  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";  // a mod-L2 bank -> this Dir
+  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";  // a mod-L2 bank -> this Dir
+
+  MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
+  MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
+  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";  // Dir -> mod-L2 bank
+
+
+  // STATES
+  enumeration(State, desc="Directory states", default="Directory_State_I") {
+    // Base states
+    I, desc="Invalid";
+    S, desc="Shared";
+    O, desc="Owner";
+    M, desc="Modified";
+
+    // Transient states: blocked until the requestor unblocks us.
+    IS, desc="Blocked, was in idle";
+    SS, desc="Blocked, was in shared";
+    OO, desc="Blocked, was in owned";
+    MO, desc="Blocked, going to owner or maybe modified";
+    MM, desc="Blocked, going to modified";
+
+    MI, desc="Blocked on a writeback";
+    MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
+    OS, desc="Blocked on a writeback";
+    OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
+  }
+
+  // Events
+  enumeration(Event, desc="Directory events") {
+    GETX, desc="A GETX arrives";
+    GETS, desc="A GETS arrives";
+    PUTX, desc="A PUTX arrives";
+    PUTO, desc="A PUTO arrives";
+    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
+    Unblock, desc="An unblock message arrives";
+    Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
+    Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
+    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
+    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
+  }
+
+  // TYPES
+
+  // DirectoryEntry
+  structure(Entry, desc="...") {
+    State DirectoryState,          desc="Directory state";
+    DataBlock DataBlk,             desc="data for the block";
+    NetDest Sharers,               desc="Sharers for this block";
+    NetDest Owner,                 desc="Owner of this block";
+    int WaitingUnblocks,           desc="Number of acks we're waiting for";
+  }
+
+  external_type(DirectoryMemory) {
+    Entry lookup(Address);
+    bool isPresent(Address);
+  }
+
+
+  // ** OBJECTS **
+
+  DirectoryMemory directory, constructor_hack="i";
+
+  // Return the current directory state for addr.
+  State getState(Address addr) {
+    return directory[addr].DirectoryState;
+  }
+
+  // Update the directory state, sanity-checking the per-state invariants
+  // on Owner/Sharers/WaitingUnblocks before committing the change.
+  void setState(Address addr, State state) {
+    if (directory.isPresent(addr)) {
+
+      if (state == State:I)  {
+        assert(directory[addr].Owner.count() == 0);
+        assert(directory[addr].Sharers.count() == 0);
+      }
+
+      if (state == State:S) {
+        assert(directory[addr].Owner.count() == 0);
+      }
+
+      if (state == State:O) {
+        assert(directory[addr].Owner.count() == 1);
+        assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
+      }
+
+      if (state == State:M) {
+        assert(directory[addr].Owner.count() == 1);
+        assert(directory[addr].Sharers.count() == 0);
+      }
+
+      if ((state != State:SS) && (state != State:OO)) {
+        assert(directory[addr].WaitingUnblocks == 0);
+      }
+
+      // Both arms assign the state; the split exists only to mark where a
+      // coherence-checker hook used to run on transitions into I.
+      if ( (directory[addr].DirectoryState != State:I) && (state == State:I) ) {
+        directory[addr].DirectoryState := state;
+         // disable coherence checker
+        // sequencer.checkCoherence(addr);
+      }
+      else {
+        directory[addr].DirectoryState := state;
+      }
+    }
+  }
+
+  // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
+  bool isBlockShared(Address addr) {
+    if (directory.isPresent(addr)) {
+      if (directory[addr].DirectoryState == State:I) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // See note above: in I the directory also counts as exclusive holder.
+  bool isBlockExclusive(Address addr) {
+    if (directory.isPresent(addr)) {
+      if (directory[addr].DirectoryState == State:I) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+
+  // ** OUT_PORTS **
+  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
+  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
+  out_port(goo1_out, ResponseMsg, goo1);
+
+  // ** IN_PORTS **
+
+  // Placeholder port on virtual network 0; intentionally handles nothing.
+  in_port(foo1_in, ResponseMsg, foo1) {
+
+  }
+
+  // Response network: unblocks and writeback completions from L2 banks.
+  // NOTE(review): CoherenceResponseType:WRITEBACK_CLEAN_DATA is declared
+  // in the msg file but not handled here, so receiving one would hit the
+  // error() arm — confirm no component sends it to the directory.
+  // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
+  //  if (unblockNetwork_in.isReady()) {
+  in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
+    if (unblockNetwork_in.isReady()) {
+      peek(unblockNetwork_in, ResponseMsg) {
+        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+          if (directory[in_msg.Address].WaitingUnblocks == 1) {
+            trigger(Event:Last_Unblock, in_msg.Address);
+          } else {
+            trigger(Event:Unblock, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
+          trigger(Event:Exclusive_Unblock, in_msg.Address);
+        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
+          trigger(Event:Dirty_Writeback, in_msg.Address);
+        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
+          trigger(Event:Clean_Writeback, in_msg.Address);
+        } else {
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+  // Request network: coherence requests from L2 banks.
+  in_port(requestQueue_in, RequestMsg, requestToDir) {
+    if (requestQueue_in.isReady()) {
+      peek(requestQueue_in, RequestMsg) {
+        if (in_msg.Type == CoherenceRequestType:GETS) {
+          trigger(Event:GETS, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GETX) {
+          trigger(Event:GETX, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+          trigger(Event:PUTX, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
+          trigger(Event:PUTO, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
+          trigger(Event:PUTO_SHARERS, in_msg.Address);
+        } else {
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+  // Actions
+
+  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:WB_ACK;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:WB_NACK;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  action(c_clearOwner, "c", desc="Clear the owner field") {
+    directory[address].Owner.clear();
+  }
+
+  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
+    directory[address].Sharers.addNetDest(directory[address].Owner);
+    directory[address].Owner.clear();
+  }
+
+  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
+    directory[address].Sharers.clear();
+  }
+
+  // Data response to the requestor; promoted to DATA_EXCLUSIVE on a GETS
+  // when no sharers exist.  Acks counts current sharers, excluding the
+  // requestor itself if it is one of them.
+  action(d_sendData, "d", desc="Send data to requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+      // enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+
+        if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
+          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+        } else {
+          out_msg.Type := CoherenceResponseType:DATA;
+        }
+
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:Directory;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+        out_msg.Dirty := false; // By definition, the block is now clean
+        out_msg.Acks := directory[address].Sharers.count();
+        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+          out_msg.Acks := out_msg.Acks - 1;
+        }
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      directory[address].Owner.clear();
+      directory[address].Owner.add(in_msg.Sender);
+    }
+  }
+
+  // Forward the request to the current owner; Acks mirrors d_sendData's
+  // sharer count so the requestor knows how many invalidation acks to expect.
+  action(f_forwardRequest, "f", desc="Forward request to owner") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := in_msg.Type;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
+        out_msg.Acks := directory[address].Sharers.count();
+        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+          out_msg.Acks := out_msg.Acks - 1;
+        }
+        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+      }
+    }
+  }
+
+  // Only sends if some sharer other than the requestor actually exists.
+  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
+    peek(requestQueue_in, RequestMsg) {
+      if ((directory[in_msg.Address].Sharers.count() > 1) ||
+          ((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
+        enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceRequestType:INV;
+          out_msg.Requestor := in_msg.Requestor;
+          // out_msg.Destination := directory[in_msg.Address].Sharers;
+          out_msg.Destination.addNetDest(directory[in_msg.Address].Sharers);
+          out_msg.Destination.remove(in_msg.Requestor);
+          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+        }
+      }
+    }
+  }
+
+  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+    requestQueue_in.dequeue();
+  }
+
+  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+    unblockNetwork_in.dequeue();
+  }
+
+  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      assert(in_msg.Dirty);
+      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
+      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+      DEBUG_EXPR(in_msg.Address);
+      DEBUG_EXPR(in_msg.DataBlk);
+    }
+  }
+
+  action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      assert(in_msg.Dirty == false);
+      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+
+      // NOTE: The following check would not be valid in a real
+      // implementation.  We include the data in the "dataless"
+      // message so we can assert the clean data matches the datablock
+      // in memory
+      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
+    }
+  }
+
+  action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      directory[address].Sharers.add(in_msg.Sender);
+    }
+  }
+
+  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
+    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
+  }
+
+  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
+    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
+    assert(directory[address].WaitingUnblocks >= 0);
+  }
+
+  // action(z_stall, "z", desc="Cannot be handled right now.") {
+    // Special name recognized as do nothing case
+  // }
+
+  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
+    requestQueue_in.recycle();
+  }
+
+  // TRANSITIONS
+  // Stable-state requests either answer from memory (d_sendData) or
+  // forward to the owner; the directory then blocks in a transient state
+  // until the requestor's (Exclusive_)Unblock or the writeback data lands.
+
+  transition(I, GETX, MM) {
+    d_sendData;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(S, GETX, MM) {
+    d_sendData;
+    g_sendInvalidations;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(I, GETS, IS) {
+    d_sendData;
+    i_popIncomingRequestQueue;
+  }
+
+  transition({S, SS}, GETS, SS) {
+    d_sendData;
+    n_incrementOutstanding;
+    i_popIncomingRequestQueue;
+  }
+
+  transition({I, S}, PUTO) {
+    b_sendWriteBackNack;
+    i_popIncomingRequestQueue;
+  }
+
+  transition({I, S, O}, PUTX) {
+    b_sendWriteBackNack;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(O, GETX, MM) {
+    f_forwardRequest;
+    g_sendInvalidations;
+    i_popIncomingRequestQueue;
+  }
+
+  transition({O, OO}, GETS, OO) {
+    f_forwardRequest;
+    n_incrementOutstanding;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, GETX, MM) {
+    f_forwardRequest;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, GETS, MO) {
+    f_forwardRequest;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, PUTX, MI) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  // happens if M->O transition happens on-chip
+  transition(M, PUTO, MI) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, PUTO_SHARERS, MIS) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(O, PUTO, OS) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(O, PUTO_SHARERS, OSS) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+
+  // While blocked, any further request is recycled until we unblock.
+  transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
+    zz_recycleRequest;
+  }
+
+  transition({MM, MO}, Exclusive_Unblock, M) {
+    cc_clearSharers;
+    e_ownerIsUnblocker;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MO, Unblock, O) {
+    m_addUnlockerToSharers;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
+    zz_recycleRequest;
+  }
+
+  transition(IS, GETS) {
+    zz_recycleRequest;
+  }
+
+  transition(IS, Unblock, S) {
+    m_addUnlockerToSharers;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(IS, Exclusive_Unblock, M) {
+    cc_clearSharers;
+    e_ownerIsUnblocker;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(SS, Unblock) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(SS, Last_Unblock, S) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OO, Unblock) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OO, Last_Unblock, O) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MI, Dirty_Writeback, I) {
+    c_clearOwner;
+    cc_clearSharers;
+    l_writeDataToMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MIS, Dirty_Writeback, S) {
+    c_moveOwnerToSharer;
+    l_writeDataToMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MIS, Clean_Writeback, S) {
+    c_moveOwnerToSharer;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OS, Dirty_Writeback, S) {
+    c_clearOwner;
+    l_writeDataToMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OSS, Dirty_Writeback, S) {
+    c_moveOwnerToSharer;
+    l_writeDataToMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OSS, Clean_Writeback, S) {
+    c_moveOwnerToSharer;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MI, Clean_Writeback, I) {
+    c_clearOwner;
+    cc_clearSharers;
+    ll_checkDataInMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OS, Clean_Writeback, S) {
+    c_clearOwner;
+    ll_checkDataInMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  // Writeback raced with a request we had already forwarded: the
+  // unblocker keeps the line; just release the block.
+  transition({MI, MIS}, Unblock, M) {
+    j_popIncomingUnblockQueue;
+  }
+
+  transition({OS, OSS}, Unblock, O) {
+    j_popIncomingUnblockQueue;
+  }
+}
diff --git a/src/mem/protocol/MOESI_CMP_directory-msg.sm b/src/mem/protocol/MOESI_CMP_directory-msg.sm
new file mode 100644
index 000000000..08b4abec3
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory-msg.sm
@@ -0,0 +1,126 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// CoherenceRequestType
+// Request/forward message types exchanged between L1/L2 and the directory.
+enumeration(CoherenceRequestType, desc="...") {
+  GETX,      desc="Get eXclusive";
+  GETS,      desc="Get Shared";
+  PUTX,      desc="Put eXclusive";
+  PUTO,      desc="Put Owned";
+  PUTO_SHARERS, desc="Put Owned, but sharers exist so don't remove from sharers list";
+  PUTS,      desc="Put Shared";
+  WB_ACK,    desc="Writeback ack";
+  WB_ACK_DATA, desc="Writeback ack, writeback should include data"; // fixed: desc was a copy-paste duplicate of WB_ACK's
+  WB_NACK,   desc="Writeback neg. ack";
+  INV,       desc="Invalidation";
+}
+
+// CoherenceResponseType
+// Response/unblock message types.  NOTE(review): WRITEBACK_CLEAN_DATA is
+// declared but the directory's unblock port only handles
+// WRITEBACK_CLEAN_ACK — confirm whether any controller sends it.
+enumeration(CoherenceResponseType, desc="...") {
+  ACK,                desc="ACKnowledgment, responder doesn't have a copy";
+  DATA,               desc="Data";
+  DATA_EXCLUSIVE,     desc="Data, no processor has a copy";
+  UNBLOCK,            desc="Unblock";
+  UNBLOCK_EXCLUSIVE,  desc="Unblock, we're in E/M";
+  WRITEBACK_CLEAN_DATA,   desc="Clean writeback (contains data)";
+  WRITEBACK_CLEAN_ACK,    desc="Clean writeback (contains no data)";
+  WRITEBACK_DIRTY_DATA,   desc="Dirty writeback (contains data)";
+}
+
+// TriggerType
+// Self-scheduled trigger events (e.g. "all acks received").
+enumeration(TriggerType, desc="...") {
+  ALL_ACKS,            desc="See corresponding event";
+}
+
+// TriggerMsg
+structure(TriggerMsg, desc="...", interface="Message") {
+  Address Address,             desc="Physical address for this request";
+  TriggerType Type,            desc="Type of trigger";
+}
+
+// RequestMsg (and also forwarded requests)
+structure(RequestMsg, desc="...", interface="NetworkMessage") {
+  Address Address,             desc="Physical address for this request";
+  CoherenceRequestType Type,   desc="Type of request (GetS, GetX, PutX, etc)";
+  MachineID Requestor,         desc="Node who initiated the request";
+  MachineType RequestorMachine, desc="type of component";
+  NetDest Destination,         desc="Multicast destination mask";
+  int Acks,                    desc="How many acks to expect";
+  MessageSizeType MessageSize, desc="size category of the message";
+  AccessModeType AccessMode,   desc="user/supervisor access type";
+  PrefetchBit Prefetch,        desc="Is this a prefetch request";
+}
+
+// ResponseMsg (and also unblock requests)
+structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+  Address Address,             desc="Physical address for this request";
+  CoherenceResponseType Type,  desc="Type of response (Ack, Data, etc)";
+  MachineID Sender,            desc="Node who sent the data";
+  MachineType SenderMachine,   desc="type of component sending msg";
+  NetDest Destination,         desc="Node to whom the data is sent";
+  DataBlock DataBlk,           desc="data for the cache line";
+  bool Dirty,                  desc="Is the data dirty (different than memory)?";
+  int Acks,                    desc="How many acks to expect";
+  MessageSizeType MessageSize, desc="size category of the message";
+}
+
+// Map a protocol-specific CoherenceRequestType onto the generic request
+// type used for profiling.  PUTO and PUTO_SHARERS both map to PUTO;
+// WB_ACK and WB_ACK_DATA both map to WB_ACK.  Unknown types are a
+// hard error.  (A second, unreachable PUTX branch — shadowed by the
+// first PUTX test and returning PUTS — has been removed.)
+GenericRequestType convertToGenericType(CoherenceRequestType type) {
+  if(type == CoherenceRequestType:PUTX) {
+    return GenericRequestType:PUTX;
+  } else if(type == CoherenceRequestType:GETS) {
+    return GenericRequestType:GETS;
+  } else if(type == CoherenceRequestType:GETX) {
+    return GenericRequestType:GETX;
+  } else if(type == CoherenceRequestType:PUTS) {
+    return GenericRequestType:PUTS;
+  } else if(type == CoherenceRequestType:PUTO) {
+    return GenericRequestType:PUTO;
+  } else if(type == CoherenceRequestType:PUTO_SHARERS) {
+    return GenericRequestType:PUTO;
+  } else if(type == CoherenceRequestType:INV) {
+    return GenericRequestType:INV;
+  } else if(type == CoherenceRequestType:WB_ACK) {
+    return GenericRequestType:WB_ACK;
+  } else if(type == CoherenceRequestType:WB_ACK_DATA) {
+    return GenericRequestType:WB_ACK;
+  } else if(type == CoherenceRequestType:WB_NACK) {
+    return GenericRequestType:NACK;
+  } else {
+    DEBUG_EXPR(type);
+    error("invalid CoherenceRequestType");
+  }
+}
+
diff --git a/src/mem/protocol/MOESI_CMP_directory-perfectDir.sm b/src/mem/protocol/MOESI_CMP_directory-perfectDir.sm
new file mode 100644
index 000000000..7717434f8
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory-perfectDir.sm
@@ -0,0 +1,573 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_CMP_directory-dir.sm 1.11 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
+ */
+
+// Directory controller for the MOESI CMP directory protocol, "perfect
+// directory" variant: data is served directly from the directory's own
+// DataBlk (see d_sendData) with no detailed memory-controller model.
+// Writebacks are acknowledged first (WB_ACK) and completed by a later
+// Clean_Writeback / Dirty_Writeback message on the response network.
+machine(Directory, "Directory protocol") {
+
+ // ** IN QUEUES **
+ // NOTE(review): foo1/goo1 appear to exist only to keep virtual network 0
+ // allocated symmetrically with the cache controllers; no messages are
+ // ever triggered from foo1 (its in_port body is empty) — confirm.
+ MessageBuffer foo1, network="From", virtual_network="0", ordered="false"; // a mod-L2 bank -> this Dir
+ MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
+ MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false"; // a mod-L2 bank -> this Dir
+
+ MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
+ MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
+ MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false"; // Dir -> mod-L2 bank
+
+
+ // STATES
+ // Base states I/S/O/M plus transient "blocked" states entered while the
+ // directory waits for unblock or writeback-completion messages.
+ enumeration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, desc="Invalid";
+ S, desc="Shared";
+ O, desc="Owner";
+ M, desc="Modified";
+
+ IS, desc="Blocked, was in idle";
+ SS, desc="Blocked, was in shared";
+ OO, desc="Blocked, was in owned";
+ MO, desc="Blocked, going to owner or maybe modified";
+ MM, desc="Blocked, going to modified";
+
+ MI, desc="Blocked on a writeback";
+ MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
+ OS, desc="Blocked on a writeback";
+ OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETX, desc="A GETX arrives";
+ GETS, desc="A GETS arrives";
+ PUTX, desc="A PUTX arrives";
+ PUTO, desc="A PUTO arrives";
+ PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
+ Unblock, desc="An unblock message arrives";
+ Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
+ Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
+ Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
+ Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ State DirectoryState, desc="Directory state";
+ DataBlock DataBlk, desc="data for the block";
+ NetDest Sharers, desc="Sharers for this block";
+ NetDest Owner, desc="Owner of this block";
+ // Number of outstanding unblock messages expected in SS/OO (see
+ // n_incrementOutstanding / o_decrementOutstanding).
+ int WaitingUnblocks, desc="Number of acks we're waiting for";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+
+ State getState(Address addr) {
+ return directory[addr].DirectoryState;
+ }
+
+ // Writes the new state after asserting the per-state invariants on the
+ // Owner/Sharers sets and the WaitingUnblocks counter.
+ void setState(Address addr, State state) {
+ if (directory.isPresent(addr)) {
+
+ if (state == State:I) {
+ assert(directory[addr].Owner.count() == 0);
+ assert(directory[addr].Sharers.count() == 0);
+ }
+
+ if (state == State:S) {
+ assert(directory[addr].Owner.count() == 0);
+ }
+
+ if (state == State:O) {
+ assert(directory[addr].Owner.count() == 1);
+ assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
+ }
+
+ if (state == State:M) {
+ assert(directory[addr].Owner.count() == 1);
+ assert(directory[addr].Sharers.count() == 0);
+ }
+
+ if ((state != State:SS) && (state != State:OO)) {
+ assert(directory[addr].WaitingUnblocks == 0);
+ }
+
+ // Both branches store the state; the split only marks where a
+ // coherence-checker hook used to run on transitions into I.
+ if ( (directory[addr].DirectoryState != State:I) && (state == State:I) ) {
+ directory[addr].DirectoryState := state;
+ // disable coherence checker
+ // sequencer.checkCoherence(addr);
+ }
+ else {
+ directory[addr].DirectoryState := state;
+ }
+ }
+ }
+
+ // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
+ // NOTE(review): both predicates below answer true only in state I — i.e.
+ // the directory itself counts as sharer AND exclusive holder when no
+ // cache has the block; in all other states they return false.
+ bool isBlockShared(Address addr) {
+ if (directory.isPresent(addr)) {
+ if (directory[addr].DirectoryState == State:I) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool isBlockExclusive(Address addr) {
+ if (directory.isPresent(addr)) {
+ if (directory[addr].DirectoryState == State:I) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+
+ // ** OUT_PORTS **
+ out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
+ out_port(goo1_out, ResponseMsg, goo1);
+
+ // ** IN_PORTS **
+
+ // Placeholder port: no message types are handled on virtual network 0.
+ in_port(foo1_in, ResponseMsg, foo1) {
+
+ }
+
+ // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
+ // if (unblockNetwork_in.isReady()) {
+ // Demultiplexes unblock and writeback-completion responses into events.
+ in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
+ if (unblockNetwork_in.isReady()) {
+ peek(unblockNetwork_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+ // Last_Unblock fires when this is the final expected unblock.
+ if (directory[in_msg.Address].WaitingUnblocks == 1) {
+ trigger(Event:Last_Unblock, in_msg.Address);
+ } else {
+ trigger(Event:Unblock, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
+ trigger(Event:Exclusive_Unblock, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
+ trigger(Event:Dirty_Writeback, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
+ trigger(Event:Clean_Writeback, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Demultiplexes coherence requests (GETS/GETX/PUT*) into events.
+ in_port(requestQueue_in, RequestMsg, requestToDir) {
+ if (requestQueue_in.isReady()) {
+ peek(requestQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ trigger(Event:PUTX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTO) {
+ trigger(Event:PUTO, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
+ trigger(Event:PUTO_SHARERS, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_NACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(c_clearOwner, "c", desc="Clear the owner field") {
+ directory[address].Owner.clear();
+ }
+
+ action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
+ directory[address].Sharers.addNetDest(directory[address].Owner);
+ directory[address].Owner.clear();
+ }
+
+ action(cc_clearSharers, "\c", desc="Clear the sharers field") {
+ directory[address].Sharers.clear();
+ }
+
+ // "Perfect directory": data is returned straight from the directory
+ // entry's DataBlk, with no memory-latency model beyond MEMORY_LATENCY.
+ action(d_sendData, "d", desc="Send data to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ // enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+
+ // A GETS with no sharers gets the block exclusively.
+ if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ } else {
+ out_msg.Type := CoherenceResponseType:DATA;
+ }
+
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Dirty := false; // By definition, the block is now clean
+ // Acks = number of sharers that will be invalidated; the requester
+ // does not ack itself, hence the decrement when it is a sharer.
+ out_msg.Acks := directory[address].Sharers.count();
+ if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ directory[address].Owner.clear();
+ directory[address].Owner.add(in_msg.Sender);
+ }
+ }
+
+ action(f_forwardRequest, "f", desc="Forward request to owner") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
+ // Same ack-count convention as d_sendData.
+ out_msg.Acks := directory[address].Sharers.count();
+ if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
+
+ action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
+ peek(requestQueue_in, RequestMsg) {
+ // Only send if at least one sharer other than the requester exists.
+ if ((directory[in_msg.Address].Sharers.count() > 1) ||
+ ((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := in_msg.Requestor;
+ // out_msg.Destination := directory[in_msg.Address].Sharers;
+ out_msg.Destination.addNetDest(directory[in_msg.Address].Sharers);
+ out_msg.Destination.remove(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+ }
+ }
+ }
+ }
+
+ action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+ requestQueue_in.dequeue();
+ }
+
+ action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+ unblockNetwork_in.dequeue();
+ }
+
+ action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(in_msg.Dirty);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(in_msg.Dirty == false);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+
+ // NOTE: The following check would not be valid in a real
+ // implementation. We include the data in the "dataless"
+ // message so we can assert the clean data matches the datablock
+ // in memory
+ assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
+ }
+ }
+
+ action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ directory[address].Sharers.add(in_msg.Sender);
+ }
+ }
+
+ action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
+ directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
+ }
+
+ action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
+ directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
+ assert(directory[address].WaitingUnblocks >= 0);
+ }
+
+ // action(z_stall, "z", desc="Cannot be handled right now.") {
+ // Special name recognized as do nothing case
+ // }
+
+ // Requeues the head request so it is retried later (used from blocked
+ // states instead of stalling the whole queue).
+ action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
+ requestQueue_in.recycle();
+ }
+
+ // TRANSITIONS
+
+ // Requests from base states: serve data / forward to owner.
+ transition(I, GETX, MM) {
+ d_sendData;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S, GETX, MM) {
+ d_sendData;
+ g_sendInvalidations;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(I, GETS, IS) {
+ d_sendData;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({S, SS}, GETS, SS) {
+ d_sendData;
+ n_incrementOutstanding;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({I, S}, PUTO) {
+ b_sendWriteBackNack;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({I, S, O}, PUTX) {
+ b_sendWriteBackNack;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, GETX, MM) {
+ f_forwardRequest;
+ g_sendInvalidations;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({O, OO}, GETS, OO) {
+ f_forwardRequest;
+ n_incrementOutstanding;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, GETX, MM) {
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, GETS, MO) {
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, PUTX, MI) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+ // happens if M->O transition happens on-chip
+ transition(M, PUTO, MI) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, PUTO_SHARERS, MIS) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, PUTO, OS) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, PUTO_SHARERS, OSS) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+
+ // Blocked states: recycle new requests until the block unblocks.
+ transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
+ zz_recycleRequest;
+ }
+
+ transition({MM, MO}, Exclusive_Unblock, M) {
+ cc_clearSharers;
+ e_ownerIsUnblocker;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(MO, Unblock, O) {
+ m_addUnlockerToSharers;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
+ zz_recycleRequest;
+ }
+
+ transition(IS, GETS) {
+ zz_recycleRequest;
+ }
+
+ transition(IS, Unblock, S) {
+ m_addUnlockerToSharers;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(IS, Exclusive_Unblock, M) {
+ cc_clearSharers;
+ e_ownerIsUnblocker;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(SS, Unblock) {
+ m_addUnlockerToSharers;
+ o_decrementOutstanding;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(SS, Last_Unblock, S) {
+ m_addUnlockerToSharers;
+ o_decrementOutstanding;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(OO, Unblock) {
+ m_addUnlockerToSharers;
+ o_decrementOutstanding;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(OO, Last_Unblock, O) {
+ m_addUnlockerToSharers;
+ o_decrementOutstanding;
+ j_popIncomingUnblockQueue;
+ }
+
+ // Writeback completion: dirty writebacks carry data to store; clean
+ // writebacks only verify the directory's copy (ll_checkDataInMemory).
+ transition(MI, Dirty_Writeback, I) {
+ c_clearOwner;
+ cc_clearSharers;
+ l_writeDataToMemory;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(MIS, Dirty_Writeback, S) {
+ c_moveOwnerToSharer;
+ l_writeDataToMemory;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(MIS, Clean_Writeback, S) {
+ c_moveOwnerToSharer;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(OS, Dirty_Writeback, S) {
+ c_clearOwner;
+ l_writeDataToMemory;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(OSS, Dirty_Writeback, S) {
+ c_moveOwnerToSharer;
+ l_writeDataToMemory;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(OSS, Clean_Writeback, S) {
+ c_moveOwnerToSharer;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(MI, Clean_Writeback, I) {
+ c_clearOwner;
+ cc_clearSharers;
+ ll_checkDataInMemory;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(OS, Clean_Writeback, S) {
+ c_clearOwner;
+ ll_checkDataInMemory;
+ j_popIncomingUnblockQueue;
+ }
+
+ // An Unblock while waiting on a writeback means the writeback raced
+ // with a forwarded request; return to the owned state.
+ transition({MI, MIS}, Unblock, M) {
+ j_popIncomingUnblockQueue;
+ }
+
+ transition({OS, OSS}, Unblock, O) {
+ j_popIncomingUnblockQueue;
+ }
+}
diff --git a/src/mem/protocol/MOESI_CMP_directory.slicc b/src/mem/protocol/MOESI_CMP_directory.slicc
new file mode 100644
index 000000000..c552d7157
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory.slicc
@@ -0,0 +1,5 @@
+MOESI_CMP_directory-msg.sm
+MOESI_CMP_directory-L2cache.sm
+MOESI_CMP_directory-L1cache.sm
+MOESI_CMP_directory-dir.sm
+standard_CMP-protocol.sm
diff --git a/src/mem/protocol/MOESI_CMP_directory_m-dir.sm b/src/mem/protocol/MOESI_CMP_directory_m-dir.sm
new file mode 100644
index 000000000..3a4d875c1
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory_m-dir.sm
@@ -0,0 +1,652 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+machine(Directory, "Directory protocol") {
+
+ // ** IN QUEUES **
+ MessageBuffer foo1, network="From", virtual_network="0", ordered="false"; // a mod-L2 bank -> this Dir
+ MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
+ MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false"; // a mod-L2 bank -> this Dir
+
+ MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
+ MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
+ MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false"; // Dir -> mod-L2 bank
+
+
+ // STATES
+ enumeration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, desc="Invalid";
+ S, desc="Shared";
+ O, desc="Owner";
+ M, desc="Modified";
+
+ IS, desc="Blocked, was in idle";
+ SS, desc="Blocked, was in shared";
+ OO, desc="Blocked, was in owned";
+ MO, desc="Blocked, going to owner or maybe modified";
+ MM, desc="Blocked, going to modified";
+
+ MI, desc="Blocked on a writeback";
+ MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
+ OS, desc="Blocked on a writeback";
+ OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETX, desc="A GETX arrives";
+ GETS, desc="A GETS arrives";
+ PUTX, desc="A PUTX arrives";
+ PUTO, desc="A PUTO arrives";
+ PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
+ Unblock, desc="An unblock message arrives";
+ Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
+ Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
+ Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
+ Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ State DirectoryState, desc="Directory state";
+ DataBlock DataBlk, desc="data for the block";
+ NetDest Sharers, desc="Sharers for this block";
+ NetDest Owner, desc="Owner of this block";
+ int WaitingUnblocks, desc="Number of acks we're waiting for";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // to simulate detailed DRAM
+ external_type(MemoryControl, inport="yes", outport="yes") {
+
+ }
+
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+ MemoryControl memBuffer, constructor_hack="i";
+
+ State getState(Address addr) {
+ return directory[addr].DirectoryState;
+ }
+
+ void setState(Address addr, State state) {
+ if (directory.isPresent(addr)) {
+
+ if (state == State:I) {
+ assert(directory[addr].Owner.count() == 0);
+ assert(directory[addr].Sharers.count() == 0);
+ }
+
+ if (state == State:S) {
+ assert(directory[addr].Owner.count() == 0);
+ }
+
+ if (state == State:O) {
+ assert(directory[addr].Owner.count() == 1);
+ assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
+ }
+
+ if (state == State:M) {
+ assert(directory[addr].Owner.count() == 1);
+ assert(directory[addr].Sharers.count() == 0);
+ }
+
+ if ((state != State:SS) && (state != State:OO)) {
+ assert(directory[addr].WaitingUnblocks == 0);
+ }
+
+ if ( (directory[addr].DirectoryState != State:I) && (state == State:I) ) {
+ directory[addr].DirectoryState := state;
+ // disable coherence checker
+ // sequencer.checkCoherence(addr);
+ }
+ else {
+ directory[addr].DirectoryState := state;
+ }
+ }
+ }
+
+ // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
+ bool isBlockShared(Address addr) {
+ if (directory.isPresent(addr)) {
+ if (directory[addr].DirectoryState == State:I) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool isBlockExclusive(Address addr) {
+ if (directory.isPresent(addr)) {
+ if (directory[addr].DirectoryState == State:I) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+
+ // ** OUT_PORTS **
+ out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
+ out_port(goo1_out, ResponseMsg, goo1);
+ out_port(memQueue_out, MemoryMsg, memBuffer);
+
+ // ** IN_PORTS **
+
+ in_port(foo1_in, ResponseMsg, foo1) {
+
+ }
+
+ // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
+ // if (unblockNetwork_in.isReady()) {
+ in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
+ if (unblockNetwork_in.isReady()) {
+ peek(unblockNetwork_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+ if (directory[in_msg.Address].WaitingUnblocks == 1) {
+ trigger(Event:Last_Unblock, in_msg.Address);
+ } else {
+ trigger(Event:Unblock, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
+ trigger(Event:Exclusive_Unblock, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
+ trigger(Event:Dirty_Writeback, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
+ trigger(Event:Clean_Writeback, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(requestQueue_in, RequestMsg, requestToDir) {
+ if (requestQueue_in.isReady()) {
+ peek(requestQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ trigger(Event:PUTX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTO) {
+ trigger(Event:PUTO, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
+ trigger(Event:PUTO_SHARERS, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_NACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(c_clearOwner, "c", desc="Clear the owner field") {
+ directory[address].Owner.clear();
+ }
+
+ action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
+ directory[address].Sharers.addNetDest(directory[address].Owner);
+ directory[address].Owner.clear();
+ }
+
+ action(cc_clearSharers, "\c", desc="Clear the sharers field") {
+ directory[address].Sharers.clear();
+ }
+
+ action(d_sendDataMsg, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ //out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := false; // By definition, the block is now clean
+ out_msg.Acks := in_msg.Acks;
+ if (in_msg.ReadX) {
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ } else {
+ out_msg.Type := CoherenceResponseType:DATA;
+ }
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ directory[address].Owner.clear();
+ directory[address].Owner.add(in_msg.Sender);
+ }
+ }
+
+ action(f_forwardRequest, "f", desc="Forward request to owner") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
+ out_msg.Acks := directory[address].Sharers.count();
+ if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
+
+ action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
+ peek(requestQueue_in, RequestMsg) {
+ if ((directory[in_msg.Address].Sharers.count() > 1) ||
+ ((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := in_msg.Requestor;
+ // out_msg.Destination := directory[in_msg.Address].Sharers;
+ out_msg.Destination.addNetDest(directory[in_msg.Address].Sharers);
+ out_msg.Destination.remove(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+ }
+ }
+ }
+ }
+
+ action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+ requestQueue_in.dequeue();
+ }
+
+ action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+ unblockNetwork_in.dequeue();
+ }
+
+ action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(in_msg.Dirty);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(in_msg.Dirty == false);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+
+ // NOTE: The following check would not be valid in a real
+ // implementation. We include the data in the "dataless"
+ // message so we can assert the clean data matches the datablock
+ // in memory
+ assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
+ }
+ }
+
+  // Add the processor that sent the unblock message to this block's
+  // sharer list (the unblocker has just obtained a shared copy).
+  action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      directory[address].Sharers.add(in_msg.Sender);
+    }
+  }
+
+  // Track how many unblock messages the directory is still waiting for
+  // (used by the SS/OO states, which can have several GETS in flight).
+  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
+    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
+  }
+
+  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
+    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
+    // Sanity check: we should never acknowledge more unblocks than we issued.
+    assert(directory[address].WaitingUnblocks >= 0);
+  }
+
+  // Retire the head entry of the queue of responses coming back from
+  // the off-chip memory controller.
+  action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
+    memQueue_in.dequeue();
+  }
+
+  // Forward a demand miss to the memory controller. The ReadX/Acks
+  // fields are unused by memory itself; they ride along and come back
+  // with the data so d_sendDataMsg can build the response.
+  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(memQueue_out, MemoryMsg, latency="1") {
+        out_msg.Address := address;
+        out_msg.Type := MemoryRequestType:MEMORY_READ;
+        out_msg.Sender := machineID;
+        out_msg.OriginalRequestorMachId := in_msg.Requestor;
+        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+        out_msg.MessageSize := in_msg.MessageSize;
+        //out_msg.Prefetch := false;
+        // These are not used by memory but are passed back here with the read data:
+        // ReadX: requestor may be granted exclusive on a GETS if no sharers exist.
+        out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0);
+        out_msg.Acks := directory[address].Sharers.count();
+        // Don't count the requestor itself among the acks it must collect.
+        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+          out_msg.Acks := out_msg.Acks - 1;
+        }
+        DEBUG_EXPR(out_msg);
+      }
+    }
+  }
+
+  // Send a dirty writeback's data on to the memory controller.
+  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      enqueue(memQueue_out, MemoryMsg, latency="1") {
+        out_msg.Address := address;
+        out_msg.Type := MemoryRequestType:MEMORY_WB;
+        out_msg.Sender := machineID;
+        //out_msg.OriginalRequestorMachId := in_msg.Requestor;
+        out_msg.DataBlk := in_msg.DataBlk;
+        out_msg.MessageSize := in_msg.MessageSize;
+        //out_msg.Prefetch := false;
+        // Not used:
+        out_msg.ReadX := false;
+        out_msg.Acks := 0;
+        DEBUG_EXPR(out_msg);
+      }
+    }
+  }
+
+
+  // action(z_stall, "z", desc="Cannot be handled right now.") {
+    // Special name recognized as do nothing case
+  // }
+
+  // Requeue the head request at the tail so later requests can make
+  // progress while this address is in a busy/transient state.
+  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
+    requestQueue_in.recycle();
+  }
+
+  // TRANSITIONS
+
+  // --- Demand requests arriving in stable states ------------------------
+  // I/S satisfy GETX/GETS from memory; O/M forward to the on-chip owner.
+  // The directory moves to a transient state and waits for an unblock.
+
+  transition(I, GETX, MM) {
+    qf_queueMemoryFetchRequest;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(S, GETX, MM) {
+    qf_queueMemoryFetchRequest;
+    g_sendInvalidations;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(I, GETS, IS) {
+    qf_queueMemoryFetchRequest;
+    i_popIncomingRequestQueue;
+  }
+
+  // Multiple GETS can be outstanding at once; count each with
+  // n_incrementOutstanding and drain them via Unblock/Last_Unblock.
+  transition({S, SS}, GETS, SS) {
+    qf_queueMemoryFetchRequest;
+    n_incrementOutstanding;
+    i_popIncomingRequestQueue;
+  }
+
+  // A PUTO/PUTX from a node that is not the recorded owner is stale
+  // (the line was taken away in flight) -- nack it.
+  transition({I, S}, PUTO) {
+    b_sendWriteBackNack;
+    i_popIncomingRequestQueue;
+  }
+
+  transition({I, S, O}, PUTX) {
+    b_sendWriteBackNack;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(O, GETX, MM) {
+    f_forwardRequest;
+    g_sendInvalidations;
+    i_popIncomingRequestQueue;
+  }
+
+  transition({O, OO}, GETS, OO) {
+    f_forwardRequest;
+    n_incrementOutstanding;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, GETX, MM) {
+    f_forwardRequest;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, GETS, MO) {
+    f_forwardRequest;
+    i_popIncomingRequestQueue;
+  }
+
+  // --- Writeback requests from the owner --------------------------------
+
+  transition(M, PUTX, MI) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  // happens if M->O transition happens on-chip
+  transition(M, PUTO, MI) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, PUTO_SHARERS, MIS) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(O, PUTO, OS) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(O, PUTO_SHARERS, OSS) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+
+  // --- Busy states: defer new requests until the transient resolves -----
+
+  transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
+    zz_recycleRequest;
+  }
+
+  // --- Unblock handling --------------------------------------------------
+  // The requestor sends (Exclusive_)Unblock once it has the data, telling
+  // the directory the transient is complete and who the new owner/sharer is.
+
+  transition({MM, MO}, Exclusive_Unblock, M) {
+    cc_clearSharers;
+    e_ownerIsUnblocker;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MO, Unblock, O) {
+    m_addUnlockerToSharers;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
+    zz_recycleRequest;
+  }
+
+  transition(IS, GETS) {
+    zz_recycleRequest;
+  }
+
+  transition(IS, Unblock, S) {
+    m_addUnlockerToSharers;
+    j_popIncomingUnblockQueue;
+  }
+
+  // A GETS that found no sharers may be upgraded to exclusive (ReadX),
+  // so IS can complete with an Exclusive_Unblock.
+  transition(IS, Exclusive_Unblock, M) {
+    cc_clearSharers;
+    e_ownerIsUnblocker;
+    j_popIncomingUnblockQueue;
+  }
+
+  // SS/OO stay put until the last outstanding unblock arrives.
+  transition(SS, Unblock) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(SS, Last_Unblock, S) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OO, Unblock) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OO, Last_Unblock, O) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  // --- Writeback data arrival --------------------------------------------
+  // Dirty_Writeback carries the owner's data to memory; Clean_Writeback
+  // only needs the (debug) check that memory already matches.
+
+  transition(MI, Dirty_Writeback, I) {
+    c_clearOwner;
+    cc_clearSharers;
+    l_writeDataToMemory;
+    qw_queueMemoryWBRequest;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MIS, Dirty_Writeback, S) {
+    c_moveOwnerToSharer;
+    l_writeDataToMemory;
+    qw_queueMemoryWBRequest;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MIS, Clean_Writeback, S) {
+    c_moveOwnerToSharer;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OS, Dirty_Writeback, S) {
+    c_clearOwner;
+    l_writeDataToMemory;
+    qw_queueMemoryWBRequest;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OSS, Dirty_Writeback, S) {
+    c_moveOwnerToSharer;
+    l_writeDataToMemory;
+    qw_queueMemoryWBRequest;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OSS, Clean_Writeback, S) {
+    c_moveOwnerToSharer;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MI, Clean_Writeback, I) {
+    c_clearOwner;
+    cc_clearSharers;
+    ll_checkDataInMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OS, Clean_Writeback, S) {
+    c_clearOwner;
+    ll_checkDataInMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  // An Unblock in MI/MIS or OS/OSS means the writeback was cancelled
+  // (e.g. the request raced) -- fall back to the owning state.
+  transition({MI, MIS}, Unblock, M) {
+    j_popIncomingUnblockQueue;
+  }
+
+  transition({OS, OSS}, Unblock, O) {
+    j_popIncomingUnblockQueue;
+  }
+
+  // --- Memory controller responses (possible in any state) ---------------
+
+  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
+    d_sendDataMsg;
+    q_popMemQueue;
+  }
+
+  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Ack) {
+    //a_sendAck;
+    q_popMemQueue;
+  }
+
+}
diff --git a/src/mem/protocol/MOESI_CMP_directory_m.slicc b/src/mem/protocol/MOESI_CMP_directory_m.slicc
new file mode 100644
index 000000000..3abe8603a
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory_m.slicc
@@ -0,0 +1,5 @@
+MOESI_CMP_directory-msg.sm
+MOESI_CMP_directory-L2cache.sm
+MOESI_CMP_directory-L1cache.sm
+MOESI_CMP_directory_m-dir.sm
+standard_CMP-protocol.sm
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
new file mode 100644
index 000000000..ab58c5c00
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -0,0 +1,2041 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_CMP_token-L1cache.sm 1.22 05/01/19 15:55:39-06:00 beckmann@s0-28.cs.wisc.edu $
+ *
+ */
+
+machine(L1Cache, "Token protocol") {
+
+  // From this node's L1 cache TO the network
+  // Virtual networks: 0 = transient requests, 2 = responses/data,
+  // 3 = persistent (starvation-avoidance) requests, which must be ordered.
+  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
+  // a local L1 -> this L2 bank
+  MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false";
+  MessageBuffer persistentFromL1Cache, network="To", virtual_network="3", ordered="true";
+
+  // To this node's L1 cache FROM the network
+  // a L2 bank -> this L1
+  MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false";
+  // a L2 bank -> this L1
+  MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false";
+  MessageBuffer persistentToL1Cache, network="From", virtual_network="3", ordered="true";
+
+  // STATES
+  // NOTE(review): the declared default is I, although getState() reports
+  // NP for untracked, unlocked lines -- confirm this asymmetry is intended.
+  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+    // Base states
+    NP, "NP", desc="Not Present";
+    I, "I", desc="Idle";
+    S, "S", desc="Shared";
+    O, "O", desc="Owned";
+    M, "M", desc="Modified (dirty)";
+    MM, "MM", desc="Modified (dirty and locally modified)";
+    // ^W variants: modified but still in the post-store "waiting" window
+    // governed by the use timer.
+    M_W, "M^W", desc="Modified (dirty), waiting";
+    MM_W, "MM^W", desc="Modified (dirty and locally modified), waiting";
+
+    // Transient States
+    IM, "IM", desc="Issued GetX";
+    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
+    OM, "OM", desc="Issued GetX, received data";
+    IS, "IS", desc="Issued GetS";
+
+    // Locked states: a persistent request from another processor holds
+    // priority on this line, so we must not acquire/keep tokens.
+    I_L, "I^L", desc="Invalid, Locked";
+    S_L, "S^L", desc="Shared, Locked";
+    IM_L, "IM^L", desc="Invalid, Locked, trying to go to Modified";
+    SM_L, "SM^L", desc="Shared, Locked, trying to go to Modified";
+    IS_L, "IS^L", desc="Invalid, Locked, trying to go to Shared";
+  }
+
+  // EVENTS
+  enumeration(Event, desc="Cache events") {
+    Load, desc="Load request from the processor";
+    Ifetch, desc="I-fetch request from the processor";
+    Store, desc="Store request from the processor";
+    L1_Replacement, desc="L1 Replacement";
+
+    // Responses -- classified by whether the incoming tokens complete
+    // the full set (max_tokens) for the line.
+    Data_Shared, desc="Received a data message, we are now a sharer";
+    Data_Owner, desc="Received a data message, we are now the owner";
+    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
+    Ack, desc="Received an ack message";
+    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
+
+    // Requests ("Local" = from another L1 on this same chip)
+    Transient_GETX, desc="A GetX from another processor";
+    Transient_Local_GETX, desc="A GetX from another processor";
+    Transient_GETS, desc="A GetS from another processor";
+    Transient_Local_GETS, desc="A GetS from another processor";
+    Transient_GETS_Last_Token, desc="A GetS from another processor";
+    Transient_Local_GETS_Last_Token, desc="A GetS from another processor";
+
+    // Lock/Unlock for distributed
+    Persistent_GETX, desc="Another processor has priority to read/write";
+    Persistent_GETS, desc="Another processor has priority to read";
+    Own_Lock_or_Unlock, desc="This processor now has priority";
+
+    // Triggers
+    Request_Timeout, desc="Timeout";
+    Use_TimeoutStarverX, desc="Timeout";
+    Use_TimeoutStarverS, desc="Timeout";
+    Use_TimeoutNoStarvers, desc="Timeout";
+
+  }
+
+  // TYPES
+
+  // Configuration accessors implemented outside SLICC.
+  int getRetryThreshold();
+  int getFixedTimeoutLatency();
+  bool getDynamicTimeoutEnabled();
+
+  // CacheEntry
+  structure(Entry, desc="...", interface="AbstractCacheEntry") {
+    State CacheState, desc="cache state";
+    bool Dirty, desc="Is the data dirty (different than memory)?";
+    int Tokens, desc="The number of tokens we're holding for the line";
+    DataBlock DataBlk, desc="data for the block";
+  }
+
+
+  // TBE fields
+  structure(TBE, desc="...") {
+    Address Address, desc="Physical address for this TBE";
+    State TBEState, desc="Transient state";
+    int IssueCount, default="0", desc="The number of times we've issued a request for this line.";
+    Address PC, desc="Program counter of request";
+
+    bool WentPersistent, default="false", desc="Request went persistent";
+    bool ExternalResponse, default="false", desc="Response came from an external controller";
+
+    AccessType AccessType, desc="Type of request (used for profiling)";
+    Time IssueTime, desc="Time the request was issued";
+    AccessModeType AccessMode, desc="user/supervisor access type";
+    PrefetchBit Prefetch, desc="Is this a prefetch request";
+  }
+
+  // Interfaces to C++-implemented structures.
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);
+    Address cacheProbe(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+  }
+
+  external_type(TBETable) {
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+
+  TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
+  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+
+  MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
+  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+
+  // True while this L1 has its own persistent request outstanding.
+  bool starving, default="false";
+
+  PersistentTable persistentTable, constructor_hack="i";
+  TimerTable useTimerTable;
+  TimerTable reissueTimerTable;
+
+  // Counters fed to the profiler on each new request issue.
+  int outstandingRequests, default="0";
+  int outstandingPersistentRequests, default="0";
+
+  int averageLatencyHysteresis, default="(8)"; // Constant that provides hysteresis for calculating the estimated average
+  // Seeded so the initial estimate is 500 cycles (500 << hysteresis).
+  int averageLatencyCounter, default="(500 << (*m_L1Cache_averageLatencyHysteresis_vec[i]))";
+
+  // Current moving-average estimate of request latency, used to size the
+  // dynamic reissue timeout. The counter stores the sum scaled by
+  // 2^averageLatencyHysteresis, so the estimate is a right-shift.
+  int averageLatencyEstimate() {
+    DEBUG_EXPR( (averageLatencyCounter >> averageLatencyHysteresis) );
+    profile_average_latency_estimate( (averageLatencyCounter >> averageLatencyHysteresis) );
+    return averageLatencyCounter >> averageLatencyHysteresis;
+  }
+
+  // Fold one observed latency sample into the moving average.
+  void updateAverageLatencyEstimate(int latency) {
+    DEBUG_EXPR( latency );
+    assert(latency >= 0);
+
+    // By subtracting the current average and then adding the most
+    // recent sample, we calculate an estimate of the recent average.
+    // If we simply used a running sum and divided by the total number
+    // of entries, the estimate of the average would adapt very slowly
+    // after the execution has run for a long time.
+    // averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
+
+    averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
+  }
+
+
+  // Look up the entry in whichever L1 holds the tag. Relies on the
+  // I/D-exclusion invariant asserted in getState/setState; falls through
+  // to the I-cache when the D-cache misses.
+  Entry getCacheEntry(Address addr), return_by_ref="yes" {
+    if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory[addr];
+    } else {
+      return L1IcacheMemory[addr];
+    }
+  }
+
+  // Token count for a line, or 0 when the line is in neither L1.
+  int getTokens(Address addr) {
+    if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory[addr].Tokens;
+    } else if (L1IcacheMemory.isTagPresent(addr)) {
+      return L1IcacheMemory[addr].Tokens;
+    } else {
+      return 0;
+    }
+  }
+
+  // Apply the access permission to whichever L1 holds the tag.
+  void changePermission(Address addr, AccessPermission permission) {
+    if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory.changePermission(addr, permission);
+    } else {
+      return L1IcacheMemory.changePermission(addr, permission);
+    }
+  }
+
+  bool isCacheTagPresent(Address addr) {
+    return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+  }
+
+  // State resolution order: TBE (transient) > cache entry > persistent
+  // table (locked by someone else => I_L) > NP.
+  State getState(Address addr) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+    if (L1_TBEs.isPresent(addr)) {
+      return L1_TBEs[addr].TBEState;
+    } else if (isCacheTagPresent(addr)) {
+      return getCacheEntry(addr).CacheState;
+    } else {
+      if ((persistentTable.isLocked(addr) == true) && (persistentTable.findSmallest(addr) != machineID)) {
+        // Not in cache, in persistent table, but this processor isn't highest priority
+        return State:I_L;
+      } else {
+        return State:NP;
+      }
+    }
+  }
+
+  // Record the new state in the TBE and/or cache entry, sanity-check the
+  // token-count invariants for the new state, and derive the access
+  // permission from it.
+  void setState(Address addr, State state) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+    if (L1_TBEs.isPresent(addr)) {
+      // Stable states must never be recorded in a TBE.
+      assert(state != State:I);
+      assert(state != State:S);
+      assert(state != State:O);
+      assert(state != State:MM);
+      assert(state != State:M);
+      L1_TBEs[addr].TBEState := state;
+    }
+
+    if (isCacheTagPresent(addr)) {
+      // Make sure the token count is in range
+      assert(getCacheEntry(addr).Tokens >= 0);
+      assert(getCacheEntry(addr).Tokens <= max_tokens());
+
+      if ((state == State:I_L) ||
+          (state == State:IM_L) ||
+          (state == State:IS_L)) {
+        // Make sure we have no tokens in the "Invalid, locked" states
+        if (isCacheTagPresent(addr)) {
+          assert(getCacheEntry(addr).Tokens == 0);
+        }
+
+        // Make sure the line is locked
+        // assert(persistentTable.isLocked(addr));
+
+        // But we shouldn't have highest priority for it
+        // assert(persistentTable.findSmallest(addr) != id);
+
+      } else if ((state == State:S_L) ||
+                 (state == State:SM_L)) {
+        assert(getCacheEntry(addr).Tokens >= 1);
+
+        // Make sure the line is locked...
+        // assert(persistentTable.isLocked(addr));
+
+        // ...But we shouldn't have highest priority for it...
+        // assert(persistentTable.findSmallest(addr) != id);
+
+        // ...And it must be a GETS request
+        // assert(persistentTable.typeOfSmallest(addr) == AccessType:Read);
+
+      } else {
+
+        // If there is an entry in the persistent table of this block,
+        // this processor needs to have an entry in the table for this
+        // block, and that entry better be the smallest (highest
+        // priority). Otherwise, the state should have been one of
+        // locked states
+
+        //if (persistentTable.isLocked(addr)) {
+        //  assert(persistentTable.findSmallest(addr) == id);
+        //}
+      }
+
+      // in M and E you have all the tokens
+      if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
+        assert(getCacheEntry(addr).Tokens == max_tokens());
+      }
+
+      // in NP you have no tokens
+      if (state == State:NP) {
+        assert(getCacheEntry(addr).Tokens == 0);
+      }
+
+      // You have at least one token in S-like states
+      if (state == State:S || state == State:SM) {
+        assert(getCacheEntry(addr).Tokens > 0);
+      }
+
+      // You have at least one token in O-like states.
+      // BUG FIX: this condition used '&&', which can never be true (a
+      // state cannot equal both O and OM), so the check was dead code;
+      // '||' matches the "O-like states" intent.
+      if (state == State:O || state == State:OM) {
+        assert(getCacheEntry(addr).Tokens >= 1); // Must have at least one token
+        // Disabled: the owner usually holds at least half the tokens, but
+        // the original comment notes this "might not always hold", so it
+        // must not be an assert.
+        // assert(getCacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+      }
+
+      getCacheEntry(addr).CacheState := state;
+
+      // Set permission: write only in the locally-modified states,
+      // read-only in shared/owned/clean-modified states, else invalid.
+      if (state == State:MM ||
+          state == State:MM_W) {
+        changePermission(addr, AccessPermission:Read_Write);
+      } else if ((state == State:S) ||
+                 (state == State:O) ||
+                 (state == State:M) ||
+                 (state == State:M_W) ||
+                 (state == State:SM) ||
+                 (state == State:S_L) ||
+                 (state == State:SM_L) ||
+                 (state == State:OM)) {
+        changePermission(addr, AccessPermission:Read_Only);
+      } else {
+        changePermission(addr, AccessPermission:Invalid);
+      }
+    }
+  }
+
+  // Map a processor request type onto the protocol event that drives the
+  // state machine. ATOMICs are treated as stores.
+  Event mandatory_request_type_to_event(CacheRequestType type) {
+    if (type == CacheRequestType:LD) {
+      return Event:Load;
+    } else if (type == CacheRequestType:IFETCH) {
+      return Event:Ifetch;
+    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+      return Event:Store;
+    } else {
+      error("Invalid CacheRequestType");
+    }
+  }
+
+  // Read/write classification used for persistent-request profiling.
+  AccessType cache_request_type_to_access_type(CacheRequestType type) {
+    if ((type == CacheRequestType:LD) || (type == CacheRequestType:IFETCH)) {
+      return AccessType:Read;
+    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+      return AccessType:Write;
+    } else {
+      error("Invalid CacheRequestType");
+    }
+  }
+
+  // Classify where a non-direct hit was satisfied, distinguishing our own
+  // L2 bank from remote L2 banks (the _wCC variants) for profiling.
+  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
+    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
+      return GenericMachineType:L1Cache_wCC;  // NOTE direct L1 hits should not call this
+    } else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
+      if ( sender == (map_L1CacheMachId_to_L2Cache(addr,machineID))) {
+        return GenericMachineType:L2Cache;
+      } else {
+        return GenericMachineType:L2Cache_wCC;
+      }
+    } else {
+      return ConvertMachToGenericMach(machineIDToMachineType(sender));
+    }
+  }
+
+  // Thin wrappers over the persistent (starvation-avoidance) table.
+  bool okToIssueStarving(Address addr) {
+    return persistentTable.okToIssueStarving(addr);
+  }
+
+  void markPersistentEntries(Address addr) {
+    persistentTable.markEntries(addr);
+  }
+
+  // Internal queue for self-generated trigger events.
+  MessageBuffer triggerQueue, ordered="false", random="false";
+
+  // ** OUT_PORTS **
+  out_port(persistentNetwork_out, PersistentMsg, persistentFromL1Cache);
+  out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
+  out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
+  out_port(requestRecycle_out, RequestMsg, requestToL1Cache);
+
+  // ** IN_PORTS **
+
+  // Use Timer: fires when the post-store "waiting" window (M_W/MM_W)
+  // expires. Which event fires depends on whether another processor's
+  // persistent request is starving us, and if so whether it is a write.
+  in_port(useTimerTable_in, Address, useTimerTable) {
+    if (useTimerTable_in.isReady()) {
+      if (persistentTable.isLocked(useTimerTable.readyAddress()) && (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
+        if (persistentTable.typeOfSmallest(useTimerTable.readyAddress()) == AccessType:Write) {
+          trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress());
+        }
+        else {
+          trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress());
+        }
+      }
+      else {
+        trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress());
+      }
+    }
+  }
+
+  // Reissue Timer: a transient request has gone unanswered too long;
+  // Request_Timeout will reissue it (possibly escalating to persistent).
+  in_port(reissueTimerTable_in, Address, reissueTimerTable) {
+    if (reissueTimerTable_in.isReady()) {
+      trigger(Event:Request_Timeout, reissueTimerTable.readyAddress());
+    }
+  }
+
+
+
+  // Persistent Network: lock/unlock messages for starvation avoidance.
+  // First apply the message to the local persistent table, then trigger
+  // an event based on the table's resulting highest-priority entry.
+  in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache) {
+    if (persistentNetwork_in.isReady()) {
+      peek(persistentNetwork_in, PersistentMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+
+        // Apply the lockdown or unlockdown message to the table
+        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
+        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
+        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
+        } else {
+          error("Unexpected message");
+        }
+
+        // React to the message based on the current state of the table
+        if (persistentTable.isLocked(in_msg.Address)) {
+          if (persistentTable.findSmallest(in_msg.Address) == machineID) {
+            // Our Own Lock - this processor is highest priority
+            trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+          } else {
+            // Someone else has priority; yield tokens per their access type.
+            if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
+              trigger(Event:Persistent_GETS, in_msg.Address);
+            } else {
+              trigger(Event:Persistent_GETX, in_msg.Address);
+            }
+          }
+        } else {
+          // Unlock case - no entries in the table
+          trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+        }
+      }
+    }
+  }
+
+
+  // Request Network: transient GETX/GETS from other processors. GETS is
+  // special-cased when we hold exactly one token (the "last token"), since
+  // giving it up would leave us with nothing.
+  in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
+    if (requestNetwork_in.isReady()) {
+      peek(requestNetwork_in, RequestMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        if (in_msg.Type == CoherenceRequestType:GETX) {
+          if (in_msg.isLocal) {
+            trigger(Event:Transient_Local_GETX, in_msg.Address);
+          }
+          else {
+            trigger(Event:Transient_GETX, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceRequestType:GETS) {
+          if ( (L1DcacheMemory.isTagPresent(in_msg.Address) || L1IcacheMemory.isTagPresent(in_msg.Address)) && getCacheEntry(in_msg.Address).Tokens == 1) {
+            if (in_msg.isLocal) {
+              trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address);
+            }
+            else {
+              trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
+            }
+          }
+          else {
+            if (in_msg.isLocal) {
+              trigger(Event:Transient_Local_GETS, in_msg.Address);
+            }
+            else {
+              trigger(Event:Transient_GETS, in_msg.Address);
+            }
+          }
+        } else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+  // Response Network: data/ack messages carrying tokens. Classifies the
+  // response source (for the latency estimator) and then triggers the
+  // event variant that says whether this message completes the token set.
+  in_port(responseNetwork_in, ResponseMsg, responseToL1Cache) {
+    if (responseNetwork_in.isReady()) {
+      peek(responseNetwork_in, ResponseMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+
+        // Mark TBE flag if response received off-chip. Use this to update average latency estimate
+        if ( in_msg.SenderMachine == MachineType:L2Cache ) {
+
+          if (in_msg.Sender == map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID)) {
+            // came from an off-chip L2 cache
+            if (L1_TBEs.isPresent(in_msg.Address)) {
+              // L1_TBEs[in_msg.Address].ExternalResponse := true;
+              // profile_offchipL2_response(in_msg.Address);
+            }
+          }
+          else {
+            // profile_onchipL2_response(in_msg.Address );
+          }
+        } else if ( in_msg.SenderMachine == MachineType:Directory ) {
+          // Only directory (memory) responses currently count as external
+          // for the dynamic-timeout latency estimate.
+          if (L1_TBEs.isPresent(in_msg.Address)) {
+            L1_TBEs[in_msg.Address].ExternalResponse := true;
+            // profile_memory_response( in_msg.Address);
+          }
+        } else if ( in_msg.SenderMachine == MachineType:L1Cache) {
+          if (isLocalProcessor(machineID, in_msg.Sender) == false) {
+            if (L1_TBEs.isPresent(in_msg.Address)) {
+              // L1_TBEs[in_msg.Address].ExternalResponse := true;
+              // profile_offchipL1_response(in_msg.Address );
+            }
+          }
+          else {
+            // profile_onchipL1_response(in_msg.Address );
+          }
+        } else {
+          error("unexpected SenderMachine");
+        }
+
+
+        // "_All_Tokens" events fire when this message's tokens plus what we
+        // already hold add up to the complete set for the line.
+        if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
+          if (in_msg.Type == CoherenceResponseType:ACK) {
+            trigger(Event:Ack, in_msg.Address);
+          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
+            trigger(Event:Data_Owner, in_msg.Address);
+          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+            trigger(Event:Data_Shared, in_msg.Address);
+          } else {
+            error("Unexpected message");
+          }
+        } else {
+          if (in_msg.Type == CoherenceResponseType:ACK) {
+            trigger(Event:Ack_All_Tokens, in_msg.Address);
+          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+            trigger(Event:Data_All_Tokens, in_msg.Address);
+          } else {
+            error("Unexpected message");
+          }
+        }
+      }
+    }
+  }
+
+  // Mandatory Queue: demand requests from the processor/sequencer.
+  // Enforces I/D exclusion (evict from the "wrong" L1 first) and raises a
+  // replacement when the target set is full.
+  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+    if (mandatoryQueue_in.isReady()) {
+      peek(mandatoryQueue_in, CacheMsg) {
+        // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+        if (in_msg.Type == CacheRequestType:IFETCH) {
+          // ** INSTRUCTION ACCESS ***
+
+          // Check to see if it is in the OTHER L1
+          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, try to write it to the L2
+            trigger(Event:L1_Replacement, in_msg.Address);
+          }
+
+          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 doesn't have the line, but we have space for it in the L1
+              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+            } else {
+              // No room in the L1, so we need to make room
+              trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+            }
+          }
+        } else {
+          // *** DATA ACCESS ***
+
+          // Check to see if it is in the OTHER L1
+          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, try to write it to the L2
+            trigger(Event:L1_Replacement, in_msg.Address);
+          }
+
+          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 doesn't have the line, but we have space for it in the L1
+              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+            } else {
+              // No room in the L1, so we need to make room
+              trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+            }
+          }
+        }
+      }
+    }
+  }
+
+  // ACTIONS
+
+  // Issue a GETS. Below the retry threshold this sends a normal transient
+  // request (to our L2 bank, plus a local-flagged copy to sibling L1s) and
+  // arms the reissue timer; at/above the threshold it escalates to a
+  // persistent (starvation-avoidance) broadcast if allowed.
+  action(a_issueReadRequest, "a", desc="Issue GETS") {
+    if (L1_TBEs[address].IssueCount == 0) {
+      // Update outstanding requests
+      profile_outstanding_request(outstandingRequests);
+      outstandingRequests := outstandingRequests + 1;
+    }
+
+    if (L1_TBEs[address].IssueCount >= getRetryThreshold() ) {
+      // Issue a persistent request if possible
+      if (okToIssueStarving(address) && (starving == false)) {
+        enqueue(persistentNetwork_out, PersistentMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
+          out_msg.Requestor := machineID;
+          // NOTE(review): unlike b_issueWriteRequest, this does not set
+          // out_msg.RequestorMachine -- confirm whether receivers of
+          // GETS_PERSISTENT read that field.
+          out_msg.Destination.broadcast(MachineType:L1Cache);
+          out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+          out_msg.Destination.add(map_Address_to_Directory(address));
+          out_msg.MessageSize := MessageSizeType:Persistent_Control;
+          out_msg.Prefetch := L1_TBEs[address].Prefetch;
+          out_msg.AccessMode := L1_TBEs[address].AccessMode;
+        }
+        markPersistentEntries(address);
+        starving := true;
+
+        if (L1_TBEs[address].IssueCount == 0) {
+          profile_persistent_prediction(address, L1_TBEs[address].AccessType);
+        }
+
+        // Update outstanding requests
+        profile_outstanding_persistent_request(outstandingPersistentRequests);
+        outstandingPersistentRequests := outstandingPersistentRequests + 1;
+
+        // Increment IssueCount
+        L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+
+        L1_TBEs[address].WentPersistent := true;
+
+        // Do not schedule a wakeup, a persistent requests will always complete
+      }
+      else {
+
+        // We'd like to issue a persistent request, but are not allowed
+        // to issue a P.R. right now. Thus, we do not increment the
+        // IssueCount.
+
+        // Set a wakeup timer (short fixed delay before re-checking)
+        reissueTimerTable.set(address, 10);
+
+      }
+    } else {
+      // Make a normal request
+      enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETS;
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+        out_msg.RetryNum := L1_TBEs[address].IssueCount;
+        if (L1_TBEs[address].IssueCount == 0) {
+          out_msg.MessageSize := MessageSizeType:Request_Control;
+        } else {
+          out_msg.MessageSize := MessageSizeType:Reissue_Control;
+        }
+        out_msg.Prefetch := L1_TBEs[address].Prefetch;
+        out_msg.AccessMode := L1_TBEs[address].AccessMode;
+      }
+
+      // send to other local L1s, with local bit set
+      enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETS;
+        out_msg.Requestor := machineID;
+        out_msg.Destination := getOtherLocalL1IDs(machineID);
+        out_msg.RetryNum := L1_TBEs[address].IssueCount;
+        out_msg.isLocal := true;
+        if (L1_TBEs[address].IssueCount == 0) {
+          out_msg.MessageSize := MessageSizeType:Request_Control;
+        } else {
+          out_msg.MessageSize := MessageSizeType:Reissue_Control;
+        }
+        out_msg.Prefetch := L1_TBEs[address].Prefetch;
+        out_msg.AccessMode := L1_TBEs[address].AccessMode;
+      }
+
+      // Increment IssueCount
+      L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+
+      // Set a wakeup timer
+
+      if (getDynamicTimeoutEnabled()) {
+        // 1.25x the moving-average latency gives slack before reissuing.
+        reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
+      } else {
+        reissueTimerTable.set(address, getFixedTimeoutLatency());
+      }
+
+    }
+  }
+
+  // Issue a GETX. Mirrors a_issueReadRequest: normal transient request to
+  // our L2 bank plus a local-flagged copy to sibling L1s, with escalation
+  // to a persistent GETX broadcast after getRetryThreshold() reissues.
+  action(b_issueWriteRequest, "b", desc="Issue GETX") {
+
+    if (L1_TBEs[address].IssueCount == 0) {
+      // Update outstanding requests
+      profile_outstanding_request(outstandingRequests);
+      outstandingRequests := outstandingRequests + 1;
+    }
+
+    if (L1_TBEs[address].IssueCount >= getRetryThreshold() ) {
+      // Issue a persistent request if possible
+      if ( okToIssueStarving(address) && (starving == false)) {
+        enqueue(persistentNetwork_out, PersistentMsg, latency="L1_REQUEST_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
+          out_msg.Requestor := machineID;
+          out_msg.RequestorMachine := MachineType:L1Cache;
+          out_msg.Destination.broadcast(MachineType:L1Cache);
+          out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+          out_msg.Destination.add(map_Address_to_Directory(address));
+          out_msg.MessageSize := MessageSizeType:Persistent_Control;
+          out_msg.Prefetch := L1_TBEs[address].Prefetch;
+          out_msg.AccessMode := L1_TBEs[address].AccessMode;
+        }
+        markPersistentEntries(address);
+        starving := true;
+
+        // Update outstanding requests
+        profile_outstanding_persistent_request(outstandingPersistentRequests);
+        outstandingPersistentRequests := outstandingPersistentRequests + 1;
+
+        if (L1_TBEs[address].IssueCount == 0) {
+          profile_persistent_prediction(address, L1_TBEs[address].AccessType);
+        }
+
+        // Increment IssueCount
+        L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+
+        L1_TBEs[address].WentPersistent := true;
+
+        // Do not schedule a wakeup, a persistent requests will always complete
+      }
+      else {
+
+        // We'd like to issue a persistent request, but are not allowed
+        // to issue a P.R. right now. Thus, we do not increment the
+        // IssueCount.
+
+        // Set a wakeup timer (short fixed delay before re-checking)
+        reissueTimerTable.set(address, 10);
+      }
+
+
+    } else {
+      // Make a normal request
+      enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETX;
+        out_msg.Requestor := machineID;
+        out_msg.RequestorMachine := MachineType:L1Cache;
+        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+        out_msg.RetryNum := L1_TBEs[address].IssueCount;
+
+        if (L1_TBEs[address].IssueCount == 0) {
+          out_msg.MessageSize := MessageSizeType:Request_Control;
+        } else {
+          out_msg.MessageSize := MessageSizeType:Reissue_Control;
+        }
+        out_msg.Prefetch := L1_TBEs[address].Prefetch;
+        out_msg.AccessMode := L1_TBEs[address].AccessMode;
+      }
+
+      // send to other local L1s too
+      enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETX;
+        out_msg.Requestor := machineID;
+        out_msg.isLocal := true;
+        out_msg.Destination := getOtherLocalL1IDs(machineID);
+        out_msg.RetryNum := L1_TBEs[address].IssueCount;
+        if (L1_TBEs[address].IssueCount == 0) {
+          out_msg.MessageSize := MessageSizeType:Request_Control;
+        } else {
+          out_msg.MessageSize := MessageSizeType:Reissue_Control;
+        }
+        out_msg.Prefetch := L1_TBEs[address].Prefetch;
+        out_msg.AccessMode := L1_TBEs[address].AccessMode;
+      }
+
+      // Increment IssueCount
+      L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+
+      DEBUG_EXPR("incremented issue count");
+      DEBUG_EXPR(L1_TBEs[address].IssueCount);
+
+      // Set a wakeup timer
+      if (getDynamicTimeoutEnabled()) {
+        // 1.25x the moving-average latency gives slack before reissuing.
+        reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
+      } else {
+        reissueTimerTable.set(address, getFixedTimeoutLatency());
+      }
+    }
+  }
+
+ action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ // FIXME, should use a 3rd vnet
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
+ action(c_ownedReplacement, "c", desc="Issue writeback") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.Type := CoherenceResponseType:WB_OWNED;
+
+ // always send the data?
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ action(cc_sharedReplacement, "\c", desc="Issue clean writeback") {
+
+ // don't send writeback if replacing block with no tokens
+ if (getCacheEntry(address).Tokens != 0) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ // assert(getCacheEntry(address).Dirty == false);
+ out_msg.Dirty := false;
+
+ // send the data payload only when more than one token is returned
+ if (getCacheEntry(address).Tokens > 1) {
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.Type := CoherenceResponseType:WB_TOKENS;
+ }
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+ }
+
+
+ action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Tokens := 1;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ // out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.Dirty := false;
+ if (in_msg.isLocal) {
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+ getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
+ assert(getCacheEntry(address).Tokens >= 1);
+ }
+
+ action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and a token from cache to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ if (getCacheEntry(address).Tokens > N_tokens()) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Tokens := N_tokens();
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ // out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.Dirty := false;
+ if (in_msg.isLocal) {
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - N_tokens();
+ }
+ else if (getCacheEntry(address).Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Tokens := 1;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ // out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.Dirty := false;
+ if (in_msg.isLocal) {
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
+ }
+ }
+// assert(getCacheEntry(address).Tokens >= 1);
+ }
+
+ action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ if (in_msg.isLocal) {
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
+ // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ if (getCacheEntry(address).Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(getCacheEntry(address).Tokens > 0);
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ action(f_sendAckWithAllButNorOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(getCacheEntry(address).Tokens > 0);
+ if (getCacheEntry(address).Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(getCacheEntry(address).Tokens >= 1);
+ if (getCacheEntry(address).Tokens > N_tokens()) {
+ out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens();
+ } else {
+ out_msg.Tokens := getCacheEntry(address).Tokens - 1;
+ }
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ if (getCacheEntry(address).Tokens > N_tokens()) {
+ getCacheEntry(address).Tokens := N_tokens();
+ } else {
+ getCacheEntry(address).Tokens := 1;
+ }
+ }
+
+ action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(getCacheEntry(address).Tokens > 0);
+ if (getCacheEntry(address).Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(getCacheEntry(address).Tokens >= 1);
+ if (getCacheEntry(address).Tokens > N_tokens()) {
+ out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens();
+ } else {
+ out_msg.Tokens := getCacheEntry(address).Tokens - 1;
+ }
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ if (getCacheEntry(address).Tokens > N_tokens()) {
+ getCacheEntry(address).Tokens := N_tokens();
+ } else {
+ getCacheEntry(address).Tokens := 1;
+ }
+ }
+ }
+
+ action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
+ // assert(persistentTable.isLocked(address));
+
+ peek(responseNetwork_in, ResponseMsg) {
+ // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ // FIXME, should use a 3rd vnet in some cases
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+
+
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ sequencer.readCallback(address, getCacheEntry(address).DataBlk, GenericMachineType:L1Cache, PrefetchBit:No);
+ }
+
+ action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ peek(responseNetwork_in, ResponseMsg) {
+
+ sequencer.readCallback(address, getCacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
+ }
+ }
+
+ action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ sequencer.writeCallback(address, getCacheEntry(address).DataBlk, GenericMachineType:L1Cache, PrefetchBit:No);
+ getCacheEntry(address).Dirty := true;
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ }
+
+ action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ peek(responseNetwork_in, ResponseMsg) {
+ sequencer.writeCallback(address, getCacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
+ }
+ getCacheEntry(address).Dirty := true;
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE") {
+ check_allocate(L1_TBEs);
+ L1_TBEs.allocate(address);
+ L1_TBEs[address].IssueCount := 0;
+ peek(mandatoryQueue_in, CacheMsg) {
+ L1_TBEs[address].PC := in_msg.ProgramCounter;
+ L1_TBEs[address].AccessType := cache_request_type_to_access_type(in_msg.Type);
+ L1_TBEs[address].Prefetch := in_msg.Prefetch;
+ L1_TBEs[address].AccessMode := in_msg.AccessMode;
+ }
+ L1_TBEs[address].IssueTime := get_time();
+ }
+
+
+ action(j_unsetReissueTimer, "j", desc="Unset reissue timer.") {
+ if (reissueTimerTable.isSet(address)) {
+ reissueTimerTable.unset(address);
+ }
+ }
+
+ action(jj_unsetUseTimer, "\j", desc="Unset use timer.") {
+ useTimerTable.unset(address);
+ }
+
+
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+ action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
+ persistentNetwork_in.dequeue();
+ }
+
+ action(m_popRequestQueue, "m", desc="Pop request queue.") {
+ requestNetwork_in.dequeue();
+ }
+
+ action(n_popResponseQueue, "n", desc="Pop response queue") {
+ responseNetwork_in.dequeue();
+ }
+
+ action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
+ useTimerTable.set(address, 50);
+ }
+
+ action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:INV;
+ out_msg.Tokens := 0;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.DestMachine := MachineType:L2Cache;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+
+ action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Tokens != 0);
+ DEBUG_EXPR("MRM_DEBUG L1 received tokens");
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.Tokens);
+ getCacheEntry(address).Tokens := getCacheEntry(address).Tokens + in_msg.Tokens;
+ DEBUG_EXPR(getCacheEntry(address).Tokens);
+
+ if (getCacheEntry(address).Dirty == false && in_msg.Dirty) {
+ getCacheEntry(address).Dirty := true;
+ }
+ }
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+
+ if (L1_TBEs[address].WentPersistent) {
+ // assert(starving == true);
+ outstandingRequests := outstandingRequests - 1;
+ enqueue(persistentNetwork_out, PersistentMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Persistent_Control;
+ }
+ starving := false;
+ }
+
+ // Update average latency
+ if (L1_TBEs[address].IssueCount <= 1) {
+ if (L1_TBEs[address].ExternalResponse == true) {
+ updateAverageLatencyEstimate(time_to_int(get_time()) - time_to_int(L1_TBEs[address].IssueTime));
+ }
+ }
+
+ // Profile
+ //if (L1_TBEs[address].WentPersistent) {
+ // profile_token_retry(address, L1_TBEs[address].AccessType, 2);
+ //}
+ //else {
+ // profile_token_retry(address, L1_TBEs[address].AccessType, 1);
+ //}
+
+ profile_token_retry(address, L1_TBEs[address].AccessType, L1_TBEs[address].IssueCount);
+ L1_TBEs.deallocate(address);
+ }
+
+ action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
+ if (getCacheEntry(address).Tokens > 0) {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(responseNetwork_in, ResponseMsg) {
+ getCacheEntry(address).DataBlk := in_msg.DataBlk;
+ if (getCacheEntry(address).Dirty == false && in_msg.Dirty) {
+ getCacheEntry(address).Dirty := in_msg.Dirty;
+ }
+
+ }
+ }
+
+ action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L1DcacheMemory.deallocate(address);
+ } else {
+ L1IcacheMemory.deallocate(address);
+ }
+ }
+
+ action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
+ if (L1DcacheMemory.isTagPresent(address) == false) {
+ L1DcacheMemory.allocate(address);
+ }
+ }
+
+ action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
+ if (L1IcacheMemory.isTagPresent(address) == false) {
+ L1IcacheMemory.allocate(address);
+ }
+ }
+
+ action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ // profile_miss(in_msg, id);
+ }
+ }
+
+ action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
+ }
+ }
+
+
+ action(z_stall, "z", desc="Stall") {
+
+ }
+
+ action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
+ mandatoryQueue_in.recycle();
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/L2_Replacement from transient states
+ transition({IM, SM, OM, IS, IM_L, IS_L, I_L, S_L, SM_L, M_W, MM_W}, L1_Replacement) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, Store) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IM, IS, IM_L, IS_L}, {Load, Ifetch}) {
+ zz_recycleMandatoryQueue;
+ }
+
+
+ // Lockdowns
+ transition({NP, I, S, O, M, MM, M_W, MM_W, IM, SM, OM, IS}, Own_Lock_or_Unlock) {
+ l_popPersistentQueue;
+ }
+
+ // Transitions from NP
+ transition(NP, Load, IS) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(NP, Ifetch, IS) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(NP, Store, IM) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
+ bb_bounceResponse;
+ n_popResponseQueue;
+ }
+
+ transition(NP, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) {
+ m_popRequestQueue;
+ }
+
+ transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
+ l_popPersistentQueue;
+ }
+
+ // Transitions from Idle
+ transition(I, Load, IS) {
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch, IS) {
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Store, IM) {
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, L1_Replacement) {
+ cc_sharedReplacement;
+ gg_deallocateL1CacheBlock;
+ }
+
+ transition(I, {Transient_GETX, Transient_Local_GETX}) {
+ t_sendAckWithCollectedTokens;
+ m_popRequestQueue;
+ }
+
+ transition(I, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
+ m_popRequestQueue;
+ }
+
+ transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(I_L, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition(I, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_Shared, S) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_Owner, O) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_All_Tokens, M) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Shared
+ transition({S, SM, S_L, SM_L}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Store, SM) {
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, L1_Replacement, I) {
+ cc_sharedReplacement; // Only needed in some cases
+ gg_deallocateL1CacheBlock;
+ }
+
+ transition(S, {Transient_GETX, Transient_Local_GETX}, I) {
+ t_sendAckWithCollectedTokens;
+ p_informL2AboutTokenLoss;
+ m_popRequestQueue;
+ }
+
+ // only owner responds to non-local requests
+ transition(S, Transient_GETS) {
+ m_popRequestQueue;
+ }
+
+ transition(S, Transient_Local_GETS) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ transition(S, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
+ m_popRequestQueue;
+ }
+
+ transition({S, S_L}, Persistent_GETX, I_L) {
+ e_sendAckWithCollectedTokens;
+ p_informL2AboutTokenLoss;
+ l_popPersistentQueue;
+ }
+
+ transition(S, Persistent_GETS, S_L) {
+ f_sendAckWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(S_L, Persistent_GETS) {
+ l_popPersistentQueue;
+ }
+
+ transition(S, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_Owner, O) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Owned
+ transition({O, OM}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, Store, OM) {
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, L1_Replacement, I) {
+ c_ownedReplacement;
+ gg_deallocateL1CacheBlock;
+ }
+
+ transition(O, {Transient_GETX, Transient_Local_GETX}, I) {
+ dd_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ m_popRequestQueue;
+ }
+
+ transition(O, Persistent_GETX, I_L) {
+ ee_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ l_popPersistentQueue;
+ }
+
+ transition(O, Persistent_GETS, S_L) {
+ ff_sendDataWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(O, Transient_GETS) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ transition(O, Transient_Local_GETS) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ // ran out of tokens, wait for it to go persistent
+ transition(O, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
+ m_popRequestQueue;
+ }
+
+ transition(O, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Ack_All_Tokens, M) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Data_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Modified
+ transition({MM, MM_W}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM, MM_W}, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, L1_Replacement, I) {
+ c_ownedReplacement;
+ gg_deallocateL1CacheBlock;
+ }
+
+ transition(MM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}, I) {
+ dd_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ m_popRequestQueue;
+ }
+
+ transition({MM_W}, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
+ m_popRequestQueue;
+ }
+
+ // Implement the migratory sharing optimization, even for persistent requests
+ transition(MM, {Persistent_GETX, Persistent_GETS}, I_L) {
+ ee_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ l_popPersistentQueue;
+ }
+
+ // ignore persistent requests in lockout period
+ transition(MM_W, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+
+ transition(MM_W, Use_TimeoutNoStarvers, MM) {
+ s_deallocateTBE;
+ jj_unsetUseTimer;
+ }
+
+ // Transitions from Dirty Exclusive
+ transition({M, M_W}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store, MM) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M_W, Store, MM_W) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, L1_Replacement, I) {
+ c_ownedReplacement;
+ gg_deallocateL1CacheBlock;
+ }
+
+ transition(M, {Transient_GETX, Transient_Local_GETX}, I) {
+ dd_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ m_popRequestQueue;
+ }
+
+ transition(M, Transient_Local_GETS, O) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ transition(M, Transient_GETS, O) {
+ d_sendDataWithNTokenIfAvail;
+ m_popRequestQueue;
+ }
+
+ transition(M_W, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
+ m_popRequestQueue;
+ }
+
+ transition(M, Persistent_GETX, I_L) {
+ ee_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ l_popPersistentQueue;
+ }
+
+ transition(M, Persistent_GETS, S_L) {
+ ff_sendDataWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ // ignore persistent requests in lockout period
+ transition(M_W, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition(M_W, Use_TimeoutStarverS, S_L) {
+ s_deallocateTBE;
+ ff_sendDataWithAllButNorOneTokens;
+ jj_unsetUseTimer;
+ }
+
+ // someone unlocked during timeout
+ transition(M_W, Use_TimeoutNoStarvers, M) {
+ s_deallocateTBE;
+ jj_unsetUseTimer;
+ }
+
+ transition(M_W, Use_TimeoutStarverX, I_L) {
+ s_deallocateTBE;
+ ee_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ jj_unsetUseTimer;
+ }
+
+
+
+ // migratory
+ transition(MM_W, {Use_TimeoutStarverX, Use_TimeoutStarverS}, I_L) {
+ s_deallocateTBE;
+ ee_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ jj_unsetUseTimer;
+
+ }
+
+
+ // Transient_GETX and Transient_GETS in transient states
+ transition(OM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
+ m_popRequestQueue; // Even if we have the data, we can pretend we don't have it yet.
+ }
+
+ transition(IS, {Transient_GETX, Transient_Local_GETX}) {
+ t_sendAckWithCollectedTokens;
+ m_popRequestQueue;
+ }
+
+ transition(IS, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
+ m_popRequestQueue;
+ }
+
+ transition(IS, {Persistent_GETX, Persistent_GETS}, IS_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(IS_L, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition(IM, {Persistent_GETX, Persistent_GETS}, IM_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(IM_L, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition({SM, SM_L}, Persistent_GETX, IM_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(SM, Persistent_GETS, SM_L) {
+ f_sendAckWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(SM_L, Persistent_GETS) {
+ l_popPersistentQueue;
+ }
+
+ transition(OM, Persistent_GETX, IM_L) {
+ ee_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(OM, Persistent_GETS, SM_L) {
+ ff_sendDataWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ // Transitions from IM/SM
+
+ transition({IM, SM}, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data_Shared, SM) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data_Owner, OM) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data_All_Tokens, MM_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Data_Owner, OM) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Data_All_Tokens, MM_W) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition({IM, SM}, {Transient_GETX, Transient_Local_GETX}, IM) { // We don't have the data yet, but we might have collected some tokens. We give them up here to avoid livelock
+ t_sendAckWithCollectedTokens;
+ m_popRequestQueue;
+ }
+
+ transition({IM, SM}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
+ m_popRequestQueue;
+ }
+
+ transition({IM, SM}, Request_Timeout) {
+ j_unsetReissueTimer;
+ b_issueWriteRequest;
+ }
+
+ // Transitions from OM
+
+ transition(OM, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Ack_All_Tokens, MM_W) {
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Data_All_Tokens, MM_W) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Request_Timeout) {
+ j_unsetReissueTimer;
+ b_issueWriteRequest;
+ }
+
+ // Transitions from IS
+
+ transition(IS, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Data_Shared, S) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Data_Owner, O) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Data_All_Tokens, M_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Request_Timeout) {
+ j_unsetReissueTimer;
+ a_issueReadRequest;
+ }
+
+ // Transitions from I_L
+
+ transition(I_L, Load, IS_L) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I_L, Ifetch, IS_L) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I_L, Store, IM_L) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+
+ // Transitions from S_L
+
+ transition(S_L, Store, SM_L) {
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ // Other transitions from *_L states
+
+ transition({I_L, IM_L, IS_L, S_L, SM_L}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS, Transient_GETX, Transient_Local_GETX}) {
+ m_popRequestQueue;
+ }
+
+ transition({I_L, IM_L, IS_L, S_L, SM_L}, Ack) {
+ g_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition({I_L, IM_L, S_L, SM_L}, {Data_Shared, Data_Owner}) {
+ g_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition({I_L, S_L}, Data_All_Tokens) {
+ g_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition(IS_L, Request_Timeout) {
+ j_unsetReissueTimer;
+ a_issueReadRequest;
+ }
+
+ transition({IM_L, SM_L}, Request_Timeout) {
+ j_unsetReissueTimer;
+ b_issueWriteRequest;
+ }
+
+ // Opportunistically complete the memory operation in the following
+ // cases. Note: these transitions could just use
+ // g_bounceResponseToStarver, but if we have the data and tokens, we
+ // might as well complete the memory request while we have the
+ // chance (and then immediately forward on the data)
+
+ transition(IM_L, Data_All_Tokens, MM_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ j_unsetReissueTimer;
+ o_scheduleUseTimeout;
+ n_popResponseQueue;
+ }
+
+ transition(SM_L, Data_All_Tokens, S_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ ff_sendDataWithAllButNorOneTokens;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS_L, Data_Shared, I_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ s_deallocateTBE;
+ e_sendAckWithCollectedTokens;
+ p_informL2AboutTokenLoss;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS_L, Data_Owner, I_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ ee_sendDataWithAllTokens;
+ s_deallocateTBE;
+ p_informL2AboutTokenLoss;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS_L, Data_All_Tokens, M_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ j_unsetReissueTimer;
+ o_scheduleUseTimeout;
+ n_popResponseQueue;
+ }
+
+
+ // Own_Lock_or_Unlock
+
+ transition(I_L, Own_Lock_or_Unlock, I) {
+ l_popPersistentQueue;
+ }
+
+ transition(S_L, Own_Lock_or_Unlock, S) {
+ l_popPersistentQueue;
+ }
+
+ transition(IM_L, Own_Lock_or_Unlock, IM) {
+ l_popPersistentQueue;
+ }
+
+ transition(IS_L, Own_Lock_or_Unlock, IS) {
+ l_popPersistentQueue;
+ }
+
+ transition(SM_L, Own_Lock_or_Unlock, SM) {
+ l_popPersistentQueue;
+ }
+}
+
diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
new file mode 100644
index 000000000..21fbf0b95
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
@@ -0,0 +1,1424 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+machine(L2Cache, "Token protocol") {
+
+ // L2 BANK QUEUES
+ // From local bank of L2 cache TO the network
+ MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false"; // this L2 bank -> a local L1
+ MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="1", ordered="false"; // this L2 bank -> mod-directory
+ MessageBuffer responseFromL2Cache, network="To", virtual_network="2", ordered="false"; // this L2 bank -> a local L1 || mod-directory
+
+
+ // FROM the network to this local bank of L2 cache
+ MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false"; // a local L1 -> this L2 bank
+ MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="1", ordered="false"; // mod-directory -> this L2 bank
+ MessageBuffer responseToL2Cache, network="From", virtual_network="2", ordered="false"; // a local L1 || mod-directory -> this L2 bank
+ MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true";
+
+ // STATES
+  // Stable states only: this L2 protocol has no transient states; in-flight
+  // lines are tracked by the L1s and the directory.
+  enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
+    // Base states
+    NP, desc="Not Present";
+    I, desc="Idle";
+    S, desc="Shared, not present in any local L1s";
+    O, desc="Owned, not present in any L1s";
+    M, desc="Modified, not present in any L1s";
+
+    // Locked states
+    I_L, "I^L", desc="Invalid, Locked";
+    S_L, "S^L", desc="Shared, Locked";
+  }
+
+  // EVENTS
+  enumeration(Event, desc="Cache events") {
+
+    // Requests
+    L1_GETS, desc="local L1 GETS request";
+    L1_GETS_Last_Token, desc="local L1 GETS request";
+    L1_GETX, desc="local L1 GETX request";
+    L1_INV, desc="L1 no longer has tokens";
+    Transient_GETX, desc="A GetX from another processor";
+    Transient_GETS, desc="A GetS from another processor";
+    Transient_GETS_Last_Token, desc="A GetS from another processor";
+
+    // events initiated by this L2
+    L2_Replacement, desc="L2 Replacement", format="!r";
+
+    // events of external L2 responses
+
+    // Responses
+    Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
+    Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
+    // Writeback_All_Tokens replaces the three WB_* events above when the
+    // writeback completes the full token count (see responseNetwork_in).
+    Writeback_All_Tokens, desc="Received a writeback from L1";
+    Writeback_Owned, desc="Received a writeback from L1";
+
+
+    Data_Shared, desc="Received a data message, we are now a sharer";
+    Data_Owner, desc="Received a data message, we are now the owner";
+    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
+    Ack, desc="Received an ack message";
+    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
+
+    // Lock/Unlock
+    Persistent_GETX, desc="Another processor has priority to read/write";
+    Persistent_GETS, desc="Another processor has priority to read";
+    Own_Lock_or_Unlock, desc="This processor now has priority";
+  }
+
+ // TYPES
+
+ // CacheEntry
+  // One L2 line: coherence state, dirty bit, held token count, and data.
+  structure(Entry, desc="...", interface="AbstractCacheEntry") {
+    State CacheState, desc="cache state";
+    bool Dirty, desc="Is the data dirty (different than memory)?";
+    int Tokens, desc="The number of tokens we're holding for the line";
+    DataBlock DataBlk, desc="data for the block";
+  }
+
+
+
+  // Per-line filter entry tracking which local L1s likely hold the line;
+  // used to suppress useless forwards of transient requests.
+  structure(DirEntry, desc="...") {
+    Set Sharers, desc="Set of the internal processors that want the block in shared state";
+    bool exclusive, default="false", desc="if local exclusive is likely";
+  }
+
+  // Interface to the C++ cache-array implementation (tag/array management).
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);
+    Address cacheProbe(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+    void setMRU(Address);
+  }
+
+  // Conflict-free tag store backing the local sharer directory.
+  external_type(PerfectCacheMemory) {
+    void allocate(Address);
+    void deallocate(Address);
+    DirEntry lookup(Address);
+    bool isTagPresent(Address);
+  }
+
+
+ CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)+"_L2"';
+
+ PersistentTable persistentTable, constructor_hack="i";
+ PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
+
+
+ bool getFilteringEnabled();
+
+  // Returns a reference to the cache entry for addr.
+  // NOTE(review): there is no return statement on the miss path — callers
+  // must only call this when the tag is present (or immediately after
+  // allocate); confirm SLICC's behavior for the fall-through case.
+  Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory[addr];
+    }
+  }
+
+  // Token count held by this L2 bank for addr; 0 when the tag is absent.
+  int getTokens(Address addr) {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory[addr].Tokens;
+    } else {
+      return 0;
+    }
+  }
+
+  // Updates the access permission for addr; silently a no-op on a miss.
+  void changePermission(Address addr, AccessPermission permission) {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory.changePermission(addr, permission);
+    }
+  }
+
+  bool isCacheTagPresent(Address addr) {
+    return (L2cacheMemory.isTagPresent(addr) );
+  }
+
+  // Derives the protocol state: cached lines report their stored state;
+  // uncached lines are I_L when a persistent request locks the address,
+  // otherwise NP.
+  State getState(Address addr) {
+    if (isCacheTagPresent(addr)) {
+      return getL2CacheEntry(addr).CacheState;
+    } else if (persistentTable.isLocked(addr) == true) {
+      return State:I_L;
+    } else {
+      return State:NP;
+    }
+  }
+
+  // Debug helper: human-readable name of the current state for addr.
+  string getStateStr(Address addr) {
+    return L2Cache_State_to_string(getState(addr));
+  }
+
+  // Stores the new state and sets the matching access permission, after
+  // asserting the per-state token-count invariants. Note the state is only
+  // recorded when a cache tag exists — NP/locked states for uncached lines
+  // are derived on the fly by getState().
+  void setState(Address addr, State state) {
+
+
+    if (isCacheTagPresent(addr)) {
+      // Make sure the token count is in range
+      assert(getL2CacheEntry(addr).Tokens >= 0);
+      assert(getL2CacheEntry(addr).Tokens <= max_tokens());
+
+      // Make sure we have no tokens in L
+      if ((state == State:I_L) ) {
+        if (isCacheTagPresent(addr)) {
+          assert(getL2CacheEntry(addr).Tokens == 0);
+        }
+      }
+
+      // in M and E you have all the tokens
+      if (state == State:M ) {
+        assert(getL2CacheEntry(addr).Tokens == max_tokens());
+      }
+
+      // in NP you have no tokens
+      if (state == State:NP) {
+        assert(getL2CacheEntry(addr).Tokens == 0);
+      }
+
+      // You have at least one token in S-like states
+      if (state == State:S ) {
+        assert(getL2CacheEntry(addr).Tokens > 0);
+      }
+
+      // You have at least half the token in O-like states
+      if (state == State:O ) {
+        assert(getL2CacheEntry(addr).Tokens >= 1); // Must have at least one token
+        // assert(getL2CacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+      }
+
+      getL2CacheEntry(addr).CacheState := state;
+
+      // Set permission
+      if (state == State:I) {
+        changePermission(addr, AccessPermission:Invalid);
+      } else if (state == State:S || state == State:O ) {
+        changePermission(addr, AccessPermission:Read_Only);
+      } else if (state == State:M ) {
+        changePermission(addr, AccessPermission:Read_Write);
+      } else {
+        // Locked states (and anything unexpected) are not locally accessible.
+        changePermission(addr, AccessPermission:Invalid);
+      }
+    }
+  }
+
+  // Drops id from the sharer set for addr; frees the directory entry once
+  // the last sharer is gone.
+  void removeSharer(Address addr, NodeID id) {
+
+    if (localDirectory.isTagPresent(addr)) {
+      localDirectory[addr].Sharers.remove(id);
+      if (localDirectory[addr].Sharers.count() == 0) {
+        localDirectory.deallocate(addr);
+      }
+    }
+  }
+
+  // True when the filter records at least one local L1 sharer for addr.
+  bool sharersExist(Address addr) {
+    if (localDirectory.isTagPresent(addr)) {
+      if (localDirectory[addr].Sharers.count() > 0) {
+        return true;
+      }
+      else {
+        return false;
+      }
+    }
+    else {
+      return false;
+    }
+  }
+
+  // True when the filter believes a local L1 holds addr exclusively.
+  bool exclusiveExists(Address addr) {
+    if (localDirectory.isTagPresent(addr)) {
+      if (localDirectory[addr].exclusive == true) {
+        return true;
+      }
+      else {
+        return false;
+      }
+    }
+    else {
+      return false;
+    }
+  }
+
+  // assumes that caller will check to make sure tag is present
+  Set getSharers(Address addr) {
+    return localDirectory[addr].Sharers;
+  }
+
+  // Records id as the sole (exclusive) local holder after a GETX.
+  void setNewWriter(Address addr, NodeID id) {
+    if (localDirectory.isTagPresent(addr) == false) {
+      localDirectory.allocate(addr);
+    }
+    localDirectory[addr].Sharers.clear();
+    localDirectory[addr].Sharers.add(id);
+    localDirectory[addr].exclusive := true;
+  }
+
+  // Adds id to the sharer set after a GETS; the exclusive bit is left
+  // untouched (see the commented-out clear below).
+  void addNewSharer(Address addr, NodeID id) {
+    if (localDirectory.isTagPresent(addr) == false) {
+      localDirectory.allocate(addr);
+    }
+    localDirectory[addr].Sharers.add(id);
+    // localDirectory[addr].exclusive := false;
+  }
+
+  void clearExclusiveBitIfExists(Address addr) {
+    if (localDirectory.isTagPresent(addr) == true) {
+      localDirectory[addr].exclusive := false;
+    }
+  }
+
+ // ** OUT_PORTS **
+ out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
+ out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
+
+
+
+ // ** IN_PORTS **
+
+ // Persistent Network
+  // First updates the persistent table from the lock/unlock message, then
+  // triggers based on the table's resulting state: if the address is still
+  // locked, the highest-priority (smallest) requester's access type decides
+  // between Persistent_GETS/GETX; otherwise we regained priority.
+  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
+    if (persistentNetwork_in.isReady()) {
+      peek(persistentNetwork_in, PersistentMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+
+        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
+        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
+        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
+        } else {
+          error("Unexpected message");
+        }
+
+        // React to the message based on the current state of the table
+        if (persistentTable.isLocked(in_msg.Address)) {
+
+          if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
+            trigger(Event:Persistent_GETS, in_msg.Address);
+          } else {
+            trigger(Event:Persistent_GETX, in_msg.Address);
+          }
+        }
+        else {
+          trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+        }
+      }
+    }
+  }
+
+
+ // Request Network
+  // Transient (non-persistent) requests from remote processors. A GETS that
+  // would take our single remaining token gets its own event so the owner
+  // can respond with owner data instead of shared data.
+  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
+    if (requestNetwork_in.isReady()) {
+      peek(requestNetwork_in, RequestMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+
+        if (in_msg.Type == CoherenceRequestType:GETX) {
+          trigger(Event:Transient_GETX, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GETS) {
+          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
+            trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
+          }
+          else {
+            trigger(Event:Transient_GETS, in_msg.Address);
+          }
+        } else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+  // Requests from this bank's local L1 caches; mirrors the transient-request
+  // port above, including the last-token GETS special case.
+  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
+    if (L1requestNetwork_in.isReady()) {
+      peek(L1requestNetwork_in, RequestMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        if (in_msg.Type == CoherenceRequestType:GETX) {
+          trigger(Event:L1_GETX, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GETS) {
+          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
+            trigger(Event:L1_GETS_Last_Token, in_msg.Address);
+          }
+          else {
+            trigger(Event:L1_GETS, in_msg.Address);
+          }
+        } else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+
+ // Response Network
+  // Responses and L1 writebacks. The outer branch splits on whether this
+  // message completes the full token count (held + incoming == max_tokens),
+  // selecting the *_All_Tokens event variants when it does. Writebacks need
+  // either a free way or an existing tag; otherwise the probe victim is
+  // replaced first and this message is retried later.
+  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
+    if (responseNetwork_in.isReady()) {
+      peek(responseNetwork_in, ResponseMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
+          if (in_msg.Type == CoherenceResponseType:ACK) {
+            trigger(Event:Ack, in_msg.Address);
+          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
+            trigger(Event:Data_Owner, in_msg.Address);
+          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+            trigger(Event:Data_Shared, in_msg.Address);
+          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+
+            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
+
+              // either room is available or the block is already present
+
+              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
+                assert(in_msg.Dirty == false);
+                trigger(Event:Writeback_Tokens, in_msg.Address);
+              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+                assert(in_msg.Dirty == false);
+                trigger(Event:Writeback_Shared_Data, in_msg.Address);
+              }
+              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
+                //assert(in_msg.Dirty == false);
+                trigger(Event:Writeback_Owned, in_msg.Address);
+              }
+            }
+            else {
+              // No room: evict the probe victim; this writeback stays queued.
+              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+            }
+          } else if (in_msg.Type == CoherenceResponseType:INV) {
+            trigger(Event:L1_INV, in_msg.Address);
+          } else {
+            error("Unexpected message");
+          }
+        } else {
+          // This message completes the token count for the line.
+          if (in_msg.Type == CoherenceResponseType:ACK) {
+            trigger(Event:Ack_All_Tokens, in_msg.Address);
+          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+            trigger(Event:Data_All_Tokens, in_msg.Address);
+          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
+
+              // either room is available or the block is already present
+
+              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
+                assert(in_msg.Dirty == false);
+                assert(  (getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I) );
+                trigger(Event:Writeback_All_Tokens, in_msg.Address);
+              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+                assert(in_msg.Dirty == false);
+                trigger(Event:Writeback_All_Tokens, in_msg.Address);
+              }
+              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
+                trigger(Event:Writeback_All_Tokens, in_msg.Address);
+              }
+            }
+            else {
+              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+            }
+          } else if (in_msg.Type == CoherenceResponseType:INV) {
+            trigger(Event:L1_INV, in_msg.Address);
+          } else {
+            DEBUG_EXPR(in_msg.Type);
+            error("Unexpected message");
+          }
+        }
+      }
+    }
+  }
+
+
+ // ACTIONS
+
+  // Rebroadcasts a local L1 request to every pertinent L2 bank (minus the
+  // requester's own bank) plus the home directory. The commented-out guard
+  // shows the original intent to filter first-try requests with local
+  // sharers; as written, every request is broadcast unconditionally.
+  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
+
+    peek(L1requestNetwork_in, RequestMsg) {
+
+      // if this is a retry or no local sharers, broadcast normally
+
+      // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
+        enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+           out_msg.Address := in_msg.Address;
+           out_msg.Type := in_msg.Type;
+           out_msg.Requestor := in_msg.Requestor;
+           out_msg.RequestorMachine := in_msg.RequestorMachine;
+           //out_msg.Destination.broadcast(MachineType:L2Cache);
+           out_msg.RetryNum := in_msg.RetryNum;
+           out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+           out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
+           out_msg.Destination.add(map_Address_to_Directory(address));
+           out_msg.MessageSize := MessageSizeType:Request_Control;
+           out_msg.AccessMode := in_msg.AccessMode;
+           out_msg.Prefetch := in_msg.Prefetch;
+        } //enqueue
+      // } // if
+
+      //profile_filter_action(0);
+    } // peek
+  } //action
+
+
+  // Forwards an incoming response (tokens + data) unchanged to the home
+  // directory when this bank cannot keep it.
+  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
+    peek(responseNetwork_in, ResponseMsg) {
+      // FIXME, should use a 3rd vnet
+      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := in_msg.Type;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(map_Address_to_Directory(address));
+        out_msg.Tokens := in_msg.Tokens;
+        out_msg.MessageSize := in_msg.MessageSize;
+        out_msg.DataBlk := in_msg.DataBlk;
+        out_msg.Dirty := in_msg.Dirty;
+      }
+    }
+  }
+
+  // Clean eviction: return any held tokens to the directory as a data-less
+  // ACK; sends nothing when no tokens are held.
+  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
+    if (getL2CacheEntry(address).Tokens > 0) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(map_Address_to_Directory(address));
+        out_msg.Tokens := getL2CacheEntry(address).Tokens;
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+      getL2CacheEntry(address).Tokens := 0;
+    }
+  }
+
+  // Owner eviction: hand ownership back to the directory — full data when
+  // dirty, or a lighter ACK_OWNER control message when the line is clean.
+  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Sender := machineID;
+      out_msg.SenderMachine := MachineType:L2Cache;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.Tokens := getL2CacheEntry(address).Tokens;
+      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL2CacheEntry(address).Dirty;
+
+      if (getL2CacheEntry(address).Dirty) {
+        out_msg.MessageSize := MessageSizeType:Writeback_Data;
+        out_msg.Type := CoherenceResponseType:DATA_OWNER;
+      } else {
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+        out_msg.Type := CoherenceResponseType:ACK_OWNER;
+      }
+    }
+    getL2CacheEntry(address).Tokens := 0;
+  }
+
+  // Answers a transient GETS with shared data: gives away N_tokens() when
+  // we hold more than that, otherwise a single token.
+  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
+    peek(requestNetwork_in, RequestMsg) {
+      if (getL2CacheEntry(address).Tokens > N_tokens()) {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA_SHARED;
+          out_msg.Sender := machineID;
+          out_msg.SenderMachine := MachineType:L2Cache;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.Tokens := N_tokens();
+          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+          out_msg.Dirty := false;
+          out_msg.MessageSize := MessageSizeType:Response_Data;
+        }
+        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens();
+      }
+      else {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:DATA_SHARED;
+          out_msg.Sender := machineID;
+          out_msg.SenderMachine := MachineType:L2Cache;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.Tokens := 1;
+          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+          out_msg.Dirty := false;
+          out_msg.MessageSize := MessageSizeType:Response_Data;
+        }
+        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
+      }
+    }
+  }
+
+  // Answers a transient GETX: transfers ownership with data and every token
+  // this bank holds.
+  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
+    peek(requestNetwork_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(in_msg.Requestor);
+        assert(getL2CacheEntry(address).Tokens >= 1);
+        out_msg.Tokens := getL2CacheEntry(address).Tokens;
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.Dirty := getL2CacheEntry(address).Dirty;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+    getL2CacheEntry(address).Tokens := 0;
+  }
+
+  // Gives every collected token to the persistent-request winner (starver)
+  // as a data-less ACK; token count is zeroed even when nothing was sent.
+  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
+    if (getL2CacheEntry(address).Tokens > 0) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        assert(getL2CacheEntry(address).Tokens >= 1);
+        out_msg.Tokens := getL2CacheEntry(address).Tokens;
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+      }
+    }
+    getL2CacheEntry(address).Tokens := 0;
+  }
+
+  // Transfers ownership (data + all tokens) to the starver.
+  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA_OWNER;
+      out_msg.Sender := machineID;
+      out_msg.SenderMachine := MachineType:L2Cache;
+      out_msg.Destination.add(persistentTable.findSmallest(address));
+      assert(getL2CacheEntry(address).Tokens >= 1);
+      out_msg.Tokens := getL2CacheEntry(address).Tokens;
+      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+      out_msg.Dirty := getL2CacheEntry(address).Dirty;
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+    getL2CacheEntry(address).Tokens := 0;
+  }
+
+  // Sends all but one token to the starver (for a persistent GETS, the one
+  // retained token lets this bank stay a sharer). Always normalizes the
+  // local count to exactly 1.
+  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
+    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+    assert(getL2CacheEntry(address).Tokens > 0);
+    if (getL2CacheEntry(address).Tokens > 1) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        assert(getL2CacheEntry(address).Tokens >= 1);
+        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+      }
+    }
+    getL2CacheEntry(address).Tokens := 1;
+  }
+
+  // Owner-data variant of f_: ships data plus all-but-one tokens.
+  // NOTE(review): unlike f_ above, the `Tokens := 1` assignment sits inside
+  // the if, so nothing happens at all when exactly one token is held —
+  // likely intentional (nothing to send, count already 1), but confirm.
+  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
+    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+    assert(getL2CacheEntry(address).Tokens > 0);
+    if (getL2CacheEntry(address).Tokens > 1) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        assert(getL2CacheEntry(address).Tokens >= 1);
+        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.Dirty := getL2CacheEntry(address).Dirty;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+      getL2CacheEntry(address).Tokens := 1;
+    }
+  }
+
+
+
+  // Redirects an incoming response as-is to the persistent-request winner.
+  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
+    // assert(persistentTable.isLocked(address));
+    peek(responseNetwork_in, ResponseMsg) {
+      // FIXME, should use a 3rd vnet in some cases
+      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := in_msg.Type;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        out_msg.Tokens := in_msg.Tokens;
+        out_msg.DataBlk := in_msg.DataBlk;
+        out_msg.Dirty := in_msg.Dirty;
+        out_msg.MessageSize := in_msg.MessageSize;
+      }
+    }
+  }
+
+  // Redirects an L1 writeback to the starver, translating the writeback
+  // type into the response type the starver expects: WB_SHARED_DATA becomes
+  // DATA_SHARED, a token-only writeback becomes a plain ACK.
+  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
+    //assert(persistentTable.isLocked(address));
+    peek(responseNetwork_in, ResponseMsg) {
+      // FIXME, should use a 3rd vnet in some cases
+      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+        out_msg.Address := address;
+        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+          out_msg.Type := CoherenceResponseType:DATA_SHARED;
+        } else {
+          out_msg.Type := CoherenceResponseType:ACK;
+        }
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        out_msg.Tokens := in_msg.Tokens;
+        out_msg.DataBlk := in_msg.DataBlk;
+        out_msg.Dirty := in_msg.Dirty;
+        out_msg.MessageSize := in_msg.MessageSize;
+      }
+    }
+  }
+
+  // Redirects an owned writeback to the starver as owner data.
+  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
+    // assert(persistentTable.isLocked(address));
+    peek(responseNetwork_in, ResponseMsg) {
+      // FIXME, should use a 3rd vnet in some cases
+      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        out_msg.Tokens := in_msg.Tokens;
+        out_msg.DataBlk := in_msg.DataBlk;
+        out_msg.Dirty := in_msg.Dirty;
+        out_msg.MessageSize := in_msg.MessageSize;
+      }
+    }
+  }
+
+
+  // A writeback/hint from an L1 means it no longer holds the line; drop it
+  // from the sharer filter.
+  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
+    peek(responseNetwork_in, ResponseMsg) {
+      removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
+    }
+  }
+
+  // Forwards an external transient request down to the local L1s, unless
+  // filtering is on, this is the first try (RetryNum == 0), and the filter
+  // shows no local sharers — in which case the forward is suppressed.
+  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
+    peek(requestNetwork_in, RequestMsg) {
+      if (getFilteringEnabled() == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
+        profile_filter_action(1);
+        DEBUG_EXPR("filtered message");
+        DEBUG_EXPR(in_msg.RetryNum);
+      }
+      else {
+        enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+           out_msg.Address := in_msg.Address;
+           out_msg.Requestor := in_msg.Requestor;
+           out_msg.RequestorMachine := in_msg.RequestorMachine;
+           out_msg.Destination := getLocalL1IDs(machineID);
+           out_msg.Type := in_msg.Type;
+           out_msg.isLocal := false;
+           out_msg.MessageSize := MessageSizeType:Request_Control;
+           out_msg.AccessMode := in_msg.AccessMode;
+           out_msg.Prefetch := in_msg.Prefetch;
+        }
+        profile_filter_action(0);
+      }
+    }
+  }
+
+
+  // L2 hit for a local L1 GETS: hand over one token with shared data.
+  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      assert(getL2CacheEntry(address).Tokens > 0);
+      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_SHARED;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.Dirty := false;
+        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
+        out_msg.Tokens := 1;
+      }
+      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
+    }
+  }
+
+  // Last-token L2 hit for a local GETS: the single token carries ownership,
+  // so the response is DATA_OWNER and preserves the dirty bit.
+  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
+    peek(L1requestNetwork_in, RequestMsg) {
+      assert(getL2CacheEntry(address).Tokens > 0);
+      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.Dirty := getL2CacheEntry(address).Dirty;
+        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
+        out_msg.Tokens := 1;
+      }
+      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
+    }
+  }
+
+  // L2 hit for a local GETX: ship data plus every token held by this bank.
+  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
+    peek(L1requestNetwork_in, RequestMsg) {
+//      assert(getL2CacheEntry(address).Tokens == max_tokens());
+      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:L2Cache;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.Dirty := getL2CacheEntry(address).Dirty;
+        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
+        //out_msg.Tokens := max_tokens();
+        out_msg.Tokens := getL2CacheEntry(address).Tokens;
+      }
+      getL2CacheEntry(address).Tokens := 0;
+    }
+  }
+
+  // Queue-management actions: one dequeue per in_port.
+  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
+    persistentNetwork_in.dequeue();
+  }
+
+  action(m_popRequestQueue, "m", desc="Pop request queue.") {
+    requestNetwork_in.dequeue();
+  }
+
+  action(n_popResponseQueue, "n", desc="Pop response queue") {
+    responseNetwork_in.dequeue();
+  }
+
+  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
+    L1requestNetwork_in.dequeue();
+  }
+
+
+  // Adds the message's tokens to the line, and absorbs the dirty bit when
+  // an owner-carrying message is dirty.
+  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
+    peek(responseNetwork_in, ResponseMsg) {
+      assert(in_msg.Tokens != 0);
+      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;
+
+      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
+      // may not trigger this action.
+      if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
+        getL2CacheEntry(address).Dirty := true;
+      }
+    }
+  }
+
+  // Records the requesting L1 in the filter: GETX makes it the exclusive
+  // writer, GETS adds it as a sharer.
+  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
+
+    peek(L1requestNetwork_in, RequestMsg) {
+      if (in_msg.Type == CoherenceRequestType:GETX) {
+        setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
+      } else if (in_msg.Type == CoherenceRequestType:GETS) {
+        addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
+      }
+    }
+  }
+
+  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
+    clearExclusiveBitIfExists(address);
+  }
+
+  action( r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
+    if(isCacheTagPresent(address)) {
+      L2cacheMemory.setMRU(address);
+    }
+  }
+
+  // Gives every held token to a transient (global) requestor as an ACK;
+  // sends nothing when no tokens are held, but always zeroes the count.
+  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
+    if (getL2CacheEntry(address).Tokens > 0) {
+      peek(requestNetwork_in, RequestMsg) {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:ACK;
+          out_msg.Sender := machineID;
+          out_msg.SenderMachine := MachineType:L2Cache;
+          out_msg.Destination.add(in_msg.Requestor);
+          assert(getL2CacheEntry(address).Tokens >= 1);
+          out_msg.Tokens := getL2CacheEntry(address).Tokens;
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+        }
+      }
+    }
+    getL2CacheEntry(address).Tokens := 0;
+  }
+
+  // Same as t_, but the requestor comes from the local L1 request queue.
+  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
+    if (getL2CacheEntry(address).Tokens > 0) {
+      peek(L1requestNetwork_in, RequestMsg) {
+        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:ACK;
+          out_msg.Sender := machineID;
+          out_msg.SenderMachine := MachineType:L2Cache;
+          out_msg.Destination.add(in_msg.Requestor);
+          assert(getL2CacheEntry(address).Tokens >= 1);
+          out_msg.Tokens := getL2CacheEntry(address).Tokens;
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+        }
+      }
+    }
+    getL2CacheEntry(address).Tokens := 0;
+  }
+
+  action(u_writeDataToCache, "u", desc="Write data to cache") {
+    // Install the incoming data block.  The Dirty flag is "sticky": it is
+    // only ever raised here (clean -> dirty), never cleared, so a dirty line
+    // stays dirty even if a later clean copy arrives.
+    peek(responseNetwork_in, ResponseMsg) {
+      getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+      if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
+        getL2CacheEntry(address).Dirty := in_msg.Dirty;
+      }
+    }
+  }
+
+  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
+    // Claim a tag for this address; callers must ensure a victim way exists.
+    L2cacheMemory.allocate(address);
+  }
+
+  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+    // Frees the tag immediately; any in-flight fill for this set can proceed.
+    L2cacheMemory.deallocate(address);
+  }
+
+  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+    // Statistics-only hook: records the miss type, access mode, message size,
+    // prefetch bit and requesting node.  No protocol state is modified.
+    peek(L1requestNetwork_in, RequestMsg) {
+      // AccessModeType not implemented
+      profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
+    }
+  }
+
+
+  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
+    // Debug-only consistency check used when a response carries data for a
+    // line we already hold: the copies must be identical.
+    peek(responseNetwork_in, ResponseMsg) {
+      assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
+    }
+  }
+
+  action(z_stall, "z", desc="Stall") {
+    // Intentionally empty: the triggering message is left at the head of its
+    // queue and will be re-examined later.
+  }
+
+
+
+
+  //*****************************************************
+  // TRANSITIONS
+  //*****************************************************
+
+  // Common transitions: L1 invalidation hints update the sharer filter in
+  // every base and locked state; our own persistent lock/unlock messages are
+  // simply consumed in the base states.
+  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
+
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
+    l_popPersistentQueue;
+  }
+
+
+  // Transitions from NP
+  // Not Present: no tag allocated.  Transient requests are only forwarded to
+  // local sharers; responses that arrive here are bounced, and L1 writebacks
+  // allocate a block and pick the state implied by what was written back.
+
+  transition(NP, {Transient_GETX, Transient_GETS}) {
+    // forward message to local sharers
+    r_clearExclusive;
+    j_forwardTransientRequestToLocalSharers;
+    m_popRequestQueue;
+  }
+
+
+  transition(NP, {L1_GETS, L1_GETX}) {
+    a_broadcastLocalRequest;
+    r_markNewSharer;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
+    bb_bounceResponse;
+    n_popResponseQueue;
+  }
+
+  transition(NP, Writeback_Shared_Data, S) {
+    vv_allocateL2CacheBlock;
+    u_writeDataToCache;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  // Tokens without data: allocate but stay dataless (state I).
+  transition(NP, Writeback_Tokens, I) {
+    vv_allocateL2CacheBlock;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(NP, Writeback_All_Tokens, M) {
+    vv_allocateL2CacheBlock;
+    u_writeDataToCache;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(NP, Writeback_Owned, O) {
+    vv_allocateL2CacheBlock;
+    u_writeDataToCache;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+
+  transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
+    l_popPersistentQueue;
+  }
+
+  // Transitions from Idle
+  // Tag allocated but no data; may hold stray tokens, which are handed to any
+  // local requestor before its request is broadcast.
+
+  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
+    a_broadcastLocalRequest;
+    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
+    r_markNewSharer;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(I, L1_GETX) {
+    a_broadcastLocalRequest;
+    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
+    r_markNewSharer;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(I, L2_Replacement) {
+    c_cleanReplacement; // Only needed in some cases
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
+    r_clearExclusive;
+    t_sendAckWithCollectedTokens;
+    j_forwardTransientRequestToLocalSharers;
+    m_popRequestQueue;
+  }
+
+  transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
+    e_sendAckWithCollectedTokens;
+    l_popPersistentQueue;
+  }
+
+
+  transition(I, Ack) {
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  // Arriving data upgrades the state: shared data -> S, owner data -> O,
+  // all tokens -> M.  Writeback variants additionally update the L1 filter.
+  transition(I, Data_Shared, S) {
+    u_writeDataToCache;
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  transition(I, Writeback_Shared_Data, S) {
+    u_writeDataToCache;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(I, Writeback_Tokens) {
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(I, Data_Owner, O) {
+    u_writeDataToCache;
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  transition(I, Writeback_Owned, O) {
+    u_writeDataToCache;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(I, Data_All_Tokens, M) {
+    u_writeDataToCache;
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+
+  transition(I, Writeback_All_Tokens, M) {
+    u_writeDataToCache;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  // Transitions from Shared
+  // Valid data with at least one (non-owner) token.  Incoming data is only
+  // asserted against the cached copy, never rewritten.
+
+  transition(S, L2_Replacement, I) {
+    c_cleanReplacement;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(S, Transient_GETX, I) {
+    r_clearExclusive;
+    t_sendAckWithCollectedTokens;
+    j_forwardTransientRequestToLocalSharers;
+    m_popRequestQueue;
+  }
+
+  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
+    j_forwardTransientRequestToLocalSharers;
+    r_clearExclusive;
+    m_popRequestQueue;
+  }
+
+  transition(S, Persistent_GETX, I_L) {
+    e_sendAckWithCollectedTokens;
+    l_popPersistentQueue;
+  }
+
+
+  // Persistent GETS: keep one token so we may stay a (locked) sharer.
+  transition(S, Persistent_GETS, S_L) {
+    f_sendAckWithAllButOneTokens;
+    l_popPersistentQueue;
+  }
+
+
+  transition(S, Ack) {
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  transition(S, Data_Shared) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  transition(S, Writeback_Tokens) {
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(S, Writeback_Shared_Data) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+
+  transition(S, Data_Owner, O) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  transition(S, Writeback_Owned, O) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(S, Data_All_Tokens, M) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  transition(S, Writeback_All_Tokens, M) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  // Local GETX: hand over all collected tokens and broadcast for the rest.
+  transition(S, L1_GETX, I) {
+    a_broadcastLocalRequest;
+    tt_sendLocalAckWithCollectedTokens;
+    r_markNewSharer;
+    r_setMRU;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+
+  transition(S, L1_GETS) {
+    k_dataFromL2CacheToL1Requestor;
+    r_markNewSharer;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(S, L1_GETS_Last_Token, I) {
+
+    k_dataFromL2CacheToL1Requestor;
+    r_markNewSharer;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  // Transitions from Owned
+  // Holds the owner token (and possibly others); responsible for supplying
+  // data and for dirty writeback on replacement.
+
+  transition(O, L2_Replacement, I) {
+    cc_dirtyReplacement;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(O, Transient_GETX, I) {
+    r_clearExclusive;
+    dd_sendDataWithAllTokens;
+    j_forwardTransientRequestToLocalSharers;
+    m_popRequestQueue;
+  }
+
+  transition(O, Persistent_GETX, I_L) {
+    ee_sendDataWithAllTokens;
+    l_popPersistentQueue;
+  }
+
+  transition(O, Persistent_GETS, S_L) {
+    ff_sendDataWithAllButOneTokens;
+    l_popPersistentQueue;
+  }
+
+  transition(O, Transient_GETS) {
+    // send multiple tokens
+    r_clearExclusive;
+    d_sendDataWithTokens;
+    m_popRequestQueue;
+  }
+
+  transition(O, Transient_GETS_Last_Token) {
+    // WAIT FOR IT TO GO PERSISTENT
+    r_clearExclusive;
+    m_popRequestQueue;
+  }
+
+  transition(O, Ack) {
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  // Collecting the final token upgrades the owner to M.
+  transition(O, Ack_All_Tokens, M) {
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  transition(O, Data_Shared) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+
+  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(O, Data_All_Tokens, M) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    n_popResponseQueue;
+  }
+
+  transition(O, Writeback_All_Tokens, M) {
+    w_assertIncomingDataAndCacheDataMatch;
+    q_updateTokensFromResponse;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(O, L1_GETS) {
+    k_dataFromL2CacheToL1Requestor;
+    r_markNewSharer;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  // Last token leaves with the data, so ownership moves to the L1.
+  transition(O, L1_GETS_Last_Token, I) {
+    k_dataOwnerFromL2CacheToL1Requestor;
+    r_markNewSharer;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(O, L1_GETX, I) {
+    a_broadcastLocalRequest;
+    k_dataAndAllTokensFromL2CacheToL1Requestor;
+    r_markNewSharer;
+    r_setMRU;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  // Transitions from M
+  // Holds every token for the line; any external request drains all of them.
+
+  transition(M, L2_Replacement, I) {
+    cc_dirtyReplacement;
+    rr_deallocateL2CacheBlock;
+  }
+
+  // MRM_DEBUG:  Give up all tokens even for GETS? ???
+  transition(M, {Transient_GETX, Transient_GETS}, I) {
+    r_clearExclusive;
+    dd_sendDataWithAllTokens;
+    m_popRequestQueue;
+  }
+
+  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
+    ee_sendDataWithAllTokens;
+    l_popPersistentQueue;
+  }
+
+
+  // Local GETS demotes to O (we keep the owner token plus spares).
+  transition(M, L1_GETS, O) {
+    k_dataFromL2CacheToL1Requestor;
+    r_markNewSharer;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(M, L1_GETX, I) {
+    k_dataAndAllTokensFromL2CacheToL1Requestor;
+    r_markNewSharer;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+
+  //Transitions from locked states
+  // I_L / S_L: a persistent (starvation-avoidance) request holds the line.
+  // Responses are bounced on to the starving node rather than kept here.
+
+  transition({I_L, S_L}, Ack) {
+    gg_bounceResponseToStarver;
+    n_popResponseQueue;
+  }
+
+  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
+    gg_bounceResponseToStarver;
+    n_popResponseQueue;
+  }
+
+  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
+    gg_bounceWBSharedToStarver;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
+    gg_bounceWBOwnedToStarver;
+    h_updateFilterFromL1HintOrWB;
+    n_popResponseQueue;
+  }
+
+  transition(S_L, L2_Replacement, I) {
+    c_cleanReplacement;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(I_L, L2_Replacement, I) {
+    rr_deallocateL2CacheBlock;
+  }
+
+  // The lock owner's deactivation releases the line back to its base state.
+  transition(I_L, Own_Lock_or_Unlock, I) {
+    l_popPersistentQueue;
+  }
+
+  transition(S_L, Own_Lock_or_Unlock, S) {
+    l_popPersistentQueue;
+  }
+
+  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
+    r_clearExclusive;
+    m_popRequestQueue;
+  }
+
+  transition(I_L, {L1_GETX, L1_GETS}) {
+    a_broadcastLocalRequest;
+    r_markNewSharer;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(S_L, L1_GETX, I_L) {
+    a_broadcastLocalRequest;
+    tt_sendLocalAckWithCollectedTokens;
+    r_markNewSharer;
+    r_setMRU;
+    uu_profileMiss;
+    o_popL1RequestQueue;
+  }
+
+  transition(S_L, L1_GETS) {
+    k_dataFromL2CacheToL1Requestor;
+    r_markNewSharer;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(S_L, L1_GETS_Last_Token, I_L) {
+    k_dataFromL2CacheToL1Requestor;
+    r_markNewSharer;
+    r_setMRU;
+    o_popL1RequestQueue;
+  }
+
+  transition(S_L, Persistent_GETX, I_L) {
+    e_sendAckWithCollectedTokens;
+    l_popPersistentQueue;
+  }
+
+  transition(S_L, Persistent_GETS) {
+    l_popPersistentQueue;
+  }
+
+  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
+    l_popPersistentQueue;
+  }
+}
diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm
new file mode 100644
index 000000000..1592fd123
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_token-dir.sm
@@ -0,0 +1,435 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+
+machine(Directory, "Token protocol") {
+
+  // Directory controller for the MOESI token protocol.  It tracks only a
+  // token count plus soft-state owner/sharer hints; while a persistent
+  // request holds a line (state L) all responses are bounced to the starver.
+
+  MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false";
+  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";
+
+  // Persistent requests arrive on an ordered network (vnet 3).
+  MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true";
+  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";
+  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";
+
+  // STATES
+  enumeration(State, desc="Directory states", default="Directory_State_O") {
+    // Base states
+    O, desc="Owner";
+    NO, desc="Not Owner";
+    L, desc="Locked";
+  }
+
+  // Events
+  enumeration(Event, desc="Directory events") {
+    GETX, desc="A GETX arrives";
+    GETS, desc="A GETS arrives";
+    Lockdown, desc="A lockdown request arrives";
+    Unlockdown, desc="An un-lockdown request arrives";
+    Data_Owner, desc="Data arrive";
+    Ack_Owner, desc="Owner token arrived without data because it was clean";
+    Tokens, desc="Tokens arrive";
+  }
+
+  // TYPES
+
+  // DirectoryEntry
+  structure(Entry, desc="...") {
+    State DirectoryState, desc="Directory state";
+    DataBlock DataBlk, desc="data for the block";
+    // Directory starts out holding all tokens for every line (default).
+    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";
+
+    // The following state is provided to allow for bandwidth
+    // efficient directory-like operation. However all of this state
+    // is 'soft state' that does not need to be correct (as long as
+    // you're eventually willing to resort to broadcast.)
+
+    Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
+    Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
+  }
+
+  external_type(DirectoryMemory) {
+    Entry lookup(Address);
+    bool isPresent(Address);
+  }
+
+
+  // ** OBJECTS **
+
+  DirectoryMemory directory, constructor_hack="i";
+
+  PersistentTable persistentTable, constructor_hack="i";
+
+  State getState(Address addr) {
+    return directory[addr].DirectoryState;
+  }
+
+  void setState(Address addr, State state) {
+    // Besides recording the new state, sanity-check the token-count
+    // invariants that each state implies.
+    directory[addr].DirectoryState := state;
+
+    // Locked lines must have surrendered every token to the starver.
+    if (state == State:L) {
+      assert(directory[addr].Tokens == 0);
+    }
+
+    // We have one or zero owners
+    assert((directory[addr].Owner.count() == 0) || (directory[addr].Owner.count() == 1));
+
+    // Make sure the token count is in range
+    assert(directory[addr].Tokens >= 0);
+    assert(directory[addr].Tokens <= max_tokens());
+
+    if (state == State:O) {
+      assert(directory[addr].Tokens >= 1); // Must have at least one token
+      // assert(directory[addr].Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+    }
+  }
+
+  // ** OUT_PORTS **
+  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+  out_port(requestNetwork_out, RequestMsg, requestFromDir);
+
+  // ** IN_PORTS **
+
+  // Persistent (starvation-avoidance) requests.  With the distributed table
+  // enabled, the lock/unlock is applied locally and the resulting table state
+  // decides the event; otherwise the message type alone decides it.
+  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
+    if (persistentNetwork_in.isReady()) {
+      peek(persistentNetwork_in, PersistentMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+
+        if (distributedPersistentEnabled()) {
+          // Apply the lockdown or unlockdown message to the table
+          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
+          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
+          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+            persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
+          } else {
+            error("Invalid message");
+          }
+
+          // React to the message based on the current state of the table
+          if (persistentTable.isLocked(in_msg.Address)) {
+            trigger(Event:Lockdown, in_msg.Address); // locked
+          } else {
+            trigger(Event:Unlockdown, in_msg.Address); // unlocked
+          }
+        }
+        else {
+          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+            trigger(Event:Lockdown, in_msg.Address); // locked
+          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+            trigger(Event:Lockdown, in_msg.Address); // locked
+          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+            trigger(Event:Unlockdown, in_msg.Address); // unlocked
+          } else {
+            error("Invalid message");
+          }
+        }
+      }
+    }
+  }
+
+  // Ordinary coherence requests.
+  in_port(requestNetwork_in, RequestMsg, requestToDir) {
+    if (requestNetwork_in.isReady()) {
+      peek(requestNetwork_in, RequestMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        if (in_msg.Type == CoherenceRequestType:GETS) {
+          trigger(Event:GETS, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GETX) {
+          trigger(Event:GETX, in_msg.Address);
+        } else {
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+  // Responses: owner data, plain token carriers (ACK / DATA_SHARED), or a
+  // data-less owner token for clean lines (ACK_OWNER).
+  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
+    if (responseNetwork_in.isReady()) {
+      peek(responseNetwork_in, ResponseMsg) {
+        assert(in_msg.Destination.isElement(machineID));
+        if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
+          trigger(Event:Data_Owner, in_msg.Address);
+        } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
+                   (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
+          trigger(Event:Tokens, in_msg.Address);
+        } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
+          trigger(Event:Ack_Owner, in_msg.Address);
+        } else {
+          DEBUG_EXPR(in_msg.Type);
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+  // Actions
+
+  action(a_sendTokens, "a", desc="Send tokens to requestor") {
+    // Only send a message if we have tokens to send
+    if (directory[address].Tokens > 0) {
+      peek(requestNetwork_in, RequestMsg) {
+        // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
+        enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {// FIXME?
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:ACK;
+          out_msg.Sender := machineID;
+          out_msg.SenderMachine := MachineType:Directory;
+          out_msg.Destination.add(in_msg.Requestor);
+          // NOTE(review): indexes by in_msg.Address while the guard above uses
+          // 'address'; these should refer to the same line -- confirm.
+          out_msg.Tokens := directory[in_msg.Address].Tokens;
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+        }
+      }
+      directory[address].Tokens := 0;
+    }
+  }
+
+  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
+    // Only send a message if we have tokens to send
+    if (directory[address].Tokens > 0) {
+      // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
+      enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {// FIXME?
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:Directory;
+        // The starver is the oldest (smallest-ID) locked requestor.
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        out_msg.Tokens := directory[address].Tokens;
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+      }
+      directory[address].Tokens := 0;
+    }
+  }
+
+  action(d_sendDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
+    // Memory data is by definition clean, hence Dirty := false.
+    peek(requestNetwork_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:Directory;
+        out_msg.Destination.add(in_msg.Requestor);
+        assert(directory[address].Tokens > 0);
+        out_msg.Tokens := directory[in_msg.Address].Tokens;
+        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+        out_msg.Dirty := false;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+    directory[address].Tokens := 0;
+  }
+
+  action(dd_sendDataWithAllTokensToStarver, "\d", desc="Send data and tokens to starver") {
+    enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:DATA_OWNER;
+      out_msg.Sender := machineID;
+      out_msg.SenderMachine := MachineType:Directory;
+      out_msg.Destination.add(persistentTable.findSmallest(address));
+      assert(directory[address].Tokens > 0);
+      out_msg.Tokens := directory[address].Tokens;
+      out_msg.DataBlk := directory[address].DataBlk;
+      out_msg.Dirty := false;
+      out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+    directory[address].Tokens := 0;
+  }
+
+  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
+    // Token-carrying responses always carry at least one token.
+    peek(responseNetwork_in, ResponseMsg) {
+      assert(in_msg.Tokens >= 1);
+      directory[address].Tokens := directory[address].Tokens + in_msg.Tokens;
+    }
+  }
+
+  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+    requestNetwork_in.dequeue();
+  }
+
+  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
+    responseNetwork_in.dequeue();
+  }
+
+  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
+    persistentNetwork_in.dequeue();
+  }
+
+  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
+    peek(responseNetwork_in, ResponseMsg) {
+      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+      DEBUG_EXPR(in_msg.Address);
+      DEBUG_EXPR(in_msg.DataBlk);
+    }
+  }
+
+  action(n_checkIncomingMsg, "n", desc="Check incoming token message") {
+    // Debug-only: a clean owner-token return must match memory exactly.
+    peek(responseNetwork_in, ResponseMsg) {
+      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
+      assert(in_msg.Dirty == false);
+      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
+    }
+  }
+
+  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
+    // Forward the response verbatim (type, tokens, data, dirty bit) to the
+    // starver without updating any directory state.
+    peek(responseNetwork_in, ResponseMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := in_msg.Type;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:Directory;
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        out_msg.Tokens := in_msg.Tokens;
+        out_msg.MessageSize := in_msg.MessageSize;
+        out_msg.DataBlk := in_msg.DataBlk;
+        out_msg.Dirty := in_msg.Dirty;
+      }
+    }
+  }
+
+  action(s_bounceDatalessOwnerToken, "s", desc="Bounce clean owner token to starving processor") {
+    peek(responseNetwork_in, ResponseMsg) {
+      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
+      assert(in_msg.Dirty == false);
+      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+
+      // NOTE:  The following check would not be valid in a real
+      // implementation.  We include the data in the "dataless"
+      // message so we can assert the clean data matches the datablock
+      // in memory
+      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
+
+      // Bounce the message, but "re-associate" the data and the owner
+      // token.  In essence we're converting an ACK_OWNER message to a
+      // DATA_OWNER message, keeping the number of tokens the same.
+      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        out_msg.Sender := machineID;
+        out_msg.SenderMachine := MachineType:Directory;
+        // NOTE(review): DestMachine is set here but not in r_bounceResponse;
+        // verify whether receivers depend on this field.
+        out_msg.DestMachine := MachineType:L1Cache;
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        out_msg.Tokens := in_msg.Tokens;
+        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+        out_msg.Dirty := in_msg.Dirty;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+
+  // TRANSITIONS
+
+  // Trans. from O
+  transition(O, GETX, NO) {
+    d_sendDataWithAllTokens;
+    j_popIncomingRequestQueue;
+  }
+
+  transition(O, GETS, NO) {
+    d_sendDataWithAllTokens;
+    j_popIncomingRequestQueue;
+  }
+
+  transition(O, Lockdown, L) {
+    dd_sendDataWithAllTokensToStarver;
+    l_popIncomingPersistentQueue;
+  }
+
+  transition(O, Tokens) {
+    f_incrementTokens;
+    k_popIncomingResponseQueue;
+  }
+
+  // Trans. from NO
+  transition(NO, GETX) {
+    a_sendTokens;
+    j_popIncomingRequestQueue;
+  }
+
+  // GETS while not owner: nothing to supply; the owner cache responds.
+  transition(NO, GETS) {
+    j_popIncomingRequestQueue;
+  }
+
+  transition(NO, Lockdown, L) {
+    aa_sendTokensToStarver;
+    l_popIncomingPersistentQueue;
+  }
+
+  transition(NO, Data_Owner, O) {
+    m_writeDataToMemory;
+    f_incrementTokens;
+    k_popIncomingResponseQueue;
+  }
+
+  // Clean owner return: verify against memory instead of writing it back.
+  transition(NO, Ack_Owner, O) {
+    n_checkIncomingMsg;
+    f_incrementTokens;
+    k_popIncomingResponseQueue;
+  }
+
+  transition(NO, Tokens) {
+    f_incrementTokens;
+    k_popIncomingResponseQueue;
+  }
+
+  // Trans. from L
+  transition(L, {GETX, GETS}) {
+    j_popIncomingRequestQueue;
+  }
+
+  transition(L, Lockdown) {
+    l_popIncomingPersistentQueue;
+  }
+
+  // we could change this to write the data to memory and send it cleanly
+  transition(L, Data_Owner) {
+    r_bounceResponse;
+    k_popIncomingResponseQueue;
+  }
+
+  transition(L, Tokens) {
+    r_bounceResponse;
+    k_popIncomingResponseQueue;
+  }
+
+  transition(L, Ack_Owner) {
+    s_bounceDatalessOwnerToken;
+    k_popIncomingResponseQueue;
+  }
+
+
+  transition(L, Unlockdown, NO) {
+    l_popIncomingPersistentQueue;
+  }
+
+}
diff --git a/src/mem/protocol/MOESI_CMP_token-msg.sm b/src/mem/protocol/MOESI_CMP_token-msg.sm
new file mode 100644
index 000000000..2a75ce644
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_token-msg.sm
@@ -0,0 +1,123 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// CoherenceRequestType
+// Message-type enumerations shared by all MOESI_CMP_token controllers.
+enumeration(CoherenceRequestType, desc="...") {
+  GETX, desc="Get eXclusive";
+  GETS, desc="Get Shared";
+}
+
+// PersistentType
+// Starvation-avoidance (persistent) request kinds: activate a read or write
+// lock on a line, or deactivate an existing one.
+enumeration(PersistentRequestType, desc="...") {
+  GETX_PERSISTENT, desc="...";
+  GETS_PERSISTENT, desc="...";
+  DEACTIVATE_PERSISTENT,desc="...";
+}
+
+// CoherenceResponseType
+// The WB_* / INV variants are L1-to-L2 only; the rest travel system-wide.
+enumeration(CoherenceResponseType, desc="...") {
+  DATA_OWNER, desc="Data";
+  ACK_OWNER, desc="data-less owner token";
+  DATA_SHARED, desc="Data";
+  ACK, desc="ACKnowledgment";
+  WB_TOKENS, desc="L1 to L2 writeback";
+  WB_SHARED_DATA, desc="L1 to L2 writeback with data";
+  WB_OWNED, desc="L1 to L2 writeback with data";
+  INV, desc="L1 informing L2 of loss of all tokens";
+}
+
+// TriggerType
+// Internal self-notifications used for the token protocol's timeouts.
+enumeration(TriggerType, desc="...") {
+  REQUEST_TIMEOUT, desc="See corresponding event";
+  USE_TIMEOUT, desc="See corresponding event";
+}
+
+// TriggerMsg
+// Local (non-network) message a controller enqueues to itself on a timeout.
+structure(TriggerMsg, desc="...", interface="Message") {
+  Address Address, desc="Physical address for this request";
+  TriggerType Type, desc="Type of trigger";
+}
+
+// PersistentMsg
+// Starvation-avoidance request, broadcast on the ordered persistent network.
+structure(PersistentMsg, desc="...", interface="NetworkMessage") {
+  Address Address, desc="Physical address for this request";
+  PersistentRequestType Type, desc="Type of starvation request";
+  MachineID Requestor, desc="Node who initiated the request";
+  MachineType RequestorMachine, desc="Type of machine who requested";
+  NetDest Destination, desc="Destination set";
+  MachineType DestMachine, desc="type of destination component";
+  MessageSizeType MessageSize, desc="size category of the message";
+  AccessModeType AccessMode, desc="user/supervisor access type";
+  PrefetchBit Prefetch, desc="Is this a prefetch request";
+}
+
+// RequestMsg
+// Transient (best-effort) coherence request.
+structure(RequestMsg, desc="...", interface="NetworkMessage") {
+  Address Address, desc="Physical address for this request";
+  CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+  MachineID Requestor, desc="Node who initiated the request";
+  MachineType RequestorMachine, desc="Type of machine who requested";
+  MachineType DestMachine, desc="Type of destination machine";
+  NetDest Destination, desc="Multicast destination mask";
+  bool isLocal, desc="Is this request from a local L1";
+  int RetryNum, desc="retry sequence number";
+  MessageSizeType MessageSize, desc="size category of the message";
+  AccessModeType AccessMode, desc="user/supervisor access type";
+  PrefetchBit Prefetch, desc="Is this a prefetch request";
+}
+
+// ResponseMsg
+// Token-carrying response; Tokens > 0 for every ACK/DATA message.
+structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+  Address Address, desc="Physical address for this request";
+  CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+  MachineID Sender, desc="Node who sent the data";
+  MachineType SenderMachine, desc="component that is sending";
+  NetDest Destination, desc="Node to whom the data is sent";
+  MachineType DestMachine, desc="What component receives the data";
+  int Tokens, desc="Number of tokens being transfered for this line";
+  DataBlock DataBlk, desc="data for the cache line";
+  bool Dirty, desc="Is the data dirty (different than memory)?";
+  MessageSizeType MessageSize, desc="size category of the message";
+}
+
+// Map a protocol-specific request type onto the generic profiling type.
+// error() terminates, so no return is needed on the final branch.
+GenericRequestType convertToGenericType(CoherenceRequestType type) {
+  if(type == CoherenceRequestType:GETS) {
+    return GenericRequestType:GETS;
+  } else if(type == CoherenceRequestType:GETX) {
+    return GenericRequestType:GETX;
+  } else {
+    DEBUG_EXPR(type);
+    error("invalid CoherenceRequestType");
+  }
+}
diff --git a/src/mem/protocol/MOESI_CMP_token.slicc b/src/mem/protocol/MOESI_CMP_token.slicc
new file mode 100644
index 000000000..ae4a6d6ec
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_token.slicc
@@ -0,0 +1,5 @@
+MOESI_CMP_token-msg.sm
+MOESI_CMP_token-L1cache.sm
+MOESI_CMP_token-L2cache.sm
+MOESI_CMP_token-dir.sm
+standard_CMP-protocol.sm
diff --git a/src/mem/protocol/MOESI_SMP_directory-cache.sm b/src/mem/protocol/MOESI_SMP_directory-cache.sm
new file mode 100644
index 000000000..77edeb90c
--- /dev/null
+++ b/src/mem/protocol/MOESI_SMP_directory-cache.sm
@@ -0,0 +1,981 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+machine(L1Cache, "Directory protocol") {
+
+  // Virtual-network assignment visible below:
+  //   vnet 0 = requests, vnet 1 = forwards, vnet 2 = responses, vnet 3 = unblocks.
+  MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false";
+  MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false";
+  MessageBuffer unblockFromCache, network="To", virtual_network="3", ordered="false";
+
+  MessageBuffer forwardToCache, network="From", virtual_network="1", ordered="false";
+  MessageBuffer responseToCache, network="From", virtual_network="2", ordered="false";
+
+  // STATES
+  // MOESI base states plus transient states used while requests/writebacks
+  // are in flight.  NP (not present) and I (idle) are distinguished so a
+  // replaced-but-tracked line differs from one never cached.
+  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+    // Base states
+    NP, desc="Not Present";
+    I, desc="Idle";
+    S, desc="Shared";
+    O, desc="Owned";
+    E, desc="Exclusive (clean)";
+    M, desc="Modified (dirty)";
+    MM, desc="Modified (dirty and locally modified)";
+
+    // Transient States
+    IM, "IM", desc="Issued GetX";
+    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
+    OM, "OM", desc="Issued GetX, received data";
+    IS, "IS", desc="Issued GetS";
+    OI, "OI", desc="Issued PutO, waiting for ack";
+    MI, "MI", desc="Issued PutX, waiting for ack";
+    II, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
+  }
+
+  // EVENTS
+  enumeration(Event, desc="Cache events") {
+    Load, desc="Load request from the processor";
+    Ifetch, desc="I-fetch request from the processor";
+    Store, desc="Store request from the processor";
+    L2_Replacement, desc="Replacement";
+    L1_to_L2, desc="L1 to L2 transfer";
+    L2_to_L1D, desc="L2 to L1-Data transfer";
+    L2_to_L1I, desc="L2 to L1-Instruction transfer";
+
+    // Requests
+    Own_GETX, desc="We observe our own GetX forwarded back to us";
+    Fwd_GETX, desc="A GetX from another processor";
+    Fwd_GETS, desc="A GetS from another processor";
+    Inv, desc="Invalidations from the directory";
+
+    // Responses
+    Ack, desc="Received an ack message";
+    Data, desc="Received a data message, responder has a shared copy";
+    Exclusive_Data_Clean, desc="Received a data message, no other processor has it, data is clean";
+    Exclusive_Data_Dirty, desc="Received a data message, no other processor has it, data is dirty";
+
+    Writeback_Ack, desc="Writeback O.K. from directory";
+    Writeback_Nack, desc="Writeback not O.K. from directory";
+
+    // Triggers
+    All_acks, desc="Received all required data and message acks";
+  }
+
+  // TYPES
+
+  // CacheEntry
+  structure(Entry, desc="...", interface="AbstractCacheEntry") {
+    State CacheState, desc="cache state";
+    bool Dirty, desc="Is the data dirty (different than memory)?";
+    DataBlock DataBlk, desc="data for the block";
+  }
+
+  // TBE fields
+  // Transaction Buffer Entry: tracks one in-flight miss or writeback.
+  structure(TBE, desc="...") {
+    State TBEState, desc="Transient state";
+    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+    bool Dirty, desc="Is the data dirty (different than memory)?";
+    int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
+  }
+
+  // Interfaces to C++ implementations provided by the Ruby runtime.
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);
+    Address cacheProbe(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+  }
+
+  external_type(TBETable) {
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+  MessageBuffer mandatoryQueue, abstract_chip_ptr="true", ordered="false";
+  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+  StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
+
+  // Exclusive hierarchy: a block lives in exactly one of L1I / L1D / L2.
+  TBETable TBEs, template_hack="<L1Cache_TBE>";
+  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+  CacheMemory L2cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L2"', abstract_chip_ptr="true";
+
+  // Return the entry for addr from whichever cache holds it.
+  // Checks L2 first, then L1D; falls through to L1I.  Caller must ensure
+  // the tag is present in one of the three (see isCacheTagPresent).
+  Entry getCacheEntry(Address addr), return_by_ref="yes" {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory[addr];
+    } else if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory[addr];
+    } else {
+      return L1IcacheMemory[addr];
+    }
+  }
+
+  // Apply an access-permission change to the one cache that holds addr,
+  // using the same L2 -> L1D -> L1I priority as getCacheEntry.
+  void changePermission(Address addr, AccessPermission permission) {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory.changePermission(addr, permission);
+    } else if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory.changePermission(addr, permission);
+    } else {
+      return L1IcacheMemory.changePermission(addr, permission);
+    }
+  }
+
+  // True if addr is cached anywhere in this node's L1I/L1D/L2 hierarchy.
+  bool isCacheTagPresent(Address addr) {
+    return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+  }
+
+  // Current protocol state of addr.  A pending TBE (transient state)
+  // takes precedence over the cached state; untracked blocks are NP.
+  // The asserts enforce the exclusive-hierarchy invariant: a tag may be
+  // present in at most one of L1I/L1D/L2.
+  State getState(Address addr) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+
+    if(TBEs.isPresent(addr)) {
+      return TBEs[addr].TBEState;
+    } else if (isCacheTagPresent(addr)) {
+      return getCacheEntry(addr).CacheState;
+    }
+    return State:NP;
+  }
+
+  // Record the new state in the TBE (if one exists) and the cache entry
+  // (if one exists), then derive the access permission from the state.
+  void setState(Address addr, State state) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+
+    if (TBEs.isPresent(addr)) {
+      TBEs[addr].TBEState := state;
+    }
+
+    if (isCacheTagPresent(addr)) {
+      getCacheEntry(addr).CacheState := state;
+
+      // Sanity: E must be clean; M/MM must be dirty.
+      if (state == State:E) {
+        assert(getCacheEntry(addr).Dirty == false);
+      }
+
+      if ((state == State:M) || (state == State:MM)) {
+        assert(getCacheEntry(addr).Dirty == true);
+      }
+
+      // Set permission
+      // Only MM is writable; note M and E are Read_Only here, so a store
+      // to E/M goes through the protocol (Store event) before upgrading to MM.
+      if (state == State:MM) {
+        changePermission(addr, AccessPermission:Read_Write);
+      } else if ((state == State:S) ||
+                 (state == State:O) ||
+                 (state == State:M) ||
+                 (state == State:E) ||
+                 (state == State:SM) ||
+                 (state == State:OM)) {
+        changePermission(addr, AccessPermission:Read_Only);
+      } else {
+        changePermission(addr, AccessPermission:Invalid);
+      }
+    }
+  }
+
+  // Translate a processor request type into a protocol event.
+  // ATOMICs are treated as stores; any other type is a bug.
+  Event mandatory_request_type_to_event(CacheRequestType type) {
+    if (type == CacheRequestType:LD) {
+      return Event:Load;
+    } else if (type == CacheRequestType:IFETCH) {
+      return Event:Ifetch;
+    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+      return Event:Store;
+    } else {
+      error("Invalid CacheRequestType");
+    }
+  }
+
+  // Local, ordered queue used to self-schedule the All_acks trigger.
+  MessageBuffer triggerQueue, ordered="true";
+
+  // ** OUT_PORTS **
+
+  out_port(requestNetwork_out, RequestMsg, requestFromCache);
+  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
+  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
+  out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+ // ** IN_PORTS **
+
+ // Trigger Queue
+  // Trigger Queue: only ALL_ACKS is ever enqueued (see o_checkForCompletion).
+  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
+    if (triggerQueue_in.isReady()) {
+      peek(triggerQueue_in, TriggerMsg) {
+        if (in_msg.Type == TriggerType:ALL_ACKS) {
+          trigger(Event:All_acks, in_msg.Address);
+        } else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+ // Nothing from the request network
+
+ // Forward Network
+  // Forward Network: requests forwarded by the directory, invalidations,
+  // and writeback ack/nack control messages.  A GETX whose Requestor is
+  // this node is our own request echoed back (Own_GETX).
+  in_port(forwardToCache_in, RequestMsg, forwardToCache) {
+    if (forwardToCache_in.isReady()) {
+      peek(forwardToCache_in, RequestMsg) {
+        if (in_msg.Type == CoherenceRequestType:GETX) {
+          if (in_msg.Requestor == machineID) {
+            trigger(Event:Own_GETX, in_msg.Address);
+          } else {
+            trigger(Event:Fwd_GETX, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceRequestType:GETS) {
+          trigger(Event:Fwd_GETS, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:INV) {
+          trigger(Event:Inv, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+          trigger(Event:Writeback_Ack, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
+          trigger(Event:Writeback_Nack, in_msg.Address);
+        } else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+ // Response Network
+ in_port(responseToCache_in, ResponseMsg, responseToCache) {
+ if (responseToCache_in.isReady()) {
+ peek(responseToCache_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE_CLEAN) {
+ trigger(Event:Exclusive_Data_Clean, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE_DIRTY) {
+ trigger(Event:Exclusive_Data_Dirty, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Nothing from the unblock network
+
+ // Mandatory Queue
+  // Mandatory Queue: demand requests from the processor.  Before the
+  // request itself can fire, this port may instead trigger cache-shuffle
+  // events (L1_to_L2, L2_to_L1I/D, L2_Replacement) to restore the
+  // exclusive-hierarchy invariant and make room; the request is retried
+  // on a later pass once the block is in the right place.
+  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+    if (mandatoryQueue_in.isReady()) {
+      peek(mandatoryQueue_in, CacheMsg) {
+
+        // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+        if (in_msg.Type == CacheRequestType:IFETCH) {
+          // ** INSTRUCTION ACCESS ***
+
+          // Check to see if it is in the OTHER L1
+          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, try to write it to the L2
+            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+              trigger(Event:L1_to_L2, in_msg.Address);
+            } else {
+              // L2 set is full: evict the L2 victim first.
+              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+            }
+          }
+
+          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 does't have the line, but we have space for it in the L1
+              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+                // L2 has it (maybe not with the right permissions)
+                trigger(Event:L2_to_L1I, in_msg.Address);
+              } else {
+                // We have room, the L2 doesn't have it, so the L1 fetches the line
+                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+              }
+            } else {
+              // No room in the L1, so we need to make room
+              if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.Address))) {
+                // The L2 has room, so we move the line from the L1 to the L2
+                trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.Address));
+              } else {
+                // The L2 does not have room, so we replace a line from the L2
+                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.Address)));
+              }
+            }
+          }
+        } else {
+          // *** DATA ACCESS ***
+          // Mirror image of the instruction path above, with the L1 roles swapped.
+
+          // Check to see if it is in the OTHER L1
+          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, try to write it to the L2
+            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+              trigger(Event:L1_to_L2, in_msg.Address);
+            } else {
+              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+            }
+          }
+
+          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 does't have the line, but we have space for it in the L1
+              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+                // L2 has it (maybe not with the right permissions)
+                trigger(Event:L2_to_L1D, in_msg.Address);
+              } else {
+                // We have room, the L2 doesn't have it, so the L1 fetches the line
+                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+              }
+            } else {
+              // No room in the L1, so we need to make room
+              if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.Address))) {
+                // The L2 has room, so we move the line from the L1 to the L2
+                trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.Address));
+              } else {
+                // The L2 does not have room, so we replace a line from the L2
+                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.Address)));
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+ // ACTIONS
+
+  // Issue a read request to the home directory.
+  action(a_issueGETS, "a", desc="Issue GETS") {
+    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:GETS;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Request_Control;
+      // TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
+    }
+  }
+
+  // Issue a write/upgrade request to the home directory.
+  action(b_issueGETX, "b", desc="Issue GETX") {
+    enqueue(requestNetwork_out, RequestMsg, latency="(ISSUE_LATENCY-1)") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:GETX;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Request_Control;
+      // TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
+    }
+  }
+
+  // Start a writeback of a Modified line (used from M/MM replacements).
+  action(d_issuePUTX, "d", desc="Issue PUTX") {
+    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTX;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Writeback_Control;
+    }
+  }
+
+  // Start a writeback of an Owned line (used from O replacements).
+  action(dd_issuePUTO, "\d", desc="Issue PUTO") {
+    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTO;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Writeback_Control;
+    }
+  }
+
+  // Reply with data from the cache to the forwarded requestor; the ack
+  // count piggybacked on the forwarded request is passed through.
+  action(e_sendData, "e", desc="Send data from cache to requestor") {
+    peek(forwardToCache_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := getCacheEntry(address).DataBlk;
+        out_msg.Dirty := getCacheEntry(address).Dirty;
+        out_msg.Acks := in_msg.Acks;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  // Hand the line over exclusively (DATA_EXCLUSIVE_DIRTY); we give up our copy.
+  action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
+    peek(forwardToCache_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_DIRTY;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := getCacheEntry(address).DataBlk;
+        out_msg.Dirty := getCacheEntry(address).Dirty;
+        out_msg.Acks := in_msg.Acks;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  // Invalidation ack back to the requestor.
+  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
+    peek(forwardToCache_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.Acks := 0 - 1; // -1; written as "0 - 1" (presumably SLICC lacks a unary minus -- TODO confirm)
+        out_msg.MessageSize := MessageSizeType:Response_Control;
+      }
+    }
+  }
+
+  // Tell the directory the transaction is complete (shared grant).
+  action(g_sendUnblock, "g", desc="Send unblock to memory") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:UNBLOCK;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Unblock_Control;
+    }
+  }
+
+  // Tell the directory the transaction is complete (exclusive grant).
+  action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Unblock_Control;
+    }
+  }
+
+  // Complete a load in the sequencer with the cached data.
+  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
+    DEBUG_EXPR(getCacheEntry(address).DataBlk);
+    sequencer.readCallback(address, getCacheEntry(address).DataBlk);
+  }
+
+  // Complete a store in the sequencer and mark the line dirty.
+  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
+    DEBUG_EXPR(getCacheEntry(address).DataBlk);
+    sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
+    getCacheEntry(address).Dirty := true;
+  }
+
+  // Allocate a TBE, snapshotting data/dirty so a concurrent writeback
+  // can still supply the line after the cache entry is gone.
+  action(i_allocateTBE, "i", desc="Allocate TBE") {
+    check_allocate(TBEs);
+    TBEs.allocate(address);
+    TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
+    TBEs[address].Dirty := getCacheEntry(address).Dirty;
+  }
+
+  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
+    triggerQueue_in.dequeue();
+  }
+
+  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+    mandatoryQueue_in.dequeue();
+  }
+
+  action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
+    forwardToCache_in.dequeue();
+  }
+
+  // Subtract the Acks carried by a response; an ack message carries a
+  // negative count (see f_sendAck), so subtracting it *increments* the
+  // number of pending messages as responders are discovered.
+  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
+    peek(responseToCache_in, ResponseMsg) {
+      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
+    }
+  }
+
+  // Same as m_, but the ack count comes from our own forwarded request
+  // (used on Own_GETX).
+  action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
+    peek(forwardToCache_in, RequestMsg) {
+      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
+    }
+  }
+
+  action(n_popResponseQueue, "n", desc="Pop response queue") {
+    responseToCache_in.dequeue();
+  }
+
+  // When the pending count reaches zero, self-schedule All_acks via the
+  // local trigger queue.
+  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+    if (TBEs[address].NumPendingMsgs == 0) {
+      enqueue(triggerQueue_out, TriggerMsg) {
+        out_msg.Address := address;
+        out_msg.Type := TriggerType:ALL_ACKS;
+      }
+    }
+  }
+
+  // Supply data from the TBE snapshot (the cache entry may already be
+  // deallocated during a writeback) to a forwarded requestor.
+  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
+    peek(forwardToCache_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := TBEs[address].DataBlk;
+        out_msg.Dirty := TBEs[address].Dirty;
+        out_msg.Acks := in_msg.Acks;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  // Complete a writeback: send dirty data (or a clean notification) to memory.
+  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
+    enqueue(unblockNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.Dirty := TBEs[address].Dirty;
+      if (TBEs[address].Dirty) {
+        out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY;
+        out_msg.DataBlk := TBEs[address].DataBlk;
+        out_msg.MessageSize := MessageSizeType:Writeback_Data;
+      } else {
+        out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN;
+        // NOTE: in a real system this would not send data. We send
+        // data here only so we can check it at the memory
+        out_msg.DataBlk := TBEs[address].DataBlk;
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+    TBEs.deallocate(address);
+  }
+
+  // Install arriving data (and its dirty bit) into the cache entry.
+  action(u_writeDataToCache, "u", desc="Write data to cache") {
+    peek(responseToCache_in, ResponseMsg) {
+      getCacheEntry(address).DataBlk := in_msg.DataBlk;
+      getCacheEntry(address).Dirty := in_msg.Dirty;
+    }
+  }
+
+  // Same as u_, but asserts the arriving data matches the stale copy we
+  // still hold (used on the S->M upgrade path, where no one should have
+  // changed the line under us).
+  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
+    peek(responseToCache_in, ResponseMsg) {
+      assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
+      getCacheEntry(address).DataBlk := in_msg.DataBlk;
+      getCacheEntry(address).Dirty := in_msg.Dirty;
+    }
+  }
+
+  action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+    if (L1DcacheMemory.isTagPresent(address)) {
+      L1DcacheMemory.deallocate(address);
+    } else {
+      L1IcacheMemory.deallocate(address);
+    }
+  }
+
+  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
+    if (L1DcacheMemory.isTagPresent(address) == false) {
+      L1DcacheMemory.allocate(address);
+    }
+  }
+
+  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
+    if (L1IcacheMemory.isTagPresent(address) == false) {
+      L1IcacheMemory.allocate(address);
+    }
+  }
+
+  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
+    L2cacheMemory.allocate(address);
+  }
+
+  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+    L2cacheMemory.deallocate(address);
+  }
+
+  // Whole-entry copy between cache levels (state, dirty bit, and data).
+  action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
+    if (L1DcacheMemory.isTagPresent(address)) {
+      L2cacheMemory[address] := L1DcacheMemory[address];
+    } else {
+      L2cacheMemory[address] := L1IcacheMemory[address];
+    }
+  }
+
+  action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
+    if (L1DcacheMemory.isTagPresent(address)) {
+      L1DcacheMemory[address] := L2cacheMemory[address];
+    } else {
+      L1IcacheMemory[address] := L2cacheMemory[address];
+    }
+  }
+
+  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      profile_miss(in_msg, id);
+    }
+  }
+
+  // Requeue the head request so shuffle events (L1_to_L2 etc.) can run first.
+  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
+    mandatoryQueue_in.recycle();
+  }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+  // Transitions for Load/Store/L2_Replacement from transient states:
+  // stall by recycling the mandatory queue until the in-flight
+  // transaction resolves.  Note SM/OM are absent from the Load/Ifetch
+  // list below -- they still hold readable data and hit (see the S/SM and
+  // O/OM transitions further down).
+  transition({IM, SM, OM, IS, OI, MI, II}, {Store, L2_Replacement}) {
+    zz_recycleMandatoryQueue;
+  }
+
+  transition({IM, IS, OI, MI, II}, {Load, Ifetch}) {
+    zz_recycleMandatoryQueue;
+  }
+
+  transition({IM, SM, OM, IS, OI, MI, II}, L1_to_L2) {
+    zz_recycleMandatoryQueue;
+  }
+
+  // Transitions moving data between the L1 and L2 caches
+  transition({I, S, O, E, M, MM}, L1_to_L2) {
+    vv_allocateL2CacheBlock;
+    ss_copyFromL1toL2; // Not really needed for state I
+    kk_deallocateL1CacheBlock;
+  }
+
+  transition({I, S, O, E, M, MM}, L2_to_L1D) {
+    ii_allocateL1DCacheBlock;
+    tt_copyFromL2toL1; // Not really needed for state I
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition({I, S, O, E, M, MM}, L2_to_L1I) {
+    jj_allocateL1ICacheBlock;
+    tt_copyFromL2toL1; // Not really needed for state I
+    rr_deallocateL2CacheBlock;
+  }
+
+  // Transitions from Idle
+  transition({NP, I}, Load, IS) {
+    ii_allocateL1DCacheBlock;
+    i_allocateTBE;
+    a_issueGETS;
+    uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition({NP, I}, Ifetch, IS) {
+    jj_allocateL1ICacheBlock;
+    i_allocateTBE;
+    a_issueGETS;
+    uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition({NP, I}, Store, IM) {
+    ii_allocateL1DCacheBlock;
+    i_allocateTBE;
+    b_issueGETX;
+    uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition(I, L2_Replacement) {
+    rr_deallocateL2CacheBlock;
+  }
+
+  // Invalidations for blocks we no longer hold are still acked.
+  transition({NP, I}, Inv) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  // Transitions from Shared
+  transition({S, SM}, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(S, Store, SM) {
+    i_allocateTBE;
+    b_issueGETX;
+    uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition(S, L2_Replacement, I) {
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(S, Inv, I) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  // Transitions from Owned
+  transition({O, OM}, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(O, Store, OM) {
+    i_allocateTBE;
+    b_issueGETX;
+    // p_decrementNumberOfMessagesByOne;
+    uu_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  // Owner must write the line back on replacement.
+  transition(O, L2_Replacement, OI) {
+    i_allocateTBE;
+    dd_issuePUTO;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(O, Fwd_GETX, I) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  // Owner services GETS and stays Owner.
+  transition(O, Fwd_GETS) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  // Transitions from MM
+  transition(MM, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(MM, Store) {
+    hh_store_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(MM, L2_Replacement, MI) {
+    i_allocateTBE;
+    d_issuePUTX;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition(MM, Fwd_GETX, I) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  // MM hands the line over exclusively on a GETS rather than
+  // transitioning to O (migratory-style handoff in this protocol).
+  transition(MM, Fwd_GETS, I) {
+    ee_sendDataExclusive;
+    l_popForwardQueue;
+  }
+
+  // Transitions from M
+  transition({E, M}, {Load, Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  // E/M upgrade to MM silently on a store (no directory traffic).
+  transition({E, M}, Store, MM) {
+    hh_store_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition({E, M}, L2_Replacement, MI) {
+    i_allocateTBE;
+    d_issuePUTX;
+    rr_deallocateL2CacheBlock;
+  }
+
+  transition({E, M}, Fwd_GETX, I) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  transition({E, M}, Fwd_GETS, O) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  // Transitions from IM
+
+  transition(IM, Inv) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  transition(IM, Ack) {
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(IM, Data, OM) {
+    u_writeDataToCache;
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  // Transitions from SM
+  // An invalidation during an upgrade demotes us to IM: our stale copy
+  // is dead and we must wait for fresh data.
+  transition(SM, Inv, IM) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  transition(SM, Ack) {
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  transition(SM, Data, OM) {
+    v_writeDataToCacheVerify;
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  // Transitions from OM
+  // Our own GETX echoed back carries the expected ack count.
+  transition(OM, Own_GETX) {
+    mm_decrementNumberOfMessages;
+    o_checkForCompletion;
+    l_popForwardQueue;
+  }
+
+  transition(OM, Fwd_GETX, IM) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  transition(OM, Fwd_GETS, OM) {
+    e_sendData;
+    l_popForwardQueue;
+  }
+
+  transition(OM, Ack) {
+    m_decrementNumberOfMessages;
+    o_checkForCompletion;
+    n_popResponseQueue;
+  }
+
+  // All acks collected: perform the store and unblock the directory.
+  transition(OM, All_acks, MM) {
+    hh_store_hit;
+    gg_sendUnblockExclusive;
+    s_deallocateTBE;
+    j_popTriggerQueue;
+  }
+
+  // Transitions from IS
+
+  transition(IS, Inv) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+
+  transition(IS, Data, S) {
+    u_writeDataToCache;
+    m_decrementNumberOfMessages;
+    h_load_hit;
+    g_sendUnblock;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition(IS, Exclusive_Data_Clean, E) {
+    u_writeDataToCache;
+    m_decrementNumberOfMessages;
+    h_load_hit;
+    gg_sendUnblockExclusive;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  transition(IS, Exclusive_Data_Dirty, M) {
+    u_writeDataToCache;
+    m_decrementNumberOfMessages;
+    h_load_hit;
+    gg_sendUnblockExclusive;
+    s_deallocateTBE;
+    n_popResponseQueue;
+  }
+
+  // Transitions from OI/MI
+  // While a writeback is pending, forwarded requests are served from the
+  // TBE's data snapshot (the cache entry is already deallocated).
+
+  transition(MI, Fwd_GETS) {
+    q_sendDataFromTBEToCache;
+    l_popForwardQueue;
+  }
+
+  transition(MI, Fwd_GETX, II) {
+    q_sendDataFromTBEToCache;
+    l_popForwardQueue;
+  }
+
+  transition(OI, Fwd_GETS) {
+    q_sendDataFromTBEToCache;
+    l_popForwardQueue;
+  }
+
+  transition(OI, Fwd_GETX, II) {
+    q_sendDataFromTBEToCache;
+    l_popForwardQueue;
+  }
+
+  transition({OI, MI}, Writeback_Ack, I) {
+    qq_sendDataFromTBEToMemory;
+    s_deallocateTBE;
+    l_popForwardQueue;
+  }
+
+  transition(MI, Writeback_Nack, OI) {
+    // FIXME: This might cause deadlock by re-using the writeback
+    // channel, we should handle this case differently.
+    dd_issuePUTO;
+    l_popForwardQueue;
+  }
+
+  // Transitions from II
+  transition(II, Writeback_Ack, I) {
+    g_sendUnblock;
+    s_deallocateTBE;
+    l_popForwardQueue;
+  }
+
+  // Writeback nacked after we already lost the line: just give up the TBE.
+  transition(II, Writeback_Nack, I) {
+    s_deallocateTBE;
+    l_popForwardQueue;
+  }
+
+  transition(II, Inv) {
+    f_sendAck;
+    l_popForwardQueue;
+  }
+}
+
diff --git a/src/mem/protocol/MOESI_SMP_directory-dir.sm b/src/mem/protocol/MOESI_SMP_directory-dir.sm
new file mode 100644
index 000000000..b45600448
--- /dev/null
+++ b/src/mem/protocol/MOESI_SMP_directory-dir.sm
@@ -0,0 +1,495 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+machine(Directory, "Directory protocol") {
+
+  // Separate virtual networks for request/forward/response/unblock traffic
+  // avoid protocol deadlock between message classes.
+  MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
+  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";
+
+  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false";
+  MessageBuffer unblockToDir, network="From", virtual_network="3", ordered="false";
+
+  // STATES
+  enumeration(State, desc="Directory states", default="Directory_State_I") {
+    // Base states
+    I, desc="Invalid";
+    S, desc="Shared";
+    O, desc="Owner";
+    M, desc="Modified";
+
+    // Blocked states: the directory waits for unblock message(s) from the
+    // requester(s) before servicing further requests for the line.
+    IS, desc="Blocked, was in idle";
+    SS, desc="Blocked, was in shared";
+    OO, desc="Blocked, was in owned";
+    MO, desc="Blocked, going to owner or maybe modified";
+    MM, desc="Blocked, going to modified";
+
+    MI, desc="Blocked on a writeback";
+    OS, desc="Blocked on a writeback";
+  }
+
+  // Events
+  enumeration(Event, desc="Directory events") {
+    GETX, desc="A GETX arrives";
+    GETS, desc="A GETS arrives";
+    PUTX, desc="A PUTX arrives";
+    PUTO, desc="A PUTO arrives";
+    Unblock, desc="An unblock message arrives";
+    Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
+    Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
+    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
+    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
+  }
+
+  // TYPES
+
+  // DirectoryEntry
+  structure(Entry, desc="...") {
+    State DirectoryState, desc="Directory state";
+    DataBlock DataBlk, desc="data for the block";
+    NetDest Sharers, desc="Sharers for this block";
+    NetDest Owner, desc="Owner of this block";
+    int WaitingUnblocks, desc="Number of acks we're waiting for";
+  }
+
+  external_type(DirectoryMemory) {
+    Entry lookup(Address);
+    bool isPresent(Address);
+  }
+
+  // External function
+  void profile_sharing(Address addr, AccessType type, NodeID requestor, Set sharers, Set owner);
+
+  // ** OBJECTS **
+
+  // constructor_hack passes the chip index to the generated constructor.
+  DirectoryMemory directory, constructor_hack="i";
+
+  // Return the directory state recorded for 'addr'.
+  State getState(Address addr) {
+    return directory[addr].DirectoryState;
+  }
+
+  // Record a new directory state for 'addr', sanity-checking the
+  // Owner/Sharers sets and the outstanding-unblock counter against the
+  // invariants each state implies.
+  void setState(Address addr, State state) {
+    if (directory.isPresent(addr)) {
+
+      // Invalid: nobody holds the line.
+      if ((state == State:I) || (state == State:IS)) {
+        assert(directory[addr].Owner.count() == 0);
+        assert(directory[addr].Sharers.count() == 0);
+      }
+
+      // Shared: at least one sharer, no owner.
+      if ((state == State:S) || (state == State:SS)) {
+        assert(directory[addr].Owner.count() == 0);
+        assert(directory[addr].Sharers.count() != 0);
+      }
+
+      // Owned: exactly one owner, who is not also listed as a sharer.
+      if ((state == State:O) || (state == State:OO)) {
+        assert(directory[addr].Owner.count() == 1);
+        assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
+      }
+
+      // Modified: single owner, no sharers.
+      if (state == State:M) {
+        assert(directory[addr].Owner.count() == 1);
+        assert(directory[addr].Sharers.count() == 0);
+      }
+
+      // Only SS/OO may have unblocks pending.
+      if ((state != State:SS) && (state != State:OO)) {
+        assert(directory[addr].WaitingUnblocks == 0);
+      }
+
+      directory[addr].DirectoryState := state;
+    }
+  }
+
+  // ** OUT_PORTS **
+  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
+  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+  // NOTE(review): declared with ResponseMsg although requestToDir carries
+  // RequestMsg; apparently only used so requestQueue_in.recycle() works,
+  // which never inspects the message type -- confirm before relying on it.
+  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
+
+  // ** IN_PORTS **
+
+  // Unblock/writeback channel. The final unblock for a line (no further
+  // acks pending, per WaitingUnblocks) triggers Last_Unblock so the
+  // directory can leave its blocked state.
+  in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
+    if (unblockNetwork_in.isReady()) {
+      peek(unblockNetwork_in, ResponseMsg) {
+        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+          if (directory[in_msg.Address].WaitingUnblocks == 1) {
+            trigger(Event:Last_Unblock, in_msg.Address);
+          } else {
+            trigger(Event:Unblock, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
+          trigger(Event:Exclusive_Unblock, in_msg.Address);
+        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY) {
+          trigger(Event:Dirty_Writeback, in_msg.Address);
+        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN) {
+          trigger(Event:Clean_Writeback, in_msg.Address);
+        } else {
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+  // Request channel: maps incoming coherence requests onto events.
+  in_port(requestQueue_in, RequestMsg, requestToDir) {
+    if (requestQueue_in.isReady()) {
+      peek(requestQueue_in, RequestMsg) {
+        if (in_msg.Type == CoherenceRequestType:GETS) {
+          trigger(Event:GETS, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:GETX) {
+          trigger(Event:GETX, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+          trigger(Event:PUTX, in_msg.Address);
+        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
+          trigger(Event:PUTO, in_msg.Address);
+        } else {
+          error("Invalid message");
+        }
+      }
+    }
+  }
+
+  // Actions
+
+  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:WB_ACK;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:WB_NACK;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+      }
+    }
+  }
+
+  action(c_clearOwner, "c", desc="Clear the owner field") {
+    directory[address].Owner.clear();
+  }
+
+  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
+    directory[address].Sharers.clear();
+  }
+
+  // Memory responds with data; exclusive-clean is granted to a GETS when
+  // there are no current sharers. Ack count excludes the requestor itself
+  // if it already appears in the sharer set.
+  action(d_sendData, "d", desc="Send data to requestor") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+        out_msg.Address := address;
+
+        if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
+          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_CLEAN;
+        } else {
+          out_msg.Type := CoherenceResponseType:DATA;
+        }
+
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+        out_msg.Dirty := false; // By definition, the block is now clean
+        out_msg.Acks := directory[address].Sharers.count();
+        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+          out_msg.Acks := out_msg.Acks - 1;
+        }
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+      }
+    }
+  }
+
+  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      directory[address].Owner.clear();
+      directory[address].Owner.add(in_msg.Sender);
+    }
+  }
+
+  // Forward the request to the current owner, telling the requestor how
+  // many invalidation acks to expect (sharers, minus itself if present).
+  action(f_forwardRequest, "f", desc="Forward request to owner") {
+    peek(requestQueue_in, RequestMsg) {
+      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := in_msg.Type;
+        out_msg.Requestor := in_msg.Requestor;
+        out_msg.Destination := directory[in_msg.Address].Owner;
+        out_msg.Acks := directory[address].Sharers.count();
+        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+          out_msg.Acks := out_msg.Acks - 1;
+        }
+        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+      }
+    }
+  }
+
+  // Only enqueue an INV if there is at least one sharer other than the
+  // requestor itself.
+  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
+    peek(requestQueue_in, RequestMsg) {
+      if ((directory[in_msg.Address].Sharers.count() > 1) ||
+          ((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
+        enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceRequestType:INV;
+          out_msg.Requestor := in_msg.Requestor;
+          out_msg.Destination := directory[in_msg.Address].Sharers;
+          out_msg.Destination.remove(in_msg.Requestor);
+          out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+        }
+      }
+    }
+  }
+
+  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+    // Profile the request
+    peek(requestQueue_in, RequestMsg) {
+      if (in_msg.Type == CoherenceRequestType:GETX) {
+        // profile_sharing(address, AccessType:Write, machineIDToNodeID(in_msg.Requestor), directory[address].Sharers, directory[address].Owner);
+      } else if (in_msg.Type == CoherenceRequestType:GETS) {
+        // profile_sharing(address, AccessType:Read, machineIDToNodeID(in_msg.Requestor), directory[address].Sharers, directory[address].Owner);
+      }
+    }
+
+    requestQueue_in.dequeue();
+  }
+
+  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+    unblockNetwork_in.dequeue();
+  }
+
+  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      assert(in_msg.Dirty);
+      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
+      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+      DEBUG_EXPR(in_msg.Address);
+      DEBUG_EXPR(in_msg.DataBlk);
+    }
+  }
+
+  action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      assert(in_msg.Dirty == false);
+      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+
+      // NOTE: The following check would not be valid in a real
+      // implementation. We include the data in the "dataless"
+      // message so we can assert the clean data matches the datablock
+      // in memory
+      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
+    }
+  }
+
+  action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
+    peek(unblockNetwork_in, ResponseMsg) {
+      directory[address].Sharers.add(in_msg.Sender);
+    }
+  }
+
+  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
+    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
+  }
+
+  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
+    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
+    assert(directory[address].WaitingUnblocks >= 0);
+  }
+
+  // action(z_stall, "z", desc="Cannot be handled right now.") {
+    // Special name recognized as do nothing case
+  // }
+
+  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
+    requestQueue_in.recycle();
+  }
+
+  // TRANSITIONS
+
+  // Requests against stable states: supply or forward data, then block
+  // until the requester unblocks us.
+  transition(I, GETX, MM) {
+    d_sendData;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(S, GETX, MM) {
+    d_sendData;
+    g_sendInvalidations;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(I, GETS, IS) {
+    d_sendData;
+    i_popIncomingRequestQueue;
+  }
+
+  // SS may accept further GETS while blocked; count each pending unblock.
+  transition({S, SS}, GETS, SS) {
+    d_sendData;
+    n_incrementOutstanding;
+    i_popIncomingRequestQueue;
+  }
+
+  // PUTO/PUTX from a node that cannot legitimately be the owner in that
+  // mode is nacked.
+  transition({I, S, M}, PUTO) {
+    b_sendWriteBackNack;
+    i_popIncomingRequestQueue;
+  }
+
+  transition({I, S, O}, PUTX) {
+    b_sendWriteBackNack;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(O, GETX, MM) {
+    f_forwardRequest;
+    g_sendInvalidations;
+    i_popIncomingRequestQueue;
+  }
+
+  transition({O, OO}, GETS, OO) {
+    f_forwardRequest;
+    n_incrementOutstanding;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, GETX, MM) {
+    f_forwardRequest;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, GETS, MO) {
+    f_forwardRequest;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(M, PUTX, MI) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  transition(O, PUTO, OS) {
+    a_sendWriteBackAck;
+    i_popIncomingRequestQueue;
+  }
+
+  // While blocked, new requests are recycled (retried later) rather than
+  // serviced or dropped.
+  transition({MM, MO, MI, OS}, {GETS, GETX, PUTO, PUTX}) {
+    zz_recycleRequest;
+  }
+
+  // Unblock handling: exclusive unblocker becomes sole owner.
+  transition({MM, MO}, Exclusive_Unblock, M) {
+    cc_clearSharers;
+    e_ownerIsUnblocker;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MO, Unblock, O) {
+    m_addUnlockerToSharers;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition({IS, SS, OO}, {GETX, PUTO, PUTX}) {
+    zz_recycleRequest;
+  }
+
+  transition(IS, GETS) {
+    zz_recycleRequest;
+  }
+
+  transition(IS, Unblock, S) {
+    m_addUnlockerToSharers;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(IS, Exclusive_Unblock, M) {
+    cc_clearSharers;
+    e_ownerIsUnblocker;
+    j_popIncomingUnblockQueue;
+  }
+
+  // SS/OO track multiple outstanding unblocks; only the last one returns
+  // the directory to a stable state.
+  transition(SS, Unblock) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(SS, Last_Unblock, S) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OO, Unblock) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OO, Last_Unblock, O) {
+    m_addUnlockerToSharers;
+    o_decrementOutstanding;
+    j_popIncomingUnblockQueue;
+  }
+
+  // Writeback completion: dirty carries data, clean is verified only.
+  transition(MI, Dirty_Writeback, I) {
+    c_clearOwner;
+    cc_clearSharers;
+    l_writeDataToMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OS, Dirty_Writeback, S) {
+    c_clearOwner;
+    l_writeDataToMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(MI, Clean_Writeback, I) {
+    c_clearOwner;
+    cc_clearSharers;
+    ll_checkDataInMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OS, Clean_Writeback, S) {
+    c_clearOwner;
+    ll_checkDataInMemory;
+    j_popIncomingUnblockQueue;
+  }
+
+  // The writeback was superseded (e.g. the owner kept the line); fall back
+  // to the stable state without touching memory.
+  transition(MI, Unblock, M) {
+    j_popIncomingUnblockQueue;
+  }
+
+  transition(OS, Unblock, O) {
+    j_popIncomingUnblockQueue;
+  }
+}
diff --git a/src/mem/protocol/MOESI_SMP_directory-msg.sm b/src/mem/protocol/MOESI_SMP_directory-msg.sm
new file mode 100644
index 000000000..1b7424308
--- /dev/null
+++ b/src/mem/protocol/MOESI_SMP_directory-msg.sm
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// Message-type and message-format declarations shared by the
+// MOESI_SMP_directory cache and directory controllers.
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+  GETX, desc="Get eXclusive";
+  GETS, desc="Get Shared";
+  PUTX, desc="Put eXclusive";
+  PUTO, desc="Put Owned";
+  WB_ACK, desc="Writeback ack";
+  WB_NACK, desc="Writeback neg. ack";
+  INV, desc="Invalidation";
+}
+
+// CoherenceResponseType
+enumeration(CoherenceResponseType, desc="...") {
+  ACK, desc="ACKnowledgment, responder doesn't have a copy";
+  DATA, desc="Data";
+  DATA_EXCLUSIVE_CLEAN, desc="Data, no other processor has a copy, data is clean";
+  DATA_EXCLUSIVE_DIRTY, desc="Data, no other processor has a copy, data is dirty";
+  UNBLOCK, desc="Unblock";
+  UNBLOCK_EXCLUSIVE, desc="Unblock, we're in E/M";
+  WRITEBACK_CLEAN, desc="Clean writeback (no data)";
+  WRITEBACK_DIRTY, desc="Dirty writeback (contains data)";
+}
+
+// TriggerType: local (intra-controller) events, not network messages.
+enumeration(TriggerType, desc="...") {
+  ALL_ACKS, desc="See corresponding event";
+}
+
+// TriggerMsg
+structure(TriggerMsg, desc="...", interface="Message") {
+  Address Address, desc="Physical address for this request";
+  TriggerType Type, desc="Type of trigger";
+}
+
+// RequestMsg (and also forwarded requests)
+structure(RequestMsg, desc="...", interface="NetworkMessage") {
+  Address Address, desc="Physical address for this request";
+  CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+  MachineID Requestor, desc="Node who initiated the request";
+  NetDest Destination, desc="Multicast destination mask";
+  int Acks, desc="How many acks to expect";
+  MessageSizeType MessageSize, desc="size category of the message";
+}
+
+// ResponseMsg (and also unblock requests)
+structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+  Address Address, desc="Physical address for this request";
+  CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+  MachineID Sender, desc="Node who sent the data";
+  NetDest Destination, desc="Node to whom the data is sent";
+  DataBlock DataBlk, desc="data for the cache line";
+  bool Dirty, desc="Is the data dirty (different than memory)?";
+  int Acks, desc="How many acks to expect";
+  MessageSizeType MessageSize, desc="size category of the message";
+}
diff --git a/src/mem/protocol/MOESI_SMP_directory.slicc b/src/mem/protocol/MOESI_SMP_directory.slicc
new file mode 100644
index 000000000..e82aaf7f7
--- /dev/null
+++ b/src/mem/protocol/MOESI_SMP_directory.slicc
@@ -0,0 +1,4 @@
+MOESI_SMP_directory-msg.sm
+MOESI_SMP_directory-cache.sm
+MOESI_SMP_directory-dir.sm
+standard_SMP-protocol.sm
diff --git a/src/mem/protocol/MOESI_SMP_token-cache.sm b/src/mem/protocol/MOESI_SMP_token-cache.sm
new file mode 100644
index 000000000..e39f73b18
--- /dev/null
+++ b/src/mem/protocol/MOESI_SMP_token-cache.sm
@@ -0,0 +1,1734 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_token-cache.sm 1.10 05/01/19 15:41:25-06:00 beckmann@emperor11.cs.wisc.edu $
+ *
+ */
+
+machine(L1Cache, "Token protocol") {
+
+  // Persistent (starvation-avoidance) requests travel on their own
+  // ordered virtual network; transient requests/responses are unordered.
+  MessageBuffer requestFromCache, network="To", virtual_network="1", ordered="false";
+  MessageBuffer responseFromCache, network="To", virtual_network="0", ordered="false";
+  MessageBuffer persistentFromCache, network="To", virtual_network="2", ordered="true";
+
+  MessageBuffer requestToCache, network="From", virtual_network="1", ordered="false";
+  MessageBuffer responseToCache, network="From", virtual_network="0", ordered="false";
+  MessageBuffer persistentToCache, network="From", virtual_network="2", ordered="true";
+
+
+  // STATES
+  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+    // Base states
+    NP, "NP", desc="Not Present";
+    I, "I", desc="Idle";
+    S, "S", desc="Shared";
+    O, "O", desc="Owned";
+    M, "M", desc="Modified (dirty)";
+    MM, "MM", desc="Modified (dirty and locally modified)";
+    M_W, "M^W", desc="Modified (dirty), waiting";
+    MM_W, "MM^W", desc="Modified (dirty and locally modified), waiting";
+
+    // Transient States
+    IM, "IM", desc="Issued GetX";
+    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
+    OM, "OM", desc="Issued GetX, received data";
+    IS, "IS", desc="Issued GetS";
+
+    // Locked states
+    I_L, "I^L", desc="Invalid, Locked";
+    S_L, "S^L", desc="Shared, Locked";
+    IM_L, "IM^L", desc="Invalid, Locked, trying to go to Modified";
+    SM_L, "SM^L", desc="Shared, Locked, trying to go to Modified";
+    IS_L, "IS^L", desc="Invalid, Locked, trying to go to Shared";
+  }
+
+  // EVENTS
+  enumeration(Event, desc="Cache events") {
+    Load, desc="Load request from the processor";
+    Ifetch, desc="I-fetch request from the processor";
+    Store, desc="Store request from the processor";
+    L2_Replacement, desc="L2 Replacement";
+    L1_to_L2, desc="L1 to L2 transfer";
+    L2_to_L1D, desc="L2 to L1-Data transfer";
+    L2_to_L1I, desc="L2 to L1-Instruction transfer";
+
+    // Responses
+    Data_Shared, desc="Received a data message, we are now a sharer";
+    Data_Shared_All_Tokens, desc="Received a data message, we are now a sharer, we now have all the tokens";
+    Data_Owner, desc="Received a data message, we are now the owner";
+    Data_Owner_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
+    Ack, desc="Received an ack message";
+    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
+
+    // Requests
+    Transient_GETX, desc="A GetX from another processor";
+    Transient_GETS, desc="A GetS from another processor";
+
+    // Lock/Unlock
+    Persistent_GETX, desc="Another processor has priority to read/write";
+    Persistent_GETS, desc="Another processor has priority to read";
+    Own_Lock_or_Unlock, desc="This processor now has priority";
+
+    // Triggers
+    Request_Timeout, desc="Timeout";
+    Use_Timeout, desc="Timeout";
+
+  }
+
+  // TYPES
+
+  // Number of transient retries before escalating to a persistent request.
+  int getRetryThreshold();
+
+  // CacheEntry
+  structure(Entry, desc="...", interface="AbstractCacheEntry") {
+    DataBlock DataBlk, desc="data for the block, required by CacheMemory";
+    State CacheState, desc="cache state";
+    bool Dirty, desc="Is the data dirty (different than memory)?";
+    int Tokens, desc="The number of tokens we're holding for the line";
+  }
+
+  // TBE fields
+  structure(TBE, desc="...") {
+    State TBEState, desc="Transient state";
+    int IssueCount, default="0", desc="The number of times we've issued a request for this line.";
+    Address PC, desc="Program counter of request";
+    AccessType AccessType, desc="Type of request (used for profiling)";
+    Time IssueTime, desc="Time the request was issued";
+  }
+
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);
+    Address cacheProbe(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+  }
+
+  external_type(TBETable) {
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+  external_type(TimerTable, inport="yes") {
+    bool isReady();
+    Address readyAddress();
+    void set(Address, int);
+    void unset(Address);
+    bool isSet(Address);
+  }
+
+  MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
+  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+
+  TBETable TBEs, template_hack="<L1Cache_TBE>";
+  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+  CacheMemory L2cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L2"', abstract_chip_ptr="true";
+  PersistentTable persistentTable, constructor_hack="i";
+  TimerTable useTimerTable;
+  TimerTable reissueTimerTable;
+
+  int outstandingRequests, default="0";
+  int outstandingPersistentRequests, default="0";
+  void profile_outstanding_request(int outstanding);
+  void profile_outstanding_persistent_request(int outstanding);
+
+  int averageLatencyHysteresis, default="(8)"; // Constant that provides hysteresis for calculated the estimated average
+  int averageLatencyCounter, default="(500 << (*(m_L1Cache_averageLatencyHysteresis_vec[i])))";
+  // int averageLatencyCounter, default="(250)";
+
+  // Current estimate of average request latency: the accumulated counter
+  // scaled down by the hysteresis shift.
+  int averageLatencyEstimate() {
+    return averageLatencyCounter >> averageLatencyHysteresis;
+  }
+
+  // Fold one observed latency sample into the running estimate
+  // (exponential moving average via subtract-estimate, add-sample).
+  void updateAverageLatencyEstimate(int latency) {
+    assert(latency >= 0);
+
+    // By subtracting the current average and then adding the most
+    // recent sample, we calculate an estimate of the recent average.
+    // If we simply used a running sum and divided by the total number
+    // of entries, the estimate of the average would adapt very slowly
+    // after the execution has run for a long time.
+    averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
+  }
+
+  // Look up the cache entry for 'addr', checking L2 then L1D then L1I.
+  // NOTE(review): the final branch returns the L1I entry without a
+  // presence check -- callers appear to guarantee the tag exists in one
+  // of the three caches; confirm before calling on an absent line.
+  Entry getCacheEntry(Address addr), return_by_ref="yes" {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory[addr];
+    } else if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory[addr];
+    } else {
+      return L1IcacheMemory[addr];
+    }
+  }
+
+  // Token count held for 'addr'; 0 if the line is in no cache.
+  int getTokens(Address addr) {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory[addr].Tokens;
+    } else if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory[addr].Tokens;
+    } else if (L1IcacheMemory.isTagPresent(addr)) {
+      return L1IcacheMemory[addr].Tokens;
+    } else {
+      return 0;
+    }
+  }
+
+  // Apply the access permission to whichever cache currently holds 'addr'.
+  void changePermission(Address addr, AccessPermission permission) {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory.changePermission(addr, permission);
+    } else if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory.changePermission(addr, permission);
+    } else {
+      return L1IcacheMemory.changePermission(addr, permission);
+    }
+  }
+
+  bool isCacheTagPresent(Address addr) {
+    return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+  }
+
+  // State lookup priority: transient TBE state, then cache state, then
+  // I_L if another node holds a persistent lock on the line, else NP.
+  State getState(Address addr) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+
+    if (TBEs.isPresent(addr)) {
+      return TBEs[addr].TBEState;
+    } else if (isCacheTagPresent(addr)) {
+      return getCacheEntry(addr).CacheState;
+    } else if ((persistentTable.isLocked(addr) == true) && (persistentTable.findSmallest(addr) != machineID)) {
+      // Not in cache, in persistent table, but this processor isn't highest priority
+      return State:I_L;
+    } else {
+      return State:NP;
+    }
+  }
+
+  // Update the state for 'addr' in the TBE and/or cache entry, asserting
+  // the token-protocol invariants the new state implies (token counts,
+  // persistent-table locking, pending timers), and derive the cache
+  // access permission from the state.
+  void setState(Address addr, State state) {
+    // A block lives in at most one of the three caches.
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+
+    assert(outstandingPersistentRequests >= 0);
+    assert(outstandingRequests >= 0);
+
+    // The use timer is set exactly while we sit in a "waiting" M state.
+    if (useTimerTable.isSet(addr)) {
+      assert((state == State:M_W) || (state == State:MM_W));
+    } else {
+      assert(state != State:M_W);
+      assert(state != State:MM_W);
+    }
+
+    // The reissue timer is set only while a retryable transient request
+    // is outstanding.
+    if (reissueTimerTable.isSet(addr)) {
+      assert((state == State:IS) ||
+             (state == State:IM) ||
+             (state == State:SM) ||
+             (state == State:OM) ||
+             (state == State:IS_L) ||
+             (state == State:IM_L) ||
+             (state == State:SM_L));
+    } else if (TBEs.isPresent(addr) && TBEs[addr].IssueCount < getRetryThreshold()) {
+      // If the timer is not set, you better have issued a persistent request
+      assert(state != State:IS);
+      assert(state != State:IM);
+      assert(state != State:SM);
+      assert(state != State:OM);
+      assert(state != State:IS_L);
+      assert(state != State:IM_L);
+      assert(state != State:SM_L);
+    }
+
+    // Past the retry threshold we have gone persistent: no reissue timer.
+    if (TBEs.isPresent(addr) && (TBEs[addr].IssueCount > getRetryThreshold())) {
+      assert(reissueTimerTable.isSet(addr) == false);
+    }
+
+    // A TBE implies a transient state.
+    if (TBEs.isPresent(addr)) {
+      assert(state != State:I);
+      assert(state != State:S);
+      assert(state != State:O);
+      assert(state != State:MM);
+      assert(state != State:M);
+      TBEs[addr].TBEState := state;
+    }
+
+    if (isCacheTagPresent(addr)) {
+      // Make sure the token count is in range
+      assert(getCacheEntry(addr).Tokens >= 0);
+      assert(getCacheEntry(addr).Tokens <= max_tokens());
+
+      if ((state == State:I_L) ||
+          (state == State:IM_L) ||
+          (state == State:IS_L)) {
+        // Make sure we have no tokens in the "Invalid, locked" states.
+        // (The enclosing branch already guarantees the tag is present, so
+        // the previous redundant isCacheTagPresent() guard was dropped.)
+        assert(getCacheEntry(addr).Tokens == 0);
+
+        // Make sure the line is locked
+        assert(persistentTable.isLocked(addr));
+
+        // But we shouldn't have highest priority for it
+        assert(persistentTable.findSmallest(addr) != machineID);
+
+      } else if ((state == State:S_L) ||
+                 (state == State:SM_L)) {
+        // Make sure we have only one token in the "Shared, locked" states
+        assert(getCacheEntry(addr).Tokens == 1);
+
+        // Make sure the line is locked...
+        assert(persistentTable.isLocked(addr));
+
+        // ...But we shouldn't have highest priority for it...
+        assert(persistentTable.findSmallest(addr) != machineID);
+
+        // ...And it must be a GETS request
+        assert(persistentTable.typeOfSmallest(addr) == AccessType:Read);
+
+      } else {
+
+        // If there is an entry in the persistent table of this block,
+        // this processor needs to have an entry in the table for this
+        // block, and that entry better be the smallest (highest
+        // priority). Otherwise, the state should have been one of
+        // locked states
+
+        if (persistentTable.isLocked(addr)) {
+          assert(persistentTable.findSmallest(addr) == machineID);
+        }
+      }
+
+      // in M and E you have all the tokens
+      if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
+        assert(getCacheEntry(addr).Tokens == max_tokens());
+      }
+
+      // in NP you have no tokens
+      if (state == State:NP) {
+        assert(getCacheEntry(addr).Tokens == 0);
+      }
+
+      // You have at least one token in S-like states
+      if (state == State:S || state == State:SM) {
+        assert(getCacheEntry(addr).Tokens > 0);
+      }
+
+      // You have at least half the tokens in O-like states.
+      // BUGFIX: this condition was 'state == State:O && state == State:OM',
+      // which can never hold (one value cannot equal two enumerators), so
+      // these asserts were dead code; '||' matches the stated intent.
+      if (state == State:O || state == State:OM) {
+        assert(getCacheEntry(addr).Tokens >= 1); // Must have at least one token
+        assert(getCacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+      }
+
+      getCacheEntry(addr).CacheState := state;
+
+      // Set permission: writable only in locally-modified states, readable
+      // in states that hold valid data, otherwise invalid.
+      if (state == State:MM ||
+          state == State:MM_W) {
+        changePermission(addr, AccessPermission:Read_Write);
+      } else if ((state == State:S) ||
+                 (state == State:O) ||
+                 (state == State:M) ||
+                 (state == State:M_W) ||
+                 (state == State:SM) ||
+                 (state == State:SM_L) ||
+                 (state == State:OM)) {
+        changePermission(addr, AccessPermission:Read_Only);
+      } else {
+        changePermission(addr, AccessPermission:Invalid);
+      }
+    }
+  }
+
+ // Map a mandatory-queue request type onto the protocol event that
+ // drives the state machine for it.
+ Event mandatory_request_type_to_event(CacheRequestType type) {
+ if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+ return Event:Store; // atomics are treated as stores
+ } else if (type == CacheRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if (type == CacheRequestType:LD) {
+ return Event:Load;
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+
+ // Classify a processor request as a read or a write access; the
+ // access type decides between GETS- and GETX-style protocol requests.
+ AccessType cache_request_type_to_access_type(CacheRequestType type) {
+ if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+ return AccessType:Write;
+ } else if ((type == CacheRequestType:LD) || (type == CacheRequestType:IFETCH)) {
+ return AccessType:Read;
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+
+ // ** OUT_PORTS **
+ // Outbound virtual networks used by this L1 controller.
+ out_port(persistentNetwork_out, PersistentMsg, persistentFromCache);
+ out_port(requestNetwork_out, RequestMsg, requestFromCache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromCache);
+
+ // ** IN_PORTS **
+
+ // Use Timer
+ // Fires Use_Timeout for a block whose use window (set by
+ // o_scheduleUseTimeout) has expired.
+ in_port(useTimerTable_in, Address, useTimerTable) {
+ if (useTimerTable_in.isReady()) {
+ trigger(Event:Use_Timeout, useTimerTable.readyAddress());
+ }
+ }
+
+ // Reissue Timer
+ // Fires Request_Timeout for an outstanding request whose wakeup timer
+ // (set in a_issueRequest) has expired, so the request can be reissued.
+ in_port(reissueTimerTable_in, Address, reissueTimerTable) {
+ if (reissueTimerTable_in.isReady()) {
+ trigger(Event:Request_Timeout, reissueTimerTable.readyAddress());
+ }
+ }
+
+ // Persistent Network
+ // Persistent (starvation-avoidance) requests first update the
+ // persistent table, then trigger an event based on who now holds the
+ // smallest (highest-priority) entry for the block.
+ in_port(persistentNetwork_in, PersistentMsg, persistentToCache) {
+ if (persistentNetwork_in.isReady()) {
+ peek(persistentNetwork_in, PersistentMsg) {
+
+ // Apply the lockdown or unlockdown message to the table
+ if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
+ } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
+ } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+ persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
+ } else {
+ error("Unexpected message");
+ }
+
+ // React to the message based on the current state of the table
+ if (persistentTable.isLocked(in_msg.Address)) {
+ if (persistentTable.findSmallest(in_msg.Address) == machineID) {
+ // Our Own Lock - this processor is highest priority
+ trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+ } else {
+ // Someone else won; the access type of the winner decides
+ // whether we must surrender all tokens or all but one.
+ if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
+ trigger(Event:Persistent_GETS, in_msg.Address);
+ } else {
+ trigger(Event:Persistent_GETX, in_msg.Address);
+ }
+ }
+ } else {
+ // Unlock case - no entries in the table
+ trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+ }
+ }
+ }
+ }
+
+
+ // Request Network
+ // Transient (best-effort) GETX/GETS requests from other controllers.
+ in_port(requestNetwork_in, RequestMsg, requestToCache) {
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:Transient_GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Transient_GETS, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Response Network
+ // Incoming tokens/data.  When the message would bring this cache up to
+ // the full token count, trigger the *_All_Tokens variant of the event.
+ in_port(responseNetwork_in, ResponseMsg, responseToCache) {
+ if (responseNetwork_in.isReady()) {
+ peek(responseNetwork_in, ResponseMsg) {
+
+ if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
+ trigger(Event:Data_Owner, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Data_Shared, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ } else {
+ // This response delivers the last outstanding tokens
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack_All_Tokens, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
+ trigger(Event:Data_Owner_All_Tokens, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Data_Shared_All_Tokens, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+ }
+
+ // Mandatory Queue
+ // Demand requests from the sequencer.  The L1s are exclusive with
+ // respect to the L2 (a block lives in at most one of L1I/L1D/L2), so
+ // this port first shuffles the block into the correct cache before
+ // triggering the actual Load/Ifetch/Store event.
+ in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady()) {
+ peek(mandatoryQueue_in, CacheMsg) {
+ // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+ if (in_msg.Type == CacheRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ // The block is in the wrong L1, try to write it to the L2
+ if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+ trigger(Event:L1_to_L2, in_msg.Address);
+ } else {
+ // L2 is full: evict an L2 victim first
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+ // L1 doesn't have the line, but we have space for it in the L1
+ if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+ // L2 has it (maybe not with the right permissions)
+ trigger(Event:L2_to_L1I, in_msg.Address);
+ } else {
+ // We have room, the L2 doesn't have it, so the L1 fetches the line
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ }
+ } else {
+ // No room in the L1, so we need to make room
+ if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.Address))) {
+ // The L2 has room, so we move the line from the L1 to the L2
+ trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ // The L2 does not have room, so we replace a line from the L2
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.Address)));
+ }
+ }
+ }
+ } else {
+ // *** DATA ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ // The block is in the wrong L1, try to write it to the L2
+ if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+ trigger(Event:L1_to_L2, in_msg.Address);
+ } else {
+ // L2 is full: evict an L2 victim first
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+ // L1 doesn't have the line, but we have space for it in the L1
+ if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+ // L2 has it (maybe not with the right permissions)
+ trigger(Event:L2_to_L1D, in_msg.Address);
+ } else {
+ // We have room, the L2 doesn't have it, so the L1 fetches the line
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ }
+ } else {
+ // No room in the L1, so we need to make room
+ if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.Address))) {
+ // The L2 has room, so we move the line from the L1 to the L2
+ trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ // The L2 does not have room, so we replace a line from the L2
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.Address)));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+
+ // Issue the next request for the block recorded in the TBE.  The first
+ // getRetryThreshold() attempts are transient (best-effort) broadcasts
+ // with a reissue timer; beyond that we escalate to a persistent
+ // request, which always completes and so needs no timer.
+ action(a_issueRequest, "a", desc="Issue GETS or GETX request (transient or persistent)") {
+
+ if (TBEs[address].IssueCount == 0) {
+ // Update outstanding requests
+ profile_outstanding_request(outstandingRequests);
+ outstandingRequests := outstandingRequests + 1;
+ }
+
+ if (TBEs[address].IssueCount < getRetryThreshold()) {
+ // Issue a normal request
+ enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(map_Address_to_Directory(address));
+
+ if (TBEs[address].AccessType == AccessType:Read) {
+ out_msg.Type := CoherenceRequestType:GETS;
+ } else {
+ out_msg.Type := CoherenceRequestType:GETX;
+ }
+
+ // Retries are tagged differently so they can be profiled
+ if (TBEs[address].IssueCount == 0) {
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Reissue_Control;
+ }
+ }
+
+ // Increment IssueCount
+ TBEs[address].IssueCount := TBEs[address].IssueCount + 1;
+
+ // Set a wakeup timer
+ reissueTimerTable.set(address, 2*averageLatencyEstimate());
+
+ } else {
+ // Try to issue a Persistent Request
+ if (persistentTable.okToIssueStarving(address)) {
+ // Issue a persistent request
+ enqueue(persistentNetwork_out, PersistentMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ if (TBEs[address].AccessType == AccessType:Read) {
+ out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
+ } else {
+ out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
+ }
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Persistent_Control;
+ }
+ persistentTable.markEntries(address);
+
+ // Update outstanding requests
+ profile_outstanding_persistent_request(outstandingPersistentRequests);
+ outstandingPersistentRequests := outstandingPersistentRequests + 1;
+
+ // Increment IssueCount
+ TBEs[address].IssueCount := TBEs[address].IssueCount + 1;
+
+ // Do not schedule a wakeup, a persistent request will always complete
+
+ } else {
+ // We'd like to issue a persistent request, but are not allowed
+ // to issue a P.R. right now. Thus, we do not increment the
+ // IssueCount.
+
+
+ // Set a wakeup timer
+ reissueTimerTable.set(address, 10);
+ }
+ }
+ }
+
+ // Forward an incoming response (its tokens and data, unchanged) on to
+ // the directory, because this cache has no use for them.
+ action(b_bounceResponse, "b", desc="Bounce tokens and data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ // FIXME, should use a 3rd vnet
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.DestMachine := MachineType:Directory;
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
+ // Return any held tokens to the directory as a dataless ACK (the line
+ // is clean, so no data needs to be written back); no-op with 0 tokens.
+ action(c_cleanReplacement, "c", desc="Issue clean writeback") {
+ if (getCacheEntry(address).Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.DestMachine := MachineType:Directory;
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+ }
+
+ // Write the owned line back to the directory with all our tokens.
+ // Dirty lines carry data (DATA_OWNER); clean owned lines send an
+ // ACK_OWNER (data included only for checking at the memory).
+ action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.DestMachine := MachineType:Directory;
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ if (getCacheEntry(address).Dirty) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK_OWNER;
+ // NOTE: in a real system this would not send data. We send
+ // data here only so we can check it at the memory
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ // Answer a transient GETS with the data and exactly one token.
+ // NOTE(review): the assert after the decrement requires that we still
+ // hold >= 1 token afterwards, i.e. this action presumes the owner
+ // entered with at least two tokens — confirm against the O/M states
+ // that invoke it.
+ action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.Tokens := 1;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
+ assert(getCacheEntry(address).Tokens >= 1);
+ }
+
+ // Answer a transient GETX with the data and every token we hold,
+ // making the requestor the new owner; our count drops to zero.
+ action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ // Surrender all collected tokens (no data) to the current persistent-
+ // request winner; no message is sent if we hold none.
+ action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
+ assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
+ if (getCacheEntry(address).Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.DestMachine := MachineType:L1Cache;
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ // Surrender the data and every token to the current persistent-
+ // request winner (starver); requires at least one token on entry.
+ action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
+ assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
+ assert(getCacheEntry(address).Tokens > 0);
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.DestMachine := MachineType:L1Cache;
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ // Give all but one token (no data) to the persistent-GETS winner,
+ // keeping a single token so this cache can remain a sharer.  When we
+ // hold exactly one token nothing is sent and the count stays at 1.
+ action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
+ assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
+ assert(getCacheEntry(address).Tokens > 0);
+ if (getCacheEntry(address).Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.DestMachine := MachineType:L1Cache;
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens - 1;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ getCacheEntry(address).Tokens := 1;
+ }
+
+ // Give the data plus all but one token to the persistent-GETS winner,
+ // keeping a single token so this cache can remain a sharer.  Mirrors
+ // f_sendAckWithAllButOneTokens, but carries the data block.
+ action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all but one token to starver") {
+ assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
+ assert(getCacheEntry(address).Tokens > 0);
+ if (getCacheEntry(address).Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.DestMachine := MachineType:L1Cache;
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens - 1;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ // Keep exactly one token (a no-op when we already held just one);
+ // hoisted out of the if to match f_sendAckWithAllButOneTokens.
+ getCacheEntry(address).Tokens := 1;
+ }
+
+ // Redirect an incoming response, unchanged, to the persistent-request
+ // winner instead of keeping it; only legal while the line is locked.
+ action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
+ assert(persistentTable.isLocked(address));
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
+ // FIXME, should use a 3rd vnet in some cases
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+
+ // Complete a load: hand the cached data block back to the sequencer.
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ sequencer.readCallback(address, getCacheEntry(address).DataBlk);
+ }
+
+ // Complete a store: let the sequencer write the block, mark it dirty.
+ action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
+ getCacheEntry(address).Dirty := true;
+ }
+
+ // Allocate a TBE for a new miss, recording the PC, the read/write
+ // access type, and the issue time (used for latency profiling).
+ action(i_allocateTBE, "i", desc="Allocate TBE") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ TBEs[address].IssueCount := 0;
+ peek(mandatoryQueue_in, CacheMsg) {
+ TBEs[address].PC := in_msg.ProgramCounter;
+ TBEs[address].AccessType := cache_request_type_to_access_type(in_msg.Type);
+ }
+ TBEs[address].IssueTime := get_time();
+ }
+
+ // Cancel the pending reissue timer, if one is armed for this address.
+ action(j_unsetReissueTimer, "j", desc="Unset reissue timer.") {
+ if (reissueTimerTable.isSet(address)) {
+ reissueTimerTable.unset(address);
+ }
+ }
+
+ // Cancel the use timer for this address.
+ action(jj_unsetUseTimer, "\j", desc="Unset use timer.") {
+ useTimerTable.unset(address);
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+ action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
+ persistentNetwork_in.dequeue();
+ }
+
+ action(m_popRequestQueue, "m", desc="Pop request queue.") {
+ requestNetwork_in.dequeue();
+ }
+
+ action(n_popResponseQueue, "n", desc="Pop response queue") {
+ responseNetwork_in.dequeue();
+ }
+
+ // Arm the use timer (fixed 15-cycle window) for a freshly acquired
+ // block; expiry triggers Use_Timeout.
+ action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
+ useTimerTable.set(address, 15);
+ }
+
+ // Add the tokens carried by the incoming response to our count; a
+ // response that carries zero tokens is a protocol violation.
+ action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Tokens != 0);
+ getCacheEntry(address).Tokens := getCacheEntry(address).Tokens + in_msg.Tokens;
+ }
+ }
+
+ // Retire the TBE.  If the request had escalated to a persistent
+ // request (IssueCount above the retry threshold), broadcast the
+ // deactivation first; then update the latency estimate and profile
+ // how many retries this access needed.
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ outstandingRequests := outstandingRequests - 1;
+ if (TBEs[address].IssueCount > getRetryThreshold()) {
+ outstandingPersistentRequests := outstandingPersistentRequests - 1;
+ enqueue(persistentNetwork_out, PersistentMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Persistent_Control;
+ }
+ }
+
+ // Update average latency
+ updateAverageLatencyEstimate(time_to_int(get_time()) - time_to_int(TBEs[address].IssueTime));
+
+ // Profile
+ profile_token_retry(address, TBEs[address].AccessType, TBEs[address].IssueCount);
+ TBEs.deallocate(address);
+ }
+
+ // Hand all collected tokens (no data) to a transient-GETX requestor;
+ // no message is sent if we hold none, but the count is zeroed anyway.
+ action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
+ if (getCacheEntry(address).Tokens > 0) {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ assert(getCacheEntry(address).Tokens >= 1);
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ // Copy the data block (and its dirty bit) from the incoming response
+ // into the cache entry.
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(responseNetwork_in, ResponseMsg) {
+ getCacheEntry(address).DataBlk := in_msg.DataBlk;
+ getCacheEntry(address).Dirty := in_msg.Dirty;
+ }
+ }
+
+ // Free the block from whichever L1 currently holds it (D first).
+ action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L1DcacheMemory.deallocate(address);
+ } else {
+ L1IcacheMemory.deallocate(address);
+ }
+ }
+
+ action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
+ if (L1DcacheMemory.isTagPresent(address) == false) {
+ L1DcacheMemory.allocate(address);
+ }
+ }
+
+ action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
+ if (L1IcacheMemory.isTagPresent(address) == false) {
+ L1IcacheMemory.allocate(address);
+ }
+ }
+
+ action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
+ L2cacheMemory.allocate(address);
+ }
+
+ action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ L2cacheMemory.deallocate(address);
+ }
+
+ // Copy the whole entry from whichever L1 holds the block into the L2
+ // (the L1s and L2 are exclusive, so exactly one L1 can hold it).
+ action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L2cacheMemory[address] := L1DcacheMemory[address];
+ } else {
+ L2cacheMemory[address] := L1IcacheMemory[address];
+ }
+ }
+
+ // Copy the whole entry from the L2 into whichever L1 was allocated
+ // for it (the D-cache if present, otherwise the I-cache).
+ action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L1DcacheMemory[address] := L2cacheMemory[address];
+ } else {
+ L1IcacheMemory[address] := L2cacheMemory[address];
+ }
+ }
+
+ action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ profile_miss(in_msg, id);
+ }
+ }
+
+ // Sanity check used when a response arrives for a block we already
+ // hold valid data for.
+ action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
+ }
+ }
+
+ // action(z_stall, "z", desc="Stall") {
+ // }
+
+ action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
+ mandatoryQueue_in.recycle();
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/L2_Replacement from transient states
+ transition({IM, SM, OM, IS, IM_L, IS_L, I_L, S_L, SM_L, M_W, MM_W}, L2_Replacement) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, Store) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IM, IS, IM_L, IS_L}, {Load, Ifetch}) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IM, SM, OM, IS, I_L, IM_L, IS_L, S_L, SM_L}, {L1_to_L2, L2_to_L1D, L2_to_L1I}) {
+ zz_recycleMandatoryQueue;
+ }
+
+ // Transitions moving data between the L1 and L2 caches
+ transition({I, S, O, M, MM, M_W, MM_W}, L1_to_L2) {
+ vv_allocateL2CacheBlock;
+ ss_copyFromL1toL2;
+ gg_deallocateL1CacheBlock;
+ }
+
+ transition({I, S, O, M, MM, M_W, MM_W}, L2_to_L1D) {
+ ii_allocateL1DCacheBlock;
+ tt_copyFromL2toL1;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition({I, S, O, M, MM, M_W, MM_W}, L2_to_L1I) {
+ pp_allocateL1ICacheBlock;
+ tt_copyFromL2toL1;
+ rr_deallocateL2CacheBlock;
+ }
+
+ // Locks
+ transition({NP, I, S, O, M, MM, M_W, MM_W, IM, SM, OM, IS}, Own_Lock_or_Unlock) {
+ l_popPersistentQueue;
+ }
+
+ // Transitions from NP (block not present in any cache)
+ transition(NP, Load, IS) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(NP, Ifetch, IS) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(NP, Store, IM) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ // We have no entry and no TBE: forward stray tokens/data to memory
+ transition(NP, {Ack, Data_Shared, Data_Owner, Data_Owner_All_Tokens}) {
+ b_bounceResponse;
+ n_popResponseQueue;
+ }
+
+ // Nothing to contribute to a transient request
+ transition(NP, {Transient_GETX, Transient_GETS}) {
+ m_popRequestQueue;
+ }
+
+ // Record that the block is now locked by another processor
+ transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
+ l_popPersistentQueue;
+ }
+
+ // Transitions from Idle (tag allocated, may hold stray tokens, no data)
+ transition(I, Load, IS) {
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch, IS) {
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Store, IM) {
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, L2_Replacement) {
+ c_cleanReplacement; // Only needed in some cases
+ rr_deallocateL2CacheBlock;
+ }
+
+ // A GETX wants every token, including any we collected while idle
+ transition(I, Transient_GETX) {
+ t_sendAckWithCollectedTokens;
+ m_popRequestQueue;
+ }
+
+ transition(I, Transient_GETS) {
+ m_popRequestQueue;
+ }
+
+ transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ // Already locked; already gave up our tokens
+ transition(I_L, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition(I, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_Shared, S) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_Owner, O) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_Owner_All_Tokens, M) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Shared (>= 1 token, read permission)
+ transition({S, SM, S_L, SM_L}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Store, SM) {
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, L2_Replacement, I) {
+ c_cleanReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(S, Transient_GETX, I) {
+ t_sendAckWithCollectedTokens;
+ m_popRequestQueue;
+ }
+
+ // A sharer need not answer a transient GETS; the owner supplies data
+ transition(S, Transient_GETS) {
+ m_popRequestQueue;
+ }
+
+ transition({S, S_L}, Persistent_GETX, I_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ // Keep one token so we stay a sharer while the line is read-locked
+ transition(S, Persistent_GETS, S_L) {
+ f_sendAckWithAllButOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(S_L, Persistent_GETS) {
+ l_popPersistentQueue;
+ }
+
+ transition(S, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_Owner, O) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_Owner_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Owned (holds the owner token and the valid data)
+ transition({O, OM}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, Store, OM) {
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, L2_Replacement, I) {
+ cc_dirtyReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(O, Transient_GETX, I) {
+ dd_sendDataWithAllTokens;
+ m_popRequestQueue;
+ }
+
+ transition(O, Persistent_GETX, I_L) {
+ ee_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
+ // Keep one token so we remain a sharer under the read lock
+ transition(O, Persistent_GETS, S_L) {
+ ff_sendDataWithAllButOneTokens;
+ l_popPersistentQueue;
+ }
+
+ // The owner answers transient GETS with data plus a single token
+ transition(O, Transient_GETS) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ transition(O, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Collecting the last outstanding tokens upgrades us to M
+ transition(O, Ack_All_Tokens, M) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Data_Shared_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Modified (MM: all tokens, read/write permission;
+ // MM_W additionally holds the block for the use-timeout window)
+ transition({MM, MM_W}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM, MM_W}, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, L2_Replacement, I) {
+ cc_dirtyReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MM, {Transient_GETX, Transient_GETS}, I) {
+ dd_sendDataWithAllTokens;
+ m_popRequestQueue;
+ }
+
+ transition(MM_W, {Transient_GETX, Transient_GETS}) { // Ignore the request
+ m_popRequestQueue;
+ }
+
+ // Implement the migratory sharing optimization, even for persistent requests
+ transition(MM, {Persistent_GETX, Persistent_GETS}, I_L) {
+ ee_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
+ // Implement the migratory sharing optimization, even for persistent requests
+ transition(MM_W, {Persistent_GETX, Persistent_GETS}, I_L) {
+ s_deallocateTBE;
+ ee_sendDataWithAllTokens;
+ jj_unsetUseTimer;
+ l_popPersistentQueue;
+ }
+
+ // Use window expired: settle into plain MM
+ transition(MM_W, Use_Timeout, MM) {
+ s_deallocateTBE;
+ jj_unsetUseTimer;
+ }
+
+ // Transitions from Dirty Exclusive (M: all tokens, read-only until a
+ // store upgrades to MM; M_W is the timed variant after a fill)
+ transition({M, M_W}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store, MM) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M_W, Store, MM_W) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, L2_Replacement, I) {
+ cc_dirtyReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(M, Transient_GETX, I) {
+ dd_sendDataWithAllTokens;
+ m_popRequestQueue;
+ }
+
+ // Granting a single token to a reader demotes us to Owned
+ transition(M, Transient_GETS, O) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ transition(M_W,{Transient_GETX, Transient_GETS}) { // Ignore the request
+ m_popRequestQueue;
+ }
+
+ transition(M, Persistent_GETX, I_L) {
+ ee_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(M, Persistent_GETS, S_L) {
+ ff_sendDataWithAllButOneTokens;
+ l_popPersistentQueue;
+ }
+
+ // Persistent requests cut the use window short: retire the TBE first
+ transition(M_W, Persistent_GETX, I_L) {
+ s_deallocateTBE;
+ ee_sendDataWithAllTokens;
+ jj_unsetUseTimer;
+ l_popPersistentQueue;
+ }
+
+ transition(M_W, Persistent_GETS, S_L) {
+ s_deallocateTBE;
+ ff_sendDataWithAllButOneTokens;
+ jj_unsetUseTimer;
+ l_popPersistentQueue;
+ }
+
+ transition(M_W, Use_Timeout, M) {
+ s_deallocateTBE;
+ jj_unsetUseTimer;
+ }
+
+ // Transient_GETX and Transient_GETS in transient states
+ transition(OM, {Transient_GETX, Transient_GETS}) {
+ m_popRequestQueue; // Even if we have the data, we can pretend we don't have it yet.
+ }
+
+ // A writer takes priority over an in-flight read miss: give up any
+ // tokens collected so far.
+ transition(IS, Transient_GETX) {
+ t_sendAckWithCollectedTokens;
+ m_popRequestQueue;
+ }
+
+ transition(IS, Transient_GETS) {
+ m_popRequestQueue;
+ }
+
+ // Persistent requests force transient states into their locked
+ // counterparts; any collected tokens go to the starver.
+ transition(IS, {Persistent_GETX, Persistent_GETS}, IS_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(IS_L, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition(IM, {Persistent_GETX, Persistent_GETS}, IM_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(IM_L, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition({SM, SM_L}, Persistent_GETX, IM_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(SM, Persistent_GETS, SM_L) {
+ f_sendAckWithAllButOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(SM_L, Persistent_GETS) {
+ l_popPersistentQueue;
+ }
+
+ transition(OM, Persistent_GETX, IM_L) {
+ ee_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(OM, Persistent_GETS, SM_L) {
+ ff_sendDataWithAllButOneTokens;
+ l_popPersistentQueue;
+ }
+
+ // Transitions from IM/SM
+
+ transition({IM, SM}, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data_Shared, SM) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data_Owner, OM) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // All tokens arrived at once: complete the store immediately and enter
+ // MM_W so the block is held for the use-timeout window.
+ transition(IM, Data_Owner_All_Tokens, MM_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ hh_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Data_Owner, OM) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Data_Owner_All_Tokens, MM_W) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ hh_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ // Another writer wins the race: surrender collected tokens (SM drops
+ // back to IM).
+ transition({IM, SM}, Transient_GETX, IM) {
+ t_sendAckWithCollectedTokens;
+ m_popRequestQueue;
+ }
+
+ transition({IM, SM}, Transient_GETS) {
+ m_popRequestQueue;
+ }
+
+ transition({IM, SM}, Request_Timeout) {
+ j_unsetReissueTimer;
+ a_issueRequest;
+ }
+
+ // Transitions from OM
+ // OM: owner with a pending store, waiting to collect the remaining
+ // tokens before the write can complete.
+
+ transition(OM, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Ack_All_Tokens, MM_W) {
+ q_updateTokensFromResponse;
+ hh_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Data_Shared_All_Tokens, MM_W) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ hh_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Request_Timeout) {
+ j_unsetReissueTimer;
+ a_issueRequest;
+ }
+
+ // Transitions from IS
+ // IS: read miss outstanding; any data response with at least one token
+ // satisfies the load.
+
+ transition(IS, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Data_Shared, S) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_load_hit;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Data_Owner, O) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_load_hit;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ // Received everything: go to M_W (exclusive, use timer armed) so a
+ // following store can upgrade locally.
+ transition(IS, Data_Owner_All_Tokens, M_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_load_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Request_Timeout) {
+ j_unsetReissueTimer;
+ a_issueRequest;
+ }
+
+ // Transitions from I_L
+ // Locked-invalid: another node holds a persistent lock on the line, but
+ // local misses may still be issued (responses will be bounced).
+
+ transition(I_L, Load, IS_L) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I_L, Ifetch, IS_L) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I_L, Store, IM_L) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+
+ // Transitions from S_L
+
+ transition(S_L, Store, SM_L) {
+ i_allocateTBE;
+ a_issueRequest;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ // Other transitions from *_L states
+
+ // While locked, ignore transient requests (the starver has priority).
+ transition({I_L, IM_L, IS_L, S_L, SM_L}, {Transient_GETS, Transient_GETX}) {
+ m_popRequestQueue;
+ }
+
+ // Responses that arrive while the line is locked are forwarded to the
+ // starving (persistent) requestor instead of being consumed locally.
+ transition({I_L, IM_L, IS_L, S_L, SM_L}, Ack) {
+ g_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition({I_L, IM_L, S_L, SM_L}, {Data_Shared, Data_Owner}) {
+ g_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition({I_L, S_L}, Data_Owner_All_Tokens) {
+ g_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition(IS_L, Request_Timeout) {
+ j_unsetReissueTimer;
+ a_issueRequest;
+ }
+
+ transition({IM_L, SM_L}, Request_Timeout) {
+ j_unsetReissueTimer;
+ a_issueRequest;
+ }
+
+ // Opportunistically complete the memory operation in the following
+ // cases. Note: these transitions could just use
+ // g_bounceResponseToStarver, but if we have the data and tokens, we
+ // might as well complete the memory request while we have the
+ // chance (and then immediately forward on the data)
+
+ transition(IM_L, Data_Owner_All_Tokens, I_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ hh_store_hit;
+ ee_sendDataWithAllTokens;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ // Same as above, but a GETS starver only needs all-but-one tokens, so
+ // we can retire to S_L and keep a token.
+ transition(SM_L, Data_Owner_All_Tokens, S_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ hh_store_hit;
+ ff_sendDataWithAllButOneTokens;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ // Locked read miss receives shared data: opportunistically complete the
+ // load, then pass the collected tokens on to the starver and fall back
+ // to locked-invalid.
+ transition(IS_L, Data_Shared, I_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_load_hit;
+ s_deallocateTBE;
+ e_sendAckWithCollectedTokens;
+ // The original listed j_unsetReissueTimer twice; cancelling the
+ // reissue timer once is sufficient.
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ // Locked read miss receives owned data: complete the load, then forward
+ // data plus all tokens (including ownership) to the starver.
+ transition(IS_L, {Data_Owner, Data_Owner_All_Tokens}, I_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_load_hit;
+ ee_sendDataWithAllTokens;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ // Own_Lock_or_Unlock
+ // Our own persistent request (or its deactivation) reached the head of
+ // the persistent table: drop the _L suffix and resume normal operation.
+
+ transition(I_L, Own_Lock_or_Unlock, I) {
+ l_popPersistentQueue;
+ }
+
+ transition(S_L, Own_Lock_or_Unlock, S) {
+ l_popPersistentQueue;
+ }
+
+ transition(IM_L, Own_Lock_or_Unlock, IM) {
+ l_popPersistentQueue;
+ }
+
+ transition(IS_L, Own_Lock_or_Unlock, IS) {
+ l_popPersistentQueue;
+ }
+
+ transition(SM_L, Own_Lock_or_Unlock, SM) {
+ l_popPersistentQueue;
+ }
+}
diff --git a/src/mem/protocol/MOESI_SMP_token-dir.sm b/src/mem/protocol/MOESI_SMP_token-dir.sm
new file mode 100644
index 000000000..5464a25fc
--- /dev/null
+++ b/src/mem/protocol/MOESI_SMP_token-dir.sm
@@ -0,0 +1,405 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_token-dir.sm 1.5 04/11/17 14:07:50-06:00 mikem@emperor15.cs.wisc.edu $
+ */
+
+// Directory controller for the MOESI SMP token protocol. The directory
+// is a token holder like any cache: it starts with all tokens for every
+// line and hands them out / collects them via the response network.
+machine(Directory, "Token protocol") {
+
+ MessageBuffer responseFromDir, network="To", virtual_network="0", ordered="false";
+
+ MessageBuffer responseToDir, network="From", virtual_network="0", ordered="false";
+ MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";
+ MessageBuffer persistentToDir, network="From", virtual_network="2", ordered="true";
+
+
+ // STATES
+ enumeration(State, desc="Directory states", default="Directory_State_O") {
+ // Base states
+ O, desc="Owner";
+ NO, desc="Not Owner";
+ L, desc="Locked";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETX, desc="A GETX arrives";
+ GETS, desc="A GETS arrives";
+ Lockdown, desc="A lockdown request arrives";
+ Unlockdown, desc="An un-lockdown request arrives";
+ Data_Owner, desc="Data arrive, includes the owner token";
+ Data_Shared, desc="Data arrive, does not include the owner token";
+ Ack, desc="Tokens arrive";
+ Ack_Owner, desc="Tokens arrive, including the owner token";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ // Tokens defaults to max_tokens(): at reset the directory owns every
+ // token for every line.
+ structure(Entry, desc="...") {
+ State DirectoryState, desc="Directory state";
+ DataBlock DataBlk, desc="data for the block";
+ int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+
+ PersistentTable persistentTable, constructor_hack="i";
+
+ State getState(Address addr) {
+ return directory[addr].DirectoryState;
+ }
+
+ // Sets the state and sanity-checks the token-count invariants implied
+ // by the new state.
+ void setState(Address addr, State state) {
+ directory[addr].DirectoryState := state;
+
+ if (state == State:L) {
+ assert(directory[addr].Tokens == 0);
+ }
+
+ // Make sure the token count is in range
+ assert(directory[addr].Tokens >= 0);
+ assert(directory[addr].Tokens <= max_tokens());
+
+ if (state == State:O) {
+ assert(directory[addr].Tokens >= 1); // Must have at least one token
+ assert(directory[addr].Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+ }
+ }
+
+ // ** OUT_PORTS **
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+
+ // ** IN_PORTS **
+
+ // Persistent (starvation) requests: update the persistent table first,
+ // then trigger Lockdown/Unlockdown based on the table's resulting state.
+ in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
+ if (persistentNetwork_in.isReady()) {
+ peek(persistentNetwork_in, PersistentMsg) {
+
+ // Apply the lockdown or unlockdown message to the table
+ if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
+ } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
+ } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+ persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
+ } else {
+ error("Invalid message");
+ }
+
+ // React to the message based on the current state of the table
+ if (persistentTable.isLocked(in_msg.Address)) {
+ trigger(Event:Lockdown, in_msg.Address); // locked
+ } else {
+ trigger(Event:Unlockdown, in_msg.Address); // unlocked
+ }
+ }
+ }
+ }
+
+ in_port(requestNetwork_in, RequestMsg, requestToDir) {
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:GETX, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(responseNetwork_in, ResponseMsg, responseToDir) {
+ if (responseNetwork_in.isReady()) {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
+ trigger(Event:Data_Owner, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Data_Shared, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
+ trigger(Event:Ack_Owner, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+
+ // Actions
+
+ action(a_sendTokens, "a", desc="Send tokens to requestor") {
+ // Only send a message if we have tokens to send
+ if (directory[address].Tokens > 0) {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.Tokens := directory[in_msg.Address].Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ directory[address].Tokens := 0;
+ }
+ }
+
+ action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
+ // Only send a message if we have tokens to send
+ if (directory[address].Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.Tokens := directory[address].Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ directory[address].Tokens := 0;
+ }
+ }
+
+
+ // Note: in a triggered action in_msg.Address == address, so the mixed
+ // directory[address] / directory[in_msg.Address] indexing below is
+ // equivalent.
+ action(d_sendDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ assert(directory[address].Tokens > 0);
+ out_msg.Tokens := directory[in_msg.Address].Tokens;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ directory[address].Tokens := 0;
+ }
+
+ action(dd_sendDataWithAllTokensToStarver, "\d", desc="Send data and tokens to starver") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.DestMachine := MachineType:L1Cache;
+ assert(directory[address].Tokens > 0);
+ out_msg.Tokens := directory[address].Tokens;
+ out_msg.DataBlk := directory[address].DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ directory[address].Tokens := 0;
+ }
+
+ action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Tokens >= 1);
+ directory[address].Tokens := directory[address].Tokens + in_msg.Tokens;
+ }
+ }
+
+
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ requestNetwork_in.dequeue();
+ }
+
+ action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
+ responseNetwork_in.dequeue();
+ }
+
+ action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
+ persistentNetwork_in.dequeue();
+ }
+
+ action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ action(n_checkIncomingMsg, "n", desc="Check incoming token message") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
+ assert(in_msg.Dirty == false);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+ assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
+ }
+ }
+
+ action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+
+ action(s_bounceDatalessOwnerToken, "s", desc="Bounce clean owner token to starving processor") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
+ assert(in_msg.Dirty == false);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+
+ // NOTE: The following check would not be valid in a real
+ // implementation. We include the data in the "dataless"
+ // message so we can assert the clean data matches the datablock
+ // in memory
+ assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
+
+ // Bounce the message, but "re-associate" the data and the owner
+ // token. In essence we're converting an ACK_OWNER message to a
+ // DATA_OWNER message, keeping the number of tokens the same.
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ // TRANSITIONS
+
+ // Trans. from O
+ transition(O, GETX, NO) {
+ d_sendDataWithAllTokens;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(O, GETS, NO) {
+ d_sendDataWithAllTokens;
+ // Since we found the owner, no need to forward
+ j_popIncomingRequestQueue;
+ }
+
+ transition(O, Lockdown, L) {
+ dd_sendDataWithAllTokensToStarver;
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(O, {Data_Shared, Ack}) {
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ // Trans. from NO
+ transition(NO, GETX) {
+ a_sendTokens;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(NO, GETS) {
+ j_popIncomingRequestQueue;
+ }
+
+ transition(NO, Lockdown, L) {
+ aa_sendTokensToStarver;
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(NO, Data_Owner, O) {
+ m_writeDataToMemory;
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(NO, Ack_Owner, O) {
+ n_checkIncomingMsg;
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(NO, {Data_Shared, Ack}) {
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ // Trans. from L
+ transition(L, {GETX, GETS}) {
+ j_popIncomingRequestQueue;
+ }
+
+ transition(L, Lockdown) {
+ l_popIncomingPersistentQueue;
+ }
+
+ // While locked, all responses are forwarded to the starver rather than
+ // absorbed (the directory must hold zero tokens in L).
+ transition(L, {Data_Owner, Data_Shared, Ack}) {
+ r_bounceResponse;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(L, Ack_Owner) {
+ s_bounceDatalessOwnerToken;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(L, Unlockdown, NO) {
+ l_popIncomingPersistentQueue;
+ }
+
+}
diff --git a/src/mem/protocol/MOESI_SMP_token-msg.sm b/src/mem/protocol/MOESI_SMP_token-msg.sm
new file mode 100644
index 000000000..98e27ec02
--- /dev/null
+++ b/src/mem/protocol/MOESI_SMP_token-msg.sm
@@ -0,0 +1,61 @@
+/*
+ * $Id: MOESI_token-msg.sm 1.3 04/06/05 22:43:20-00:00 kmoore@cottons.cs.wisc.edu $
+ *
+ */
+
+//int max_tokens();
+
+// CoherenceRequestType: transient request kinds issued by caches.
+enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ GETS, desc="Get Shared";
+}
+
+// StarvationType
+// Persistent requests are the token protocol's starvation-avoidance
+// mechanism; they stay active until explicitly deactivated.
+enumeration(PersistentRequestType, desc="...") {
+ // Persistent request for exclusive (write) access
+ GETX_PERSISTENT, desc="...";
+ // Persistent request for shared (read) access
+ GETS_PERSISTENT, desc="...";
+ // Deactivate this requestor's outstanding persistent request
+ DEACTIVATE_PERSISTENT, desc="...";
+}
+
+// CoherenceResponseType: responses carrying tokens and (optionally) data.
+enumeration(CoherenceResponseType, desc="...") {
+ DATA_OWNER, desc="Data, with the owner token";
+ DATA_SHARED, desc="Data, without the owner token";
+ ACK, desc="ACKnowledgment";
+ ACK_OWNER, desc="ACKnowledgment, includes the clean owner token";
+}
+
+// StarvationMsg
+// Broadcast on the (ordered) persistent virtual network so every node
+// applies lockdowns/unlockdowns in the same order.
+structure(PersistentMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ PersistentRequestType Type, desc="Type of starvation request";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Destination set";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+// RequestMsg: transient (best-effort) coherence request.
+structure(RequestMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Multicast destination mask";
+ MachineType DestMachine, desc="What component receives the data";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+// ResponseMsg: data/ack response carrying a token count between caches
+// and the directory.
+structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID Sender, desc="Node who sent the data";
+ MachineType SenderMachine, desc="What component sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ MachineType DestMachine, desc="What component receives the data";
+ int Tokens, desc="Number of tokens being transferred for this line";
+ DataBlock DataBlk, desc="data for the cache line";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
diff --git a/src/mem/protocol/MOESI_SMP_token.slicc b/src/mem/protocol/MOESI_SMP_token.slicc
new file mode 100644
index 000000000..31822f997
--- /dev/null
+++ b/src/mem/protocol/MOESI_SMP_token.slicc
@@ -0,0 +1,4 @@
+MOESI_SMP_token-msg.sm
+MOESI_SMP_token-cache.sm
+MOESI_SMP_token-dir.sm
+standard_SMP-protocol.sm
diff --git a/src/mem/protocol/MOSI_SMP_bcast-cache.sm b/src/mem/protocol/MOSI_SMP_bcast-cache.sm
new file mode 100644
index 000000000..6512b435a
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_bcast-cache.sm
@@ -0,0 +1,1000 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+// L1Cache controller for the MOSI broadcast-snooping protocol.  The
+// protocol relies on a totally-ordered address network (virtual network
+// 0): every node, including the requestor itself, observes all requests
+// in the same global order, which is why the machine reacts to its OWN
+// requests (Own_GETS/Own_GETX/Own_PUTX) when they appear on the network.
+machine(L1Cache, "MOSI Broadcast Optimized") {
+
+  // Virtual network 0 carries address/request traffic and must be ordered;
+  // virtual network 1 carries data responses and may be unordered.
+  MessageBuffer addressFromCache, network="To", virtual_network="0", ordered="true";
+  MessageBuffer dataFromCache, network="To", virtual_network="1", ordered="false";
+
+  MessageBuffer addressToCache, network="From", virtual_network="0", ordered="true";
+  MessageBuffer dataToCache, network="From", virtual_network="1", ordered="false";
+
+  // STATES
+
+  // Transient-state naming: XY_AD = moving from X to Y, still waiting for
+  // both our own request on the Address network and the Data; XY_A = data
+  // arrived first, still waiting for our own request; XY_D = own request
+  // observed, still waiting for data.  Trailing _O/_I record that another
+  // processor's GETS/GETX was observed while we waited, so we must forward
+  // the block (or lose it) once the data arrives.
+  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+    NP, desc="Not Present";
+    I, desc="Idle";
+    S, desc="Shared";
+    O, desc="Owned";
+    M, desc="Modified", format="!b";
+    IS_AD, "IS^AD", desc="idle, issued GETS, have not seen GETS or data yet";
+    IM_AD, "IM^AD", desc="idle, issued GETX, have not seen GETX or data yet";
+    SM_AD, "SM^AD",desc="shared, issued GETX, have not seen GETX or data yet";
+    OM_A, "OM^A",desc="owned, issued GETX, have not seen GETX yet", format="!b";
+
+    IS_A, "IS^A",desc="idle, issued GETS, have not seen GETS, have seen data";
+    IM_A, "IM^A",desc="idle, issued GETX, have not seen GETX, have seen data";
+    SM_A, "SM^A",desc="shared, issued GETX, have not seen GETX, have seen data", format="!b";
+
+    MI_A, "MI^A", desc="modified, issued PUTX, have not seen PUTX yet";
+    OI_A, "OI^A", desc="owned, issued PUTX, have not seen PUTX yet";
+    II_A, "II^A", desc="modified, issued PUTX, have not seen PUTX, then saw other GETX", format="!b";
+
+    IS_D, "IS^D", desc="idle, issued GETS, have seen GETS, have not seen data yet";
+    IS_D_I, "IS^D^I", desc="idle, issued GETS, have seen GETS, have not seen data, then saw other GETX";
+    IM_D, "IM^D", desc="idle, issued GETX, have seen GETX, have not seen data yet";
+    IM_D_O, "IM^D^O", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
+    IM_D_I, "IM^D^I", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETX";
+    IM_D_OI, "IM^D^OI", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS, then saw other GETX";
+    SM_D, "SM^D", desc="shared, issued GETX, have seen GETX, have not seen data yet";
+    SM_D_O, "SM^D^O", desc="shared, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
+  }
+
+  // ** EVENTS **
+
+  enumeration(Event, desc="Cache events") {
+    // From processor
+    Load, desc="Load request from the processor";
+    Ifetch, desc="I-fetch request from the processor";
+    Store, desc="Store request from the processor";
+    L1_to_L2, desc="L1 to L2 transfer";
+    L2_to_L1D, desc="L2 to L1-Data transfer";
+    L2_to_L1I, desc="L2 to L1-Instruction transfer";
+    L2_Replacement, desc="L2 Replacement";
+
+    // From Address network
+    Own_GETS, desc="Occurs when we observe our own GETS request in the global order";
+    Own_GET_INSTR, desc="Occurs when we observe our own GETInstr request in the global order";
+    Own_GETX, desc="Occurs when we observe our own GETX request in the global order";
+    Own_PUTX, desc="Occurs when we observe our own PUTX request in the global order", format="!r";
+    Other_GETS, desc="Occurs when we observe a GETS request from another processor";
+    Other_GET_INSTR, desc="Occurs when we observe a GETInstr request from another processor";
+    Other_GETX, desc="Occurs when we observe a GETX request from another processor";
+    Other_PUTX, desc="Occurs when we observe a PUTX request from another processor", format="!r";
+
+    // From Data network
+    Data, desc="Data for this block from the data network";
+  }
+
+  // TYPES
+
+  // CacheEntry
+  structure(Entry, desc="...", interface="AbstractCacheEntry") {
+    State CacheState, desc="cache state";
+    DataBlock DataBlk, desc="data for the block";
+  }
+
+  // TBE fields
+  // A TBE (Transaction Buffer Entry) tracks one in-flight miss or
+  // writeback, buffering the data and any forwarding obligations
+  // accumulated while waiting for it.
+  structure(TBE, desc="...") {
+    Address Address, desc="Physical address for this TBE";
+    State TBEState, desc="Transient state";
+    DataBlock DataBlk, desc="Buffer for the data block";
+    NetDest ForwardIDs, desc="IDs of the processors to forward the block";
+    Address ForwardAddress, desc="Address of request for forwarding";
+  }
+
+
+  // Interface declarations for the underlying C++ implementations.
+  external_type(CacheMemory) {
+    bool cacheAvail(Address);
+    Address cacheProbe(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+  }
+
+  external_type(TBETable) {
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+  // This controller manages a private L1I, L1D and a unified L2; the
+  // caches are kept exclusive (a block lives in at most one of the three).
+  TBETable TBEs, template_hack="<L1Cache_TBE>";
+  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+  CacheMemory L2cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L2"', abstract_chip_ptr="true";
+
+  MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
+  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+  StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
+
+  int cache_state_to_int(State state);
+
+  // Return the cache entry for addr, checking L2 first, then L1D, then
+  // L1I.  Caller must ensure the tag is present in one of the caches
+  // (falls through to the L1I lookup otherwise).
+  Entry getCacheEntry(Address addr), return_by_ref="yes" {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory[addr];
+    } else if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory[addr];
+    } else {
+      return L1IcacheMemory[addr];
+    }
+  }
+
+  // Apply an access-permission change to whichever cache currently holds
+  // the block (same L2 -> L1D -> L1I search order as getCacheEntry).
+  void changePermission(Address addr, AccessPermission permission) {
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory.changePermission(addr, permission);
+    } else if (L1DcacheMemory.isTagPresent(addr)) {
+      return L1DcacheMemory.changePermission(addr, permission);
+    } else {
+      return L1IcacheMemory.changePermission(addr, permission);
+    }
+  }
+
+  // True if the block's tag is present in any of the three caches.
+  bool isCacheTagPresent(Address addr) {
+    return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+  }
+
+  // Current protocol state for addr: a pending TBE (transient state)
+  // takes precedence over the cached stable state; blocks nowhere in the
+  // hierarchy are Not Present.  The asserts enforce cache exclusion: a
+  // block may live in at most one of L1I/L1D/L2.
+  State getState(Address addr) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+
+    if(TBEs.isPresent(addr)) {
+      return TBEs[addr].TBEState;
+    } else if (isCacheTagPresent(addr)) {
+      return getCacheEntry(addr).CacheState;
+    }
+    return State:NP;
+  }
+
+  // Record the new state in the TBE (if one is pending) and in the cache
+  // entry, then derive the access permission from the new state so the
+  // sequencer sees hits/misses consistently with the protocol state.
+  void setState(Address addr, State state) {
+    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+
+    if (TBEs.isPresent(addr)) {
+      TBEs[addr].TBEState := state;
+    }
+
+    if (isCacheTagPresent(addr)) {
+      getCacheEntry(addr).CacheState := state;
+
+      // Set permission
+      if ((state == State:I) || (state == State:MI_A) || (state == State:II_A)) {
+        changePermission(addr, AccessPermission:Invalid);
+      } else if (state == State:S || state == State:O) {
+        changePermission(addr, AccessPermission:Read_Only);
+      } else if (state == State:M) {
+        changePermission(addr, AccessPermission:Read_Write);
+      } else {
+        // All other (transient) states stall processor accesses.
+        changePermission(addr, AccessPermission:Busy);
+      }
+    }
+  }
+
+  // Map a processor request type to the corresponding protocol event.
+  // ATOMICs are treated as Stores since they need exclusive permission.
+  Event mandatory_request_type_to_event(CacheRequestType type) {
+    if (type == CacheRequestType:LD) {
+      return Event:Load;
+    } else if (type == CacheRequestType:IFETCH) {
+      return Event:Ifetch;
+    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+      return Event:Store;
+    } else {
+      error("Invalid CacheRequestType");
+    }
+  }
+
+  // ** OUT_PORTS **
+
+  out_port(dataNetwork_out, DataMsg, dataFromCache);
+  out_port(addressNetwork_out, AddressMsg, addressFromCache);
+
+  // ** IN_PORTS **
+
+  // Data Network
+  // Every data message becomes a single Data event; the transition table
+  // decides what to do with it based on the block's current state.
+  in_port(dataNetwork_in, DataMsg, dataToCache) {
+    if (dataNetwork_in.isReady()) {
+      peek(dataNetwork_in, DataMsg) {
+        trigger(Event:Data, in_msg.Address);
+      }
+    }
+  }
+
+  // Address Network
+  // Classify each snooped request as Own_* or Other_* depending on
+  // whether we issued it; observing our own request marks its position
+  // in the global order, which drives the transient-state transitions.
+  in_port(addressNetwork_in, AddressMsg, addressToCache) {
+    if (addressNetwork_in.isReady()) {
+      peek(addressNetwork_in, AddressMsg) {
+        if (in_msg.Type == CoherenceRequestType:GETS) {
+          if (in_msg.Requestor == machineID) {
+            trigger(Event:Own_GETS, in_msg.Address);
+          } else {
+            trigger(Event:Other_GETS, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceRequestType:GETX) {
+          if (in_msg.Requestor == machineID) {
+            trigger(Event:Own_GETX, in_msg.Address);
+          } else {
+            trigger(Event:Other_GETX, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+          if (in_msg.Requestor == machineID) {
+            trigger(Event:Own_GET_INSTR, in_msg.Address);
+          } else {
+            trigger(Event:Other_GET_INSTR, in_msg.Address);
+          }
+        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+          if (in_msg.Requestor == machineID) {
+            trigger(Event:Own_PUTX, in_msg.Address);
+          } else {
+            trigger(Event:Other_PUTX, in_msg.Address);
+          }
+        } else {
+          error("Unexpected message");
+        }
+      }
+    }
+  }
+
+  // Mandatory Queue
+  // Processor requests.  Before a request can proceed, the block must be
+  // placed (or placeable) in the correct L1; this port triggers the
+  // L1<->L2 shuffles and L2 replacements needed to enforce exclusion and
+  // make room, and only then triggers the Load/Ifetch/Store event itself.
+  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+    if (mandatoryQueue_in.isReady()) {
+      peek(mandatoryQueue_in, CacheMsg) {
+
+        // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+        if (in_msg.Type == CacheRequestType:IFETCH) {
+          // ** INSTRUCTION ACCESS ***
+
+          // Check to see if it is in the OTHER L1
+          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, try to write it to the L2
+            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+              trigger(Event:L1_to_L2, in_msg.Address);
+            } else {
+              // L2 set is full: evict an L2 victim first
+              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+            }
+          } else if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            // not in any L1
+            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 doesn't have the line, but we have space for it in the L1
+              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+                // L2 has it (maybe not with the right permissions)
+                trigger(Event:L2_to_L1I, in_msg.Address);
+              } else {
+                // We have room, the L2 doesn't have it, so the L1 fetches the line
+                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+              }
+            } else {
+              // No room in the L1, so we need to make room
+              if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.Address))) {
+                // The L2 has room, so we move the line from the L1 to the L2
+                trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.Address));
+              } else {
+                // The L2 does not have room, so we replace a line from the L2
+                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.Address)));
+              }
+            }
+          }
+        } else {
+          // *** DATA ACCESS ***
+
+          // Check to see if it is in the OTHER L1
+          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The block is in the wrong L1, try to write it to the L2
+            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+              trigger(Event:L1_to_L2, in_msg.Address);
+            } else {
+              // L2 set is full: evict an L2 victim first
+              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+            }
+          } else if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+          } else {
+            // not in any L1
+            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 doesn't have the line, but we have space for it in the L1
+              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+                // L2 has it (maybe not with the right permissions)
+                trigger(Event:L2_to_L1D, in_msg.Address);
+              } else {
+                // We have room, the L2 doesn't have it, so the L1 fetches the line
+                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+              }
+            } else {
+              // No room in the L1, so we need to make room
+              if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.Address))) {
+                // The L2 has room, so we move the line from the L1 to the L2
+                trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.Address));
+              } else {
+                // The L2 does not have room, so we replace a line from the L2
+                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.Address)));
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  // ACTIONS
+  // Each action is a primitive step referenced by the transition table
+  // below; "address" is the address the triggering event was raised for.
+  action(a_allocateTBE, "a", desc="Allocate TBE with Address=B, ForwardID=null, RetryCount=zero, ForwardIDRetryCount=zero, ForwardProgressBit=unset.") {
+    check_allocate(TBEs);
+    TBEs.allocate(address);
+    TBEs[address].ForwardIDs.clear();
+
+    // Keep the TBE state consistent with the cache state
+    if (isCacheTagPresent(address)) {
+      TBEs[address].TBEState := getCacheEntry(address).CacheState;
+    }
+  }
+
+  action(c_allocateL1DCacheBlock, "c", desc="Set L1 D-cache tag equal to tag of block B.") {
+    if (L1DcacheMemory.isTagPresent(address) == false) {
+      L1DcacheMemory.allocate(address);
+    }
+  }
+
+  action(c_allocateL1ICacheBlock, "c'", desc="Set L1 I-cache tag equal to tag of block B.") {
+    if (L1IcacheMemory.isTagPresent(address) == false) {
+      L1IcacheMemory.allocate(address);
+    }
+  }
+
+  action(cc_allocateL2CacheBlock, "\c", desc="Set L2 cache tag equal to tag of block B.") {
+    if (L2cacheMemory.isTagPresent(address) == false) {
+      L2cacheMemory.allocate(address);
+    }
+  }
+
+  action(d_deallocateTBE, "d", desc="Deallocate TBE.") {
+    TBEs.deallocate(address);
+  }
+
+  // Remember who requested the block while we were still waiting for
+  // data, so we can forward it to them once it arrives (o_cacheToForward).
+  action(e_recordForwardingInfo, "e", desc="Record ID of other processor in ForwardID.") {
+    peek(addressNetwork_in, AddressMsg){
+      TBEs[address].ForwardIDs.add(in_msg.Requestor);
+      TBEs[address].ForwardAddress := in_msg.Address;
+    }
+  }
+
+  action(f_issueGETS, "f", desc="Issue GETS.") {
+    enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:GETS;
+      out_msg.CacheState := cache_state_to_int(getState(address));
+      out_msg.Requestor := machineID;
+      // Broadcast to all caches plus the directory (home memory).
+      out_msg.Destination.broadcast(MachineType:L1Cache);
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Control;
+    }
+  }
+
+  action(g_issueGETX, "g", desc="Issue GETX.") {
+    enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:GETX;
+      out_msg.CacheState := cache_state_to_int(getState(address));
+      out_msg.Requestor := machineID;
+      out_msg.Destination.broadcast(MachineType:L1Cache);
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Control;
+    }
+  }
+
+  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
+    DEBUG_EXPR(getCacheEntry(address).DataBlk);
+    sequencer.readCallback(address, getCacheEntry(address).DataBlk);
+  }
+
+  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
+    DEBUG_EXPR(getCacheEntry(address).DataBlk);
+    sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
+  }
+
+  action(i_popAddressQueue, "i", desc="Pop incoming address queue.") {
+    addressNetwork_in.dequeue();
+  }
+
+  action(j_popDataQueue, "j", desc="Pop incoming data queue.") {
+    dataNetwork_in.dequeue();
+  }
+
+  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+    mandatoryQueue_in.dequeue();
+  }
+
+  action(m_deallocateL1CacheBlock, "m", desc="Deallocate L1 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+    if (L1DcacheMemory.isTagPresent(address)) {
+      L1DcacheMemory.deallocate(address);
+    } else {
+      L1IcacheMemory.deallocate(address);
+    }
+  }
+
+  action(mm_deallocateL2CacheBlock, "\m", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+    L2cacheMemory.deallocate(address);
+  }
+
+  action(n_copyFromL1toL2, "n", desc="Copy data block from L1 (I or D) to L2") {
+    if (L1DcacheMemory.isTagPresent(address)) {
+      L2cacheMemory[address].DataBlk := L1DcacheMemory[address].DataBlk;
+    } else {
+      L2cacheMemory[address].DataBlk := L1IcacheMemory[address].DataBlk;
+    }
+  }
+
+  action(nn_copyFromL2toL1, "\n", desc="Copy data block from L2 to L1 (I or D)") {
+    if (L1DcacheMemory.isTagPresent(address)) {
+      L1DcacheMemory[address].DataBlk := L2cacheMemory[address].DataBlk;
+    } else {
+      L1IcacheMemory[address].DataBlk := L2cacheMemory[address].DataBlk;
+    }
+  }
+
+  action(o_cacheToForward, "o", desc="Send data from the cache to the processor indicated by ForwardIDs.") {
+    peek(dataNetwork_in, DataMsg){
+      // This has a CACHE_RESPONSE_LATENCY latency because we want to avoid the
+      // timing strangeness that can occur if requests that source the
+      // data from the TBE are faster than data sourced from the cache
+      enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY"){
+        out_msg.Address := TBEs[address].ForwardAddress;
+        out_msg.Sender := machineID;
+        out_msg.DataBlk := getCacheEntry(address).DataBlk;
+        out_msg.Destination := TBEs[address].ForwardIDs;
+        out_msg.DestMachine := MachineType:L1Cache;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    }
+  }
+
+  action(p_issuePUTX, "p", desc="Issue PUTX.") {
+    enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTX;
+      out_msg.CacheState := cache_state_to_int(getState(address));
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
+      out_msg.Destination.add(machineID); // Back to us
+      out_msg.DataBlk := getCacheEntry(address).DataBlk;
+      out_msg.MessageSize := MessageSizeType:Data;
+    }
+  }
+
+  action(q_writeDataFromCacheToTBE, "q", desc="Write data from the cache into the TBE.") {
+    TBEs[address].DataBlk := getCacheEntry(address).DataBlk;
+    DEBUG_EXPR(TBEs[address].DataBlk);
+  }
+
+  action(r_cacheToRequestor, "r", desc="Send data from the cache to the requestor") {
+    peek(addressNetwork_in, AddressMsg) {
+      enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DestMachine := MachineType:L1Cache;
+        out_msg.DataBlk := getCacheEntry(address).DataBlk;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+      DEBUG_EXPR(getCacheEntry(address).DataBlk);
+    }
+  }
+
+  action(s_saveDataInTBE, "s", desc="Save data in data field of TBE.") {
+    peek(dataNetwork_in, DataMsg) {
+      TBEs[address].DataBlk := in_msg.DataBlk;
+      DEBUG_EXPR(TBEs[address].DataBlk);
+    }
+  }
+
+  action(t_issueGET_INSTR, "t", desc="Issue GETInstr.") {
+    enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:GET_INSTR;
+      out_msg.CacheState := cache_state_to_int(getState(address));
+      out_msg.Requestor := machineID;
+      out_msg.Destination.broadcast(MachineType:L1Cache);
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Control;
+    }
+  }
+
+  action(w_writeDataFromTBEToCache, "w", desc="Write data from the TBE into the cache.") {
+    getCacheEntry(address).DataBlk := TBEs[address].DataBlk;
+    DEBUG_EXPR(getCacheEntry(address).DataBlk);
+  }
+
+  action(x_profileMiss, "x", desc="Profile the demand miss") {
+    peek(mandatoryQueue_in, CacheMsg) {
+      profile_miss(in_msg, id);
+    }
+  }
+
+  // Like r_cacheToRequestor, but sources the data from the TBE; used in
+  // the MI_A/OI_A writeback window when the cache entry is already gone.
+  action(y_tbeToReq, "y", desc="Send data from the TBE to the requestor.") {
+    peek(addressNetwork_in, AddressMsg) {
+      enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") { // Either this or the PutX should have a real latency
+        out_msg.Address := address;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DestMachine := MachineType:L1Cache;
+        out_msg.DataBlk := TBEs[address].DataBlk;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    }
+  }
+
+  // action(z_stall, "z", desc="Cannot be handled right now.") {
+    // Special name recognized as do nothing case
+  // }
+
+  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
+    mandatoryQueue_in.recycle();
+  }
+
+  // TRANSITIONS
+  // transition(currentState, event[, nextState]) { ordered actions }
+  // Omitting nextState keeps the current state.
+
+  // Transitions from Idle
+  transition({NP, I}, Load, IS_AD) {
+    f_issueGETS;
+    c_allocateL1DCacheBlock;
+    a_allocateTBE;
+    x_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition({NP, I}, Ifetch, IS_AD) {
+    t_issueGET_INSTR;
+    c_allocateL1ICacheBlock;
+    a_allocateTBE;
+    x_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition({NP, I}, Store, IM_AD) {
+    g_issueGETX;
+    c_allocateL1DCacheBlock;
+    a_allocateTBE;
+    x_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition(I, L2_Replacement) {
+    mm_deallocateL2CacheBlock;
+  }
+
+  transition({NP, I}, { Other_GETS, Other_GET_INSTR, Other_GETX } ) {
+    i_popAddressQueue;
+  }
+
+  // Transitions from Shared
+  transition(S, {Load,Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(S, Store, SM_AD) {
+    g_issueGETX;
+    a_allocateTBE;
+    x_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition(S, L2_Replacement, I) {
+    mm_deallocateL2CacheBlock;
+  }
+
+  transition(S, {Other_GETS, Other_GET_INSTR}) {
+    i_popAddressQueue;
+  }
+
+  transition(S, Other_GETX, I) {
+    i_popAddressQueue;
+  }
+
+  // Transitions from Owned
+  transition(O, {Load,Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(O, Store, OM_A){
+    g_issueGETX;
+    a_allocateTBE;
+    x_profileMiss;
+    k_popMandatoryQueue;
+  }
+
+  transition(O, L2_Replacement, OI_A) {
+    p_issuePUTX;
+    a_allocateTBE;
+    q_writeDataFromCacheToTBE;// the cache line is now empty
+    mm_deallocateL2CacheBlock;
+  }
+
+  // As owner we must supply data to all snooped read/write requests.
+  transition(O, {Other_GETS,Other_GET_INSTR}) {
+    r_cacheToRequestor;
+    i_popAddressQueue;
+  }
+
+  transition(O, Other_GETX, I) {
+    r_cacheToRequestor;
+    i_popAddressQueue;
+  }
+
+  // Transitions from Modified
+  transition(M, {Load,Ifetch}) {
+    h_load_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(M, Store) {
+    hh_store_hit;
+    k_popMandatoryQueue;
+  }
+
+  transition(M, L2_Replacement, MI_A) {
+    p_issuePUTX;
+    a_allocateTBE;
+    q_writeDataFromCacheToTBE;// the cache line is now empty
+    mm_deallocateL2CacheBlock;
+  }
+
+  // A snooped read demotes us to Owned; we keep supplying the data.
+  transition(M, {Other_GETS,Other_GET_INSTR}, O) {
+    r_cacheToRequestor;
+    i_popAddressQueue;
+  }
+
+  transition(M, Other_GETX, I) {
+    r_cacheToRequestor;
+    i_popAddressQueue;
+  }
+
+  // Transitions moving data between the L1 and L2 caches
+
+  transition({I, S, O, M}, L1_to_L2) {
+    cc_allocateL2CacheBlock;
+    n_copyFromL1toL2; // Not really needed for state I
+    m_deallocateL1CacheBlock;
+  }
+
+  transition({I, S, O, M}, L2_to_L1D) {
+    c_allocateL1DCacheBlock;
+    nn_copyFromL2toL1; // Not really needed for state I
+    mm_deallocateL2CacheBlock;
+  }
+
+  transition({I, S, O, M}, L2_to_L1I) {
+    c_allocateL1ICacheBlock;
+    nn_copyFromL2toL1; // Not really needed for state I
+    mm_deallocateL2CacheBlock;
+  }
+
+  // Transitions for Load/Store/Replacement from transient states
+  // While a transaction is in flight, new processor requests for the
+  // block are recycled (retried later) rather than handled.
+
+  transition({IS_AD, IM_AD, IS_A, IM_A, SM_AD, OM_A, SM_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O}, {Load, Ifetch, Store, L2_Replacement, L1_to_L2, L2_to_L1D, L2_to_L1I}) {
+    zz_recycleMandatoryQueue;
+  }
+
+  transition({MI_A, OI_A, II_A}, {Load, Ifetch, Store, L2_Replacement, L1_to_L2, L2_to_L1D, L2_to_L1I}) {
+    zz_recycleMandatoryQueue;
+  }
+
+  // Always ignore PUTXs which we are not the owner of
+  transition({NP, I, S, O, M, IS_AD, IM_AD, SM_AD, OM_A, IS_A, IM_A, SM_A, MI_A, OI_A, II_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O }, Other_PUTX) {
+    i_popAddressQueue;
+  }
+
+  // transitions from IS_AD
+
+  transition(IS_AD, {Own_GETS,Own_GET_INSTR}, IS_D) {
+    i_popAddressQueue;
+  }
+  transition(IS_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+    i_popAddressQueue;
+  }
+  transition(IS_AD, Data, IS_A) {
+    s_saveDataInTBE;
+    j_popDataQueue;
+  }
+
+
+  // Transitions from IM_AD
+
+  transition(IM_AD, Own_GETX, IM_D) {
+    i_popAddressQueue;
+  }
+  transition(IM_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+    i_popAddressQueue;
+  }
+  transition(IM_AD, Data, IM_A) {
+    s_saveDataInTBE;
+    j_popDataQueue;
+  }
+
+  // Transitions from OM_A
+
+  transition(OM_A, Own_GETX, M){
+    hh_store_hit;
+    d_deallocateTBE;
+    i_popAddressQueue;
+  }
+
+  transition(OM_A, {Other_GETS, Other_GET_INSTR}){
+    r_cacheToRequestor;
+    i_popAddressQueue;
+  }
+
+  transition(OM_A, Other_GETX, IM_AD){
+    r_cacheToRequestor;
+    i_popAddressQueue;
+  }
+
+  transition(OM_A, Data, IM_A) { // if we get data, we know we're going to lose block before we see own GETX
+    s_saveDataInTBE;
+    j_popDataQueue;
+  }
+
+  // Transitions from SM_AD
+
+  transition(SM_AD, Own_GETX, SM_D) {
+    i_popAddressQueue;
+  }
+  transition(SM_AD, {Other_GETS,Other_GET_INSTR}) {
+    i_popAddressQueue;
+  }
+  transition(SM_AD, Other_GETX, IM_AD) {
+    i_popAddressQueue;
+  }
+  transition(SM_AD, Data, SM_A) {
+    s_saveDataInTBE;
+    j_popDataQueue;
+  }
+
+
+  // Transitions from IS_A
+
+  transition(IS_A, {Own_GETS,Own_GET_INSTR}, S) {
+    w_writeDataFromTBEToCache;
+    h_load_hit;
+    d_deallocateTBE;
+    i_popAddressQueue;
+  }
+  transition(IS_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+    i_popAddressQueue;
+  }
+
+  // Transitions from IM_A
+
+  transition(IM_A, Own_GETX, M) {
+    w_writeDataFromTBEToCache;
+    hh_store_hit;
+    d_deallocateTBE;
+    i_popAddressQueue;
+  }
+  transition(IM_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+    i_popAddressQueue;
+  }
+
+  // Transitions from SM_A
+
+  transition(SM_A, Own_GETX, M) {
+    w_writeDataFromTBEToCache;
+    hh_store_hit;
+    d_deallocateTBE;
+    i_popAddressQueue;
+  }
+  transition(SM_A, {Other_GETS,Other_GET_INSTR}) {
+    i_popAddressQueue;
+  }
+  transition(SM_A, Other_GETX, IM_A) {
+    i_popAddressQueue;
+  }
+
+
+  // Transitions from MI_A
+  // Writeback in flight: data lives in the TBE, so snooped requests are
+  // serviced from the TBE (y_tbeToReq) until our own PUTX is observed.
+
+  transition(MI_A, Own_PUTX, I) {
+    d_deallocateTBE;
+    i_popAddressQueue;
+  }
+
+  transition(MI_A, {Other_GETS, Other_GET_INSTR}) {
+    y_tbeToReq;
+    i_popAddressQueue;
+  }
+
+  transition(MI_A, Other_GETX, II_A) {
+    y_tbeToReq;
+    i_popAddressQueue;
+  }
+
+  // Transitions from OI_A
+
+  transition(OI_A, Own_PUTX, I) {
+    d_deallocateTBE;
+    i_popAddressQueue;
+  }
+
+  transition(OI_A, {Other_GETS, Other_GET_INSTR}) {
+    y_tbeToReq;
+    i_popAddressQueue;
+  }
+
+  transition(OI_A, Other_GETX, II_A) {
+    y_tbeToReq;
+    i_popAddressQueue;
+  }
+
+
+  // Transitions from II_A
+
+  transition(II_A, Own_PUTX, I) {
+    d_deallocateTBE;
+    i_popAddressQueue;
+  }
+
+  transition(II_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+    i_popAddressQueue;
+  }
+
+  // Transitions from IS_D, IS_D_I
+
+  transition({IS_D, IS_D_I}, {Other_GETS,Other_GET_INSTR}) {
+    i_popAddressQueue;
+  }
+  transition(IS_D, Other_GETX, IS_D_I) {
+    i_popAddressQueue;
+  }
+  transition(IS_D_I, Other_GETX) {
+    i_popAddressQueue;
+  }
+  transition(IS_D, Data, S) {
+    s_saveDataInTBE;
+    w_writeDataFromTBEToCache;
+    h_load_hit;
+    d_deallocateTBE;
+    j_popDataQueue;
+  }
+
+  transition(IS_D_I, Data, I) {
+    s_saveDataInTBE;
+    w_writeDataFromTBEToCache;
+    h_load_hit;
+    d_deallocateTBE;
+    j_popDataQueue;
+  }
+
+  // Transitions from IM_D, IM_D_O, IM_D_I, IM_D_OI
+  // Requests snooped while waiting for data are recorded in the TBE so
+  // that o_cacheToForward can satisfy them once the data arrives.
+
+  transition( IM_D, {Other_GETS,Other_GET_INSTR}, IM_D_O ) {
+    e_recordForwardingInfo;
+    i_popAddressQueue;
+  }
+
+  transition( IM_D, Other_GETX, IM_D_I ) {
+    e_recordForwardingInfo;
+    i_popAddressQueue;
+  }
+
+  transition(IM_D_O, {Other_GETS,Other_GET_INSTR} ) {
+    e_recordForwardingInfo;
+    i_popAddressQueue;
+  }
+
+  transition(IM_D_O, Other_GETX, IM_D_OI) {
+    e_recordForwardingInfo;
+    i_popAddressQueue;
+  }
+
+  transition( {IM_D_I, IM_D_OI}, {Other_GETS, Other_GET_INSTR, Other_GETX} ) {
+    i_popAddressQueue;
+  }
+
+  transition(IM_D, Data, M) {
+    s_saveDataInTBE;
+    w_writeDataFromTBEToCache;
+    hh_store_hit;
+    d_deallocateTBE;
+    j_popDataQueue;
+  }
+
+  transition(IM_D_O, Data, O) {
+    s_saveDataInTBE;
+    w_writeDataFromTBEToCache;
+    hh_store_hit;
+    o_cacheToForward;
+    d_deallocateTBE;
+    j_popDataQueue;
+  }
+
+  transition(IM_D_I, Data, I) {
+    s_saveDataInTBE;
+    w_writeDataFromTBEToCache;
+    hh_store_hit;
+    o_cacheToForward;
+    d_deallocateTBE;
+    j_popDataQueue;
+  }
+
+  transition(IM_D_OI, Data, I) {
+    s_saveDataInTBE;
+    w_writeDataFromTBEToCache;
+    hh_store_hit;
+    o_cacheToForward;
+    d_deallocateTBE;
+    j_popDataQueue;
+  }
+
+  // Transitions for SM_D, SM_D_O
+
+  transition(SM_D, {Other_GETS,Other_GET_INSTR}, SM_D_O) {
+    e_recordForwardingInfo;
+    i_popAddressQueue;
+  }
+
+  transition(SM_D, Other_GETX, IM_D_I) {
+    e_recordForwardingInfo;
+    i_popAddressQueue;
+  }
+
+  transition(SM_D_O, {Other_GETS,Other_GET_INSTR}) {
+    e_recordForwardingInfo;
+    i_popAddressQueue;
+  }
+
+  transition(SM_D_O, Other_GETX, IM_D_OI) {
+    e_recordForwardingInfo;
+    i_popAddressQueue;
+  }
+
+  transition(SM_D, Data, M) {
+    s_saveDataInTBE;
+    w_writeDataFromTBEToCache;
+    hh_store_hit;
+    d_deallocateTBE;
+    j_popDataQueue;
+  }
+
+  transition(SM_D_O, Data, O) {
+    s_saveDataInTBE;
+    w_writeDataFromTBEToCache;
+    hh_store_hit;
+    o_cacheToForward;
+    d_deallocateTBE;
+    j_popDataQueue;
+  }
+}
diff --git a/src/mem/protocol/MOSI_SMP_bcast-dir.sm b/src/mem/protocol/MOSI_SMP_bcast-dir.sm
new file mode 100644
index 000000000..7cccaf9d3
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_bcast-dir.sm
@@ -0,0 +1,267 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+machine(Directory, "MOSI Broadcast Optimized") {
+
+
+ MessageBuffer addressFromDir, network="To", virtual_network="0", ordered="true";
+ MessageBuffer dataFromDir, network="To", virtual_network="1", ordered="false";
+
+ MessageBuffer addressToDir, network="From", virtual_network="0", ordered="true";
+ MessageBuffer dataToDir, network="From", virtual_network="1", ordered="false";
+
+
+ enumeration(State, desc="Directory states", default="Directory_State_C") {
+ C, desc="Cold - no processor has requested this line";
+ I, desc="Idle";
+ S, desc="Shared";
+ SS, desc="Shared, 2 or more shares";
+ OS, desc="Owned by a cache";
+ OSS, desc="Owned by a cache, present in at least 3 caches";
+ M, desc="Modified", format="!b";
+ }
+
+ // ** EVENTS **
+
+ enumeration(Event, desc="Directory events") {
+ // From Address network
+ OtherAddress, desc="We saw an address msg to someone else";
+ GETS, desc="A GETS arrives";
+ GET_INSTR, desc="A GETInstr arrives";
+ GETX, desc="A GETX arrives", format="!r";
+ PUTX_Owner, desc="A PUTX arrives, requestor is owner";
+ PUTX_NotOwner, desc="A PUTX arrives, requestor is not owner", format="!r";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ State DirectoryState, desc="Directory state";
+ bool DirOwner, default="true", desc="Is dir owner?";
+ MachineID ProcOwner, desc="Processor Owner";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+
+ void profile_request(int cache_state, State directory_state, GenericRequestType request_type);
+
+ State getState(Address addr) {
+ if (directory.isPresent(addr)) {
+ return directory[addr].DirectoryState;
+ }
+ return State:C;
+ }
+
+ void setState(Address addr, State state) {
+ if (directory.isPresent(addr)) {
+ directory[addr].DirectoryState := state;
+ }
+ }
+
+ // ** OUT_PORTS **
+
+ out_port(dataNetwork_out, DataMsg, dataFromDir);
+ out_port(addressNetwork_out, AddressMsg, addressFromDir);
+
+ // ** IN_PORTS **
+
+ // Address Network
+ in_port(addressNetwork_in, AddressMsg, addressToDir) {
+ if (addressNetwork_in.isReady()) {
+ peek(addressNetwork_in, AddressMsg) {
+ if(map_Address_to_Directory(in_msg.Address) != machineID) {
+ trigger(Event:OtherAddress, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+ trigger(Event:GET_INSTR, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ if (in_msg.Requestor == directory[in_msg.Address].ProcOwner && directory[in_msg.Address].DirOwner == false) {
+ trigger(Event:PUTX_Owner, in_msg.Address);
+ } else {
+ trigger(Event:PUTX_NotOwner, in_msg.Address);
+ }
+ } else {
+ error("unexpected message");
+ }
+ }
+ }
+ }
+
+ // *** ACTIONS ***
+
+ action(d_sendDataMsg, "d", desc="Send data message to requestor") {
+ peek(addressNetwork_in, AddressMsg) {
+ enqueue(dataNetwork_out, DataMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := in_msg.Address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ DEBUG_EXPR(in_msg.Requestor);
+ DEBUG_EXPR(out_msg.DataBlk);
+ }
+ }
+ }
+
+ action(j_popAddressQueue, "j", desc="Pop address queue.") {
+ addressNetwork_in.dequeue();
+ }
+
+ action(p_profile, "p", desc="Profile this transition.") {
+ peek(addressNetwork_in, AddressMsg) {
+ profile_request(in_msg.CacheState, getState(address), convertToGenericType(in_msg.Type));
+ }
+ }
+
+ action(m_setOwnerRequestor, "m", desc="Set owner = requestor") {
+ peek(addressNetwork_in, AddressMsg) {
+ directory[in_msg.Address].ProcOwner := in_msg.Requestor;
+ directory[in_msg.Address].DirOwner := false;
+ }
+ }
+
+ action(r_writeDataFromRequest, "r", desc="Write request data to memory") {
+ peek(addressNetwork_in, AddressMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ action(x_setOwnerToDirectory, "x", desc="Set owner equal to the directory"){
+ peek(addressNetwork_in, AddressMsg) {
+ directory[in_msg.Address].DirOwner := true;
+ }
+ }
+
+ // TRANSITIONS
+
+ // Ignore all address and data messages not bound for us
+ transition(C, OtherAddress) {
+ j_popAddressQueue;
+ }
+
+ // PUTX_NotOwner Transitions
+ transition({I, S, SS, OS, OSS, M}, PUTX_NotOwner) {
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Transitions from Idle
+ transition({C, I}, {GETS,GET_INSTR}, S) {
+ d_sendDataMsg;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+  transition({C, I}, GETX, M) { // first/idle GETX: memory supplies data, requestor becomes owner
+    d_sendDataMsg;
+    m_setOwnerRequestor;
+    p_profile;
+    j_popAddressQueue; // fix: action statement was missing its terminating semicolon (SLICC parse error)
+  }
+
+ // Transitions from Shared
+ transition({S, SS}, {GETS,GET_INSTR}, SS) {
+ d_sendDataMsg;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ transition({S, SS}, GETX, M) {
+ d_sendDataMsg;
+ m_setOwnerRequestor;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Transitions from Owned
+ transition({OS, OSS}, {GETS,GET_INSTR}, OSS) {
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ transition({OS, OSS}, GETX, M) {
+ m_setOwnerRequestor;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ transition(OS, PUTX_Owner, S) {
+ x_setOwnerToDirectory;
+ r_writeDataFromRequest;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ transition(OSS, PUTX_Owner, SS) {
+ x_setOwnerToDirectory;
+ r_writeDataFromRequest;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, {GETS,GET_INSTR}, OS) {
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ transition(M, GETX) {
+ m_setOwnerRequestor;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ transition(M, PUTX_Owner, I) {
+ x_setOwnerToDirectory;
+ r_writeDataFromRequest;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+}
diff --git a/src/mem/protocol/MOSI_SMP_bcast-msg.sm b/src/mem/protocol/MOSI_SMP_bcast-msg.sm
new file mode 100644
index 000000000..263cf6f18
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_bcast-msg.sm
@@ -0,0 +1,79 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+ GET_INSTR, desc="Get Instruction";
+ GETS, desc="Get Shared";
+ GETX, desc="Get eXclusive";
+ PUTX, desc="Put eXclusive";
+}
+
+// AddressMsg
+structure(AddressMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ int CacheState, default="1000", desc="Hack to transfer the cache's state for profiling"; // The default of 1000 will generate an error if we forget to set this
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Multicast destination mask";
+ DataBlock DataBlk, desc="data for the cache line"; // This is used for PutX and Downgrades only
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+// DataMsg
+structure(DataMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ MachineID Sender, desc="Node who sent the data";
+ // MachineType SenderType, desc="Component who sent data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ MachineType DestMachine, desc="What component receives the data";
+ DataBlock DataBlk, desc="data for the cache line";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+GenericRequestType convertToGenericType(CoherenceRequestType type) {
+ if(type == CoherenceRequestType:PUTX) {
+ return GenericRequestType:PUTX;
+ } else if(type == CoherenceRequestType:GETS) {
+ return GenericRequestType:GETS;
+ } else if(type == CoherenceRequestType:GET_INSTR) {
+ return GenericRequestType:GET_INSTR;
+ } else if(type == CoherenceRequestType:GETX) {
+ return GenericRequestType:GETX;
+ } else {
+ DEBUG_EXPR(type);
+ error("invalid CoherenceRequestType");
+ }
+}
+
diff --git a/src/mem/protocol/MOSI_SMP_bcast.slicc b/src/mem/protocol/MOSI_SMP_bcast.slicc
new file mode 100644
index 000000000..ac8b85d30
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_bcast.slicc
@@ -0,0 +1,4 @@
+MOSI_SMP_bcast-msg.sm
+MOSI_SMP_bcast-cache.sm
+MOSI_SMP_bcast-dir.sm
+standard_SMP-protocol.sm
diff --git a/src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm b/src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm
new file mode 100644
index 000000000..b44e502c0
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm
@@ -0,0 +1,921 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+machine(L1Cache, "MOSI Broadcast Optimized") {
+
+ MessageBuffer addressFromCache, network="To", virtual_network="0", ordered="true";
+ MessageBuffer dataFromCache, network="To", virtual_network="1", ordered="false";
+
+ MessageBuffer addressToCache, network="From", virtual_network="0", ordered="true";
+ MessageBuffer dataToCache, network="From", virtual_network="1", ordered="false";
+
+ // STATES
+
+ enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+ NP, desc="Not Present";
+ I, desc="Idle";
+ S, desc="Shared";
+ O, desc="Owned";
+ M, desc="Modified", format="!b";
+ IS_AD, "IS^AD", desc="idle, issued GETS, have not seen GETS or data yet";
+ IM_AD, "IM^AD", desc="idle, issued GETX, have not seen GETX or data yet";
+ SM_AD, "SM^AD",desc="shared, issued GETX, have not seen GETX or data yet";
+ OM_A, "OM^A",desc="owned, issued GETX, have not seen GETX yet", format="!b";
+
+ IS_A, "IS^A",desc="idle, issued GETS, have not seen GETS, have seen data";
+ IM_A, "IM^A",desc="idle, issued GETX, have not seen GETX, have seen data";
+ SM_A, "SM^A",desc="shared, issued GETX, have not seen GETX, have seen data", format="!b";
+
+ MI_A, "MI^A", desc="modified, issued PUTX, have not seen PUTX yet";
+ OI_A, "OI^A", desc="owned, issued PUTX, have not seen PUTX yet";
+ II_A, "II^A", desc="modified, issued PUTX, have not seen PUTX, then saw other GETX", format="!b";
+
+ IS_D, "IS^D", desc="idle, issued GETS, have seen GETS, have not seen data yet";
+ IS_D_I, "IS^D^I", desc="idle, issued GETS, have seen GETS, have not seen data, then saw other GETX";
+ IM_D, "IM^D", desc="idle, issued GETX, have seen GETX, have not seen data yet";
+ IM_D_O, "IM^D^O", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
+ IM_D_I, "IM^D^I", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETX";
+ IM_D_OI, "IM^D^OI", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS, then saw other GETX";
+ SM_D, "SM^D", desc="shared, issued GETX, have seen GETX, have not seen data yet";
+ SM_D_O, "SM^D^O", desc="shared, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
+ }
+
+ // ** EVENTS **
+
+ enumeration(Event, desc="Cache events") {
+ // From processor
+ Load, desc="Load request from the processor";
+ Ifetch, desc="I-fetch request from the processor";
+ Store, desc="Store request from the processor";
+ Replacement, desc="Replacement";
+ Load_prefetch, desc="Read only prefetch";
+ Store_prefetch, desc="Read write prefetch", format="!r";
+
+ // From Address network
+ Own_GETS, desc="Occurs when we observe our own GETS request in the global order";
+ Own_GET_INSTR, desc="Occurs when we observe our own GETInstr request in the global order";
+ Own_GETX, desc="Occurs when we observe our own GETX request in the global order";
+ Own_PUTX, desc="Occurs when we observe our own PUTX request in the global order", format="!r";
+ Other_GETS, desc="Occurs when we observe a GETS request from another processor";
+ Other_GET_INSTR, desc="Occurs when we observe a GETInstr request from another processor";
+ Other_GETX, desc="Occurs when we observe a GETX request from another processor";
+ Other_PUTX, desc="Occurs when we observe a PUTX request from another processor", format="!r";
+
+ // From Data network
+ Data, desc="Data for this block from the data network";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Address Address, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ NetDest ForwardIDs, desc="IDs of the processors to forward the block";
+ Address ForwardAddress, desc="Address of request for forwarding";
+ bool isPrefetch, desc="Set if this request is a prefetch";
+ }
+
+ external_type(CacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ }
+
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
+ MessageBuffer optionalQueue, ordered="true", abstract_chip_ptr="true";
+ Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+ StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
+
+
+ TBETable TBEs, template_hack="<L1Cache_TBE>";
+ CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_unified"', abstract_chip_ptr="true";
+
+ int cache_state_to_int(State state);
+
+ State getState(Address addr) {
+ if(TBEs.isPresent(addr)) {
+ return TBEs[addr].TBEState;
+ } else if (cacheMemory.isTagPresent(addr)) {
+ return cacheMemory[addr].CacheState;
+ }
+ return State:NP;
+ }
+
+ void setState(Address addr, State state) {
+ if (TBEs.isPresent(addr)) {
+ TBEs[addr].TBEState := state;
+ }
+ if (cacheMemory.isTagPresent(addr)) {
+ cacheMemory[addr].CacheState := state;
+
+ // Set permission
+ if ((state == State:I) || (state == State:MI_A) || (state == State:II_A)) {
+ cacheMemory.changePermission(addr, AccessPermission:Invalid);
+ } else if (state == State:S || state == State:O) {
+ cacheMemory.changePermission(addr, AccessPermission:Read_Only);
+ } else if (state == State:M) {
+ cacheMemory.changePermission(addr, AccessPermission:Read_Write);
+ } else {
+ cacheMemory.changePermission(addr, AccessPermission:Busy);
+ }
+ }
+ }
+
+ // ** OUT_PORTS **
+
+ out_port(dataNetwork_out, DataMsg, dataFromCache);
+ out_port(addressNetwork_out, AddressMsg, addressFromCache);
+
+ // ** IN_PORTS **
+
+ // Data Network
+ in_port(dataNetwork_in, DataMsg, dataToCache) {
+ if (dataNetwork_in.isReady()) {
+ peek(dataNetwork_in, DataMsg) {
+ trigger(Event:Data, in_msg.Address);
+ }
+ }
+ }
+
+ // Address Network
+ in_port(addressNetwork_in, AddressMsg, addressToCache) {
+ if (addressNetwork_in.isReady()) {
+ peek(addressNetwork_in, AddressMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ if (in_msg.Requestor == machineID) {
+ trigger(Event:Own_GETS, in_msg.Address);
+ } else {
+ trigger(Event:Other_GETS, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ if (in_msg.Requestor == machineID) {
+ trigger(Event:Own_GETX, in_msg.Address);
+ } else {
+ trigger(Event:Other_GETX, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+ if (in_msg.Requestor == machineID) {
+ trigger(Event:Own_GET_INSTR, in_msg.Address);
+ } else {
+ trigger(Event:Other_GET_INSTR, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ if (in_msg.Requestor == machineID) {
+ trigger(Event:Own_PUTX, in_msg.Address);
+ } else {
+ trigger(Event:Other_PUTX, in_msg.Address);
+ }
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady()) {
+ peek(mandatoryQueue_in, CacheMsg) {
+ if (cacheMemory.cacheAvail(in_msg.Address) == false) {
+ trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ if (in_msg.Type == CacheRequestType:LD) {
+ trigger(Event:Load, in_msg.Address);
+ } else if (in_msg.Type == CacheRequestType:IFETCH) {
+ trigger(Event:Ifetch, in_msg.Address);
+ } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
+ trigger(Event:Store, in_msg.Address);
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+ }
+ }
+ }
+
+ // Optional Queue
+ in_port(optionalQueue_in, CacheMsg, optionalQueue, desc="...") {
+ if (optionalQueue_in.isReady()) {
+ peek(optionalQueue_in, CacheMsg) {
+ if (cacheMemory.cacheAvail(in_msg.Address) == false) {
+ trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ if ((in_msg.Type == CacheRequestType:LD) || (in_msg.Type == CacheRequestType:IFETCH)) {
+ trigger(Event:Load_prefetch, in_msg.Address);
+ } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
+ trigger(Event:Store_prefetch, in_msg.Address);
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+ action(a_allocateTBE, "a", desc="Allocate TBE with Address=B, ForwardID=null, RetryCount=zero, ForwardIDRetryCount=zero, ForwardProgressBit=unset.") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ TBEs[address].isPrefetch := false;
+ TBEs[address].ForwardIDs.clear();
+
+ // Keep the TBE state consistent with the cache state
+ if (cacheMemory.isTagPresent(address)) {
+ TBEs[address].TBEState := cacheMemory[address].CacheState;
+ }
+ }
+
+
+ action(b_setPrefetchBit, "b", desc="Set prefetch bit in TBE.") {
+ TBEs[address].isPrefetch := true;
+ }
+
+ action(c_allocateCacheBlock, "c", desc="Set cache tag equal to tag of block B.") {
+ if (cacheMemory.isTagPresent(address) == false) {
+ cacheMemory.allocate(address);
+ }
+ }
+
+ action(d_deallocateTBE, "d", desc="Deallocate TBE.") {
+ TBEs.deallocate(address);
+ }
+
+ action(e_recordForwardingInfo, "e", desc="Record ID of other processor in ForwardID.") {
+ peek(addressNetwork_in, AddressMsg){
+ TBEs[address].ForwardIDs.add(in_msg.Requestor);
+ TBEs[address].ForwardAddress := in_msg.Address;
+ }
+ }
+
+ action(f_issueGETS, "f", desc="Issue GETS.") {
+ enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.CacheState := cache_state_to_int(getState(address));
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(g_issueGETX, "g", desc="Issue GETX.") {
+ enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.CacheState := cache_state_to_int(getState(address));
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
+ // Non-prefetch
+ sequencer.readCallback(address, cacheMemory[address].DataBlk);
+ } else {
+ // Prefetch - don't call back
+ }
+ }
+
+ action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
+ // Non-prefetch
+ sequencer.writeCallback(address, cacheMemory[address].DataBlk);
+ } else {
+ // Prefetch - don't call back
+ }
+ }
+
+ action(i_popAddressQueue, "i", desc="Pop incoming address queue.") {
+ addressNetwork_in.dequeue();
+ }
+
+ action(j_popDataQueue, "j", desc="Pop incoming data queue.") {
+ dataNetwork_in.dequeue();
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+ action(l_popOptionalQueue, "l", desc="Pop optional queue.") {
+ optionalQueue_in.dequeue();
+ }
+
+
+ action(o_cacheToForward, "o", desc="Send data from the cache to the processor indicated by ForwardIDs.") {
+ peek(dataNetwork_in, DataMsg){
+ // This has a CACHE_RESPONSE_LATENCY latency because we want to avoid the
+ // timing strangeness that can occur if requests that source the
+ // data from the TBE are faster than data sourced from the cache
+ enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY"){
+ out_msg.Address := TBEs[address].ForwardAddress;
+ out_msg.Sender := machineID;
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.Destination := TBEs[address].ForwardIDs;
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ action(p_issuePUTX, "p", desc="Issue PUTX.") {
+ enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.CacheState := cache_state_to_int(getState(address));
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
+ out_msg.Destination.add(machineID); // Back to us
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(q_writeDataFromCacheToTBE, "q", desc="Write data from the cache into the TBE.") {
+ TBEs[address].DataBlk := cacheMemory[address].DataBlk;
+ DEBUG_EXPR(TBEs[address].DataBlk);
+ }
+
+ action(r_cacheToRequestor, "r", desc="Send data from the cache to the requestor") {
+ peek(addressNetwork_in, AddressMsg) {
+ enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ }
+ }
+
+
+ action(s_saveDataInTBE, "s", desc="Save data in data field of TBE.") {
+ peek(dataNetwork_in, DataMsg) {
+ TBEs[address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(TBEs[address].DataBlk);
+ }
+ }
+
+ action(t_issueGET_INSTR, "t", desc="Issue GETInstr.") {
+ enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GET_INSTR;
+ out_msg.CacheState := cache_state_to_int(getState(address));
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(w_writeDataFromTBEToCache, "w", desc="Write data from the TBE into the cache.") {
+ cacheMemory[address].DataBlk := TBEs[address].DataBlk;
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ }
+
+ action(y_tbeToReq, "y", desc="Send data from the TBE to the requestor.") {
+ peek(addressNetwork_in, AddressMsg) {
+ enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") { // Either this or the PutX should have a real latency
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ action(ff_deallocateCacheBlock, "\f", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ cacheMemory.deallocate(address);
+ }
+
+ action(z_stall, "z", desc="Cannot be handled right now.") {
+ // Special name recognized as do nothing case
+ }
+
+ // TRANSITIONS
+
+ // Transitions from Idle
+ transition({NP, I}, Load, IS_AD) {
+ f_issueGETS;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP, I}, Ifetch, IS_AD) {
+ t_issueGET_INSTR;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP, I}, Load_prefetch, IS_AD) {
+ f_issueGETS;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ b_setPrefetchBit;
+ l_popOptionalQueue;
+ }
+
+ transition({NP, I}, Store, IM_AD) {
+ g_issueGETX;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP, I}, Store_prefetch, IM_AD) {
+ g_issueGETX;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ b_setPrefetchBit;
+ l_popOptionalQueue;
+ }
+
+ transition(I, Replacement) {
+ ff_deallocateCacheBlock; // the cache line is now in NotPresent
+ }
+
+ transition({NP, I}, { Other_GETS, Other_GET_INSTR, Other_GETX } ) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from Shared
+ transition(S, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Load_prefetch) {
+ l_popOptionalQueue;
+ }
+
+ transition(S, Store, SM_AD) {
+ g_issueGETX;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Store_prefetch, IM_AD) {
+ g_issueGETX;
+ a_allocateTBE;
+ b_setPrefetchBit; // Must be after allocate TBE
+ l_popOptionalQueue;
+ }
+
+ transition(S, Replacement, I) {
+ ff_deallocateCacheBlock; // the cache line is now in NotPresent
+ }
+
+ transition(S, {Other_GETS, Other_GET_INSTR}) {
+ i_popAddressQueue;
+ }
+
+ transition(S, Other_GETX, I) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from Owned
+ transition(O, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, Store, OM_A){
+ g_issueGETX;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, Load_prefetch) {
+ l_popOptionalQueue;
+ }
+
+ transition(O, Store_prefetch, OM_A) {
+ g_issueGETX;
+ a_allocateTBE;
+ b_setPrefetchBit;
+ l_popOptionalQueue;
+ }
+
+ transition(O, Replacement, OI_A) {
+ p_issuePUTX;
+ a_allocateTBE;
+ q_writeDataFromCacheToTBE;// the cache line is now empty
+ ff_deallocateCacheBlock; // the cache line is now in NotPresent
+ }
+
+ transition(O, {Other_GETS,Other_GET_INSTR}) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ transition(O, Other_GETX, I) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, {Load_prefetch,Store_prefetch}) {
+ l_popOptionalQueue;
+ }
+
+ transition(M, Replacement, MI_A) {
+ p_issuePUTX;
+ a_allocateTBE;
+ q_writeDataFromCacheToTBE;// the cache line is now empty
+ ff_deallocateCacheBlock; // the cache line is now in NotPresent
+ }
+
+ transition(M, {Other_GETS,Other_GET_INSTR}, O) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ transition(M, Other_GETX, I) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+
+ // Transitions for Load/Store/Replacement from transient states
+
+ // Demand requests cannot be serviced while a miss is outstanding:
+ // stall (leave the message at the head of the mandatory queue).
+ transition({IS_AD, IM_AD, IS_A, IM_A, SM_AD, OM_A, SM_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O}, {Load, Ifetch, Store, Replacement}) {
+ z_stall;
+ }
+
+ // A load prefetch is redundant while any fetch is already in flight - drop it.
+ transition({IS_AD, IM_AD, IS_A, IM_A, SM_AD, OM_A, SM_A, IS_D, IM_D, IM_D_O, SM_D, SM_D_O}, Load_prefetch) {
+ l_popOptionalQueue;
+ }
+
+ // In states that will end in I, the prefetch must wait for resolution.
+ transition({IS_D_I, IM_D_I, IM_D_OI}, Load_prefetch) {
+ z_stall;
+ }
+
+ // A store prefetch is redundant while a GETX is already outstanding - drop it.
+ transition({IM_AD, SM_AD, OM_A, IM_A, SM_A, IM_D, SM_D}, Store_prefetch) {
+ l_popOptionalQueue;
+ }
+
+ // States headed for S or I cannot absorb a store prefetch - stall it.
+ transition({IS_AD, IS_A, IS_D, IS_D_I, IM_D_O, IM_D_I, IM_D_OI, SM_D_O}, Store_prefetch) {
+ z_stall;
+ }
+
+ // Writeback in progress: stall everything from the processor side.
+ transition({MI_A, OI_A, II_A}, {Load, Ifetch, Store, Load_prefetch, Store_prefetch, Replacement}) {
+ z_stall;
+ }
+
+ // Always ignore PUTXs which we are not the owner of
+ transition({NP, I, S, O, M, IS_AD, IM_AD, SM_AD, OM_A, IS_A, IM_A, SM_A, MI_A, OI_A, II_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O }, Other_PUTX) {
+ i_popAddressQueue;
+ }
+
+ // transitions from IS_AD
+ // (issued GETS, waiting to observe our own request on the ordered
+ // address network and to receive the data)
+
+ // Seeing our own GETS on the bus: now only the data is outstanding.
+ transition(IS_AD, {Own_GETS,Own_GET_INSTR}, IS_D) {
+ i_popAddressQueue;
+ }
+ // Others' requests are irrelevant before our own request is ordered.
+ transition(IS_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+ // Data can arrive before our own address message; buffer it in the TBE.
+ transition(IS_AD, Data, IS_A) {
+ s_saveDataInTBE;
+ j_popDataQueue;
+ }
+
+
+ // Transitions from IM_AD
+ // (issued GETX, waiting for our own request and the data)
+
+ transition(IM_AD, Own_GETX, IM_D) {
+ i_popAddressQueue;
+ }
+ transition(IM_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+ transition(IM_AD, Data, IM_A) {
+ s_saveDataInTBE;
+ j_popDataQueue;
+ }
+
+ // Transitions from OM_A
+ // (was Owned, issued GETX for a store, waiting to see our own GETX)
+
+ // Our GETX is ordered: we already hold the data, so complete the store.
+ transition(OM_A, Own_GETX, M){
+ hh_store_hit;
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+
+ // Still the owner until our GETX is ordered: keep supplying readers.
+ transition(OM_A, {Other_GETS, Other_GET_INSTR}){
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ // Another GETX ordered before ours: hand over the block and fall back
+ // to a plain GETX miss (IM_AD).
+ transition(OM_A, Other_GETX, IM_AD){
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ transition(OM_A, Data, IM_A) { // if we get data, we know we're going to lose block before we see own GETX
+ s_saveDataInTBE;
+ j_popDataQueue;
+ }
+
+ // Transitions from SM_AD
+ // (was Shared, issued GETX, waiting for our own request and the data)
+
+ transition(SM_AD, Own_GETX, SM_D) {
+ i_popAddressQueue;
+ }
+ transition(SM_AD, {Other_GETS,Other_GET_INSTR}) {
+ i_popAddressQueue;
+ }
+ // Another writer invalidates our shared copy: degrade to IM_AD.
+ transition(SM_AD, Other_GETX, IM_AD) {
+ i_popAddressQueue;
+ }
+ transition(SM_AD, Data, SM_A) {
+ s_saveDataInTBE;
+ j_popDataQueue;
+ }
+
+
+ // Transitions from IS_A
+ // (data already buffered in TBE, waiting only for our own GETS)
+
+ // Our GETS is ordered: install the buffered data and complete the load.
+ transition(IS_A, {Own_GETS,Own_GET_INSTR}, S) {
+ w_writeDataFromTBEToCache;
+ h_load_hit;
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+ transition(IS_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from IM_A
+ // (data already buffered in TBE, waiting only for our own GETX)
+
+ transition(IM_A, Own_GETX, M) {
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+ transition(IM_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from SM_A
+ // (had the block Shared, data for the upgrade buffered in TBE)
+
+ transition(SM_A, Own_GETX, M) {
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+ transition(SM_A, {Other_GETS,Other_GET_INSTR}) {
+ i_popAddressQueue;
+ }
+ // Another GETX ordered first invalidates our shared copy: fall to IM_A.
+ transition(SM_A, Other_GETX, IM_A) {
+ i_popAddressQueue;
+ }
+
+
+ // Transitions from MI_A
+ // (writeback of Modified data in flight; data held in the TBE)
+
+ // Our PUTX is ordered: the writeback is committed, drop the TBE.
+ transition(MI_A, Own_PUTX, I) {
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+
+ // We are still responsible for the data until the PUTX is ordered,
+ // so service requests from the TBE copy.
+ transition(MI_A, {Other_GETS, Other_GET_INSTR}) {
+ y_tbeToReq;
+ i_popAddressQueue;
+ }
+
+ transition(MI_A, Other_GETX, II_A) {
+ y_tbeToReq;
+ i_popAddressQueue;
+ }
+
+ // Transitions from OI_A
+ // (writeback of Owned data in flight; mirrors MI_A)
+
+ transition(OI_A, Own_PUTX, I) {
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+
+ transition(OI_A, {Other_GETS, Other_GET_INSTR}) {
+ y_tbeToReq;
+ i_popAddressQueue;
+ }
+
+ transition(OI_A, Other_GETX, II_A) {
+ y_tbeToReq;
+ i_popAddressQueue;
+ }
+
+
+ // Transitions from II_A
+ // (ownership already handed to another writer; just waiting for our
+ // own PUTX to drain from the ordered address network)
+
+ transition(II_A, Own_PUTX, I) {
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+
+ transition(II_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from IS_D, IS_D_I
+ // (own GETS observed, waiting for data; the _I variant has also seen a
+ // GETX and so must invalidate once the data arrives)
+
+ transition({IS_D, IS_D_I}, {Other_GETS,Other_GET_INSTR}) {
+ i_popAddressQueue;
+ }
+ // A GETX ordered while we wait means our copy is stale on arrival.
+ transition(IS_D, Other_GETX, IS_D_I) {
+ i_popAddressQueue;
+ }
+ transition(IS_D_I, Other_GETX) {
+ i_popAddressQueue;
+ }
+ transition(IS_D, Data, S) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ h_load_hit;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ // The load still completes with the arriving data, but the block is
+ // immediately invalidated because a later GETX was already ordered.
+ transition(IS_D_I, Data, I) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ h_load_hit;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ // Transitions from IM_D, IM_D_O, IM_D_I, IM_D_OI
+ // (own GETX observed, waiting for data; suffixes record requests
+ // ordered after ours that we must service once the data arrives:
+ // _O = forward to readers, _I = hand off to a writer, _OI = both)
+
+ // Record the reader so we can forward the block after our store.
+ transition( IM_D, {Other_GETS,Other_GET_INSTR}, IM_D_O ) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition( IM_D, Other_GETX, IM_D_I ) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(IM_D_O, {Other_GETS,Other_GET_INSTR} ) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(IM_D_O, Other_GETX, IM_D_OI) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ // Once a GETX has been recorded, later requests will be serviced by
+ // that new owner, not by us - just pop them.
+ transition( {IM_D_I, IM_D_OI}, {Other_GETS, Other_GET_INSTR, Other_GETX} ) {
+ i_popAddressQueue;
+ }
+
+ transition(IM_D, Data, M) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ // Complete our store, then forward to the recorded readers; we remain
+ // the owner (O).
+ transition(IM_D_O, Data, O) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ o_cacheToForward;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ // Complete our store, then hand the block to the recorded writer.
+ transition(IM_D_I, Data, I) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ o_cacheToForward;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ transition(IM_D_OI, Data, I) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ o_cacheToForward;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ // Transitions for SM_D, SM_D_O
+ // (upgrade from Shared: own GETX observed, waiting for data;
+ // _O records readers to forward to once the data arrives)
+
+ transition(SM_D, {Other_GETS,Other_GET_INSTR}, SM_D_O) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ // A competing GETX was ordered after ours: we will lose the block,
+ // so continue as the corresponding IM_D variant.
+ transition(SM_D, Other_GETX, IM_D_I) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(SM_D_O, {Other_GETS,Other_GET_INSTR}) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(SM_D_O, Other_GETX, IM_D_OI) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(SM_D, Data, M) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ // Complete the store, then forward to recorded readers; remain owner.
+ transition(SM_D_O, Data, O) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ o_cacheToForward;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+}
diff --git a/src/mem/protocol/MOSI_SMP_bcast_1level.slicc b/src/mem/protocol/MOSI_SMP_bcast_1level.slicc
new file mode 100644
index 000000000..d683743a3
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_bcast_1level.slicc
@@ -0,0 +1,4 @@
+MOSI_SMP_bcast-msg.sm
+MOSI_SMP_bcast_1level-cache.sm
+MOSI_SMP_bcast-dir.sm
+standard_1level_SMP-protocol.sm
diff --git a/src/mem/protocol/MOSI_SMP_bcast_m-dir.sm b/src/mem/protocol/MOSI_SMP_bcast_m-dir.sm
new file mode 100644
index 000000000..e241ea25c
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_bcast_m-dir.sm
@@ -0,0 +1,345 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+machine(Directory, "MOSI Broadcast Optimized") {
+
+
+ // Virtual network 0 carries ordered address/request traffic (the
+ // broadcast "bus"); virtual network 1 carries unordered data.
+ MessageBuffer addressFromDir, network="To", virtual_network="0", ordered="true";
+ MessageBuffer dataFromDir, network="To", virtual_network="1", ordered="false";
+
+ MessageBuffer addressToDir, network="From", virtual_network="0", ordered="true";
+ MessageBuffer dataToDir, network="From", virtual_network="1", ordered="false";
+
+
+ enumeration(State, desc="Directory states", default="Directory_State_C") {
+ C, desc="Cold - no processor has requested this line";
+ I, desc="Idle";
+ S, desc="Shared";
+ SS, desc="Shared, 2 or more shares";
+ OS, desc="Owned by a cache";
+ OSS, desc="Owned by a cache, present in at least 3 caches";
+ M, desc="Modified", format="!b";
+ }
+
+ // ** EVENTS **
+
+ enumeration(Event, desc="Directory events") {
+ // From Address network
+ OtherAddress, desc="We saw an address msg to someone else";
+ GETS, desc="A GETS arrives";
+ GET_INSTR, desc="A GETInstr arrives";
+ GETX, desc="A GETX arrives", format="!r";
+ PUTX_Owner, desc="A PUTX arrives, requestor is owner";
+ PUTX_NotOwner, desc="A PUTX arrives, requestor is not owner", format="!r";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ State DirectoryState, desc="Directory state";
+ bool DirOwner, default="true", desc="Is dir owner?";
+ MachineID ProcOwner, desc="Processor Owner";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // to simulate detailed DRAM
+ external_type(MemoryControl, inport="yes", outport="yes") {
+
+ }
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+ MemoryControl memBuffer, constructor_hack="i";
+
+ void profile_request(int cache_state, State directory_state, GenericRequestType request_type);
+
+ // Blocks never touched fall through to the Cold state.
+ State getState(Address addr) {
+ if (directory.isPresent(addr)) {
+ return directory[addr].DirectoryState;
+ }
+ return State:C;
+ }
+
+ void setState(Address addr, State state) {
+ if (directory.isPresent(addr)) {
+ directory[addr].DirectoryState := state;
+ }
+ }
+
+ // ** OUT_PORTS **
+
+ out_port(dataNetwork_out, DataMsg, dataFromDir);
+ out_port(addressNetwork_out, AddressMsg, addressFromDir);
+ out_port(memQueue_out, MemoryMsg, memBuffer);
+
+
+ // ** IN_PORTS **
+
+ // Address Network: classify each broadcast request into an Event.
+ in_port(addressNetwork_in, AddressMsg, addressToDir) {
+ if (addressNetwork_in.isReady()) {
+ peek(addressNetwork_in, AddressMsg) {
+ // Broadcast protocol: we also see requests routed to other
+ // directories and must explicitly ignore them.
+ if(map_Address_to_Directory(in_msg.Address) != machineID) {
+ trigger(Event:OtherAddress, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+ trigger(Event:GET_INSTR, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ // A PUTX is only meaningful from the current cache owner;
+ // stale PUTXs (e.g. after ownership changed hands) are
+ // classified separately and ignored by the transitions.
+ if (in_msg.Requestor == directory[in_msg.Address].ProcOwner && directory[in_msg.Address].DirOwner == false) {
+ trigger(Event:PUTX_Owner, in_msg.Address);
+ } else {
+ trigger(Event:PUTX_NotOwner, in_msg.Address);
+ }
+ } else {
+ error("unexpected message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // *** ACTIONS ***
+
+ // Forward data returned by the memory controller to the cache that
+ // originally requested it.
+ action(d_sendDataMsg, "d", desc="Send data message to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(dataNetwork_out, DataMsg, latency="1") {
+ out_msg.Address := in_msg.Address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DestMachine := MachineType:L1Cache;
+ //out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.DataBlk := in_msg.DataBlk; // data comes from the memory response, not the directory copy
+ out_msg.MessageSize := MessageSizeType:Data;
+ DEBUG_EXPR(in_msg.OriginalRequestorMachId);
+ DEBUG_EXPR(out_msg.DataBlk);
+ }
+ }
+ }
+
+ action(j_popAddressQueue, "j", desc="Pop address queue.") {
+ addressNetwork_in.dequeue();
+ }
+
+ action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue();
+ }
+
+ action(p_profile, "p", desc="Profile this transition.") {
+ peek(addressNetwork_in, AddressMsg) {
+ profile_request(in_msg.CacheState, getState(address), convertToGenericType(in_msg.Type));
+ }
+ }
+
+ // Record the requesting cache as the block's owner.
+ action(m_setOwnerRequestor, "m", desc="Set owner = requestor") {
+ peek(addressNetwork_in, AddressMsg) {
+ directory[in_msg.Address].ProcOwner := in_msg.Requestor;
+ directory[in_msg.Address].DirOwner := false;
+ }
+ }
+
+ // Hand the request to the (detailed) DRAM model; the original
+ // requestor is carried along so d_sendDataMsg can reply later.
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(addressNetwork_in, AddressMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := in_msg.Prefetch;
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+ peek(addressNetwork_in, AddressMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.DataBlk := in_msg.DataBlk; // writeback data travels in the PUTX request itself
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := in_msg.Prefetch;
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ // Commit PUTX data into the directory's copy of the block.
+ action(r_writeDataFromRequest, "r", desc="Write request data to memory") {
+ peek(addressNetwork_in, AddressMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ action(x_setOwnerToDirectory, "x", desc="Set owner equal to the directory"){
+ peek(addressNetwork_in, AddressMsg) {
+ directory[in_msg.Address].DirOwner := true;
+ }
+ }
+
+ // TRANSITIONS
+
+ // Ignore all address and data messages not bound for us
+ transition(C, OtherAddress) {
+ j_popAddressQueue;
+ }
+
+ // PUTX_NotOwner Transitions
+ // A stale PUTX (requestor no longer the owner) is profiled and dropped.
+ transition({I, S, SS, OS, OSS, M}, PUTX_NotOwner) {
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Transitions from Idle
+ // Reads fetch the block from memory; the reply is sent when
+ // Memory_Data comes back (see d_sendDataMsg below).
+ transition({C, I}, {GETS,GET_INSTR}, S) {
+ //d_sendDataMsg;
+ qf_queueMemoryFetchRequest;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Idle/Cold + GETX: fetch the block from memory and record the
+ // requestor as the new owner; the data reply is sent on Memory_Data.
+ transition({C, I}, GETX, M) {
+ //d_sendDataMsg;
+ qf_queueMemoryFetchRequest;
+ m_setOwnerRequestor;
+ p_profile;
+ j_popAddressQueue; // FIX: terminating ';' was missing, unlike every other transition
+ }
+
+ // Transitions from Shared
+ transition({S, SS}, {GETS,GET_INSTR}, SS) {
+ //d_sendDataMsg;
+ qf_queueMemoryFetchRequest;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Shared + GETX: memory supplies the data; requestor becomes owner.
+ transition({S, SS}, GETX, M) {
+ //d_sendDataMsg;
+ qf_queueMemoryFetchRequest;
+ m_setOwnerRequestor;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Transitions from Owned
+ // The owning cache supplies data directly (broadcast snooping), so
+ // the directory only updates its bookkeeping state here.
+ transition({OS, OSS}, {GETS,GET_INSTR}, OSS) {
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ transition({OS, OSS}, GETX, M) {
+ m_setOwnerRequestor;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Owner writes the block back: directory reclaims ownership and
+ // queues the data to DRAM.
+ transition(OS, PUTX_Owner, S) {
+ x_setOwnerToDirectory;
+ r_writeDataFromRequest;
+ qw_queueMemoryWBRequest;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ transition(OSS, PUTX_Owner, SS) {
+ x_setOwnerToDirectory;
+ r_writeDataFromRequest;
+ qw_queueMemoryWBRequest;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, {GETS,GET_INSTR}, OS) {
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // M + GETX: ownership migrates cache-to-cache; just track the new owner.
+ transition(M, GETX) {
+ m_setOwnerRequestor;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ transition(M, PUTX_Owner, I) {
+ x_setOwnerToDirectory;
+ r_writeDataFromRequest;
+ qw_queueMemoryWBRequest;
+ p_profile;
+ j_popAddressQueue;
+ }
+
+ // Memory responses are state-independent: forward fetched data to the
+ // original requestor, and silently consume writeback acks.
+ transition({C, I, S, SS, OS, OSS, M}, Memory_Data) {
+ d_sendDataMsg;
+ l_popMemQueue;
+ }
+
+ transition({C, I, S, SS, OS, OSS, M}, Memory_Ack) {
+ //a_sendAck;
+ l_popMemQueue;
+ }
+
+}
diff --git a/src/mem/protocol/MOSI_SMP_bcast_m.slicc b/src/mem/protocol/MOSI_SMP_bcast_m.slicc
new file mode 100644
index 000000000..090df74d9
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_bcast_m.slicc
@@ -0,0 +1,4 @@
+MOSI_SMP_bcast-msg.sm
+MOSI_SMP_bcast-cache.sm
+MOSI_SMP_bcast_m-dir.sm
+standard_SMP-protocol.sm
diff --git a/src/mem/protocol/MOSI_SMP_directory_1level-cache.sm b/src/mem/protocol/MOSI_SMP_directory_1level-cache.sm
new file mode 100644
index 000000000..f780bb473
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_directory_1level-cache.sm
@@ -0,0 +1,838 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOSI_directory_1level-cache.sm 1.18 04/09/07 13:52:52-05:00 mikem@maya.cs.wisc.edu $
+ *
+ */
+
+machine(L1Cache, "MOSI Directory Optimized") {
+
+ // Network 0: requests to the directory. Network 1: ordered forwarded
+ // requests from the directory. Network 2: unordered data/ack responses.
+ MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false";
+ MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false";
+
+ MessageBuffer forwardedRequestToCache, network="From", virtual_network="1", ordered="true";
+ MessageBuffer responseToCache, network="From", virtual_network="2", ordered="false";
+
+ // STATES
+ enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+ // Base states
+ I, desc="Idle";
+ S, desc="Shared";
+ O, desc="Owned";
+ M, desc="Modified", format="!b";
+
+ // Transient States
+ MI, desc="modified, issued PUTX, have not seen response yet";
+ OI, desc="owned, issued PUTX, have not seen response yet";
+
+ IS, desc="idle, issued GETS, have not seen response yet";
+ ISI, desc="idle, issued GETS, saw INV, have not seen data for GETS yet", format="!b";
+
+ IM, desc="idle, issued GETX, have not seen response yet";
+ IMI, desc="idle, issued GETX, saw forwarded GETX";
+ IMO, desc="idle, issued GETX, saw forwarded GETS";
+ IMOI, desc="idle, issued GETX, saw forwarded GETS, saw forwarded GETX";
+
+ // Note: OM is a strange state, because it is waiting for the line
+ // to be stolen away, or look like it has been stolen away. The
+ // common case is that we see a forward from the directory that is
+ // really from us, we forwarded the data to our dataqueue, and
+ // everythings works fine.
+
+ OM, desc="owned, issued GETX, have not seen response yet";
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ Load, desc="Load request from the processor";
+ Load_prefetch, desc="Load prefetch request from the processor";
+ Ifetch, desc="I-fetch request from the processor";
+ Store_prefetch, desc="Store prefetch request from the processor";
+ Store, desc="Store request from the processor";
+ Replacement, desc="Replacement", format="!r";
+
+ // Requests relayed by the directory (ordered network).
+ Forwarded_GETS, "Forwarded GETS", desc="Directory forwards GETS to us";
+ Forwarded_GETX, "Forwarded GETX", desc="Directory forwards GETX to us";
+ INV, "INV", desc="Invalidation", format="!r";
+
+ // Invalidation acknowledgments from other caches.
+ Proc_ack, "Proc ack", desc="Ack from proc";
+ Proc_last_ack, "Proc last ack", desc="Last ack", format="!r";
+
+ // Data responses, classified by how many invalidation acks are
+ // still outstanding when the data arrives.
+ Data_ack_0, "Data ack 0", desc="Data with ack count = 0";
+ Data_ack_not_0, "Data ack not 0", desc="Data with ack count != 0 (but haven't seen all acks first";
+ Data_ack_not_0_last, "Data ack not 0 last", desc="Data with ack count != 0 after having received all acks";
+
+ Dir_WB_ack, "WB ack", desc="Writeback ack from dir";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // TBE fields
+ // (miss status holding register: one per outstanding transaction)
+ structure(TBE, desc="...") {
+ Address Address, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ int NumPendingAcks, desc="Number of acks that this processor is waiting for";
+ NetDest ForwardGetS_IDs, desc="Set of the processors to forward the block";
+ MachineID ForwardGetX_ID, desc="ID of the processor to forward the block";
+ int ForwardGetX_AckCount, desc="Number of acks the GetX we are forwarded needs";
+ bool isPrefetch, desc="Set if this was caused by a prefetch";
+ }
+
+ external_type(CacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ }
+
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
+ MessageBuffer optionalQueue, ordered="false", abstract_chip_ptr="true";
+ Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+ StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
+
+ TBETable TBEs, template_hack="<L1Cache_TBE>";
+ CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_unified L1"', abstract_chip_ptr="true";
+
+ // A TBE (in-flight transaction) takes precedence over the cache tag;
+ // blocks with neither are Idle.
+ State getState(Address addr) {
+ if(TBEs.isPresent(addr)) {
+ return TBEs[addr].TBEState;
+ } else if (cacheMemory.isTagPresent(addr)) {
+ return cacheMemory[addr].CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(Address addr, State state) {
+ if (TBEs.isPresent(addr)) {
+ TBEs[addr].TBEState := state;
+ }
+ if (cacheMemory.isTagPresent(addr)) {
+ cacheMemory[addr].CacheState := state;
+
+ // Set permission: transient states are Busy so the sequencer
+ // cannot access the line while a transaction is in flight.
+ if ((state == State:I) || (state == State:MI) || (state == State:OI)) {
+ cacheMemory.changePermission(addr, AccessPermission:Invalid);
+ } else if (state == State:S || state == State:O) {
+ cacheMemory.changePermission(addr, AccessPermission:Read_Only);
+ } else if (state == State:M) {
+ cacheMemory.changePermission(addr, AccessPermission:Read_Write);
+ } else {
+ cacheMemory.changePermission(addr, AccessPermission:Busy);
+ }
+ }
+ }
+
+ // ** OUT_PORTS **
+
+ out_port(requestNetwork_out, RequestMsg, requestFromCache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromCache);
+
+ // ** IN_PORTS **
+
+ // Response Network: data and invalidation acks.
+ in_port(responseNetwork_in, ResponseMsg, responseToCache) {
+ if (responseNetwork_in.isReady()) {
+ peek(responseNetwork_in, ResponseMsg) {
+ if(in_msg.Type == CoherenceResponseType:DATA) {
+ if(in_msg.NumPendingAcks == 0) {
+ trigger(Event:Data_ack_0, in_msg.Address);
+ } else {
+ // Acks may arrive before the data: the TBE counter goes
+ // negative as acks come in, so data + TBE count == 0 means
+ // all required acks were already received.
+ if(in_msg.NumPendingAcks + TBEs[in_msg.Address].NumPendingAcks != 0) {
+ trigger(Event:Data_ack_not_0, in_msg.Address);
+ } else {
+ trigger(Event:Data_ack_not_0_last, in_msg.Address);
+ }
+ }
+ } else if(in_msg.Type == CoherenceResponseType:ACK) {
+ if(TBEs[in_msg.Address].NumPendingAcks != 1){
+ trigger(Event:Proc_ack, in_msg.Address);
+ } else {
+ trigger(Event:Proc_last_ack, in_msg.Address);
+ }
+ }
+ }
+ }
+ }
+
+ // Forwarded Request network: requests the directory relays to us as
+ // the current owner/sharer, plus invalidations and writeback acks.
+ in_port(forwardedRequestNetwork_in, RequestMsg, forwardedRequestToCache) {
+ if(forwardedRequestNetwork_in.isReady()) {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ if(in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Forwarded_GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:Forwarded_GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:INV, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+ trigger(Event:Dir_WB_ack, in_msg.Address);
+ } else {
+ error("Invalid forwarded request type");
+ }
+ }
+ }
+ }
+
+ // Mandatory Queue: demand requests from the processor. If the target
+ // set has no free way, a victim replacement is triggered first.
+ in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady()) {
+ peek(mandatoryQueue_in, CacheMsg) {
+ if (cacheMemory.cacheAvail(in_msg.Address) == false) {
+ // Note: triggers on the victim's address, not the request's.
+ trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ if (in_msg.Type == CacheRequestType:LD) {
+ trigger(Event:Load, in_msg.Address);
+ } else if (in_msg.Type == CacheRequestType:IFETCH) {
+ trigger(Event:Ifetch, in_msg.Address);
+ } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
+ trigger(Event:Store, in_msg.Address);
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+ }
+ }
+ }
+
+ // Optional Queue: prefetch requests; same replacement logic as the
+ // mandatory queue, but loads and ifetches map to the same event.
+ in_port(optionalQueue_in, CacheMsg, optionalQueue, desc="...") {
+ if (optionalQueue_in.isReady()) {
+ peek(optionalQueue_in, CacheMsg) {
+ if (cacheMemory.cacheAvail(in_msg.Address) == false) {
+ trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ if (in_msg.Type == CacheRequestType:LD) {
+ trigger(Event:Load_prefetch, in_msg.Address);
+ } else if (in_msg.Type == CacheRequestType:IFETCH) {
+ trigger(Event:Load_prefetch, in_msg.Address);
+ } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
+ trigger(Event:Store_prefetch, in_msg.Address);
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+
+ // Send a read-share request to this block's home directory.
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ // Send an exclusive-ownership request to the home directory.
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ // Write the block back to the directory (carries the data).
+ action(d_issuePUTX, "d", desc="Issue PUTX") {
+ enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ // Cache-to-cache transfer in response to a forwarded request.
+ action(e_dataFromCacheToRequestor, "e", desc="Send data from cache to requestor") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state O and we see a GetX
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ action(g_allocateCacheBlock, "g", desc="Allocate cache block") {
+ if (cacheMemory.isTagPresent(address) == false) {
+ cacheMemory.allocate(address);
+ }
+ }
+
+ // Prefetch-triggered misses must not wake the sequencer; only demand
+ // loads call back.
+ action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
+ // Non-prefetch
+ sequencer.readCallback(address, cacheMemory[address].DataBlk);
+ } else {
+ // Prefetch - don't call back
+ }
+ }
+
+ // Store counterpart of h_load_hit: demand stores notify the sequencer.
+ action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
+ // Non-prefetch
+ sequencer.writeCallback(address, cacheMemory[address].DataBlk);
+ } else {
+ // Prefetch - don't call back
+ }
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ TBEs[address].NumPendingAcks := 0; // default value
+ TBEs[address].isPrefetch := false;
+ TBEs[address].ForwardGetS_IDs.clear();
+ }
+
+ action(j_setPrefetchBit, "j", desc="Set prefetch bit") {
+ TBEs[address].isPrefetch := true;
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+ action(l_popForwardedRequestQueue, "l", desc="Pop incoming forwarded request queue") {
+ forwardedRequestNetwork_in.dequeue();
+ }
+
+ action(m_popOptionalQueue, "m", desc="Pop optional queue") {
+ optionalQueue_in.dequeue();
+ }
+
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
+ responseNetwork_in.dequeue();
+ }
+
+ // Merge the ack count carried by a data message into the TBE; the
+ // counter may have gone negative from acks that arrived before the data.
+ action(p_addNumberOfPendingAcks, "p", desc="Add number of pending acks to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ DEBUG_EXPR(TBEs[address].NumPendingAcks);
+ TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks + in_msg.NumPendingAcks;
+ DEBUG_EXPR(in_msg.NumPendingAcks);
+ DEBUG_EXPR(TBEs[address].NumPendingAcks);
+ }
+ }
+
+ action(q_decrementNumberOfPendingAcks, "q", desc="Decrement number of pending invalidations by one") {
+ DEBUG_EXPR(TBEs[address].NumPendingAcks);
+ TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks - 1;
+ DEBUG_EXPR(TBEs[address].NumPendingAcks);
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ }
+
+ // Acknowledge an invalidation back to the requesting cache.
+ action(t_sendAckToInvalidator, "t", desc="Send ack to invalidator") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.NumPendingAcks := 0;
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(responseNetwork_in, ResponseMsg) {
+ cacheMemory[address].DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
+ TBEs[address].DataBlk := cacheMemory[address].DataBlk; // keep the data available after the cache block is deallocated
+ }
+
+ action(y_dataFromTBEToRequestor, "y", desc="Send data from TBE to requestor") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state MS and we see a GetX
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ action(z_stall, "z", desc="Stall") {
+ } // intentionally empty: no queue is popped, so the triggering message is retried later
+
+ action(dd_recordGetSForwardID, "\d", desc="Record forwarded GetS for future forwarding") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ TBEs[address].ForwardGetS_IDs.add(in_msg.Requestor);
+ }
+ }
+
+ action(ee_dataFromCacheToGetSForwardIDs, "\e", desc="Send data from cache to GetS ForwardIDs") {
+ // FIXME - In some cases this should be from the TBE, not the cache.
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination := TBEs[address].ForwardGetS_IDs; // multicast to every GetS recorded while the miss was outstanding
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.NumPendingAcks := 0;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(ff_deallocateCacheBlock, "\f", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ cacheMemory.deallocate(address);
+ }
+
+ action(gg_dataFromCacheToGetXForwardID, "\g", desc="Send data from cache to GetX ForwardID") {
+ // FIXME - In some cases this should be from the TBE, not the cache.
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(TBEs[address].ForwardGetX_ID);
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.NumPendingAcks := TBEs[address].ForwardGetX_AckCount; // relay the ack count recorded from the forwarded GetX
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(ii_recordGetXForwardID, "\i", desc="Record forwarded GetX and ack count for future forwarding") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ TBEs[address].ForwardGetX_ID := in_msg.Requestor;
+ TBEs[address].ForwardGetX_AckCount := in_msg.NumPendingAcks;
+ }
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/Prefetch/Replacement from transient states
+ transition({OM, OI, IS, ISI, IM, IMO, IMOI, IMI, MI}, {Load, Load_prefetch, Ifetch, Store, Store_prefetch, Replacement}) {
+ z_stall; // no queue is popped, so the request retries once the transient state resolves
+ }
+
+ // Transitions from Idle
+ transition(I, {Load,Ifetch}, IS) {
+ g_allocateCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, {Load_prefetch}, IS) {
+ g_allocateCacheBlock;
+ i_allocateTBE;
+ j_setPrefetchBit;
+ a_issueGETS;
+ m_popOptionalQueue;
+ }
+
+ transition(I, Store, IM) {
+ g_allocateCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Store_prefetch, IM) {
+ g_allocateCacheBlock;
+ i_allocateTBE;
+ j_setPrefetchBit;
+ b_issueGETX;
+ m_popOptionalQueue;
+ }
+
+ transition(I, Replacement) {
+ ff_deallocateCacheBlock; // nothing to write back from Idle
+ }
+
+ transition(I, INV) {
+ t_sendAckToInvalidator; // not a sharer, but the invalidator still expects an ack
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from Shared
+ transition({S, O}, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, O, M}, Load_prefetch) { // block already readable: drop the prefetch request
+ m_popOptionalQueue;
+ }
+
+ transition(S, Store, IM) {
+ i_allocateTBE;
+ b_issueGETX;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Store_prefetch, IM) {
+ i_allocateTBE;
+ j_setPrefetchBit;
+ b_issueGETX;
+ m_popOptionalQueue;
+ }
+
+ transition(S, Replacement, I) {
+ ff_deallocateCacheBlock; // clean copy: dropped silently, no PUTX issued
+ }
+
+ transition(S, INV, I) {
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store_prefetch) {
+ m_popOptionalQueue;
+ }
+
+ transition(M, Replacement, MI) {
+ i_allocateTBE;
+ d_issuePUTX;
+ x_copyDataFromCacheToTBE; // TBE holds the dirty data until the directory acks the writeback
+ ff_deallocateCacheBlock;
+ }
+
+ transition(M, Forwarded_GETS, O) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(M, Forwarded_GETX, I) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from O
+ transition(O, Store, OM) {
+ i_allocateTBE;
+ b_issueGETX;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, Store_prefetch, OM) {
+ i_allocateTBE;
+ j_setPrefetchBit;
+ b_issueGETX;
+ m_popOptionalQueue;
+ }
+
+ transition(O, Replacement, OI){
+ i_allocateTBE;
+ d_issuePUTX;
+ x_copyDataFromCacheToTBE; // owner must keep supplying data until the writeback is acked
+ ff_deallocateCacheBlock;
+ }
+
+ transition(O, Forwarded_GETS) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(O, Forwarded_GETX, I) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // transitions from OI
+
+ transition(OI, Forwarded_GETS) { // writeback in flight: service forwards from the TBE copy
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(OI, Forwarded_GETX) {
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(OI, Dir_WB_ack, I) {
+ s_deallocateTBE;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from IS
+
+ transition(IS, INV, ISI) { // GETS still outstanding; remember that we were invalidated
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(IS, Data_ack_0, S) {
+ u_writeDataToCache;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from ISI
+
+ // in ISI, could get data from the Proc whose GETX caused INV to go from IS to ISI
+ // or, could get data from Dir if Dir's data lost race to Dir's INV
+ // or, could get data from Dir, if my GETS took forever to get to Dir, and the GETX
+ // processor already wrote it back
+
+ transition(ISI, Data_ack_0, I) { // use the data once, then drop to I (we were invalidated)
+ u_writeDataToCache;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(ISI, INV) {
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from IM
+
+ transition(IM, INV) { // do not need to go to IMI, since INV is for earlier epoch
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition({IM, IMO}, Forwarded_GETS, IMO) {
+ dd_recordGetSForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(IM, Forwarded_GETX, IMI) {
+ ii_recordGetXForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(IM, {Data_ack_0, Data_ack_not_0_last}, M) {
+ u_writeDataToCache;
+ hh_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Data_ack_not_0) { // data arrived but acks still outstanding: stay in IM
+ u_writeDataToCache;
+ p_addNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Proc_last_ack, M) {
+ hh_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from IMO
+
+ transition(IMO, Forwarded_GETX, IMOI) {
+ ii_recordGetXForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(IMO, {Data_ack_0, Data_ack_not_0_last}, O) {
+ u_writeDataToCache;
+ hh_store_hit;
+ ee_dataFromCacheToGetSForwardIDs; // satisfy the GetS requests recorded while the miss was pending
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMO, Data_ack_not_0) {
+ u_writeDataToCache;
+ p_addNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMO, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMO, Proc_last_ack, O) {
+ hh_store_hit;
+ ee_dataFromCacheToGetSForwardIDs;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from IMI
+
+ transition(IMI, {Data_ack_0, Data_ack_not_0_last}, I) {
+ u_writeDataToCache;
+ hh_store_hit;
+ gg_dataFromCacheToGetXForwardID; // then hand the block to the recorded GetX requestor
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMI, Data_ack_not_0) {
+ u_writeDataToCache;
+ p_addNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMI, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMI, Proc_last_ack, I) {
+ hh_store_hit;
+ gg_dataFromCacheToGetXForwardID;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from IMOI
+
+ transition(IMOI, {Data_ack_0, Data_ack_not_0_last}, I) {
+ u_writeDataToCache;
+ hh_store_hit;
+ ee_dataFromCacheToGetSForwardIDs; // recorded GetS requestors get the data,
+ gg_dataFromCacheToGetXForwardID; // then ownership passes to the recorded GetX requestor
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMOI, Data_ack_not_0) {
+ u_writeDataToCache;
+ p_addNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMOI, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMOI, Proc_last_ack, I) {
+ hh_store_hit;
+ ee_dataFromCacheToGetSForwardIDs;
+ gg_dataFromCacheToGetXForwardID;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from OM
+ transition(OM, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(OM, Forwarded_GETS) { // still owner: supply data from the cache
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(OM, Forwarded_GETX, IM) { // lost ownership while upgrading: fall back to IM
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from MI
+
+ transition(MI, Forwarded_GETS) { // writeback in flight: serve from the TBE copy
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(MI, Forwarded_GETX) {
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(MI, Dir_WB_ack, I) {
+ s_deallocateTBE;
+ l_popForwardedRequestQueue;
+ }
+}
diff --git a/src/mem/protocol/MOSI_SMP_directory_1level-dir.sm b/src/mem/protocol/MOSI_SMP_directory_1level-dir.sm
new file mode 100644
index 000000000..fa91b1baa
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_directory_1level-dir.sm
@@ -0,0 +1,333 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOSI_directory-dir.sm 1.14 04/09/07 13:52:52-05:00 mikem@maya.cs.wisc.edu $
+ */
+
+machine(Directory, "MOSI Directory Optimized") {
+
+
+ MessageBuffer forwardedRequestFromDir, network="To", virtual_network="1", ordered="true";
+ MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";
+ MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false";
+
+ // STATES
+ enumeration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, desc="Idle";
+ S, desc="Shared";
+ O, desc="Owned";
+ M, desc="Modified", format="!b";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETS, desc="A GETS arrives";
+ GETX_Owner, desc="A GETX arrives, requestor is owner";
+ GETX_NotOwner, desc="A GETX arrives, requestor is not owner";
+ PUTX_Owner, "PUTX (requestor is owner)", desc="A PUTX arrives, requestor is owner";
+ PUTX_NotOwner, "PUTX (requestor not owner)",desc="A PUTX arrives, requestor is not owner";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ State DirectoryState, desc="Directory state";
+ NetDest Sharers, desc="Set of sharers";
+ bool DirOwner, default="true", desc="Is dir owner?";
+ MachineID ProcOwner, desc="Processor owner";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+
+ State getState(Address addr) { // addresses not present in the directory default to Idle
+ if (directory.isPresent(addr)) {
+ return directory[addr].DirectoryState;
+ }
+ return State:I;
+ }
+
+ void setState(Address addr, State state) {
+ if (directory.isPresent(addr)) {
+ directory[addr].DirectoryState := state;
+ }
+ }
+
+ // ** OUT_PORTS **
+ out_port(forwardedRequestNetwork_out, RequestMsg, forwardedRequestFromDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+
+ // ** IN_PORTS **
+
+ in_port(requestNetwork_in, RequestMsg, requestToDir) { // classify each incoming request into a directory event
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ if(directory[in_msg.Address].DirOwner == false && in_msg.Requestor == directory[in_msg.Address].ProcOwner) {
+ trigger(Event:GETX_Owner, in_msg.Address);
+ } else {
+ trigger(Event:GETX_NotOwner, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ if (directory[in_msg.Address].DirOwner == false && in_msg.Requestor == directory[in_msg.Address].ProcOwner) {
+ trigger(Event:PUTX_Owner, in_msg.Address);
+ } else {
+ trigger(Event:PUTX_NotOwner, in_msg.Address);
+ }
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+
+ // Actions
+
+ // Add the requesting node to the block's sharer list.
+
+ action(a_addRequestorToSharers, "a", desc="Add requestor to list of sharers") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[address].Sharers.add(in_msg.Requestor);
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+ }
+
+ // Send memory data to the requestor; a GETX response also carries the ack count.
+
+ action(b_dataToRequestor, "b", desc="Send data to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ if(in_msg.Type == CoherenceRequestType:GETX) {
+ out_msg.NumPendingAcks := directory[address].Sharers.count(); // requestor must collect one ack per sharer
+ } else {
+ out_msg.NumPendingAcks := 0; // don't need to send pending ack count to GETS requestor
+ }
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.DataBlk := directory[address].DataBlk;
+ DEBUG_EXPR(out_msg.NumPendingAcks);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ // Forward the request to the owning L1, which will supply the data.
+
+ action(d_forwardRequestToOwner, "d", desc="Forward request to owner") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(directory[address].ProcOwner);
+ DEBUG_EXPR(out_msg.Destination);
+
+ if(in_msg.Type == CoherenceRequestType:GETX) {
+ out_msg.NumPendingAcks := directory[address].Sharers.count(); // owner relays this count to the requestor
+ } else {
+ out_msg.NumPendingAcks := 0; // don't need to send pending ack count to GETS requestor
+ }
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ action(f_setOwnerToRequestor, "f", desc="Set owner equal to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[address].ProcOwner := in_msg.Requestor;
+ directory[address].DirOwner := false; // ownership now lives at an L1, not the directory
+ }
+ DEBUG_EXPR(directory[address].ProcOwner);
+ }
+
+ action(g_clearSharers, "g", desc="Clear list of sharers") {
+ directory[address].Sharers.clear();
+ }
+
+ // Invalidations are currently sent as a single multicast message to all sharers.
+
+ action(h_invToSharers, "h", desc="Send INVs to all sharers") {
+ peek(requestNetwork_in, RequestMsg) {
+ if(directory[address].Sharers.count() != 0){ // skip the message entirely when there are no sharers
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := in_msg.Requestor; // sharers ack directly to the requestor that triggered the INV
+ out_msg.Destination := directory[address].Sharers;
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ requestNetwork_in.dequeue();
+ }
+
+ action(l_writeRequestDataToMemory, "l", desc="Write PUTX/DWN data to memory") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ action(n_writebackAckToRequestor, "n", desc="Send WB_ack to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ // This needs to be DIRECTORY_LATENCY to keep the queue fifo
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ action(p_clearOwner, "p", desc="Clear owner") {
+ directory[address].DirOwner := true; // set owner equal to dir
+ }
+
+ action(r_addOwnerToSharers, "r", desc="Add owner to list of sharers") {
+ directory[address].Sharers.add(directory[address].ProcOwner);
+ }
+
+ action(t_removeOwnerFromSharers, "t", desc="Remove owner from list of sharers") {
+ directory[address].Sharers.remove(directory[address].ProcOwner);
+ }
+
+ action(u_removeRequestorFromSharers, "u", desc="Remove requestor from list of sharers") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[address].Sharers.remove(in_msg.Requestor);
+ }
+ }
+
+ // TRANSITIONS
+
+ transition({I, S, M, O}, PUTX_NotOwner) { // stale PUTX from a node that is no longer owner: just ack it
+ n_writebackAckToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ // Transitions from Idle
+ transition(I, GETS, S) {
+ a_addRequestorToSharers;
+ b_dataToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(I, GETX_NotOwner, M) {
+ f_setOwnerToRequestor;
+ b_dataToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ // Transitions from Shared
+ transition(S, GETS) {
+ a_addRequestorToSharers;
+ b_dataToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(S, GETX_NotOwner, M) {
+ u_removeRequestorFromSharers; // requestor must not receive an INV for its own GETX
+ b_dataToRequestor;
+ f_setOwnerToRequestor;
+ h_invToSharers;
+ g_clearSharers;
+ j_popIncomingRequestQueue;
+ }
+
+ // Transitions from Owned
+ transition(O, GETS) {
+ a_addRequestorToSharers;
+ d_forwardRequestToOwner; // the owning L1, not memory, supplies the data
+ j_popIncomingRequestQueue;
+ }
+
+ transition(O, {GETX_NotOwner, GETX_Owner}, M) {
+ u_removeRequestorFromSharers;
+ t_removeOwnerFromSharers; // owner gets the forwarded GETX instead of an INV
+ d_forwardRequestToOwner;
+ f_setOwnerToRequestor;
+ h_invToSharers;
+ g_clearSharers;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(O, PUTX_Owner, S) {
+ u_removeRequestorFromSharers;
+ l_writeRequestDataToMemory;
+ n_writebackAckToRequestor;
+ p_clearOwner;
+ j_popIncomingRequestQueue;
+ }
+
+
+ // Transitions from Modified
+ transition(M, GETS, O) {
+ a_addRequestorToSharers;
+ r_addOwnerToSharers; // old owner keeps a shared copy
+ d_forwardRequestToOwner;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M, GETX_NotOwner) {
+ d_forwardRequestToOwner;
+ f_setOwnerToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M, PUTX_Owner, I) {
+ l_writeRequestDataToMemory;
+ n_writebackAckToRequestor;
+ p_clearOwner;
+ j_popIncomingRequestQueue;
+ }
+}
diff --git a/src/mem/protocol/MOSI_SMP_directory_1level-msg.sm b/src/mem/protocol/MOSI_SMP_directory_1level-msg.sm
new file mode 100644
index 000000000..b827ab05e
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_directory_1level-msg.sm
@@ -0,0 +1,74 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOSI_directory-msg.sm 1.9 04/08/09 16:11:38-05:00 mikem@maya.cs.wisc.edu $
+ *
+ */
+
+// CoherenceRequestType: request kinds sent on the request/forwarded-request networks.
+enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ GETS, desc="Get Shared";
+ GET_INSTR, desc="Get Instruction";
+ PUTX, desc="Put eXclusive";
+ INV, desc="INValidate";
+ WB_ACK, desc="Write Back ACKnowledgment";
+}
+
+// CoherenceResponseType: response kinds sent on the response network.
+enumeration(CoherenceResponseType, desc="...") {
+ ACK, desc="ACKnowledgment";
+ NACK, desc="Negative ACKnowledgment";
+ DATA, desc="Data";
+}
+
+// RequestMsg: format of a coherence request (GETS/GETX/PUTX/INV/WB_ACK).
+structure(RequestMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Multicast destination mask";
+ DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
+ int NumPendingAcks, desc="Number of acks to wait for"; // Needed for forwarded responses only
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+// ResponseMsg: format of a data or ack response.
+structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID Sender, desc="Node who sent the data";
+ MachineType SenderMachine, desc="What component sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ MachineType DestMachine, desc="What component receives the data";
+ DataBlock DataBlk, desc="data for the cache line";
+ int NumPendingAcks, desc="Number of acks to wait for";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
diff --git a/src/mem/protocol/MOSI_SMP_directory_1level.slicc b/src/mem/protocol/MOSI_SMP_directory_1level.slicc
new file mode 100644
index 000000000..250c72ae7
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_directory_1level.slicc
@@ -0,0 +1,4 @@
+MOSI_SMP_directory_1level-msg.sm
+MOSI_SMP_directory_1level-cache.sm
+MOSI_SMP_directory_1level-dir.sm
+standard_1level_SMP-protocol.sm
diff --git a/src/mem/protocol/MSI_MOSI_CMP_directory-L1cache.sm b/src/mem/protocol/MSI_MOSI_CMP_directory-L1cache.sm
new file mode 100644
index 000000000..c16a2fe80
--- /dev/null
+++ b/src/mem/protocol/MSI_MOSI_CMP_directory-L1cache.sm
@@ -0,0 +1,799 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+
+machine(L1Cache, "MSI Directory L1 Cache CMP") {
+
+ // NODE L1 CACHE
+ // From this node's L1 cache TO the network
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="true";
+ MessageBuffer dummyFrom1, network="To", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer dummyFrom2, network="To", virtual_network="2", ordered="false"; // dummy buffer that shouldn't be used
+ // a local L1 -> this L2 bank
+ MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
+ MessageBuffer dummyFrom4, network="To", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
+
+
+ // To this node's L1 cache FROM the network
+ MessageBuffer dummyTo0, network="From", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer dummyTo1, network="From", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
+ // a L2 bank -> this L1
+ MessageBuffer requestToL1Cache, network="From", virtual_network="2", ordered="true";
+ // a L2 bank -> this L1
+ MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";
+ MessageBuffer dummyTo4, network="From", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
+
+ // STATES
+ enumeration(State, desc="Cache states", default="L1Cache_State_L1_I") {
+ // Base states
+ NP, desc="Not present in either cache";
+ L1_I, desc="a L1 cache entry Idle";
+ L1_S, desc="a L1 cache entry Shared";
+ L1_M, desc="a L1 cache entry Modified", format="!b";
+
+ // Transient States
+ L1_IS, desc="L1 idle, issued GETS, have not seen response yet";
+ L1_ISI, desc="L1 idle, issued GETS, saw INV, still waiting for data";
+ L1_IM, desc="L1 idle, issued GETX, have not seen response yet";
+ L1_IMI, desc="L1 idle, issued GETX, saw INV, still waiting for data";
+ L1_IMS, desc="L1 idle, issued GETX, saw DownGrade, still waiting for data";
+ L1_IMSI, desc="L1 idle, issued GETX, saw DownGrade, saw INV, still waiting for data";
+
+ L1_SI, desc="issued PUTS, waiting for response";
+ L1_MI, desc="issued PUTX, waiting for response";
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ // L1 events
+ Load, desc="Load request from the home processor";
+ Ifetch, desc="I-fetch request from the home processor";
+ Store, desc="Store request from the home processor";
+
+ // L1 is required to send response to the L2 immediately
+ L1_INV, "INV", desc="L1 Invalidation of M data", format="!r";
+ L1_INV_S, "INV", desc="L1 Invalidation of S data", format="!r";
+ L1_DownGrade, "Force DownGrade", desc="L2 cache forces an L1 cache in M to downgrade to S and writeback result";
+
+ // receiving of data
+ L1_Data, "Data", desc="Data in response to an L1 request, transistion to M or S depending on request";
+ L1_Data_S, "Data S", desc="Data in response to an L1 request, write data then transistion to S";
+ L1_Data_I, "Data I", desc="Data in response to an L1 request, write data then transistion to I";
+
+ // receiving of acks
+ L1_PutAck, "Put Ack", desc="PutS or PutX ack from L2";
+
+ // internal generated request
+ // L1 request to replace block, results in either a PUTS or PUTX request
+ L1_Replacement, desc="L1 Replacement", format="!r";
+ // Currently same as replacement, request initiated when block is in the wrong L1 cache
+ L1_WriteBack, desc="on-chip L1 cache must write back to shared L2";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Address Address, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool isPrefetch, desc="Set if this was caused by a prefetch";
+ }
+
+ external_type(CacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ }
+
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
+
+ CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+ CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+
+ MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
+ // the optionalQueue doesn't have to be ordered for correctness
+  // however enforcing order ensures the prefetches reach the L2 in order
+ MessageBuffer optionalQueue, ordered="true", rank="101", abstract_chip_ptr="true";
+
+ Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+
+ int cache_state_to_int(State state);
+
+ // inclusive cache returns L1 entries only
+ Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
+ if (L1DcacheMemory.isTagPresent(addr)) {
+ return L1DcacheMemory[addr];
+ } else {
+ return L1IcacheMemory[addr];
+ }
+ }
+
+ void changeL1Permission(Address addr, AccessPermission permission) {
+ if (L1DcacheMemory.isTagPresent(addr)) {
+ return L1DcacheMemory.changePermission(addr, permission);
+ } else if(L1IcacheMemory.isTagPresent(addr)) {
+ return L1IcacheMemory.changePermission(addr, permission);
+ } else {
+ error("cannot change permission, L1 block not present");
+ }
+ }
+
+ bool isL1CacheTagPresent(Address addr) {
+ return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+ }
+
+ State getState(Address addr) {
+ if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(addr);
+ }
+ assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+ if(L1_TBEs.isPresent(addr)) {
+ return L1_TBEs[addr].TBEState;
+ } else if (isL1CacheTagPresent(addr)) {
+ return getL1CacheEntry(addr).CacheState;
+ }
+ return State:NP;
+ }
+
+ string getStateStr(Address addr) {
+ return L1Cache_State_to_string(getState(addr));
+ }
+
+ // when is this called?
+ void setState(Address addr, State state) {
+ assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+
+ // MUST CHANGE
+ if(L1_TBEs.isPresent(addr)) {
+ L1_TBEs[addr].TBEState := state;
+ }
+
+ if (isL1CacheTagPresent(addr)) {
+ getL1CacheEntry(addr).CacheState := state;
+
+ // Set permission
+ if (state == State:L1_I || state == State:L1_SI || state == State:L1_MI) {
+ changeL1Permission(addr, AccessPermission:Invalid);
+ } else if (state == State:L1_S) {
+ changeL1Permission(addr, AccessPermission:Read_Only);
+ } else if (state == State:L1_M) {
+ changeL1Permission(addr, AccessPermission:Read_Write);
+ } else {
+ changeL1Permission(addr, AccessPermission:Busy);
+ }
+ }
+ }
+
+ Event mandatory_request_type_to_event(CacheRequestType type) {
+ if (type == CacheRequestType:LD) {
+ return Event:Load;
+ } else if (type == CacheRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+ return Event:Store;
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+
+ // ** OUT_PORTS **
+ // All ports are to the same CMP network, queue id numbers determine IntraChip Switch location
+
+ out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
+ out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
+
+ // ** IN_PORTS **
+ in_port(dummyTo0_in, RequestMsg, dummyTo0) {
+ if (dummyTo0_in.isReady()) {
+ peek(dummyTo0_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(machineID);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ }
+ error("dummyTo0 port should not be used");
+ }
+ }
+
+ in_port(dummyTo1_in, RequestMsg, dummyTo1) {
+ if (dummyTo1_in.isReady()) {
+ peek(dummyTo1_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(machineID);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ }
+ error("dummyTo1 port should not be used");
+ }
+ }
+
+ in_port(dummyTo4_in, ResponseMsg, dummyTo4) {
+ if (dummyTo4_in.isReady()) {
+ peek(dummyTo4_in, ResponseMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(machineID);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.SenderMachId);
+ }
+ error("dummyTo4 port should not be used");
+ }
+ }
+
+ // Response IntraChip L1 Network - response msg to this L1 cache
+ in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
+ if (responseIntraChipL1Network_in.isReady()) {
+ peek(responseIntraChipL1Network_in, ResponseMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.Destination);
+ DEBUG_EXPR(in_msg.SenderMachId);
+ DEBUG_EXPR(machineID);
+ assert(in_msg.Destination.isElement(machineID));
+ if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L2Cache) {
+ if(in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:L1_Data, in_msg.Address); // L1 now has data in its desired state
+ } else if(in_msg.Type == CoherenceResponseType:DATA_S) {
+          trigger(Event:L1_Data_S, in_msg.Address); // L1 now has data but must immediately move to S state
+ } else if(in_msg.Type == CoherenceResponseType:DATA_I) {
+          trigger(Event:L1_Data_I, in_msg.Address); // L1 now has data but must immediately move to INV state
+ } else if(in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:L1_PutAck, in_msg.Address);
+ } else {
+ error("Invalid L1 response type");
+ }
+ } else {
+ error("A non-L2 cache sent a response to a L1 cache");
+ }
+ }
+ }
+ }
+
+ // Request InterChip network - request from this L1 cache to the shared L2
+ in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
+ if(requestIntraChipL1Network_in.isReady()) {
+ peek(requestIntraChipL1Network_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if(machineIDToMachineType(in_msg.RequestorMachId) == MachineType:L2Cache) {
+ if(in_msg.Type == CoherenceRequestType:L1_DG) {
+ trigger(Event:L1_DownGrade, in_msg.Address); // Force L1 to downgrade to S state
+ } else if (in_msg.Type == CoherenceRequestType:INV) {
+          trigger(Event:L1_INV, in_msg.Address); // L1 must invalidate its modified version
+ } else if (in_msg.Type == CoherenceRequestType:INV_S) {
+          trigger(Event:L1_INV_S, in_msg.Address); // L1 must invalidate its shared version
+ } else {
+ error("Invalid forwarded request type");
+ }
+ } else {
+ error("A non-L2 cache sent a request to a L1 cache");
+ }
+ }
+ }
+ }
+
+  // Mandatory queue between the node's CPU and its L1 caches
+ in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady()) {
+ peek(mandatoryQueue_in, CacheMsg) {
+
+ // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+ if (in_msg.Type == CacheRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_WriteBack, in_msg.Address);
+ }
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ // The tag matches for the L1, so the L1 asks the L2 for it.
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+          // L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+ } else {
+ // *** DATA ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_WriteBack, in_msg.Address);
+ }
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+        // The tag matches for the L1, so the L1 asks the L2 for it
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+          // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.L1CacheStateStr := getStateStr(address);
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.RequestorMachId := machineID;
+ DEBUG_EXPR(machineID);
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.L1CacheStateStr := getStateStr(address);
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(c_issueUPGRADE, "c", desc="Issue GETX") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:UPGRADE;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.L1CacheStateStr := getStateStr(address);
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(f_issueGETINSTR, "g", desc="Issue GETINSTR") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GET_INSTR;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.L1CacheStateStr := getStateStr(address);
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(d_issuePUTX, "d", desc="Issue PUTX") {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ DEBUG_EXPR(out_msg.DataBlk);
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.L1CacheStateStr := getStateStr(address);
+ }
+ }
+
+ action(q_issuePUTS, "q", desc="Issue PUTS") {
+ enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUTS;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.L1CacheStateStr := getStateStr(address);
+ }
+ }
+
+  // L1 responding to an L2 request with data
+ action(e_dataFromL1CacheToL2Cache, "e", desc="Send data from L1 cache to L2 Cache") {
+ enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.SenderMachId := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ DEBUG_EXPR(out_msg.DataBlk);
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(f_dataFromTBEToL2Cache, "f", desc="Send data from L1_TBE to L2 Cache") {
+ peek(requestIntraChipL1Network_in, RequestMsg) {
+ enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.SenderMachId := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.DataBlk := L1_TBEs[in_msg.Address].DataBlk;
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ DEBUG_EXPR(out_msg.DataBlk);
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+  // L1 responding to an L2 request with an invalidation ack
+ action(t_sendInvAckToL2Cache, "t", desc="Send Invadiation ack to L2 Cache") {
+ enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:INV_ACK;
+ out_msg.SenderMachId := machineID;
+ out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ DEBUG_EXPR(address);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
+ DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
+ sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
+ }
+
+ action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
+ DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
+ sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
+ check_allocate(L1_TBEs);
+ L1_TBEs.allocate(address);
+ L1_TBEs[address].isPrefetch := false;
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+ action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
+ profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
+ }
+
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+ profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ L1_TBEs.deallocate(address);
+ }
+
+ action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
+ peek(responseIntraChipL1Network_in, ResponseMsg) {
+ getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(x_copyDataFromL1CacheToTBE, "x", desc="Copy data from cache to TBE") {
+ L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
+ }
+
+ action(z_stall, "z", desc="Stall") {
+ }
+
+ action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L1DcacheMemory.deallocate(address);
+ } else {
+ L1IcacheMemory.deallocate(address);
+ }
+ }
+
+ action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
+ if (L1DcacheMemory.isTagPresent(address) == false) {
+ L1DcacheMemory.allocate(address);
+ }
+ }
+
+ action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
+ if (L1IcacheMemory.isTagPresent(address) == false) {
+ L1IcacheMemory.allocate(address);
+ }
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/Replacement/WriteBack from transient states
+ transition({L1_IS, L1_IM, L1_ISI, L1_IMI, L1_IMS, L1_IMSI, L1_SI, L1_MI}, {Load, Ifetch, Store, L1_Replacement, L1_WriteBack}) {
+ z_stall;
+ }
+
+ // Transitions from Idle
+ transition({NP,L1_I}, {L1_Replacement, L1_WriteBack}) {
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition({NP,L1_I}, Load, L1_IS) {
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP,L1_I}, Ifetch, L1_IS) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ f_issueGETINSTR;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP,L1_I}, Store, L1_IM) {
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ k_popMandatoryQueue;
+ }
+
+ // Transitions from Shared
+ transition({L1_S}, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(L1_S, Store, L1_IM) {
+ i_allocateTBE;
+ c_issueUPGRADE;
+ k_popMandatoryQueue;
+ }
+
+ transition(L1_S, {L1_Replacement,L1_WriteBack}, L1_SI) {
+ i_allocateTBE;
+ q_issuePUTS;
+ x_copyDataFromL1CacheToTBE;
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition(L1_S, L1_INV_S, L1_I) {
+ t_sendInvAckToL2Cache;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Modified
+ transition(L1_M, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(L1_M, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(L1_M, {L1_Replacement, L1_WriteBack}, L1_MI) {
+ i_allocateTBE;
+ d_issuePUTX;
+ x_copyDataFromL1CacheToTBE;
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition(L1_M, L1_INV, L1_I) {
+ e_dataFromL1CacheToL2Cache;
+ l_popRequestQueue;
+ }
+
+ transition(L1_M, L1_DownGrade, L1_S) {
+ e_dataFromL1CacheToL2Cache;
+ l_popRequestQueue;
+ }
+
+ // Transitions from L1_IS
+ transition(L1_IS, L1_INV_S, L1_ISI) {
+ t_sendInvAckToL2Cache;
+ l_popRequestQueue;
+ }
+
+ transition(L1_IS, L1_Data, L1_S) {
+ u_writeDataToL1Cache;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L1_IS, L1_Data_I, L1_I) {
+ u_writeDataToL1Cache;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from L1_ISI
+ transition(L1_ISI, L1_Data, L1_I) {
+ u_writeDataToL1Cache;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from L1_IM
+ transition(L1_IM, L1_INV, L1_IMI) { // we don't have to respond immediately because we know the data is coming
+ l_popRequestQueue;
+ }
+
+ transition(L1_IM, L1_INV_S) {
+ t_sendInvAckToL2Cache;
+ l_popRequestQueue;
+ }
+
+ transition(L1_IM, L1_DownGrade, L1_IMS) {
+ l_popRequestQueue;
+ }
+
+ transition(L1_IM, L1_Data, L1_M) {
+ u_writeDataToL1Cache;
+ hh_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L1_IM, L1_Data_S, L1_S) {
+ u_writeDataToL1Cache;
+ hh_store_hit;
+ s_deallocateTBE;
+ e_dataFromL1CacheToL2Cache;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L1_IM, L1_Data_I, L1_I) {
+ u_writeDataToL1Cache;
+ hh_store_hit;
+ s_deallocateTBE;
+ e_dataFromL1CacheToL2Cache;
+ o_popIncomingResponseQueue;
+ }
+
+  // Transitions from L1_IMI - data should arrive and no requests are possible
+ transition(L1_IMI, L1_Data, L1_I) {
+ u_writeDataToL1Cache;
+ hh_store_hit;
+ s_deallocateTBE;
+ e_dataFromL1CacheToL2Cache;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from L1_IMS
+ transition(L1_IMS, L1_Data, L1_S) {
+ u_writeDataToL1Cache;
+ hh_store_hit;
+ s_deallocateTBE;
+ e_dataFromL1CacheToL2Cache;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L1_IMS, L1_INV_S, L1_IMSI) {
+ l_popRequestQueue;
+ }
+
+ // Transitions from L1_IMSI
+ transition(L1_IMSI, L1_Data, L1_I) {
+ u_writeDataToL1Cache;
+ hh_store_hit;
+ s_deallocateTBE;
+ e_dataFromL1CacheToL2Cache;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from L1_SI
+ transition(L1_SI, L1_INV_S) {
+ t_sendInvAckToL2Cache;
+ l_popRequestQueue;
+ }
+
+ transition(L1_SI, L1_PutAck, L1_I) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from L1_MI
+ transition(L1_MI, L1_INV) {
+ f_dataFromTBEToL2Cache;
+ l_popRequestQueue;
+ }
+
+ transition(L1_MI, L1_DownGrade, L1_SI) {
+ f_dataFromTBEToL2Cache;
+ l_popRequestQueue;
+ }
+
+ transition(L1_MI, L1_PutAck, L1_I) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+}
+
+
+
diff --git a/src/mem/protocol/MSI_MOSI_CMP_directory-L2cache.sm b/src/mem/protocol/MSI_MOSI_CMP_directory-L2cache.sm
new file mode 100644
index 000000000..d68efc819
--- /dev/null
+++ b/src/mem/protocol/MSI_MOSI_CMP_directory-L2cache.sm
@@ -0,0 +1,2191 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+machine(L2Cache, "MOSI Directory L2 Cache CMP") {
+
+  // L2 BANK QUEUES
+  // From local bank of L2 cache TO the network
+  MessageBuffer dummyFrom0, network="To", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
+  MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="1", ordered="false"; // this L2 bank -> mod-directory
+  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="2", ordered="true"; // this L2 bank -> a local L1
+  MessageBuffer responseFromL2Cache, network="To", virtual_network="3", ordered="false"; // this L2 bank -> a local L1 || mod-directory
+  MessageBuffer finalAckFromL2Cache, network="To", virtual_network="4", ordered="false"; // this L2 bank -> mod-directory
+
+  // FROM the network to this local bank of L2 cache
+  //MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="true"; // a local L1 -> this L2 bank
+  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="true"; // a local L1 -> this L2 bank (moved to vnet 0; the vnet-1 form is kept commented out above)
+  MessageBuffer dummyTo1, network="From", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
+  MessageBuffer forwardedRequestToL2Cache, network="From", virtual_network="2", ordered="true"; // mod-directory -> this L2 bank
+  MessageBuffer responseToL2Cache, network="From", virtual_network="3", ordered="false"; // a local L1 || mod-directory -> this L2 bank
+  MessageBuffer dummyTo4, network="From", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
+
+  // STATES
+  enumeration(State, desc="L2 Cache states", default="L2Cache_State_L2_NP") { // stable states first, then transient states grouped by transaction
+    // Base states
+    L2_NP, desc="Not present in either cache";
+    L2_I, desc="L2 cache entry Idle";
+    L2_S, desc="L2 cache entry Shared, not present in any local L1s";
+    L2_O, desc="L2 cache entry Owned, not present in any local L1s";
+    L2_M, desc="L2 cache entry Modified, not present in any L1s", format="!b";
+    L2_SS, desc="L2 cache entry Shared, also present in one or more L1s";
+    L2_SO, desc="L2 cache entry Owned, also present in one or more L1s or ext L2s";
+    L2_MT, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";
+
+    // Transient States
+
+    // Transient States from I
+    L2_IS, desc="L2 idle, issued GETS, have not seen response yet";
+    L2_ISZ, desc="L2 idle, issued GETS, saw a L1_GETX, have not seen data for GETS yet", format="!b";
+    L2_ISI, desc="L2 idle, issued GETS, saw INV, have not seen data for GETS yet", format="!b";
+    L2_IMV, desc="L2 idle, issued GETX, valid int L1, have not seen response(s) yet";
+    L2_MV, desc="L2 modified, a valid old L1 copy exist, external world gave write permission";
+    L2_IM, desc="L2 idle, issued GETX, no valid int L1, have not seen response(s) yet";
+    L2_IMO, desc="L2 idle, issued GETX, saw forwarded GETS";
+    L2_IMI, desc="L2 idle, issued GETX, saw forwarded GETX";
+    L2_IMZ, desc="L2 idle, issued GETX, saw another L1_GETX";
+    L2_IMOI, desc="L2 idle, issued GETX, saw GETS, saw forwarded GETX";
+    L2_IMOZ, desc="L2 idle, issued GETX, saw GETS, then a L1_GETX";
+
+    // Invalidation steps for S -> I
+    L2_SIC, desc="L2 shared, L2_INV, valid L1 copies exist, issued invalidates, have not seen responses yet";
+    L2_SIV, desc="L2 shared, L2_Replacement, valid L1 copies exist, issued invalidates, have not seen responses yet";
+
+    // Invalidation steps for M -> I for L2 Replacement
+    L2_MIV, desc="L2 modified, a valid L1 copy exist, issued forced writeback, have not seen the response yet";
+    L2_MIN, desc="L2 modified, no valid L1 copies, issued PUTX, have not seen response yet";
+
+    // Invalidation steps for M -> I for a Forwarded GetX
+    L2_MIC, desc="L2 modified, a valid L1 copy exist, issued forced writeback, have not seen the response yet";
+
+    // In MT state and see another L1_GETX request
+    L2_MIT, desc="L2 modified, a valid L1 copy exist, saw L1_GETX, issued INV, have not seen the response yet";
+
+    // Downgrade steps for M -> SO
+    L2_MO, desc="L2 modified, a valid L1 copy exist, issued downgrade request, have not seen response yet";
+    L2_MOIC, desc="L2 modified, a valid L1 copy exist, issued downgrade request, saw INV, have not seen response yet";
+    L2_MOICR, desc="L2 modified, a valid L1 copy exist, issued invalidate request, saw INV, have not seen response yet";
+    L2_MOZ, desc="L2 modified, a valid L1 copy exist, issued downgrade request, saw L1_GETX, have not seen response yet";
+
+    // Invalidation steps for O/SO -> I for L2 Replacement
+    L2_OIV, desc="L2 owned, valid L1 copies exist, issued invalidates, have not seen responses yet from L1s";
+    L2_OIN, desc="L2 owned, no valid L1 copies, issued PUTX, have not seen response yet from dir";
+
+    // Invalidation steps for SO -> I for a Forwarded GetX
+    L2_OIC, desc="L2 owned, valid L1 copies exist, issued invalidates, have not seen responses yet from L1s";
+
+    // Strange OM states
+    // Note: strange states, because it is waiting for the line
+    // to be stolen away, or to look like it has been stolen away. The
+    // common case is that we see a forward from the directory that is
+    // really from us, we forwarded the data to our dataqueue, and
+    // everything works fine.
+    L2_OMV, desc="L2 owned and valid L1 copies, issued GETX and invalidates, have not seen responses yet";
+    L2_OM, desc="L2 owned and no valid L1 copies, issued GETX, have not seen response yet";
+  }
+
+  // EVENTS
+  enumeration(Event, desc="L2 Cache events") { // "int"/on-chip = local L1 responses; "ext"/off-chip = external L2 or directory responses
+    // L2 events
+
+    // events initiated by the local L1s
+    L1_GET_INSTR, desc="a L1I GET INSTR request for a block maped to us";
+    L1_GETS, desc="a L1D GETS request for a block maped to us";
+    L1_GETX, desc="a L1D GETX request for a block maped to us";
+    L1_UPGRADE, desc="a L1D UPGRADE request for a block maped to us";
+    L1_UPGRADE_no_others, desc="a L1D UPGRADE request for a block maped to us, requestor is the only on-chip sharer";
+    L1_PUTX, desc="a L1D PUTX request for a block maped to us (L1 replacement of a modified block)";
+    L1_PUTX_last, desc="a L1D PUTX request for a block maped to us (L1 replacement of a modified block) last sharer";
+    L1_PUTX_old, desc="an old L1D PUTX request for a block maped to us (L1 replacement of a modified block)";
+    L1_PUTS, desc="a L1 replacement of a shared block", format="!r";
+    L1_PUTS_last, desc="a L1 replacement of the last local L1 shared block", format="!r";
+    L1_PUTS_old, desc="an old L1 replacement of a shared block", format="!r";
+
+    // events of local L1 responses
+    Proc_int_ack, "Proc on-chip L1 Cache ack", desc="Ack from on-chip L1 Cache";
+    Proc_last_int_ack, "Proc last on-chip L1 Cache ack", desc="Last on-chip L1 Cache ack", format="!r";
+
+    Data_int_ack, "Data int ack", desc="Received modified data from L1 now proceed in handling miss";
+
+    // events initiated by the external L2s
+    Forwarded_GETS, "Forwarded GETS", desc="Directory forwards Inter-chip GETS to us";
+    Forwarded_GET_INSTR, "Forwarded GETINSTR", desc="Inter-chip Forwarded GETINSTR";
+    Forwarded_GETX, "Forwarded GETX", desc="Directory forwards Inter-chip GETX to us";
+    L2_INV, "L2_INV", desc="L2 Invalidation initiated from other L2", format="!r";
+
+    // events initiated by this L2
+    L2_Replacement, desc="L2 Replacement", format="!r";
+
+    // events of external L2 responses
+    Proc_ext_ack, "Proc off-chip ack", desc="Ack from off-chip";
+    Proc_last_ext_ack, "Proc last off-chip ack", desc="Last off-chip ack", format="!r";
+
+    Data_ext_ack_0, "Data ack 0", desc="Data with ack count = 0";
+    Data_ext_ack_not_0, "Data ack not 0", desc="Data with ack count != 0 (but haven't seen all acks first";
+    // Data_ext_ack_not_0_last: is when the requestor has seen all acks but the directory has not, therefore
+    // the directory must be told that we now have the data
+    Data_ext_ack_not_0_last, "Data ack not 0 last", desc="Data with ack count != 0 after having received all acks";
+
+    Dir_WB_ack, "WB ack", desc="Writeback ack from dir";
+    Dir_exe_ack, "Only copy", desc="Directory tells us we already have exclusive permission, go directly to MT state";
+  }
+
+  // TYPES
+
+  // CacheEntry
+  structure(Entry, desc="...", interface="AbstractCacheEntry") { // one L2 cache line
+    State CacheState, desc="cache state";
+    NetDest Sharers, desc="tracks the L1 shares on-chip"; // set of local L1s currently holding this block
+    DataBlock DataBlk, desc="data for the block";
+  }
+
+  // TBE fields
+  structure(TBE, desc="...") { // per-outstanding-transaction bookkeeping for this bank
+    Address Address, desc="Physical address for this TBE";
+    State TBEState, desc="Transient state";
+    DataBlock DataBlk, desc="Buffer for the data block";
+    int NumPendingExtAcks, desc="Number of ext acks that this L2 bank is waiting for";
+    int NumPendingIntAcks, desc="Number of int acks that this L2 bank is waiting for";
+    NetDest Forward_GetS_IDs, desc="Set of the external processors to forward the block";
+    NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
+    MachineID Forward_GetX_ID, desc="ID of the L2 cache to forward the block"; // only meaningful when validForwardedGetXId is set
+    MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
+    MachineID InvalidatorID, desc="ID of the L2 cache (needed for L2_SS -> L2_I)"; // only meaningful when validInvalidator is set
+    int ForwardGetX_AckCount, desc="Number of acks the GetX we are forwarded needs";
+    bool isPrefetch, desc="Set if this was caused by a prefetch";
+    bool isThreeHop, desc="is this request a three hop";
+    bool validForwardedGetXId, desc="Indicate whether a forwarded GetX ID is valid";
+    bool validInvalidator, desc="Indicate whether an invalidator is valid";
+    bool isInternalRequestOnly, desc="Is internal request only, i.e. only L1s";
+  }
+
+  external_type(CacheMemory) { // C++ CacheMemory interface exposed to SLICC
+    bool cacheAvail(Address);      // is there a free way in this set?
+    Address cacheProbe(Address);   // victim address if a replacement is needed
+    void allocate(Address);
+    void deallocate(Address);
+    Entry lookup(Address);
+    void changePermission(Address, AccessPermission);
+    bool isTagPresent(Address);
+    void setMRU(Address);
+  }
+
+  external_type(TBETable) { // C++ TBETable interface exposed to SLICC
+    TBE lookup(Address);
+    void allocate(Address);
+    void deallocate(Address);
+    bool isPresent(Address);
+  }
+
+  TBETable L2_TBEs, template_hack="<L2Cache_TBE>"; // outstanding transactions for this bank
+
+  CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)'; // this bank's cache array
+
+  // inclusive cache, returns L2 entries only
+  Entry getL2CacheEntry(Address addr), return_by_ref="yes" { // precondition: tag must be present
+    return L2cacheMemory[addr];
+  }
+
+  void changeL2Permission(Address addr, AccessPermission permission) { // silently ignored when the tag is not resident
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory.changePermission(addr, permission);
+    }
+  }
+
+  string getCoherenceRequestTypeStr(CoherenceRequestType type) { // debug/trace helper
+    return CoherenceRequestType_to_string(type);
+  }
+
+  bool isL2CacheTagPresent(Address addr) { // thin wrapper over the cache array's tag check
+    return (L2cacheMemory.isTagPresent(addr));
+  }
+
+  bool isOneSharerLeft(Address addr, MachineID requestor) { // true iff requestor is the sole on-chip sharer
+    assert(L2cacheMemory[addr].Sharers.isElement(requestor)); // only valid to call when requestor is a sharer
+    return (L2cacheMemory[addr].Sharers.count() == 1);
+  }
+
+  bool isSharer(Address addr, MachineID requestor) { // false (not an error) when the block is not resident at all
+    if (L2cacheMemory.isTagPresent(addr)) {
+      return L2cacheMemory[addr].Sharers.isElement(requestor);
+    } else {
+      return false;
+    }
+  }
+
+  void addSharer(Address addr, MachineID requestor) { // record a local L1 as a sharer of this block
+    DEBUG_EXPR(machineID);
+    DEBUG_EXPR(requestor);
+    DEBUG_EXPR(addr);
+    assert(map_L1CacheMachId_to_L2Cache(addr, requestor) == machineID); // the L1 must be mapped to this bank
+    L2cacheMemory[addr].Sharers.add(requestor);
+  }
+
+  State getState(Address addr) { // TBE state takes precedence over the cache entry; Not-Present otherwise
+    if(L2_TBEs.isPresent(addr)) {
+      return L2_TBEs[addr].TBEState;
+    } else if (isL2CacheTagPresent(addr)) {
+      return getL2CacheEntry(addr).CacheState;
+    }
+    return State:L2_NP;
+  }
+
+  string getStateStr(Address addr) { // debug/trace helper
+    return L2Cache_State_to_string(getState(addr));
+  }
+
+  // Records the new state in the TBE and/or cache entry, and derives the entry's access permission from it.
+  void setState(Address addr, State state) {
+
+    // MUST CHANGE
+    if (L2_TBEs.isPresent(addr)) {
+      L2_TBEs[addr].TBEState := state;
+    }
+
+    if (isL2CacheTagPresent(addr)) {
+      getL2CacheEntry(addr).CacheState := state;
+
+      // Set permission
+      if (state == State:L2_I ||
+          state == State:L2_SIC || state == State:L2_SIV ||
+          state == State:L2_MIV || state == State:L2_MIN || state == State:L2_MIC || state == State:L2_MIT ||
+          state == State:L2_OIV || state == State:L2_OIN || state == State:L2_OIC) { // Invalid + all *->I teardown states
+        changeL2Permission(addr, AccessPermission:Invalid);
+      } else if (state == State:L2_S || state == State:L2_O || state == State:L2_SS || state == State:L2_SO) {
+        changeL2Permission(addr, AccessPermission:Read_Only);
+      } else if (state == State:L2_OM || state == State:L2_OMV) {
+        changeL2Permission(addr, AccessPermission:ReadUpgradingToWrite);
+      } else if (state == State:L2_M) {
+        changeL2Permission(addr, AccessPermission:Read_Write);
+      } else if (state == State:L2_MT) { // an L1 holds the block Modified, so the L2 copy is stale
+        changeL2Permission(addr, AccessPermission:Stale);
+      } else { // every remaining transient state
+        changeL2Permission(addr, AccessPermission:Busy);
+      }
+    }
+  }
+
+  Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) { // map an L1 request to an event, refined by sharer bookkeeping
+    if(type == CoherenceRequestType:GETS) {
+      return Event:L1_GETS;
+    } else if(type == CoherenceRequestType:GET_INSTR) {
+      return Event:L1_GET_INSTR;
+    } else if (type == CoherenceRequestType:GETX) {
+      return Event:L1_GETX;
+    } else if (type == CoherenceRequestType:UPGRADE) {
+      if (isSharer(addr, requestor)) {
+        if (isOneSharerLeft(addr, requestor)) {
+          return Event:L1_UPGRADE_no_others; // no other local L1 needs invalidating
+        } else {
+          return Event:L1_UPGRADE;
+        }
+      } else { // possible that we removed the line from the L2 before we could process the UPGRADE request
+        return Event:L1_GETX; // demote the UPGRADE to a full GETX
+      }
+    } else if (type == CoherenceRequestType:PUTX) {
+      if (isSharer(addr, requestor)) {
+        if (isOneSharerLeft(addr, requestor)) {
+          return Event:L1_PUTX_last;
+        } else {
+          return Event:L1_PUTX;
+        }
+      } else { // requestor no longer recorded as a sharer: treat as a stale PUTX
+        return Event:L1_PUTX_old;
+      }
+    } else if (type == CoherenceRequestType:PUTS) {
+      if (isSharer(addr, requestor)) {
+        if (isOneSharerLeft(addr, requestor)) {
+          return Event:L1_PUTS_last;
+        } else {
+          return Event:L1_PUTS;
+        }
+      } else { // possible that we removed the line from the L2 before we could process the L1_PUTS request
+        return Event:L1_PUTS_old;
+      }
+    } else {
+      DEBUG_EXPR(addr);
+      DEBUG_EXPR(type);
+      error("Invalid L1 forwarded request type");
+    }
+  }
+
+  // ** OUT_PORTS **
+  // All ports output to the same CMP network, NI determines where to route msg
+
+  out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);   // requests to local L1s (INV/downgrade)
+  out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache); // requests to the directory
+  out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);    // data/ack responses
+  out_port(finalAckIntraChipL2Network_out, ResponseMsg, finalAckFromL2Cache);    // 3-hop completion acks to the directory
+
+  // ** IN_PORTS **
+
+  in_port(dummyTo1_in, RequestMsg, dummyTo1) { // guard port: traffic here indicates a network wiring bug
+    if (dummyTo1_in.isReady()) {
+      peek(dummyTo1_in, RequestMsg) {
+        DEBUG_EXPR(in_msg.Address);
+        DEBUG_EXPR(id);
+        DEBUG_EXPR(in_msg.Type);
+        DEBUG_EXPR(getState(in_msg.Address));
+        DEBUG_EXPR(in_msg.RequestorMachId);
+      }
+      error("dummyTo1 port should not be used");
+    }
+  }
+
+  in_port(dummyTo4_in, ResponseMsg, dummyTo4) { // guard port: traffic here indicates a network wiring bug
+    if (dummyTo4_in.isReady()) {
+      peek(dummyTo4_in, ResponseMsg) {
+        DEBUG_EXPR(in_msg.Address);
+        DEBUG_EXPR(id);
+        DEBUG_EXPR(in_msg.Type);
+        DEBUG_EXPR(getState(in_msg.Address));
+        DEBUG_EXPR(in_msg.SenderMachId);
+      }
+      error("dummyTo4 port should not be used");
+    }
+  }
+
+  // Response IntraChip L2 Network - response msg to this particular L2 bank
+  in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
+    if (responseIntraChipL2Network_in.isReady()) {
+      peek(responseIntraChipL2Network_in, ResponseMsg) {
+        DEBUG_EXPR(in_msg.Address);
+        DEBUG_EXPR(id);
+        DEBUG_EXPR(getState(in_msg.Address));
+        DEBUG_EXPR(in_msg.SenderMachId);
+        DEBUG_EXPR(in_msg.Type);
+        DEBUG_EXPR(in_msg.NumPendingExtAcks);
+        // test whether it's from a local L1 or an off-chip source
+        assert(in_msg.Destination.isElement(machineID));
+        if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L1Cache) {
+          if(in_msg.Type == CoherenceResponseType:DATA) {
+            if(L2_TBEs[in_msg.Address].NumPendingIntAcks == 1) { // data from an L1 doubles as its (last) int ack
+              trigger(Event:Data_int_ack, in_msg.Address); // L1 now has data and all on-chip acks
+            } else {
+              DEBUG_EXPR(in_msg.Address);
+              DEBUG_EXPR(L2_TBEs[in_msg.Address].NumPendingIntAcks);
+              error("Invalid L1 sent data when L2 wasn't expecting it");
+            }
+          } else if(in_msg.Type == CoherenceResponseType:INV_ACK) {
+            if(L2_TBEs.isPresent(in_msg.Address)) { // FIXME - possible to get a L1 ack after the transaction is completed
+              if(L2_TBEs[in_msg.Address].NumPendingIntAcks == 1) {
+                trigger(Event:Proc_last_int_ack, in_msg.Address); // L1 now has all on-chip acks
+              } else {
+                trigger(Event:Proc_int_ack, in_msg.Address); // process on-chip ack
+              }
+            }
+          }
+        } else { // external message
+          if(in_msg.Type == CoherenceResponseType:DATA) {
+            if(in_msg.NumPendingExtAcks == 0) {
+              trigger(Event:Data_ext_ack_0, in_msg.Address); // L2 now has data and all off-chip acks
+            } else {
+              // NOTE(review): the sum cancelling to zero implies the TBE ack counter counts received acks
+              // with opposite sign to the message's expected-ack count - confirm against the directory protocol
+              if(in_msg.NumPendingExtAcks + L2_TBEs[in_msg.Address].NumPendingExtAcks != 0) {
+                trigger(Event:Data_ext_ack_not_0, in_msg.Address);
+              } else {
+                trigger(Event:Data_ext_ack_not_0_last, in_msg.Address);
+              }
+            }
+          } else if(in_msg.Type == CoherenceResponseType:ACK) {
+            if(L2_TBEs[in_msg.Address].NumPendingExtAcks != 1){
+              trigger(Event:Proc_ext_ack, in_msg.Address);
+            } else {
+              trigger(Event:Proc_last_ext_ack, in_msg.Address);
+            }
+          }
+        }
+      }
+    } // if not ready, do nothing
+  }
+
+  // Forwarded Request from Directory
+  in_port(forwardedRequestIntraChipL2Network_in, RequestMsg, forwardedRequestToL2Cache) {
+    if(forwardedRequestIntraChipL2Network_in.isReady()) {
+      peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
+        DEBUG_EXPR(in_msg.Address);
+        DEBUG_EXPR(id);
+        DEBUG_EXPR(getState(in_msg.Address));
+        DEBUG_EXPR(in_msg.RequestorMachId);
+        DEBUG_EXPR(in_msg.Type);
+        assert(in_msg.Destination.isElement(machineID));
+        if(in_msg.Type == CoherenceRequestType:GETS) {
+          trigger(Event:Forwarded_GETS, in_msg.Address); // L2
+        } else if(in_msg.Type == CoherenceRequestType:GET_INSTR) {
+          trigger(Event:Forwarded_GET_INSTR, in_msg.Address); // L2
+        } else if (in_msg.Type == CoherenceRequestType:GETX) {
+          trigger(Event:Forwarded_GETX, in_msg.Address); // L2
+        } else if (in_msg.Type == CoherenceRequestType:INV) {
+          trigger(Event:L2_INV, in_msg.Address); // L2
+        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+          trigger(Event:Dir_WB_ack, in_msg.Address); // L2
+        } else if (in_msg.Type == CoherenceRequestType:EXE_ACK) {
+          trigger(Event:Dir_exe_ack, in_msg.Address); // L2
+        } else {
+          error("Invalid L2 forwarded request type");
+        }
+      }
+    }
+  }
+
+  // L1 Request
+  in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
+    if(L1RequestIntraChipL2Network_in.isReady()) {
+      peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+        DEBUG_EXPR(in_msg.Address);
+        DEBUG_EXPR(id);
+        DEBUG_EXPR(version);
+        DEBUG_EXPR(getState(in_msg.Address));
+        DEBUG_EXPR(in_msg.RequestorMachId);
+        DEBUG_EXPR(in_msg.Type);
+        DEBUG_EXPR(in_msg.Destination);
+        assert(machineIDToMachineType(in_msg.RequestorMachId) == MachineType:L1Cache);
+        assert(in_msg.Destination.isElement(machineID));
+        if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+          // The L2 contains the block, so proceed with handling the request
+          trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.RequestorMachId), in_msg.Address);
+        } else {
+          if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+            // L2 doesn't have the line, but we have space for it in the L2
+            trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.RequestorMachId), in_msg.Address);
+          } else {
+            // No room in the L2, so we need to make room before handling the request
+            trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address)); // note: triggers on the VICTIM address, not the requested one
+          }
+        }
+      }
+    }
+  }
+
+  // ACTIONS
+
+  action(a_issueGETS, "a", desc="Issue GETS") { // ask the directory for a Shared copy
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETS;
+        out_msg.RequestorMachId := machineID;
+        out_msg.Destination.add(map_Address_to_Directory(address));
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.L1CacheStateStr := in_msg.L1CacheStateStr; // propagate state strings for tracing
+        out_msg.L2CacheStateStr := getStateStr(address);
+      }
+    }
+  }
+
+  action(b_issueGETX, "b", desc="Issue GETX") { // ask the directory for an exclusive copy
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETX;
+        out_msg.RequestorMachId := machineID;
+        out_msg.Destination.add(map_Address_to_Directory(address));
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.L1CacheStateStr := in_msg.L1CacheStateStr; // propagate state strings for tracing
+        out_msg.L2CacheStateStr := getStateStr(address);
+      }
+    }
+  }
+
+  // finalAck issued from the response queue
+  action(c_finalAckToDirIfNeeded, "c", desc="Send FinalAck to dir if this is response to 3-hop xfer") {
+    peek(responseIntraChipL2Network_in, ResponseMsg) {
+      DEBUG_EXPR(in_msg);
+      if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L2Cache) { // data came from a peer L2, i.e. a 3-hop transfer
+        enqueue(finalAckIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY"){
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:FINALACK;
+          out_msg.SenderMachId := machineID;
+          out_msg.Destination.add(map_Address_to_Directory(address));
+          out_msg.MessageSize := MessageSizeType:Control;
+          DEBUG_EXPR(out_msg);
+        }
+      }
+    }
+  }
+
+  // finalAck issued from TBE
+  action(n_sendFinalAckIfThreeHop, "n", desc=""){ // like action c, but the 3-hop decision was latched in the TBE earlier
+    peek(responseIntraChipL2Network_in, ResponseMsg){
+      DEBUG_EXPR(in_msg);
+      if(L2_TBEs[address].isThreeHop == true){
+        enqueue(finalAckIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY"){
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:FINALACK;
+          out_msg.SenderMachId := machineID;
+          out_msg.Destination.add(map_Address_to_Directory(address));
+          out_msg.MessageSize := MessageSizeType:Control;
+          DEBUG_EXPR(out_msg);
+        }
+      }
+    }
+  }
+
+  action(mm_rememberIfFinalAckNeeded, "\m", desc=""){ // latch "this was a 3-hop transfer" so action n can send FINALACK later
+    peek(responseIntraChipL2Network_in, ResponseMsg){
+      if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L2Cache){
+        L2_TBEs[address].isThreeHop := true;
+      }
+    }
+  }
+
+  action(d_issuePUTX, "d", desc="Issue PUTX") { // write the (possibly dirty) block back to the directory
+    enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTX;
+      out_msg.RequestorMachId := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+      DEBUG_EXPR(out_msg.Address);
+      DEBUG_EXPR(out_msg.Destination);
+      DEBUG_EXPR(out_msg.DataBlk);
+      out_msg.MessageSize := MessageSizeType:Data;
+      out_msg.L1CacheStateStr := "NA"; // no L1 involved in an L2-initiated writeback
+      out_msg.L2CacheStateStr := getStateStr(address);
+    }
+  }
+
+  action(f_issueGETINSTR, "f", desc="Issue GETINSTR") { // instruction-fetch variant of GETS
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GET_INSTR;
+        out_msg.RequestorMachId := machineID;
+        out_msg.Destination.add(map_Address_to_Directory(address));
+        out_msg.MessageSize := MessageSizeType:Control;
+        out_msg.L1CacheStateStr := in_msg.L1CacheStateStr;
+        out_msg.L2CacheStateStr := getStateStr(address);
+      }
+    }
+  }
+
+  // DELAYED RESPONSES - Sourced from a TBE entry
+  // TBE -> L1
+  action(h_issueLoadHit, "h", desc="If not prefetch, notify sequencer the load completed.") {
+    DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+    if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
+      // Non-prefetch
+      // NOTE(review): this branch is reachable when no TBE is present, yet it reads
+      // L2_TBEs[address].L1_GetS_IDs below - confirm TBETable lookup tolerates absent entries
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // could be multiple internal nodes
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    } else {
+      // Prefetch - don't issue hit msg
+    }
+  }
+
+  action(oo_issueLoadHitInv, "\o", desc="If not prefetch, notify sequencer the load completed.") { // DATA_I: recipient L1 must invalidate after use
+    DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+    if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
+      // Non-prefetch
+      // NOTE(review): same TBE-lookup-when-absent hazard as h_issueLoadHit
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_I;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // could be multiple internal nodes
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    } else {
+      // Prefetch - don't issue hit msg
+    }
+
+  }
+
+  action(hh_issueStoreHit, "\h", desc="If not prefetch, issue store hit message to local L1 requestor") {
+    DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+    if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
+      // Non-prefetch
+      // NOTE(review): same TBE-lookup-when-absent hazard as h_issueLoadHit
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // a single node
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    } else {
+      // Prefetch - don't issue hit msg
+    }
+  }
+
+  action(pp_issueStoreHitInv, "\p", desc="If not prefetch, issue store hit message to local L1 requestor") { // DATA_I: recipient L1 must invalidate after the store
+    DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+    if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
+      // Non-prefetch
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_I;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // a single node
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    } else {
+      // Prefetch - don't issue hit msg
+    }
+  }
+
+  action(cc_issueStoreHitDG, "\c", desc="If not prefetch, issue store hit message to local L1 requestor") { // DATA_S: recipient L1 must downgrade to Shared after the store
+    DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+    if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
+      // Non-prefetch
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_S;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // a single node
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    } else {
+      // Prefetch - don't issue hit msg
+    }
+  }
+
+  action(w_sendPutAckToL1Cache, "w", desc="send acknowledgement of an L1 replacement") {
+    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:ACK;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination.add(in_msg.RequestorMachId); // a single node
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.MessageSize := MessageSizeType:Control;
+      }
+    }
+  }
+
+  // TBE -> L1s and L2s
+  action(ee_dataFromL2CacheToGetSIDs, "\e", desc="Send data from cache to all GetS IDs") { // satisfies both queued external and queued internal GETS requestors
+    // FIXME - In some cases this should be from the TBE, not the cache.
+    // may send to other mod-L2s
+    if (L2_TBEs[address].Forward_GetS_IDs.count() > 0) {
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination := L2_TBEs[address].Forward_GetS_IDs; // external nodes
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.NumPendingExtAcks := 0; // shared data needs no acks
+        DEBUG_EXPR(out_msg.Address);
+        DEBUG_EXPR(out_msg.Destination);
+        DEBUG_EXPR(out_msg.DataBlk);
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    }
+    // may send to local L1s
+    if (L2_TBEs[address].L1_GetS_IDs.count() > 0) {
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // internal nodes
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    }
+  }
+
+  // TBE -> L2s only
+  action(bb_dataFromL2CacheToGetSForwardIDs, "\b", desc="Send data from cache to GetS ForwardIDs") {
+    // FIXME - In some cases this should be from the TBE, not the cache.
+    // NOTE(review): the guard also tests L1_GetS_IDs, but only Forward_GetS_IDs ever receive
+    // data here - an empty Forward set with a non-empty L1 set sends to nobody; confirm intended
+    if ((L2_TBEs[address].Forward_GetS_IDs.count() > 0) || (L2_TBEs[address].L1_GetS_IDs.count() > 0)) {
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination := L2_TBEs[address].Forward_GetS_IDs; // external nodes
+        DEBUG_EXPR(out_msg.Destination);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.NumPendingExtAcks := 0;
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    }
+  }
+
+  // TBE -> L2 only
+  action(gg_dataFromL2CacheToGetXForwardID, "\g", desc="Send data from cache to GetX ForwardID") {
+    // FIXME - In some cases this should be from the TBE, not the cache.
+    if (L2_TBEs[address].validForwardedGetXId) { // Forward_GetX_ID is garbage unless this flag was set
+      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA;
+        out_msg.SenderMachId := machineID;
+        out_msg.Destination.add(L2_TBEs[address].Forward_GetX_ID);
+        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+        out_msg.NumPendingExtAcks := L2_TBEs[address].ForwardGetX_AckCount; // recipient must still collect these acks
+        DEBUG_EXPR(out_msg.Address);
+        DEBUG_EXPR(out_msg.Destination);
+        DEBUG_EXPR(out_msg.DataBlk);
+        DEBUG_EXPR(out_msg.NumPendingExtAcks);
+        out_msg.MessageSize := MessageSizeType:Data;
+      }
+    }
+  }
+
+ // IMMEDIATE RESPONSES directly from the ForwardRequest queue
+ // ForwardRequest -> L2
+ // Answer a forwarded request with the cached data block, propagating the
+ // pending-ext-ack count the requestor must wait for.
+ action(e_dataFromL2CacheToL2Requestor, "e", desc="Send data from cache to requestor") {
+ peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.SenderMachId := machineID;
+ out_msg.NumPendingExtAcks := in_msg.NumPendingExtAcks; // Needed when in state O and we see a GetX
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ DEBUG_EXPR(out_msg.Address);
+ DEBUG_EXPR(out_msg.Destination);
+ DEBUG_EXPR(out_msg.DataBlk);
+ DEBUG_EXPR(out_msg.NumPendingExtAcks);
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ // ForwardRequest -> L1
+ // Answer a local L1 request with the cached data block.
+ action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data from cache to L1 requestor") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.SenderMachId := machineID;
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ // OTHER ACTIONS
+ // Allocate a TBE for this address and reset every field to its default so
+ // no stale state from a previous transaction leaks into the new one.
+ action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
+ check_allocate(L2_TBEs);
+ L2_TBEs.allocate(address);
+ L2_TBEs[address].NumPendingIntAcks := 0; // default value
+ L2_TBEs[address].NumPendingExtAcks := 0; // default value
+ L2_TBEs[address].isPrefetch := false;
+ L2_TBEs[address].isThreeHop := false;
+ L2_TBEs[address].Forward_GetS_IDs.clear();
+ L2_TBEs[address].L1_GetS_IDs.clear();
+ L2_TBEs[address].validInvalidator := false;
+ L2_TBEs[address].validForwardedGetXId := false;
+ L2_TBEs[address].isInternalRequestOnly := false;
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
+ L2_TBEs.deallocate(address);
+ }
+
+ // Queue-pop actions; each also records the message's queueing delay for
+ // profiling (first argument selects the profiled virtual network).
+ action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
+ profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
+ }
+
+ action(l_popForwardedRequestQueue, "l", desc="Pop incoming forwarded request queue") {
+ profileMsgDelay(2, forwardedRequestIntraChipL2Network_in.dequeue_getDelayCycles());
+ }
+
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
+ profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
+ }
+
+ // Accumulate the ack count reported by an incoming data response into the
+ // TBE's external-ack counter.
+ action(p_addNumberOfPendingExtAcks, "p", desc="Add number of pending acks to TBE") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
+ L2_TBEs[address].NumPendingExtAcks := L2_TBEs[address].NumPendingExtAcks + in_msg.NumPendingExtAcks;
+ DEBUG_EXPR(in_msg.NumPendingExtAcks);
+ DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
+ }
+ }
+
+ action(q_decrementNumberOfPendingExtAcks, "q", desc="Decrement number of pending ext invalidations by one") {
+ DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
+ L2_TBEs[address].NumPendingExtAcks := L2_TBEs[address].NumPendingExtAcks - 1;
+ DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
+ }
+
+ // Decrement the count of internal (intra-chip L1) invalidation acks still
+ // outstanding for this TBE.
+ action(r_decrementNumberOfPendingIntAcks, "r", desc="Decrement number of pending int invalidations by one") {
+ // Fixed: previously logged NumPendingExtAcks, i.e. the wrong counter for
+ // this action, making the debug trace misleading.
+ DEBUG_EXPR(L2_TBEs[address].NumPendingIntAcks);
+ L2_TBEs[address].NumPendingIntAcks := L2_TBEs[address].NumPendingIntAcks - 1;
+ DEBUG_EXPR(L2_TBEs[address].NumPendingIntAcks);
+ }
+
+ // Acknowledge a directory invalidation immediately, straight from the
+ // forwarded-request queue (no TBE involved).
+ action(t_sendAckToInvalidator, "t", desc="Send ack to invalidator") {
+ peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.SenderMachId := machineID;
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.NumPendingExtAcks := 0;
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ // Install the data block carried by an incoming response into the L2 entry.
+ action(u_writeDataFromResponseQueueToL2Cache, "u", desc="Write data from response queue to cache") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ // FIXME - probably need to change this to a separate low priority request queue
+ // Install the data block carried by an L1 request (e.g. a PUTX writeback)
+ // into the L2 entry.
+ // Fixed: desc previously said "response queue" (copy-paste from the
+ // u_ action) although this action peeks the L1 request queue.
+ action(m_writeDataFromRequestQueueToL2Cache, "m", desc="Write data from request queue to cache") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ // Snapshot the cache data into the TBE so it survives block deallocation
+ // (used on the writeback path).
+ action(x_copyDataFromL2CacheToTBE, "x", desc="Copy data from cache to TBE") {
+ L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
+ }
+
+ // Answer a forwarded request from the TBE copy of the data (the cache
+ // entry may already be deallocated at this point).
+ action(y_dataFromTBEToRequestor, "y", desc="Send data from TBE to requestor") {
+ peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.SenderMachId := machineID;
+ out_msg.NumPendingExtAcks := in_msg.NumPendingExtAcks;
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ out_msg.DataBlk := L2_TBEs[address].DataBlk;
+ DEBUG_EXPR(out_msg.Address);
+ DEBUG_EXPR(out_msg.Destination);
+ DEBUG_EXPR(out_msg.DataBlk);
+ DEBUG_EXPR(out_msg.NumPendingExtAcks);
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ // Acknowledge an invalidator whose identity was queued in the TBE earlier
+ // (see yy_recordInvalidatorID); no-op if none was recorded.
+ action(zz_sendAckToQueuedInvalidator, "\z", desc="Send ack to invalidator") {
+ if (L2_TBEs[address].validInvalidator) {
+ enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.SenderMachId := machineID;
+ out_msg.Destination.add(L2_TBEs[address].InvalidatorID);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.NumPendingExtAcks := 0;
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ // Deliberate no-op: leaves the triggering message at the head of its queue
+ // so the request is retried later.
+ action(z_stall, "z", desc="Stall") {
+ }
+
+ // Remember who invalidated us so zz_ can ack them once internal acks are in.
+ action(yy_recordInvalidatorID, "\y", desc="Record Invalidator for future response") {
+ peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].InvalidatorID := in_msg.RequestorMachId;
+ L2_TBEs[address].validInvalidator := true;
+ }
+ }
+
+ // Queue an external GetS requestor in the TBE for later data forwarding.
+ action(dd_recordGetSForwardID, "\d", desc="Record forwarded GetS for future forwarding") {
+ peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].Forward_GetS_IDs.add(in_msg.RequestorMachId);
+ }
+ }
+
+ // Queue a local L1 GetS requestor in the TBE for the eventual load response.
+ action(ss_recordGetSL1ID, "\s", desc="Record forwarded L1 GetS for load response") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].L1_GetS_IDs.add(in_msg.RequestorMachId);
+ }
+ }
+
+ // Record the (single) external GetX requestor plus the ack count it must
+ // be told to expect.
+ action(ii_recordGetXForwardID, "\i", desc="Record forwarded GetX and ack count for future forwarding") {
+ peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].Forward_GetX_ID := in_msg.RequestorMachId;
+ L2_TBEs[address].ForwardGetX_AckCount := in_msg.NumPendingExtAcks;
+ L2_TBEs[address].validForwardedGetXId := true;
+ }
+ }
+
+ // Record the local L1 GetX requestor for the eventual store response.
+ action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2_TBEs[address].L1_GetX_ID := in_msg.RequestorMachId;
+ }
+ }
+
+ action(set_setMRU, "\set", desc="set the MRU entry") {
+ L2cacheMemory.setMRU(address);
+ }
+
+ // Three variants for initializing the internal-ack counter, depending on
+ // whether the requestor itself is among the sharers being invalidated.
+ action(bbb_setPendingIntAcksToSharers, "\bb", desc="Set number of pending acks equal to number of sharers") {
+ L2_TBEs[address].NumPendingIntAcks := L2cacheMemory[address].Sharers.count();
+ }
+
+ action(ddd_setPendingIntAcksToOne, "\dd", desc="Set number of pending acks equal to one") {
+ L2_TBEs[address].NumPendingIntAcks := 1;
+ }
+
+ action(ccc_setPendingIntAcksMinusOne, "\cc", desc="Set number of pending acks equal to number of sharers minus one") {
+ L2_TBEs[address].NumPendingIntAcks := L2cacheMemory[address].Sharers.count() - 1;
+ }
+
+ // Allocate an L2 entry for this address unless one already exists
+ // (allocation is idempotent here).
+ action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
+ if (L2cacheMemory.isTagPresent(address) == false) {
+ L2cacheMemory.allocate(address);
+ }
+ }
+
+ action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ L2cacheMemory.deallocate(address);
+ }
+
+ // Profile a demand miss that will be visible to the directory.
+ action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, L1CacheMachIDToProcessorNum(in_msg.RequestorMachId));
+ }
+ }
+
+ action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
+ }
+ }
+
+ // Invalidate the (single) L1 M-copy; Sharers tracks that owner here.
+ action(v_issueInvalidateIntL1copyRequest, "v", desc="invalidate the L1 M copy") {
+ enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination := L2cacheMemory[address].Sharers;
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ // Invalidate every L1 S-copy currently recorded in the sharer list.
+ action(tt_issueSharedInvalidateIntL1copiesRequest, "\t", desc="invalidate all L1 S copies") {
+ enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV_S;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination := L2cacheMemory[address].Sharers;
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ // Invalidate all L1 copies except the requestor's own; skipped entirely
+ // when the requestor is the sole sharer (nothing else to invalidate).
+ action(vv_issueInvalidateOtherIntL1copiesRequest, "\v", desc="invalidate other L1 copies not the local requestor") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ if ((L2cacheMemory[address].Sharers.count() > 1) || (L2cacheMemory[address].Sharers.isElement(in_msg.RequestorMachId) != true)) {
+ enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV_S;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination := L2cacheMemory[address].Sharers;
+ out_msg.Destination.remove(in_msg.RequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+ }
+
+ // Ask the L1 owner to downgrade (M -> S) rather than invalidate.
+ action(g_issueDownGradeIntL1copiesRequest, "g", desc="DownGrade L1 copy") {
+ enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:L1_DG;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination := L2cacheMemory[address].Sharers;
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ // Sharer-list maintenance helpers.
+ action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ addSharer(address, in_msg.RequestorMachId);
+ }
+ }
+
+ action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2cacheMemory[address].Sharers.remove(in_msg.RequestorMachId);
+ }
+ }
+
+ action(aa_removeResponseSharer, "\a", desc="Remove L1 Response sharer from list") {
+ peek(responseIntraChipL2Network_in, ResponseMsg) {
+ L2cacheMemory[address].Sharers.remove(in_msg.SenderMachId);
+ }
+ }
+
+ action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
+ peek(L1RequestIntraChipL2Network_in, RequestMsg) {
+ L2cacheMemory[address].Sharers.clear();
+ }
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ //===============================================
+ // STALLS
+
+ // Stalls L2 Replacement and L1 PUT for all transient states
+ transition({L2_IS, L2_ISZ, L2_ISI, L2_IMV, L2_MV, L2_IM, L2_IMO, L2_IMI, L2_IMZ, L2_IMOI, L2_IMOZ,
+ L2_SIV, L2_SIC,
+ L2_MIV, L2_MIN, L2_MIC, L2_MIT, L2_MO, L2_MOIC, L2_MOICR, L2_MOZ,
+ L2_OIV, L2_OIN, L2_OIC, L2_OMV, L2_OM},
+ {L2_Replacement, L1_PUTX, L1_PUTX_last, L1_PUTS, L1_PUTS_last, L1_PUTX_old, L1_PUTS_old, }) {
+ z_stall;
+ }
+
+ //===============================================
+ // old L1_PUT requests
+
+ // Stale PUTs (from an earlier epoch of ownership) are simply acked.
+ transition({L2_NP, L2_I, L2_S, L2_SS, L2_M, L2_MT, L2_O, L2_SO}, {L1_PUTX_old, L1_PUTS_old}) {
+ w_sendPutAckToL1Cache;
+ jj_popL1RequestQueue;
+ }
+
+ //===============================================
+ // BASE STATE - I
+
+ // Transitions from I (Idle)
+ transition({L2_NP,L2_I}, L2_Replacement) {
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition({L2_NP,L2_I}, L2_INV) { // could see an invalidate from the directory, but not Forwards
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ // Local L1 GetS misses: allocate block + TBE, record the requestor, and
+ // issue the external GETS/GETINSTR.
+ transition({L2_NP,L2_I}, L1_GETS, L2_IS) {
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ nn_addSharer;
+ i_allocateTBE;
+ ss_recordGetSL1ID;
+ a_issueGETS;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition({L2_NP,L2_I}, L1_GET_INSTR, L2_IS) {
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ nn_addSharer;
+ i_allocateTBE;
+ ss_recordGetSL1ID;
+ f_issueGETINSTR;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition({L2_NP,L2_I}, {L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}, L2_IM) { // UPGRADE possible because L2_Replacement have higher priority
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ nn_addSharer;
+ i_allocateTBE;
+ xx_recordGetXL1ID;
+ b_issueGETX;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ // Transitions from L2_IS
+ // could see L2_INVs or more L1 requests
+ transition(L2_IS, L2_INV, L2_ISI) { // could see an invalidate from the directory, but not Forwards
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_IS, Data_ext_ack_0, L2_SS) {
+ u_writeDataFromResponseQueueToL2Cache;
+ h_issueLoadHit;
+ c_finalAckToDirIfNeeded;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Additional local readers can piggyback on the outstanding GetS.
+ transition(L2_IS, {L1_GETS,L1_GET_INSTR}) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ ss_recordGetSL1ID;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_IS, L1_GETX, L2_ISZ) { // don't go there, just go to stall state
+ z_stall;
+ }
+
+ // Transitions from L2_ISZ
+ // could see L2_INVs or more L1 requests
+ // stall all L1 requests, wait for data
+ transition(L2_ISZ, L2_INV, L2_ISI) { // could see an invalidate from the directory, but not Forwards
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_ISZ, Data_ext_ack_0, L2_SS) {
+ u_writeDataFromResponseQueueToL2Cache;
+ h_issueLoadHit;
+ c_finalAckToDirIfNeeded;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_ISZ, {L1_GETS, L1_GET_INSTR, L1_GETX}) {
+ z_stall;
+ }
+
+ // Transitions from L2_ISI, already sent the invalidate ack so can immediately go to I
+ // - in ISI, could get data from the Proc whose GETX caused INV to go from IS to ISI
+ // or, could get data from Dir if Dir's data lost race to Dir's INV
+ // or, could get data from Dir, if my GETS took forever to get to Dir, and the GETX
+ // processor already wrote it back
+ transition(L2_ISI, Data_ext_ack_0, L2_I) {
+ u_writeDataFromResponseQueueToL2Cache;
+ oo_issueLoadHitInv;
+ c_finalAckToDirIfNeeded;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_ISI, L2_INV) { // could see an invalidate from the directory, but not Forwards
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_ISI, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
+ z_stall;
+ }
+
+ // Transitions from L2_IMV, waiting for int_acks
+ // currently stall all request
+ // could see forwards and/or more L1 requests
+ transition(L2_IMV, L2_INV) { // could see an invalidate for SS
+ yy_recordInvalidatorID;
+ l_popForwardedRequestQueue;
+ }
+
+ // stall all Forwarded request
+ transition(L2_IMV, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX}) {
+ z_stall;
+ }
+
+ // stall all L1 request
+ transition(L2_IMV, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
+ z_stall;
+ }
+
+ // Data arrived with all external acks accounted for: still owe int acks.
+ transition(L2_IMV, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MV) {
+ u_writeDataFromResponseQueueToL2Cache;
+ c_finalAckToDirIfNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMV, Data_ext_ack_not_0) {
+ u_writeDataFromResponseQueueToL2Cache;
+ p_addNumberOfPendingExtAcks;
+ mm_rememberIfFinalAckNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMV, Proc_ext_ack) {
+ q_decrementNumberOfPendingExtAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMV, Proc_last_ext_ack, L2_MV) {
+ n_sendFinalAckIfThreeHop;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMV, Proc_int_ack) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ // Last internal ack before external data: ack a queued invalidator if any.
+ transition(L2_IMV, Proc_last_int_ack, L2_IM) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ o_popIncomingResponseQueue;
+ zz_sendAckToQueuedInvalidator;
+ }
+
+ // Transitions from L2_MV, waiting for int_acks
+ // external world gave us write permission
+
+ // stall all Forwarded request
+ transition(L2_MV, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX}) {
+ z_stall;
+ }
+
+ // stall all L1 request
+ transition(L2_MV, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
+ z_stall;
+ }
+
+ transition(L2_MV, Proc_int_ack) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_MV, Proc_last_int_ack, L2_MT) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ hh_issueStoreHit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from L2_IM, waiting for external data before going to MT state
+ // could see forwards and/or more L1 requests
+ transition(L2_IM, L2_INV) { // could see an invalidate from the directory (earlier epoch)
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_IM, {Forwarded_GETS,Forwarded_GET_INSTR}, L2_IMO) { // could see Forwards, if directory responses get out-of-order
+ dd_recordGetSForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_IM, {L1_GETS,L1_GET_INSTR}, L2_IMO) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ ss_recordGetSL1ID;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_IM, Forwarded_GETX, L2_IMI) { // could see Forwards, if directory requests get ahead of responses
+ ii_recordGetXForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_IM, L1_GETX, L2_IMZ) { // don't go there, just go to stall state
+ z_stall;
+ }
+
+ transition(L2_IM, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MT) {
+ u_writeDataFromResponseQueueToL2Cache;
+ hh_issueStoreHit;
+ c_finalAckToDirIfNeeded;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IM, Data_ext_ack_not_0) {
+ u_writeDataFromResponseQueueToL2Cache;
+ p_addNumberOfPendingExtAcks;
+ mm_rememberIfFinalAckNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IM, Proc_ext_ack) {
+ q_decrementNumberOfPendingExtAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IM, Proc_last_ext_ack, L2_MT) {
+ hh_issueStoreHit;
+ n_sendFinalAckIfThreeHop;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from L2_IMO
+ // like L2_IM, but one or more GetS requestors (local or forwarded) are
+ // queued and will be serviced once the data arrives.
+ transition(L2_IMO, L2_INV) { // could see an invalidate from the directory (earlier epoch)
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_IMO, {Forwarded_GETS,Forwarded_GET_INSTR}) { // could see Forwards
+ dd_recordGetSForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_IMO, Forwarded_GETX, L2_IMOI) { // could see Forwards
+ ii_recordGetXForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_IMO, {L1_GETS,L1_GET_INSTR}) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ ss_recordGetSL1ID;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_IMO, L1_GETX, L2_IMOZ) {
+ z_stall;
+ }
+
+ transition(L2_IMO, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MO) {
+ u_writeDataFromResponseQueueToL2Cache;
+ cc_issueStoreHitDG;
+ ddd_setPendingIntAcksToOne;
+ c_finalAckToDirIfNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMO, Data_ext_ack_not_0) {
+ u_writeDataFromResponseQueueToL2Cache;
+ p_addNumberOfPendingExtAcks;
+ mm_rememberIfFinalAckNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMO, Proc_ext_ack) {
+ q_decrementNumberOfPendingExtAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMO, Proc_last_ext_ack, L2_MO) {
+ n_sendFinalAckIfThreeHop;
+ cc_issueStoreHitDG;
+ ddd_setPendingIntAcksToOne;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from L2_IMI
+ // the directory put us in this state so it should tell us nothing (i.e. don't worry about INV or Forwards)
+ // stall all L1 request
+ transition(L2_IMI, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MIC) {
+ u_writeDataFromResponseQueueToL2Cache;
+ pp_issueStoreHitInv;
+ ddd_setPendingIntAcksToOne;
+ c_finalAckToDirIfNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMI, Data_ext_ack_not_0) {
+ u_writeDataFromResponseQueueToL2Cache;
+ p_addNumberOfPendingExtAcks;
+ mm_rememberIfFinalAckNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMI, Proc_ext_ack) {
+ q_decrementNumberOfPendingExtAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMI, Proc_last_ext_ack, L2_MIC) {
+ n_sendFinalAckIfThreeHop;
+ pp_issueStoreHitInv;
+ ddd_setPendingIntAcksToOne;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMI, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
+ z_stall;
+ }
+
+ // transitions from L2_IMZ
+ // just wait for all acks and data
+ // stall on all requests
+ // NOTE: A performance option might be possible to go into M state instead of MT
+ transition(L2_IMZ, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MT) {
+ u_writeDataFromResponseQueueToL2Cache;
+ hh_issueStoreHit;
+ c_finalAckToDirIfNeeded;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMZ, Data_ext_ack_not_0) {
+ u_writeDataFromResponseQueueToL2Cache;
+ p_addNumberOfPendingExtAcks;
+ mm_rememberIfFinalAckNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMZ, Proc_ext_ack) {
+ q_decrementNumberOfPendingExtAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMZ, Proc_last_ext_ack, L2_MT) {
+ hh_issueStoreHit;
+ n_sendFinalAckIfThreeHop;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMZ, L2_INV) { // could see an invalidate from the directory (earlier epoch)
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_IMZ, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX}) {
+ z_stall;
+ }
+
+ // transitions from L2_IMOI
+ // the directory put us in this state so it should tell us nothing (i.e. don't worry about INV or Forwards)
+ // stall all L1 requests
+ transition(L2_IMOI, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MOICR) {
+ u_writeDataFromResponseQueueToL2Cache;
+ pp_issueStoreHitInv;
+ ddd_setPendingIntAcksToOne;
+ c_finalAckToDirIfNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMOI, Data_ext_ack_not_0) {
+ u_writeDataFromResponseQueueToL2Cache;
+ p_addNumberOfPendingExtAcks;
+ mm_rememberIfFinalAckNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMOI, Proc_ext_ack) {
+ q_decrementNumberOfPendingExtAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMOI, Proc_last_ext_ack, L2_MOICR) {
+ n_sendFinalAckIfThreeHop;
+ pp_issueStoreHitInv;
+ ddd_setPendingIntAcksToOne;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMOI, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
+ z_stall;
+ }
+
+ // transitions from L2_IMOZ
+ // just wait for all acks and data
+ // stall on all requests
+ transition(L2_IMOZ, L2_INV) { // could see an invalidate from the directory (earlier epoch)
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_IMOZ, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MOZ) {
+ u_writeDataFromResponseQueueToL2Cache;
+ cc_issueStoreHitDG;
+ ddd_setPendingIntAcksToOne;
+ c_finalAckToDirIfNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMOZ, Data_ext_ack_not_0) {
+ u_writeDataFromResponseQueueToL2Cache;
+ p_addNumberOfPendingExtAcks;
+ mm_rememberIfFinalAckNeeded;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMOZ, Proc_ext_ack) {
+ q_decrementNumberOfPendingExtAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_IMOZ, Proc_last_ext_ack, L2_MOZ) {
+ cc_issueStoreHitDG;
+ ddd_setPendingIntAcksToOne;
+ n_sendFinalAckIfThreeHop;
+ o_popIncomingResponseQueue;
+ }
+
+ // stall on all requests
+ transition(L2_IMOZ, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX}) {
+ z_stall;
+ }
+
+ // ===============================================
+ // BASE STATE - S
+ // Transitions from S, no L1 copies
+ transition(L2_S, L2_Replacement, L2_I) {
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(L2_S, L2_INV, L2_I) { // could see an invalidate from the directory, but not Forwards
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_S, {L1_GETS, L1_GET_INSTR}, L2_SS) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ k_dataFromL2CacheToL1Requestor;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_S, L1_GETX, L2_IM) {
+ set_setMRU;
+ nn_addSharer;
+ i_allocateTBE;
+ xx_recordGetXL1ID;
+ b_issueGETX;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ // BASE STATE - SS
+ // Transitions from SS, L1 copies
+ transition(L2_SS, L2_Replacement, L2_SIV) {
+ i_allocateTBE; // for internal request
+ bbb_setPendingIntAcksToSharers;
+ tt_issueSharedInvalidateIntL1copiesRequest;
+ }
+
+ transition(L2_SS, L2_INV, L2_SIC) {
+ i_allocateTBE; // for internal request
+ yy_recordInvalidatorID;
+ bbb_setPendingIntAcksToSharers;
+ tt_issueSharedInvalidateIntL1copiesRequest;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_SS, {L1_GETS, L1_GET_INSTR}) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ k_dataFromL2CacheToL1Requestor;
+ jj_popL1RequestQueue;
+ }
+
+ // Requestor is the only sharer: no internal invalidations needed.
+ transition(L2_SS, L1_UPGRADE_no_others, L2_IM) {
+ set_setMRU;
+ i_allocateTBE; // for both ext. and int.
+ xx_recordGetXL1ID;
+ b_issueGETX; // for external
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ // Other sharers exist: invalidate them internally while the external
+ // GETX is in flight (requestor itself keeps its copy, hence minus one).
+ transition(L2_SS, L1_UPGRADE, L2_IMV) {
+ set_setMRU;
+ i_allocateTBE; // for both ext. and int.
+ xx_recordGetXL1ID;
+ ccc_setPendingIntAcksMinusOne;
+ vv_issueInvalidateOtherIntL1copiesRequest; // for internal
+ b_issueGETX; // for external
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_SS, L1_GETX, L2_IMV) {
+ set_setMRU;
+ i_allocateTBE; // for both ext. and int.
+ xx_recordGetXL1ID;
+ bbb_setPendingIntAcksToSharers;
+ vv_issueInvalidateOtherIntL1copiesRequest; // for internal
+ nn_addSharer;
+ b_issueGETX; // for external
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_SS, L1_PUTS) {
+ ww_profileMissNoDir;
+ w_sendPutAckToL1Cache;
+ kk_removeRequestSharer;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_SS, L1_PUTS_last, L2_S) {
+ ww_profileMissNoDir;
+ w_sendPutAckToL1Cache;
+ kk_removeRequestSharer;
+ jj_popL1RequestQueue;
+ }
+
+ // Transitions from SIC - Initiated by an invalidate
+ transition(L2_SIC, Proc_int_ack) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_SIC, Proc_last_int_ack, L2_I) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ o_popIncomingResponseQueue;
+ zz_sendAckToQueuedInvalidator;
+ s_deallocateTBE;
+ }
+
+ transition(L2_SIC, L2_INV) { // could see an invalidate from the directory, but not Forwards
+ l_popForwardedRequestQueue; // ignore: already know an ack must be sent to the directory
+ }
+
+ transition(L2_SIC, {L1_GETS, L1_GET_INSTR, L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX}) { // stall on all L1 requests
+ z_stall;
+ }
+
+ // Transitions from SIV - initiated by a L2_Replacement
+ transition(L2_SIV, Proc_int_ack) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_SIV, Proc_last_int_ack, L2_I) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ o_popIncomingResponseQueue;
+ s_deallocateTBE;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(L2_SIV, L2_INV) { // could see an invalidate from the directory, but not Forwards
+ z_stall; // guaranteed to receive all acks thus moving the state to I where the L2_INV can be handled
+ }
+
+ transition(L2_SIV, {L1_GETS, L1_GET_INSTR, L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX}) { // stall on all L1 requests
+ z_stall;
+ }
+
+ // ===============================================
+ // BASE STATE - M
+ // Transitions from M, no L1 copies
+ // Replacement writes the dirty block back: snapshot data into the TBE
+ // before freeing the cache entry.
+ transition(L2_M, L2_Replacement, L2_MIN) {
+ i_allocateTBE;
+ d_issuePUTX;
+ x_copyDataFromL2CacheToTBE;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(L2_M, {Forwarded_GETS,Forwarded_GET_INSTR}, L2_O) { // can see forwards, not inv
+ e_dataFromL2CacheToL2Requestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_M, Forwarded_GETX, L2_I) { // can see forwards, not inv
+ e_dataFromL2CacheToL2Requestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_M, {L1_GETS, L1_GET_INSTR}, L2_SO) { // FIXME FOR BETTER PERFORMANCE - an E state would be nice here
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ k_dataFromL2CacheToL1Requestor;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_M, L1_GETX, L2_MT) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ k_dataFromL2CacheToL1Requestor;
+ jj_popL1RequestQueue;
+ }
+
+ // BASE STATE - MT
+ // Transitions from MT, M L1 copy
+ transition(L2_MT, L2_Replacement, L2_MIV) {
+ i_allocateTBE;
+ bbb_setPendingIntAcksToSharers;
+ v_issueInvalidateIntL1copyRequest;
+ }
+
+ // Readers arriving while an L1 holds M: downgrade the owner first.
+ transition(L2_MT, {Forwarded_GETS, Forwarded_GET_INSTR}, L2_MO) { // can see forwards, not inv
+ i_allocateTBE;
+ bbb_setPendingIntAcksToSharers;
+ g_issueDownGradeIntL1copiesRequest;
+ dd_recordGetSForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_MT, {L1_GETS, L1_GET_INSTR}, L2_MO) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ i_allocateTBE;
+ bbb_setPendingIntAcksToSharers;
+ g_issueDownGradeIntL1copiesRequest;
+ ss_recordGetSL1ID;
+ nn_addSharer;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_MT, Forwarded_GETX, L2_MIC) { // can see forwards, not inv
+ i_allocateTBE;
+ bbb_setPendingIntAcksToSharers;
+ v_issueInvalidateIntL1copyRequest;
+ ii_recordGetXForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_MT, L1_GETX, L2_MIT) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ i_allocateTBE;
+ bbb_setPendingIntAcksToSharers;
+ v_issueInvalidateIntL1copyRequest;
+ nn_addSharer;
+ xx_recordGetXL1ID;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_MT, L1_PUTX_last, L2_M) {
+ ww_profileMissNoDir;
+ w_sendPutAckToL1Cache;
+ kk_removeRequestSharer;
+ m_writeDataFromRequestQueueToL2Cache;
+ jj_popL1RequestQueue;
+ }
+
+ // Transitions from L2_MIV, waiting for local L1 response
+ // Victim data arrives from the L1; satisfy any recorded forwards and start the writeback.
+ transition(L2_MIV, Data_int_ack, L2_MIN) {
+ aa_removeResponseSharer;
+ u_writeDataFromResponseQueueToL2Cache;
+ bb_dataFromL2CacheToGetSForwardIDs; // likely won't send any messages
+ gg_dataFromL2CacheToGetXForwardID; // likely won't send any messages
+ d_issuePUTX;
+ x_copyDataFromL2CacheToTBE;
+ rr_deallocateL2CacheBlock;
+ o_popIncomingResponseQueue;
+ }
+
+ // Record forward requestors; they are answered once the L1 data arrives.
+ transition(L2_MIV, {Forwarded_GETS,Forwarded_GET_INSTR}) { // could see Forwards
+ dd_recordGetSForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_MIV, Forwarded_GETX) { // could see Forwards
+ ii_recordGetXForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_MIV, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall on all L1 requests
+ z_stall;
+ }
+
+ // Transitions from L2_MIN, waiting for directory ack
+ // Data now lives in the TBE, so forwards can be serviced from there.
+ transition(L2_MIN, {Forwarded_GETS,Forwarded_GET_INSTR}) { // could see Forwards
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_MIN, Forwarded_GETX) { // could see Forwards
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // Directory acknowledged the writeback; the line is gone from this L2.
+ transition(L2_MIN, Dir_WB_ack, L2_I) {
+ s_deallocateTBE;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_MIN, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
+ z_stall;
+ }
+
+ // Transitions from L2_MIC, waiting for local L1 response
+ // Directory put us in this state with a forwarded GetX
+ // therefore we shouldn't see anymore forwards
+ // we stall on all L1 requests
+ transition(L2_MIC, Data_int_ack, L2_I) {
+ aa_removeResponseSharer;
+ u_writeDataFromResponseQueueToL2Cache;
+ gg_dataFromL2CacheToGetXForwardID;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_MIC, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
+ z_stall;
+ }
+
+ // Transitions from L2_MIT, waiting for local L1 response
+ // A local L1 request put us in this state, so any request are possible
+ // we currently stall all requests because of the ugly recursive path it could lead us on
+ // removing some of the blocking here could have major performance benefits
+ // however one must be careful not to violate cache coherence
+ // Old exclusive copy invalidated: hand the line to the waiting local writer.
+ transition(L2_MIT, Data_int_ack, L2_MT) {
+ aa_removeResponseSharer;
+ u_writeDataFromResponseQueueToL2Cache;
+ hh_issueStoreHit; // internal requestor
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // stall all requests
+ transition(L2_MIT, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX}) {
+ z_stall;
+ }
+
+ // Transition from L2_MO, waiting for local L1 data response
+ // a GetS request put us in this state
+ // we must stall if we get a GETX request
+ transition(L2_MO, Data_int_ack, L2_SO) {
+ u_writeDataFromResponseQueueToL2Cache;
+ ee_dataFromL2CacheToGetSIDs; // could be an internal or external requestor
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_MO, {Forwarded_GETS, Forwarded_GET_INSTR}) { // can see forwards, not inv
+ dd_recordGetSForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ // Additional local readers can be queued up while the downgrade is in flight.
+ transition(L2_MO, {L1_GETS, L1_GET_INSTR}) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ ss_recordGetSL1ID;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_MO, Forwarded_GETX, L2_MOIC) { // can see forwards, not inv
+ ii_recordGetXForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_MO, {L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}, L2_MOZ) { // don't go there, just go to a stall state
+ z_stall;
+ }
+
+ // Transition from L2_MOIC
+ // a Forwarded_GETX put us here so we should not see any more forwards
+ // stall on all L1 requests, once data is received send new data to all queued up L1 shares
+ // then immediately send invalidate request to those new L1 shared copies
+ //
+ // KEY DIFFERENCE: L2_MOICR assumes the L1 data responder moved to I state and removes the sharer,
+ // while L2_MOIC assumes the L1 data responder moved to S state and doesn't remove the sharer
+ transition(L2_MOIC, Data_int_ack, L2_OIC) { // need only one ack
+ u_writeDataFromResponseQueueToL2Cache;
+ ee_dataFromL2CacheToGetSIDs;
+ bbb_setPendingIntAcksToSharers;
+ tt_issueSharedInvalidateIntL1copiesRequest;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_MOIC, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
+ z_stall;
+ }
+
+ // Transition from L2_MOICR
+ // a Forwarded_GETX put us here so we should not see any more forwards
+ // stall on all L1 requests, once data is received send new data to all queued up L1 shares
+ // then immediately send invalidate request to those new L1 shared copies
+ //
+ // KEY DIFFERENCE: L2_MOICR assumes the L1 data responder moved to I state and removes the sharer,
+ // while L2_MOIC assumes the L1 data responder moved to S state and doesn't remove the sharer
+ transition(L2_MOICR, Data_int_ack, L2_OIC) { // need only one ack
+ aa_removeResponseSharer;
+ u_writeDataFromResponseQueueToL2Cache;
+ ee_dataFromL2CacheToGetSIDs;
+ bbb_setPendingIntAcksToSharers;
+ tt_issueSharedInvalidateIntL1copiesRequest;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_MOICR, {L1_GETS, L1_GET_INSTR, L1_GETX}) {
+ z_stall;
+ }
+
+ // L2_MOZ
+ // simply wait on data
+ // stall on everything
+ transition(L2_MOZ, Data_int_ack, L2_SO) {
+ u_writeDataFromResponseQueueToL2Cache;
+ ee_dataFromL2CacheToGetSIDs; // could be an internal or external requestor
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // stall everything
+ transition(L2_MOZ, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
+ z_stall;
+ }
+
+ // ===============================================
+ // BASE STATE - O
+ // Transitions from L2_O, only block cached on the chip
+ // Replacement: no L1 copies to collect, write the owned data back to the directory.
+ transition(L2_O, L2_Replacement, L2_OIN){
+ i_allocateTBE;
+ x_copyDataFromL2CacheToTBE;
+ d_issuePUTX;
+ rr_deallocateL2CacheBlock;
+ }
+
+ // Owner supplies data to external readers; ownership is retained.
+ transition(L2_O, {Forwarded_GETS,Forwarded_GET_INSTR}) {
+ e_dataFromL2CacheToL2Requestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // External writer takes the line; this L2 drops to invalid.
+ transition(L2_O, Forwarded_GETX, L2_I) {
+ e_dataFromL2CacheToL2Requestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_O, {L1_GETS, L1_GET_INSTR}, L2_SO) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ k_dataFromL2CacheToL1Requestor;
+ jj_popL1RequestQueue;
+ }
+
+ // Local writer: must go to the directory for exclusive permission.
+ transition(L2_O, L1_GETX, L2_OM) {
+ set_setMRU;
+ nn_addSharer;
+ i_allocateTBE;
+ xx_recordGetXL1ID;
+ b_issueGETX;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ // BASE STATE - SO
+ // Transitions from L2_SO, other valid L1 cached copies
+ // Replacement must first invalidate all local L1 sharers.
+ transition(L2_SO, L2_Replacement, L2_OIV){
+ i_allocateTBE;
+ x_copyDataFromL2CacheToTBE;
+ bbb_setPendingIntAcksToSharers;
+ tt_issueSharedInvalidateIntL1copiesRequest;
+ }
+
+ transition(L2_SO, {Forwarded_GETS,Forwarded_GET_INSTR}) {
+ e_dataFromL2CacheToL2Requestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // External writer: send data, then invalidate local L1 sharers before completing.
+ transition(L2_SO, Forwarded_GETX, L2_OIC) {
+ i_allocateTBE;
+ bbb_setPendingIntAcksToSharers;
+ ii_recordGetXForwardID;
+ tt_issueSharedInvalidateIntL1copiesRequest;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_SO, {L1_GETS, L1_GET_INSTR}) {
+ set_setMRU;
+ ww_profileMissNoDir;
+ nn_addSharer;
+ k_dataFromL2CacheToL1Requestor;
+ jj_popL1RequestQueue;
+ }
+
+ // Upgrading sharer: invalidate the other local copies and request external permission.
+ transition(L2_SO, L1_UPGRADE, L2_OMV) {
+ set_setMRU;
+ nn_addSharer;
+ i_allocateTBE;
+ xx_recordGetXL1ID;
+ ccc_setPendingIntAcksMinusOne;
+ vv_issueInvalidateOtherIntL1copiesRequest; // for internal
+ b_issueGETX; // for external
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ // Upgrading sharer with no other local copies: only external permission needed.
+ transition(L2_SO, L1_UPGRADE_no_others, L2_OM) {
+ set_setMRU;
+ i_allocateTBE;
+ xx_recordGetXL1ID;
+ b_issueGETX; // for external
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_SO, L1_GETX, L2_OMV) {
+ set_setMRU;
+ i_allocateTBE;
+ xx_recordGetXL1ID;
+ bbb_setPendingIntAcksToSharers;
+ vv_issueInvalidateOtherIntL1copiesRequest;
+ nn_addSharer;
+ b_issueGETX; // for external
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_SO, {L1_PUTS, L1_PUTX}) { // PUTX possible because L2 downgraded before seeing PUTX
+ ww_profileMissNoDir;
+ w_sendPutAckToL1Cache;
+ kk_removeRequestSharer;
+ jj_popL1RequestQueue;
+ }
+
+ transition(L2_SO, {L1_PUTS_last, L1_PUTX_last}, L2_O) { // PUTX possible because L2 downgraded before seeing PUTX
+ ww_profileMissNoDir;
+ w_sendPutAckToL1Cache;
+ kk_removeRequestSharer;
+ jj_popL1RequestQueue;
+ }
+
+ // Transitions from L2_OIV
+ // L2 replacement put us here, we must stall all L1 requests
+ // Forwarded reads are serviced from the TBE copy made at replacement time.
+ transition(L2_OIV, {Forwarded_GETS, Forwarded_GET_INSTR}) {
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_OIV, Forwarded_GETX) {
+ z_stall;
+ }
+
+ // Collect invalidation acks from the local L1 sharers.
+ transition(L2_OIV, Proc_int_ack) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks; // fixed: action was missing its ';' terminator
+ o_popIncomingResponseQueue;
+ }
+
+ // Last internal ack: all L1 copies gone, issue the writeback to the directory.
+ transition(L2_OIV, Proc_last_int_ack, L2_OIN) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks; // fixed: action was missing its ';' terminator
+ o_popIncomingResponseQueue;
+ d_issuePUTX;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(L2_OIV, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX, L1_GETS, L1_GET_INSTR}) { // stall L1 requests
+ z_stall;
+ }
+
+ // transitions from L2_OIN
+ // L2 replacement put us here, we must stall all L1 requests
+ // Data is safely in the TBE, so any forward can be answered from it.
+ transition(L2_OIN, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX}) {
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // Writeback acknowledged: the replacement is complete.
+ transition(L2_OIN, Dir_WB_ack, L2_I) {
+ s_deallocateTBE;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_OIN, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX, L1_GETS, L1_GET_INSTR}) { // stall L1 requests
+ z_stall;
+ }
+
+ // transitions from L2_OIC
+ // directory put us in this state, should not see any forwards
+ // we must stall all L1 requests
+ // Collect invalidation acks from the local L1 sharers.
+ transition(L2_OIC, Proc_int_ack) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks; // fixed: action was missing its ';' terminator
+ o_popIncomingResponseQueue;
+ }
+
+ // Last internal ack: forward the data to the external GetX requestor and drop to I.
+ transition(L2_OIC, Proc_last_int_ack, L2_I) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks; // fixed: action was missing its ';' terminator
+ gg_dataFromL2CacheToGetXForwardID;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_OIC, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX, L1_GETS, L1_GET_INSTR}) { // stall L1 requests
+ z_stall;
+ }
+
+ // Transitions from L2_OMV,
+ // int_acks needed
+ // waiting to see our Forwarded GETX from the directory
+ // if we see the Forwarded GETX before all invalidates received, stall
+ // stall all L1 requests
+ transition(L2_OMV, Proc_int_ack) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ // All local invalidates collected; now only the directory response is outstanding.
+ transition(L2_OMV, Proc_last_int_ack, L2_OM) {
+ aa_removeResponseSharer;
+ r_decrementNumberOfPendingIntAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_OMV, Proc_ext_ack) {
+ q_decrementNumberOfPendingExtAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_OMV, {Forwarded_GETS, Forwarded_GET_INSTR}) { // these are GetS that beat us to the directory
+ e_dataFromL2CacheToL2Requestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_OMV, Dir_exe_ack, L2_MV) {
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_OMV, Forwarded_GETX) { // the Forwarded GetX may or may not be ours, we can't respond until int_acks received
+ z_stall;
+ }
+
+ transition(L2_OMV, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETS, L1_GET_INSTR, L1_GETX}) { // must stall all L1 requests
+ z_stall;
+ }
+
+ // Transitions from L2_OM,
+ // all L1 copies invalid, no int_acks needed
+ // waiting to see our Forwarded GETX from the directory
+ // once we see the Forwarded GETX, we can move to IM and wait for the data_ack
+ // stall all L1 requests
+ transition(L2_OM, Proc_ext_ack) {
+ q_decrementNumberOfPendingExtAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(L2_OM, {Forwarded_GETS, Forwarded_GET_INSTR}) { // these are GetS that beat us to the directory
+ e_dataFromL2CacheToL2Requestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_OM, Forwarded_GETX, L2_IM) { // the Forwarded GetX may or may not be ours
+ e_dataFromL2CacheToL2Requestor; // we're probably sending a message to ourselves here, but not guaranteed
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_OM, Dir_exe_ack, L2_MT) { // Directory tells us we already have an exclusive copy
+ hh_issueStoreHit;
+ s_deallocateTBE;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(L2_OM, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETS, L1_GET_INSTR, L1_GETX}) { // must stall all L1 requests
+ z_stall;
+ }
+
+}
diff --git a/src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm b/src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm
new file mode 100644
index 000000000..435bf0eff
--- /dev/null
+++ b/src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm
@@ -0,0 +1,497 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+machine(Directory, "MOSI Directory Optimized") {
+
+ // ** OUT QUEUES **
+ // Virtual networks 0/1/4 outbound are unused by the directory; only declared
+ // so every machine exposes the same set of networks.
+ MessageBuffer dummyFrom0, network="To", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer dummyFrom1, network="To", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
+ // Dir -> mod-L2 bank - Must be true for the 'opt' and 'GS' protocols BE CAREFUL HERE!!!
+ MessageBuffer forwardedRequestFromDir, network="To", virtual_network="2", ordered="true";
+ MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false"; // Dir -> mod-L2 bank
+ MessageBuffer dummyFrom4, network="To", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
+
+ // ** IN QUEUES **
+ // Only vnet 1 (requests) and vnet 4 (final acks) carry real traffic into the directory.
+ MessageBuffer dummyTo0, network="From", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
+ MessageBuffer dummyTo2, network="From", virtual_network="2", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer dummyTo3, network="From", virtual_network="3", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer finalAckToDir, network="From", virtual_network="4", ordered="false"; // a mod-L2 bank -> this Dir
+
+ // STATES
+ enumeration(State, desc="Directory states", default="Directory_State_NP") {
+ // Base states
+ NP, desc="Not present";
+ I, desc="Idle";
+ S, desc="Shared";
+ O, desc="Owned";
+ M, desc="Modified", format="!b";
+ // Transient states: a request has been forwarded to the owner and the
+ // directory waits for the requestor's FinalAck before accepting new requests.
+ OO, desc="transient state of O->GetS/GetInstr->O";
+ OM, desc="transient state of O->GetX->M";
+ MO, desc="transient state of M->GetS/GetInstr->O";
+ MM, desc="transient state of M->GetX->M";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETS, desc="A GETS arrives";
+ GET_INSTR, desc="";
+ // GETX/PUTX are split on whether the requestor is the recorded owner,
+ // because the two cases take different protocol paths.
+ GETX_Owner, desc="A GETX arrives, requestor is owner";
+ GETX_NotOwner, desc="A GETX arrives, requestor is not owner";
+ PUTX_Owner, "PUTX (requestor is owner)", desc="A PUTX arrives, requestor is owner";
+ PUTX_NotOwner, "PUTX (requestor not owner)",desc="A PUTX arrives, requestor is not owner";
+ FinalAck, desc="";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ State DirectoryState, desc="Directory state";
+ Set Sharers, desc="Set of sharers - must be L2 caches"; // Note this is a Set and not a NetDest for space concerns
+ bool DirOwner, default="true", desc="Is dir owner?";
+ NodeID ProcOwner, default="0", desc="Processor owner"; // Note this is an int for space concerns
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // Interface to the backing directory storage, implemented outside SLICC.
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+
+ // Return the directory state for addr; lines never touched report NP.
+ State getState(Address addr) {
+ if (directory.isPresent(addr)) {
+ return directory[addr].DirectoryState;
+ }
+ return State:NP;
+ }
+
+ // Human-readable state name, used by the profiler.
+ string getDirStateStr(Address addr) {
+ return Directory_State_to_string(getState(addr));
+ }
+
+ // Human-readable request-type name, used by the profiler.
+ string getRequestTypeStr(CoherenceRequestType type) {
+ return CoherenceRequestType_to_string(type);
+ }
+
+ // Update the directory state; silently ignored for absent lines.
+ void setState(Address addr, State state) {
+ if (directory.isPresent(addr)) {
+ DEBUG_EXPR(addr);
+ DEBUG_EXPR(directory[addr].DirectoryState);
+ directory[addr].DirectoryState := state;
+ DEBUG_EXPR(directory[addr].DirectoryState);
+ DEBUG_EXPR(state);
+ }
+ }
+
+ // ** OUT_PORTS **
+ // ownRequestQueue_out re-enqueues onto our own input queue (used to recycle
+ // requests that arrive while a transaction is in flight).
+ out_port(forwardedRequestNetwork_out, RequestMsg, forwardedRequestFromDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+ out_port(ownRequestQueue_out, RequestMsg, requestToDir);
+
+ // ** IN_PORTS **
+ // The dummy in_ports only exist to flag misrouted traffic loudly.
+ in_port(dummyTo0_in, RequestMsg, dummyTo0) {
+ if (dummyTo0_in.isReady()) {
+ peek(dummyTo0_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ }
+ error("dummyTo0 port should not be used");
+ }
+ }
+ in_port(dummyTo2_in, RequestMsg, dummyTo2) {
+ if (dummyTo2_in.isReady()) {
+ peek(dummyTo2_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ }
+ error("dummyTo2 port should not be used");
+ }
+ }
+
+ in_port(dummyTo3_in, RequestMsg, dummyTo3) {
+ if (dummyTo3_in.isReady()) {
+ peek(dummyTo3_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ }
+ error("dummyTo3 port should not be used");
+ }
+ }
+
+ // FinalAck closes out a transient (OO/OM/MO/MM) transaction.
+ in_port(finalAckNetwork_in, ResponseMsg, finalAckToDir){
+ if(finalAckNetwork_in.isReady()){
+ peek(finalAckNetwork_in, ResponseMsg){
+ assert(in_msg.Destination.isElement(machineID));
+ if(in_msg.Type == CoherenceResponseType:FINALACK){
+ trigger(Event:FinalAck, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Classify incoming requests; GETX/PUTX are split on whether the requestor
+ // matches the recorded (non-directory) owner.
+ in_port(requestNetwork_in, RequestMsg, requestToDir) {
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+ trigger(Event:GET_INSTR, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ if(directory[in_msg.Address].DirOwner == false &&
+ L2CacheMachIDToChipID(in_msg.RequestorMachId) == directory[in_msg.Address].ProcOwner) {
+ trigger(Event:GETX_Owner, in_msg.Address);
+ } else {
+ trigger(Event:GETX_NotOwner, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ if (directory[in_msg.Address].DirOwner == false &&
+ L2CacheMachIDToChipID(in_msg.RequestorMachId) == directory[in_msg.Address].ProcOwner) {
+ trigger(Event:PUTX_Owner, in_msg.Address);
+ } else {
+ trigger(Event:PUTX_NotOwner, in_msg.Address);
+ }
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+
+ // Actions
+
+ // a_addRequestorToSharers
+
+ action(a_addRequestorToSharers, "a", desc="Add requestor to list of sharers") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[address].Sharers.add(L2CacheMachIDToChipID(in_msg.RequestorMachId));
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+ }
+
+ // b_dataToRequestor
+
+ // Reply with memory data; a GETX reply also carries the number of
+ // invalidation acks the requestor must wait for (one per sharer).
+ action(b_dataToRequestor, "b", desc="Send data to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.SenderMachId := machineID;
+ if(in_msg.Type == CoherenceRequestType:GETX) {
+ DEBUG_EXPR(directory[address].Sharers);
+ DEBUG_EXPR(directory[address].Sharers.count());
+ out_msg.NumPendingExtAcks := directory[address].Sharers.count();
+ } else {
+ out_msg.NumPendingExtAcks := 0; // don't need to send pending ack count to GETS requestor
+ }
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ out_msg.DataBlk := directory[address].DataBlk;
+ DEBUG_EXPR(out_msg.Address);
+ DEBUG_EXPR(out_msg.DataBlk);
+ DEBUG_EXPR(out_msg.NumPendingExtAcks);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ // d_forwardRequestToOwner
+
+ // Memory is stale; relay the request to the owning L2 bank instead.
+ action(d_forwardRequestToOwner, "d", desc="Forward request to owner") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.RequestorMachId := in_msg.RequestorMachId;
+ out_msg.Destination.add(map_L2ChipId_to_L2Cache(out_msg.Address, directory[address].ProcOwner));
+ DEBUG_EXPR(out_msg.Destination);
+
+ if(in_msg.Type == CoherenceRequestType:GETX) {
+ out_msg.NumPendingExtAcks := directory[address].Sharers.count();
+ } else {
+ out_msg.NumPendingExtAcks := 0; // don't need to send pending ack count to GETS requestor
+ }
+ out_msg.MessageSize := MessageSizeType:Control;
+ DEBUG_EXPR(out_msg.Address);
+ DEBUG_EXPR(out_msg.NumPendingExtAcks);
+ DEBUG_EXPR(out_msg.Destination);
+ }
+ }
+ }
+
+ // Record the requestor as the new owner; directory ownership is relinquished.
+ action(f_setOwnerToRequestor, "f", desc="Set owner equal to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[address].ProcOwner := L2CacheMachIDToChipID(in_msg.RequestorMachId);
+ directory[address].DirOwner := false;
+ }
+ DEBUG_EXPR(directory[address].ProcOwner);
+ }
+
+ action(g_clearSharers, "g", desc="Clear list of sharers") {
+ directory[address].Sharers.clear();
+ }
+
+ // currently done via multicast message
+
+ // No-op when the sharer set is empty (nothing to invalidate).
+ action(h_invToSharers, "h", desc="Send INVs to all sharers") {
+ peek(requestNetwork_in, RequestMsg) {
+ DEBUG_EXPR(directory[address].Sharers.count());
+ if(directory[address].Sharers.count() != 0){
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.RequestorMachId := in_msg.RequestorMachId;
+ DEBUG_EXPR(directory[address].Sharers);
+ out_msg.Destination := getMultiStaticL2BankNetDest(address, directory[address].Sharers);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ profileMsgDelay(1, requestNetwork_in.dequeue_getDelayCycles());
+ }
+
+ action(l_writeRequestDataToMemory, "l", desc="Write PUTX/DWN data to memory") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ action(n_writebackAckToRequestor, "n", desc="Send WB_ack to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ // This needs to be DIRECTORY_LATENCY to keep the queue fifo
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ // Tell the owner it already holds the line exclusively (GETX by current owner).
+ action(m_forwardExclusiveRequestToOwner, "m", desc="Send EXE_ack to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ // This needs to be DIRECTORY_LATENCY to keep the queue fifo
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:EXE_ACK;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ action(uu_profile, "u/", desc="Profile this transition.") {
+ peek(requestNetwork_in, RequestMsg) {
+ profile_request(in_msg.L1CacheStateStr, in_msg.L2CacheStateStr, getDirStateStr(address), getRequestTypeStr(in_msg.Type));
+ }
+ }
+
+ action(p_clearOwner, "p", desc="Clear owner") {
+ directory[address].DirOwner := true; // set owner equal to dir
+ }
+
+ action(r_addOwnerToSharers, "r", desc="Add owner to list of sharers") {
+ DEBUG_EXPR(directory[address].ProcOwner);
+ directory[address].Sharers.add(directory[address].ProcOwner);
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+
+ action(t_removeOwnerFromSharers, "t", desc="Remove owner from list of sharers") {
+ DEBUG_EXPR(directory[address].ProcOwner);
+ directory[address].Sharers.remove(directory[address].ProcOwner);
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+
+ action(u_removeRequestorFromSharers, "u", desc="Remove requestor from list of sharers") {
+ peek(requestNetwork_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ directory[address].Sharers.remove(L2CacheMachIDToChipID(in_msg.RequestorMachId));
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+ }
+
+ // Re-enqueue the head request onto our own input queue; used instead of a
+ // plain stall so transient states make forward progress without deadlock.
+ action(x_recycleRequest, "x", desc=""){
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(ownRequestQueue_out, RequestMsg, latency="RECYCLE_LATENCY"){
+ out_msg := in_msg;
+ }
+ }
+ }
+
+ action(hh_popFinalAckQueue, "\h", desc=""){
+ profileMsgDelay(4, finalAckNetwork_in.dequeue_getDelayCycles());
+ }
+
+ //action(z_stall, "z", desc=""){
+ //}
+
+ // TRANSITIONS
+
+ // FinalAck from the requestor resolves the transient state to its base state.
+ transition({OM,MM}, FinalAck, M){
+ hh_popFinalAckQueue;
+ }
+ transition({OO,MO}, FinalAck, O){
+ hh_popFinalAckQueue;
+ }
+
+ // While a transaction is outstanding, recycle (rather than stall) new requests
+ // so the queue keeps draining.
+ transition({OO, OM, MO, MM}, {GETS, GET_INSTR, GETX_Owner, GETX_NotOwner, PUTX_Owner}){
+ x_recycleRequest;
+ j_popIncomingRequestQueue;
+ // z_stall;
+ }
+
+ // ---------------------------
+
+ // A PUTX from a non-owner is a stale writeback; just ack it, state unchanged.
+ transition({NP, I, S, M, O, OO, OM, MO, MM}, PUTX_NotOwner) {
+ uu_profile;
+ n_writebackAckToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ // Transitions from Idle
+ transition({NP,I}, {GETS,GET_INSTR}, S) {
+ uu_profile;
+ a_addRequestorToSharers;
+ b_dataToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ transition({NP,I}, GETX_NotOwner, M) {
+ uu_profile;
+ f_setOwnerToRequestor;
+ b_dataToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ // Transitions from Shared
+ transition(S, {GETS,GET_INSTR}) {
+ uu_profile;
+ a_addRequestorToSharers;
+ b_dataToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ // Writer takes ownership: data + ack count go to the requestor, the
+ // remaining sharers are invalidated, and the sharer list is reset.
+ transition(S, GETX_NotOwner, M) {
+ uu_profile;
+ u_removeRequestorFromSharers;
+ b_dataToRequestor;
+ f_setOwnerToRequestor;
+ h_invToSharers;
+ g_clearSharers;
+ j_popIncomingRequestQueue;
+ }
+
+ // Transitions from Owned
+ // Owner (not memory) supplies data; transient until the requestor's FinalAck.
+ transition(O, {GETS,GET_INSTR}, OO) {
+ uu_profile;
+ a_addRequestorToSharers;
+ d_forwardRequestToOwner;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(O, {GETX_NotOwner, GETX_Owner}, OM) {
+ uu_profile;
+ u_removeRequestorFromSharers;
+ t_removeOwnerFromSharers;
+ d_forwardRequestToOwner;
+ f_setOwnerToRequestor;
+ h_invToSharers;
+ g_clearSharers;
+ j_popIncomingRequestQueue;
+ }
+
+ // Owner writes back: memory becomes up to date, remaining sharers keep copies.
+ transition(O, PUTX_Owner, S) {
+ uu_profile;
+ u_removeRequestorFromSharers;
+ l_writeRequestDataToMemory;
+ n_writebackAckToRequestor;
+ p_clearOwner;
+ j_popIncomingRequestQueue;
+ }
+
+
+ // Transitions from Modified
+ // Reader joins the owner as a sharer; owner supplies data (M -> O on FinalAck).
+ transition(M, {GETS,GET_INSTR}, MO) {
+ uu_profile;
+ a_addRequestorToSharers;
+ r_addOwnerToSharers;
+ d_forwardRequestToOwner;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M, GETX_NotOwner, MM) {
+ uu_profile;
+ d_forwardRequestToOwner;
+ f_setOwnerToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ // Owner re-requests exclusivity (e.g. lost its copy's permissions record);
+ // just confirm it already has the line.
+ transition(M, GETX_Owner) {
+ uu_profile;
+ m_forwardExclusiveRequestToOwner;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M, PUTX_Owner, I) {
+ uu_profile;
+ l_writeRequestDataToMemory;
+ n_writebackAckToRequestor;
+ p_clearOwner;
+ j_popIncomingRequestQueue;
+ }
+}
diff --git a/src/mem/protocol/MSI_MOSI_CMP_directory-msg.sm b/src/mem/protocol/MSI_MOSI_CMP_directory-msg.sm
new file mode 100644
index 000000000..80ab0f246
--- /dev/null
+++ b/src/mem/protocol/MSI_MOSI_CMP_directory-msg.sm
@@ -0,0 +1,115 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+ // CoherenceRequestType: request messages exchanged between L1s, L2 banks,
+ // and the directory in the MSI_MOSI_CMP protocol.
+ enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ UPGRADE, desc="UPGRADE to exclusive";
+ GETS, desc="Get Shared";
+ GET_INSTR, desc="Get Instruction";
+ PUTX, desc="Put eXclusive";
+ PUTS, desc="Put Shared";
+ INV, desc="INValidate";
+ INV_S, desc="INValidate the shared version";
+ L1_DG, desc="L1 cache DownGrade";
+ WB_ACK, desc="Write Back ACKnowledgment";
+ EXE_ACK, desc="EXclusivE ACKnowledgment";
+ }
+
+ // CoherenceResponseType: response messages (acks and data variants).
+ enumeration(CoherenceResponseType, desc="...") {
+ ACK, desc="ACKnowledgment";
+ INV_ACK, desc="INValidation ACKnowledgment";
+ DG_ACK, desc="DownGrade ACKnowledgment";
+ NACK, desc="Negative ACKnowledgment";
+ DATA, desc="Data";
+ DATA_S, desc="Data to L1 cache, then immediately go to shared state";
+ DATA_I, desc="Data to L1 cache, then immediately go to inv state";
+ FINALACK, desc="";
+ }
+
+ // RequestMsg: coherence request carried on the request/forward networks.
+ structure(RequestMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ AccessModeType AccessMode, desc="user/supervisor access type";
+ MachineID RequestorMachId, desc="What component request";
+ NetDest Destination, desc="What components receive the request, includes MachineType and num";
+ DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
+ int NumPendingExtAcks, desc="Number of acks to wait for"; // Needed for forwarded responses only
+ MessageSizeType MessageSize, desc="size category of the message";
+ // Cache-state strings are carried along purely for transition profiling.
+ string L1CacheStateStr, desc="describes L1 cache block state";
+ string L2CacheStateStr, desc="describes L2 cache block state";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+ }
+
+ // ResponseMsg: data/ack reply carried on the response networks.
+ structure(ResponseMsg, desc="...", interface="NetworkMessage") {
+ Address Address, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID SenderMachId, desc="What component sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ DataBlock DataBlk, desc="data for the cache line";
+ int NumPendingExtAcks, desc="Number of acks to wait for";
+ MessageSizeType MessageSize, desc="size category of the message";
+ }
+
+ // Map a protocol-specific request type onto the protocol-independent
+ // GenericRequestType used by the shared profiling infrastructure.
+ // Unrecognized types are a fatal error rather than silently miscounted.
+ GenericRequestType convertToGenericType(CoherenceRequestType type) {
+ if(type == CoherenceRequestType:PUTX) {
+ return GenericRequestType:PUTX;
+ } else if(type == CoherenceRequestType:GETS) {
+ return GenericRequestType:GETS;
+ } else if(type == CoherenceRequestType:GET_INSTR) {
+ return GenericRequestType:GET_INSTR;
+ } else if(type == CoherenceRequestType:GETX) {
+ return GenericRequestType:GETX;
+ } else if(type == CoherenceRequestType:UPGRADE) {
+ return GenericRequestType:UPGRADE;
+ } else if(type == CoherenceRequestType:PUTS) {
+ return GenericRequestType:PUTS;
+ } else if(type == CoherenceRequestType:INV) {
+ return GenericRequestType:INV;
+ } else if(type == CoherenceRequestType:INV_S) {
+ return GenericRequestType:INV_S;
+ } else if(type == CoherenceRequestType:L1_DG) {
+ return GenericRequestType:DOWNGRADE;
+ } else if(type == CoherenceRequestType:WB_ACK) {
+ return GenericRequestType:WB_ACK;
+ } else if(type == CoherenceRequestType:EXE_ACK) {
+ return GenericRequestType:EXE_ACK;
+ } else {
+ DEBUG_EXPR(type);
+ error("invalid CoherenceRequestType");
+ }
+ }
diff --git a/src/mem/protocol/MSI_MOSI_CMP_directory.slicc b/src/mem/protocol/MSI_MOSI_CMP_directory.slicc
new file mode 100644
index 000000000..8960b6486
--- /dev/null
+++ b/src/mem/protocol/MSI_MOSI_CMP_directory.slicc
@@ -0,0 +1,8 @@
+# protocol briefly described in
+# doc/MSI_MOSI_CMP_directory_2level-protocol-description.txt
+
+MSI_MOSI_CMP_directory-msg.sm
+MSI_MOSI_CMP_directory-L1cache.sm
+MSI_MOSI_CMP_directory-L2cache.sm
+MSI_MOSI_CMP_directory-dir.sm
+standard_CMP-protocol.sm
diff --git a/src/mem/protocol/RubySlicc_ComponentMapping.sm b/src/mem/protocol/RubySlicc_ComponentMapping.sm
new file mode 100644
index 000000000..0c205ac22
--- /dev/null
+++ b/src/mem/protocol/RubySlicc_ComponentMapping.sm
@@ -0,0 +1,62 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Mapping functions
+// Extern declarations of address/machine mapping helpers callable from
+// SLICC protocols; implementations live outside SLICC (not visible here).
+
+// Address -> home component mappings.
+// NodeID map_address_to_node(Address addr);
+MachineID map_Address_to_Directory(Address addr);
+NodeID map_Address_to_DirectoryNode(Address addr);
+MachineID map_Address_to_CentralArbiterNode(Address addr);
+NodeID oldmap_L1RubyNode_to_L2Cache(Address addr, NodeID L1RubyNode);
+MachineID map_L1CacheMachId_to_L2Cache(Address addr, MachineID L1CacheMachId);
+MachineID map_L2ChipId_to_L2Cache(Address addr, NodeID L2ChipId);
+// MachineID map_L1RubyNode_to_Arb(NodeID L1RubyNode);
+
+// L1/L2/collector identity lookups and on-chip membership tests.
+MachineID getL1MachineID(NodeID L1RubyNode);
+NodeID getChipID(MachineID L2machID);
+MachineID getCollectorDest(MachineID L1machID);
+MachineID getCollectorL1Cache(MachineID colID);
+NetDest getMultiStaticL2BankNetDest(Address addr, Set sharers);
+bool isL1OnChip(MachineID L1machID, NodeID L2NodeID);
+bool isL2OnChip(MachineID L2machID, NodeID L2NodeID);
+
+// MachineID introspection and sharer-set helpers.
+int getNumBanksInBankSet();
+NodeID machineIDToNodeID(MachineID machID);
+NodeID machineIDToVersion(MachineID machID);
+MachineType machineIDToMachineType(MachineID machID);
+NodeID L1CacheMachIDToProcessorNum(MachineID machID);
+NodeID L2CacheMachIDToChipID(MachineID machID);
+Set getOtherLocalL1IDs(MachineID L1);
+Set getLocalL1IDs(MachineID L1);
+Set getExternalL1IDs(MachineID L1);
+NetDest getAllPertinentL2Banks(Address addr);
+bool isLocalProcessor(MachineID thisId, MachineID tarId);
+
+GenericMachineType ConvertMachToGenericMach(MachineType machType);
+
diff --git a/src/mem/protocol/RubySlicc_Defines.sm b/src/mem/protocol/RubySlicc_Defines.sm
new file mode 100644
index 000000000..9bafebf10
--- /dev/null
+++ b/src/mem/protocol/RubySlicc_Defines.sm
@@ -0,0 +1,34 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Hack, no node object since base class has them
+// Per-controller identity variables made available to every generated
+// machine.  The attributes suppress normal per-chip object/vector code
+// generation (presumably because the C++ base class already provides
+// these members -- TODO confirm against the SLICC generator).
+NodeID id, no_chip_object="yes", no_vector="yes", abstract_chip_ptr="true";
+NodeID version, no_chip_object="yes", no_vector="yes", abstract_chip_ptr="true";
+MachineID machineID, no_chip_object="yes", no_vector="yes", abstract_chip_ptr="true";
+
diff --git a/src/mem/protocol/RubySlicc_Exports.sm b/src/mem/protocol/RubySlicc_Exports.sm
new file mode 100644
index 000000000..36622cb40
--- /dev/null
+++ b/src/mem/protocol/RubySlicc_Exports.sm
@@ -0,0 +1,339 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// defines
+// Primitive external types; "default" supplies the initial value used by
+// generated code for fields of that type.
+external_type(int, primitive="yes", default="0");
+external_type(bool, primitive="yes", default="false");
+external_type(string, primitive="yes");
+external_type(uint64, primitive="yes");
+external_type(Time, primitive="yes", default="0");
+external_type(Address);
+
+
+// Declarations of external types that are common to all protocols
+
+// AccessPermission
+// Per-block access permission; defaults to NotPresent when unset.
+enumeration(AccessPermission, desc="...", default="AccessPermission_NotPresent") {
+  Busy, desc="No Read or Write";
+  Read_Only, desc="Read Only";
+  Read_Write, desc="Read/Write";
+  Invalid, desc="Invalid";
+  NotPresent, desc="NotPresent";
+  OnHold, desc="Holding a place in dnuca cache";
+  ReadUpgradingToWrite, desc="Read only, but trying to get Read/Write";
+  Stale, desc="local L1 has a modified copy, assume L2 copy is stale data";
+}
+
+// Status enumerations for the Ruby testers / synthetic request generators.
+
+// TesterStatus
+enumeration(TesterStatus, desc="...") {
+  Idle, desc="Idle";
+  Action_Pending, desc="Action Pending";
+  Ready, desc="Ready";
+  Check_Pending, desc="Check Pending";
+}
+
+// SpecifiedGeneratorTypes
+enumeration(SpecifiedGeneratorType, desc="...") {
+  DetermGETXGenerator, desc="deterministic GETX Tester";
+  DetermInvGenerator, desc="deterministic all shared then invalidate Tester";
+  DetermSeriesGETSGenerator, desc="deterministic Series of GETSs Tester for prefetcher tuning";
+}
+
+// RequestGeneratorStatus
+enumeration(RequestGeneratorStatus, desc="...") {
+  Thinking, desc="Doing work between release and next acquire";
+  Test_Pending, desc="Test pending";
+  Before_Swap, desc="We're about to perform the swap";
+  Swap_Pending, desc="The swap used for test-and-send is pending";
+  Holding, desc="We are holding the lock performing the critical section";
+  Release_Pending, desc="The write for the release is pending";
+  Done, desc="Done, waiting for end of run";
+}
+
+// DetermGETXGeneratorStatus
+enumeration(DetermGETXGeneratorStatus, desc="...") {
+  Thinking, desc="Doing work before next action";
+  Store_Pending, desc="Store pending";
+  Done, desc="Done, waiting for end of run";
+}
+
+// DetermInvGeneratorStatus
+enumeration(DetermInvGeneratorStatus, desc="...") {
+  Thinking, desc="Doing work before next action";
+  Store_Pending, desc="Store pending";
+  Load_Complete, desc="Load complete";
+  Load_Pending, desc="Load pending";
+  Done, desc="Done, waiting for end of run";
+}
+
+// DetermSeriesGETSGeneratorStatus
+enumeration(DetermSeriesGETSGeneratorStatus, desc="...") {
+  Thinking, desc="Doing work before next action";
+  Load_Pending, desc="Load pending";
+  Done, desc="Done, waiting for end of run";
+}
+
+// LockStatus
+enumeration(LockStatus, desc="...") {
+  Unlocked, desc="Lock is not held";
+  Locked, desc="Lock is held";
+}
+
+// SequencerStatus
+enumeration(SequencerStatus, desc="...") {
+  Idle, desc="Idle";
+  Pending, desc="Pending";
+}
+
+// Outcome of attempting a state-machine transition.
+enumeration(TransitionResult, desc="...") {
+  Valid, desc="Valid transition";
+  ResourceStall, desc="Stalled due to insufficient resources";
+  ProtocolStall, desc="Protocol specified stall";
+}
+
+// CacheRequestType
+// Processor-side request types; the *_XACT values carry transactional
+// memory operations.
+enumeration(CacheRequestType, desc="...", default="CacheRequestType_NULL") {
+  LD, desc="Load";
+  ST, desc="Store";
+  ATOMIC, desc="Atomic Load/Store";
+  IFETCH, desc="Instruction fetch";
+  IO, desc="I/O";
+  REPLACEMENT, desc="Replacement";
+  COMMIT, desc="Commit version";
+  LD_XACT, desc="Transactional Load";
+  LDX_XACT, desc="Transactional Load-Intend-To-Modify";
+  ST_XACT, desc="Transactional Store";
+  BEGIN_XACT, desc="Begin Transaction";
+  COMMIT_XACT, desc="Commit Transaction";
+  ABORT_XACT, desc="Abort Transaction";
+  NULL, desc="Invalid request type";
+}
+
+// GenericRequestType
+// Protocol-independent request types (a superset of CacheRequestType plus
+// coherence traffic); convertToGenericType() in each protocol maps its
+// CoherenceRequestType onto these.
+enumeration(GenericRequestType, desc="...", default="GenericRequestType_NULL") {
+  GETS, desc="gets request";
+  GET_INSTR, desc="get instr request";
+  GETX, desc="getx request";
+  UPGRADE, desc="upgrade request";
+  DOWNGRADE, desc="downgrade request";
+  INV, desc="invalidate request";
+  INV_S, desc="invalidate shared copy request";
+  PUTS, desc="puts request";
+  PUTO, desc="puto request";
+  PUTX, desc="putx request";
+  L2_PF, desc="L2 prefetch";
+  LD, desc="Load";
+  ST, desc="Store";
+  ATOMIC, desc="Atomic Load/Store";
+  IFETCH, desc="Instruction fetch";
+  IO, desc="I/O";
+  NACK, desc="Nack";
+  REPLACEMENT, desc="Replacement";
+  WB_ACK, desc="WriteBack ack";
+  EXE_ACK, desc="Exclusive ack";
+  COMMIT, desc="Commit version";
+  LD_XACT, desc="Transactional Load";
+  LDX_XACT, desc="Transactional Load-Intend-To-Modify";
+  ST_XACT, desc="Transactional Store";
+  BEGIN_XACT, desc="Begin Transaction";
+  COMMIT_XACT, desc="Commit Transaction";
+  ABORT_XACT, desc="Abort Transaction";
+  NULL, desc="null request type";
+}
+
+// GenericMachineType
+// Protocol-independent machine (controller) kinds; the *_wCC variants are
+// used for miss-latency profiling.
+enumeration(GenericMachineType, desc="...", default="GenericMachineType_NULL") {
+  L1Cache, desc="L1 Cache Mach";
+  L2Cache, desc="L2 Cache Mach";
+  L3Cache, desc="L3 Cache Mach";
+  Directory, desc="Directory Mach";
+  Collector, desc="Collector Mach";
+  L1Cache_wCC, desc="L1 Cache Mach with Cache Coherence (used for miss latency profile)";
+  L2Cache_wCC, desc="L2 Cache Mach with Cache Coherence (used for miss latency profile)";
+  NULL, desc="null mach type";
+}
+
+// MessageSizeType
+// Size category of a network message (used for bandwidth accounting);
+// defaults to Undefined when unset.
+enumeration(MessageSizeType, default="MessageSizeType_Undefined", desc="...") {
+  Undefined, desc="Undefined";
+  Control, desc="Control Message";
+  Data, desc="Data Message";
+  Request_Control, desc="Request";
+  Reissue_Control, desc="Reissued request";
+  Response_Data, desc="data response";
+  ResponseL2hit_Data, desc="data response";
+  ResponseLocal_Data, desc="data response";
+  Response_Control, desc="non-data response";
+  Writeback_Data, desc="Writeback data";
+  Writeback_Control, desc="Writeback control";
+  Forwarded_Control, desc="Forwarded control";
+  Invalidate_Control, desc="Invalidate control";
+  Unblock_Control, desc="Unblock control";
+  Persistent_Control, desc="Persistent request activation messages";
+  Completion_Control, desc="Completion messages";
+}
+
+// AccessType
+enumeration(AccessType, desc="...") {
+  Read, desc="Reading from cache";
+  Write, desc="Writing to cache";
+}
+
+// AccessModeType
+enumeration(AccessModeType, default="AccessModeType_UserMode", desc="...") {
+  SupervisorMode, desc="Supervisor mode";
+  UserMode, desc="User mode";
+}
+
+// Whether (and which kind of) prefetch produced a request.
+enumeration(PrefetchBit, default="PrefetchBit_No", desc="...") {
+  No, desc="No, not a prefetch";
+  Yes, desc="Yes, a prefetch";
+  L1_HW, desc="This is a L1 hardware prefetch";
+  L2_HW, desc="This is a L2 hardware prefetch";
+}
+
+// CacheMsg
+// A processor-side memory request (load/store/ifetch) entering the cache
+// hierarchy, carrying addressing, access-mode, and transactional-memory
+// metadata.
+structure(CacheMsg, desc="...", interface="Message") {
+  Address Address, desc="Line address for this request";
+  Address PhysicalAddress, desc="Physical address for this request";
+  CacheRequestType Type, desc="Type of request (LD, ST, etc)";
+  Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
+  AccessModeType AccessMode, desc="user/supervisor access type";
+  int Size, desc="size in bytes of access";
+  PrefetchBit Prefetch, desc="Is this a prefetch request";
+  // following field only used for MVC
+  int Version, desc="Version associated with this request";
+  // trans mem fields
+  //bool Aborted, desc="This flag is set if the request is from an aborted xact.";
+  Address LogicalAddress, desc="Virtual address for this request";
+  //int TransactionLevel, desc="Transaction Level of this request";
+  //uint64 SequenceNumber, desc="Sequence number of this request";
+  int ThreadID, desc="The SMT thread that initiated this request";
+  uint64 Timestamp, desc="The transaction timestamp of this request. Last commit time if request is non-transactional";
+  bool ExposedAction, desc="Is this request part of an exposed action";
+  //uint64 RequestTime, desc="The cycle in which this request was issued";
+}
+
+
+
+
+// Multicast-mask predictor configuration enums.
+// NOTE(review): these three pass the default value positionally
+// ("..._Undefined") rather than via default="..." as the other
+// enumerations above do -- presumably the SLICC parser accepts both;
+// confirm before normalizing.
+
+// MaskPredictorType
+enumeration(MaskPredictorType, "MaskPredictorType_Undefined", desc="...") {
+  Undefined, desc="Undefined";
+  AlwaysUnicast, desc="AlwaysUnicast";
+  TokenD, desc="TokenD";
+  AlwaysBroadcast, desc="AlwaysBroadcast";
+  TokenB, desc="TokenB";
+  TokenNull, desc="TokenNull";
+  Random, desc="Random";
+  Pairwise, desc="Pairwise";
+  Owner, desc="Owner";
+  BroadcastIfShared, desc="Broadcast-If-Shared";
+  BroadcastCounter, desc="Broadcast Counter";
+  Group, desc="Group";
+  Counter, desc="Counter";
+  StickySpatial, desc="StickySpatial";
+  OwnerBroadcast, desc="Owner/Broadcast Hybrid";
+  OwnerGroup, desc="Owner/Group Hybrid";
+  OwnerBroadcastMod, desc="Owner/Broadcast Hybrid-Mod";
+  OwnerGroupMod, desc="Owner/Group Hybrid-Mod";
+  LastNMasks, desc="Last N Masks";
+  BandwidthAdaptive, desc="Bandwidth Adaptive";
+}
+
+// MaskPredictorIndex
+enumeration(MaskPredictorIndex, "MaskPredictorIndex_Undefined", desc="...") {
+  Undefined, desc="Undefined";
+  DataBlock, desc="Data Block";
+  PC, desc="Program Counter";
+}
+
+// MaskPredictorTraining
+enumeration(MaskPredictorTraining, "MaskPredictorTraining_Undefined", desc="...") {
+  Undefined, desc="Undefined";
+  None, desc="None";
+  Implicit, desc="Implicit";
+  Explicit, desc="Explicit";
+  Both, desc="Both";
+}
+
+// Network Topologies
+enumeration(TopologyType, desc="...") {
+  CROSSBAR, desc="One node per chip, single switch crossbar";
+  HIERARCHICAL_SWITCH, desc="One node per chip, totally ordered hierarchical tree switched network";
+  TORUS_2D, desc="One node per chip, 2D torus";
+  PT_TO_PT, desc="One node per chip, Point to Point Network";
+  FILE_SPECIFIED, desc="described by the file NETWORK_FILE";
+}
+
+// DNUCA AllocationStrategy
+// (descriptions intentionally empty in the original GEMS import)
+enumeration(AllocationStrategy, desc="...") {
+  InMiddle, desc="";
+  InInvCorners, desc="";
+  InSharedSides, desc="";
+  StaticDist, desc="";
+  RandomBank, desc="";
+  FrequencyBank, desc="";
+  FrequencyBlock, desc="";
+  LRUBlock, desc="";
+}
+
+// DNUCA SearchMechanism
+enumeration(SearchMechanism, desc="...") {
+  Perfect, desc="";
+  PartialTag, desc="";
+  BloomFilter, desc="";
+  Random, desc="";
+  None, desc="";
+}
+
+// DNUCA link type
+enumeration(LinkType, desc="...") {
+  RC_1500UM, desc="";
+  RC_2500UM, desc="";
+  TL_9000UM, desc="";
+  TL_11000UM, desc="";
+  TL_13000UM, desc="";
+  NO_ENERGY, desc="";
+  NULL, desc="";
+}
+
+// transient request type
+enumeration(TransientRequestType, desc="...", default="TransientRequestType_Undefined") {
+  Undefined, desc="";
+  OffChip, desc="";
+  OnChip, desc="";
+  LocalTransient, desc="";
+}
+
+
+
diff --git a/src/mem/protocol/RubySlicc_MemControl.sm b/src/mem/protocol/RubySlicc_MemControl.sm
new file mode 100644
index 000000000..a51bf09d4
--- /dev/null
+++ b/src/mem/protocol/RubySlicc_MemControl.sm
@@ -0,0 +1,67 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// MemoryRequestType used in MemoryMsg
+
+enumeration(MemoryRequestType, desc="...") {
+
+  // Southbound request: from directory to memory cache
+  // or directory to memory or memory cache to memory
+  MEMORY_READ, desc="Read request to memory";
+  MEMORY_WB, desc="Write back data to memory";
+
+  // response from memory to directory
+  // (These are currently unused!)
+  MEMORY_DATA, desc="Data read from memory";
+  MEMORY_ACK, desc="Write to memory acknowledgement";
+}
+
+
+// Message to and from Memory Control
+
+// MemoryMsg: request/response exchanged with the memory controller;
+// OriginalRequestorMachId preserves the requester across the directory hop.
+structure(MemoryMsg, desc="...", interface="Message") {
+  Address Address, desc="Physical address for this request";
+  MemoryRequestType Type, desc="Type of memory request (MEMORY_READ or MEMORY_WB)";
+  MachineID Sender, desc="What component sent the data";
+  MachineID OriginalRequestorMachId, desc="What component originally requested";
+  DataBlock DataBlk, desc="Data to writeback";
+  MessageSizeType MessageSize, desc="size category of the message";
+  // Not all fields used by all protocols:
+  PrefetchBit Prefetch, desc="Is this a prefetch request";
+  bool ReadX, desc="Exclusive";
+  int Acks, desc="How many acks to expect";
+
+
+}
+
diff --git a/src/mem/protocol/RubySlicc_Profiler.sm b/src/mem/protocol/RubySlicc_Profiler.sm
new file mode 100644
index 000000000..7a7fbdae1
--- /dev/null
+++ b/src/mem/protocol/RubySlicc_Profiler.sm
@@ -0,0 +1,64 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Profiler functions
+// Extern declarations of statistics hooks callable from SLICC protocols;
+// implementations live outside SLICC (not visible here).
+
+void profileStore(NodeID node, bool needCLB);
+void profileCacheCLBsize(int size, int numStaleI);
+void profileMemoryCLBsize(int size, int numStaleI);
+
+// used by 2level exclusive cache protocols
+void profile_miss(CacheMsg msg, NodeID id);
+
+// used by non-fast path protocols
+void profile_L1Cache_miss(CacheMsg msg, NodeID l1cacheID);
+
+// used by CMP protocols
+void profile_L2Cache_miss(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID l2cacheID);
+void profile_request(string L1CacheStateStr, string L2CacheStateStr, string directoryStateStr, string requestTypeStr);
+void profileMessageReordering(bool wasReordered);
+void profileMessageReorderingByNetwork(int vnet, bool wasReordered);
+void profile_token_retry(Address addr, AccessType type, int count);
+void profile_persistent_prediction(Address addr, AccessType type);
+void profile_filter_action(int act);
+void profile_multicast_retry(Address addr, int count);
+void profile_outstanding_request(int outstanding);
+void profile_outstanding_persistent_request(int outstanding);
+// void profile_overlapping_persistent_request(int overlapping);
+void profile_average_latency_estimate(int latency);
+
+// profile the total message delay of a message across a virtual network
+void profileMsgDelay(int virtualNetwork, int delayCycles);
+
+// used by transactional-memory protocols
+void profile_transaction(int numStores);
+void profile_trans_wb();
+void profileOverflow(Address addr, MachineID mach);
+
+
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
new file mode 100644
index 000000000..a948322dd
--- /dev/null
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -0,0 +1,168 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// External Types
+// Declarations of C++-implemented types usable from SLICC; only the
+// methods listed here are callable from protocol code.
+
+external_type(DataBlock, desc="..."){
+  void clear();
+}
+
+// Message queue usable as both an input and an output port.
+external_type(MessageBuffer, buffer="yes", inport="yes", outport="yes");
+
+external_type(OutPort, primitive="yes");
+
+external_type(InPort, primitive="yes") {
+  bool isReady();
+  void dequeue();
+  int dequeue_getDelayCycles();
+  void recycle();
+  bool isEmpty();
+}
+
+external_type(NodeID, default="0");
+external_type(MachineID);
+
+external_type(StoreBuffer);
+
+
+// Set of NodeIDs with set-algebra operations.
+external_type(Set, non_obj="yes") {
+  void setSize(int);
+  void add(NodeID);
+  void addSet(Set);
+  void remove(NodeID);
+  void removeSet(Set);
+  void broadcast();
+  void addRandom();
+  void clear();
+  int count();
+  bool isElement(NodeID);
+  bool isEqual(Set);
+  bool isSuperset(Set);
+  bool intersectionIsEmpty(Set);
+  NodeID smallestElement();
+}
+
+// Destination set addressable by NodeID, MachineID, or whole MachineType;
+// a Set-like type used for multicast message routing.
+external_type(NetDest, non_obj="yes") {
+  void setSize(int);
+  void setSize(int, int);
+  void add(NodeID);
+  void add(MachineID);
+  void addSet(Set);
+  void addNetDest(NetDest);
+  void setNetDest(MachineType, Set);
+  void remove(NodeID);
+  void remove(MachineID);
+  void removeSet(Set);
+  void removeNetDest(NetDest);
+  void broadcast();
+  void broadcast(MachineType);
+  void addRandom();
+  void clear();
+  Set toSet();
+  int count();
+  bool isElement(NodeID);
+  bool isElement(MachineID);
+  bool isSuperset(Set);
+  bool isSuperset(NetDest);
+  bool isEmpty();
+  bool intersectionIsEmpty(Set);
+  bool intersectionIsEmpty(NetDest);
+  MachineID smallestElement(MachineType);
+}
+
+// Persistent-request (starvation-avoidance) table keyed by MachineID.
+external_type(PersistentTable) {
+  void persistentRequestLock(Address, MachineID, AccessType);
+  void persistentRequestUnlock(Address, MachineID);
+  bool okToIssueStarving(Address);
+  MachineID findSmallest(Address);
+  AccessType typeOfSmallest(Address);
+  void markEntries(Address);
+  bool isLocked(Address);
+  int countStarvingForAddress(Address);
+  int countReadStarvingForAddress(Address);
+}
+
+// Identical interface to PersistentTable, but keyed by NodeID.
+external_type(NodePersistentTable) {
+  void persistentRequestLock(Address, NodeID, AccessType);
+  void persistentRequestUnlock(Address, NodeID);
+  bool okToIssueStarving(Address);
+  NodeID findSmallest(Address);
+  AccessType typeOfSmallest(Address);
+  void markEntries(Address);
+  bool isLocked(Address);
+  int countStarvingForAddress(Address);
+  int countReadStarvingForAddress(Address);
+}
+
+// Processor-facing sequencer.  read/writeCallback are overloaded on how
+// much completion detail the protocol supplies; the *Abort/Conflict/xact
+// entry points support the transactional-memory protocols.
+external_type(Sequencer) {
+  void readCallback(Address, DataBlock, GenericMachineType, PrefetchBit, int);
+  void writeCallback(Address, DataBlock, GenericMachineType, PrefetchBit, int);
+  void readCallback(Address, DataBlock, GenericMachineType, PrefetchBit);
+  void writeCallback(Address, DataBlock, GenericMachineType, PrefetchBit);
+  void readCallback(Address, DataBlock);
+  void writeCallback(Address, DataBlock);
+  void readCallback(Address);
+  void writeCallback(Address);
+  void readCallbackAbort(Address, int);
+  void writeCallbackAbort(Address, int);
+  void readConflictCallback(Address);
+  void writeConflictCallback(Address);
+  void xactCallback(Address);
+  void updateCurrentVersion();
+  void updateLastCommittedVersion();
+  void systemRecovery();
+  void systemRestart();
+  void checkCoherence(Address);
+  void profileNack(Address, int, int, uint64);
+  void resetRequestTime(Address, int);
+  bool isReadAborted(Address, int);
+  bool isWriteAborted(Address, int);
+}
+
+// Per-address timer table; usable as an inport so expirations can trigger
+// protocol transitions.
+external_type(TimerTable, inport="yes") {
+  bool isReady();
+  Address readyAddress();
+  void set(Address, int);
+  void unset(Address, int);
+  bool isSet(Address);
+}
+
+// Counting Bloom filter over addresses; the int argument selects a filter
+// instance/context (presumably -- confirm against the C++ implementation).
+external_type(GenericBloomFilter) {
+
+  void clear(int);
+  void increment(Address, int);
+  void decrement(Address, int);
+  void set(Address, int);
+  void unset(Address, int);
+
+  bool isSet(Address, int);
+  int getCount(Address, int);
+}
+
+
diff --git a/src/mem/protocol/RubySlicc_Util.sm b/src/mem/protocol/RubySlicc_Util.sm
new file mode 100644
index 000000000..7f7ebf5ed
--- /dev/null
+++ b/src/mem/protocol/RubySlicc_Util.sm
@@ -0,0 +1,61 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Miscellaneous Functions
+// Extern declarations of utility helpers (error/assert, randomness, time
+// and ID arithmetic) callable from SLICC protocols; implementations live
+// outside SLICC (not visible here).
+
+void error(string msg);
+void assert(bool condition);
+int random(int number);
+Time get_time();
+Time zero_time();
+NodeID intToID(int nodenum);
+int IDToInt(NodeID id);
+int addressToInt(Address addr);
+int MessageSizeTypeToInt(MessageSizeType size_type);
+bool multicast_retry();
+int numberOfNodes();
+int numberOfL1CachePerChip();
+int getAddThenMod(int addend1, int addend2, int modulus);
+int time_to_int(Time time);
+Time getTimeModInt(Time time, int modulus);
+Time getTimePlusInt(Time addend1, int addend2);
+Time getTimeMinusTime(Time t1, Time t2);
+Time getPreviousDelayedCycles(Time t1, Time t2);
+void WARN_ERROR_TIME(Time time);
+void procProfileCoherenceRequest(NodeID node, bool needCLB);
+void dirProfileCoherenceRequest(NodeID node, bool needCLB);
+bool isPerfectProtocol();
+bool L1trainsPrefetcher();
+int max_tokens();
+int N_tokens();
+bool distributedPersistentEnabled();
+Address setOffset(Address addr, int offset);
+Address makeLineAddress(Address addr);
+
+
diff --git a/src/mem/protocol/RubySlicc_interfaces.slicc b/src/mem/protocol/RubySlicc_interfaces.slicc
new file mode 100644
index 000000000..280a84763
--- /dev/null
+++ b/src/mem/protocol/RubySlicc_interfaces.slicc
@@ -0,0 +1,7 @@
+RubySlicc_Exports.sm
+RubySlicc_Types.sm
+RubySlicc_Util.sm
+RubySlicc_ComponentMapping.sm
+RubySlicc_Profiler.sm
+RubySlicc_Defines.sm
+RubySlicc_MemControl.sm
diff --git a/src/mem/protocol/doc/MSI_MOSI_CMP_directory_2level-protocol-description.txt b/src/mem/protocol/doc/MSI_MOSI_CMP_directory_2level-protocol-description.txt
new file mode 100644
index 000000000..ef13e5cc1
--- /dev/null
+++ b/src/mem/protocol/doc/MSI_MOSI_CMP_directory_2level-protocol-description.txt
@@ -0,0 +1,49 @@
+MSI_dir_L1_MOSI_dir_L2_SNUCA_CMP
+---------------------------------------
+
+
+CMP System structure:
+
+A CMP (Chip MultiProcessor) System is composed of one or more CMP chips. A CMP chip consists of some number of processors, each with private L1 I+D caches and one shared L2 cache. The shared L2 cache can be sub-divided into banks where each bank has a separate cache controller. One global interconnect is defined that connects all the components in the system including the L1 caches, the L2 cache banks, and the memories/directories.
+
+High-level Protocol Description:
+
+- The L1 has 3 stable states: M, S, I
+
+- L2 maintains strict inclusion and has full knowledge of on-chip L1 sharers via individual bits in the cache tag. The stable
+states are below:
+
+ M,O,S,I : normal meanings
+ SS : L2 entry shared, also present in one or more L1s or ext L2s
+ SO : L2 entry owned, also present in one or more L1s or ext L2s
+ MT : L2 entry modified in a local L1. L2 copy is stale
+
+- The protocol has a strict on-chip hierarchy where the L1 caches only communicate with their on-chip L2 cache, while off-chip the L2 cache communicates with the directory as well as directly responding to the other L2 caches.
+
+High Level Event Description:
+
+On a L1 GETS miss -
+> The L2 controller will satisfy the miss immediately if in M, S, SS, or SO.
+> If the L2 controller is in MT, the L2 controller will issue a down-grade request to the on-chip L1 sharer. The L1 sharer will then downgrade to S and copy its dirty block to the L2. The L2 will then forward that copy to the L1 GETS requestor.
+> If not present, the L2 will issue a GETS to the directory. The directory will either forward the request, or respond with data. If forwarded to another L2 controller, the L2 owner will respond with data directly to the L2 requestor which would then respond to the original L1 requestor.
+
+On a L1 GETX miss -
+> The L2 controller will satisfy the miss immediately if in M
+> If the L2 controller is in MT, the L2 controller will issue an invalidate request to the on-chip L1 sharer. The L1 sharer will then invalidate the block and write back its dirty block to the L2. The L2 will then forward that copy to the L1 GETX requestor.
+> If the L2 controller is in O, S, or I, the L2 controller will issue a GETX request to the directory. The L2 will wait to receive the data and the necessary number of acks before responding to the L1 GETX requestor with the data and exclusive permission.
+> If the L2 controller is in SS or SO, the L2 controller will issue a GETX request to the directory as well as invalidate requests to the on-chip L1 copies. The L2 will wait to receive the data and the necessary number of acks (both on and off chip) before responding to the L1 GETX requestor with the data and exclusive permission.
+
+Other minute details:
+
+ The L2 acknowledges all L1 replacements
+ The L1 acknowledges L2 invalidation requests
+ The L2 sends an ack to the directory on a 3-hop transfer
+ The forward request network (Dir -> L2 -> L1) is assumed to be pt-to-pt ordered
+ The L1 request network (L1 -> L2) is assumed to be pt-to-pt ordered
+ All other networks are unordered
+
+Ruby Implementation Details:
+
+- The new component network must be used with this protocol
+- Each ruby node contains a L1 cache controller, 0 or more L2 cache controllers, and 0 or more directories. There must be at least one directory in the system and at least one L2 cache controller per chip. The directories are statically partitioned by address across the entire system (SMP), while the L2Cache banks are statically partitioned by address across a single chip. The number of controllers of each machine type must be a power of 2.
+- BUG ALERT: There is a single pool of TBE Entries for the L2 Controller. Therefore it is possible to encounter a resource deadlock for these entries between satisfying L1 requests and directory requests.
diff --git a/src/mem/protocol/doc/MSI_dir_L1_MOSI_dir_L2_SNUCA_CMP-protocol-description.txt b/src/mem/protocol/doc/MSI_dir_L1_MOSI_dir_L2_SNUCA_CMP-protocol-description.txt
new file mode 100644
index 000000000..ef13e5cc1
--- /dev/null
+++ b/src/mem/protocol/doc/MSI_dir_L1_MOSI_dir_L2_SNUCA_CMP-protocol-description.txt
@@ -0,0 +1,49 @@
+MSI_dir_L1_MOSI_dir_L2_SNUCA_CMP
+---------------------------------------
+
+
+CMP System structure:
+
+A CMP (Chip MultiProcessor) System is composed of one or more CMP chips. A CMP chip consists of some number of processors, each with private L1 I+D caches and one shared L2 cache. The shared L2 cache can be sub-divided into banks where each bank has a separate cache controller. One global interconnect is defined that connects all the components in the system including the L1 caches, the L2 cache banks, and the memories/directories.
+
+High-level Protocol Description:
+
+- The L1 has 3 stable states: M, S, I
+
+- L2 maintains strict inclusion and has full knowledge of on-chip L1 sharers via individual bits in the cache tag. The stable
+states are below:
+
+ M,O,S,I : normal meanings
+ SS : L2 entry shared, also present in one or more L1s or ext L2s
+ SO : L2 entry owned, also present in one or more L1s or ext L2s
+ MT : L2 entry modified in a local L1. L2 copy is stale
+
+- The protocol has a strict on-chip hierarchy where the L1 caches only communicate with their on-chip L2 cache, while off-chip the L2 cache communicates with the directory as well as directly responding to the other L2 caches.
+
+High Level Event Description:
+
+On a L1 GETS miss -
+> The L2 controller will satisfy the miss immediately if in M, S, SS, or SO.
+> If the L2 controller is in MT, the L2 controller will issue a down-grade request to the on-chip L1 sharer. The L1 sharer will then downgrade to S and copy its dirty block to the L2. The L2 will then forward that copy to the L1 GETS requestor.
+> If not present, the L2 will issue a GETS to the directory. The directory will either forward the request, or respond with data. If forwarded to another L2 controller, the L2 owner will respond with data directly to the L2 requestor which would then respond to the original L1 requestor.
+
+On a L1 GETX miss -
+> The L2 controller will satisfy the miss immediately if in M
+> If the L2 controller is in MT, the L2 controller will issue an invalidate request to the on-chip L1 sharer. The L1 sharer will then invalidate the block and write back its dirty block to the L2. The L2 will then forward that copy to the L1 GETX requestor.
+> If the L2 controller is in O, S, or I, the L2 controller will issue a GETX request to the directory. The L2 will wait to receive the data and the necessary number of acks before responding to the L1 GETX requestor with the data and exclusive permission.
+> If the L2 controller is in SS or SO, the L2 controller will issue a GETX request to the directory as well as invalidate requests to the on-chip L1 copies. The L2 will wait to receive the data and the necessary number of acks (both on and off chip) before responding to the L1 GETX requestor with the data and exclusive permission.
+
+Other minute details:
+
+ The L2 acknowledges all L1 replacements
+ The L1 acknowledges L2 invalidation requests
+ The L2 sends an ack to the directory on a 3-hop transfer
+ The forward request network (Dir -> L2 -> L1) is assumed to be pt-to-pt ordered
+ The L1 request network (L1 -> L2) is assumed to be pt-to-pt ordered
+ All other networks are unordered
+
+Ruby Implementation Details:
+
+- The new component network must be used with this protocol
+- Each ruby node contains a L1 cache controller, 0 or more L2 cache controllers, and 0 or more directories. There must be at least one directory in the system and at least one L2 cache controller per chip. The directories are statically partitioned by address across the entire system (SMP), while the L2Cache banks are statically partitioned by address across a single chip. The number of controllers of each machine type must be a power of 2.
+- BUG ALERT: There is a single pool of TBE Entries for the L2 Controller. Therefore it is possible to encounter a resource deadlock for these entries between satisfying L1 requests and directory requests.
diff --git a/src/mem/protocol/standard_1level_SMP-protocol.sm b/src/mem/protocol/standard_1level_SMP-protocol.sm
new file mode 100644
index 000000000..bfaca8466
--- /dev/null
+++ b/src/mem/protocol/standard_1level_SMP-protocol.sm
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+// global protocol features
+global(Protocol, desc="Global properties of this protocol",
+ interface = "AbstractProtocol") {
+ bool TwoLevelCache := false;
+}
+
diff --git a/src/mem/protocol/standard_CMP-protocol.sm b/src/mem/protocol/standard_CMP-protocol.sm
new file mode 100644
index 000000000..dbd7e4ef5
--- /dev/null
+++ b/src/mem/protocol/standard_CMP-protocol.sm
@@ -0,0 +1,36 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// global protocol features
+global(Protocol, desc="Global properties of this protocol",
+ interface = "AbstractProtocol") {
+ bool TwoLevelCache := true;
+ bool CMP := true;
+}
+
diff --git a/src/mem/protocol/standard_SMP-protocol.sm b/src/mem/protocol/standard_SMP-protocol.sm
new file mode 100644
index 000000000..9a2c6ef39
--- /dev/null
+++ b/src/mem/protocol/standard_SMP-protocol.sm
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+// global protocol features
+global(Protocol, desc="Global properties of this protocol",
+ interface = "AbstractProtocol") {
+ bool TwoLevelCache := true;
+}
+
diff --git a/src/mem/ruby/Decomissioning_note b/src/mem/ruby/Decomissioning_note
new file mode 100644
index 000000000..9b083d196
--- /dev/null
+++ b/src/mem/ruby/Decomissioning_note
@@ -0,0 +1,10 @@
+
+1. While decommissioning log_tm
+
+ a. I left lots of transaction related stuff ( Xact.... ) in commands.C
+ Made the minimal changes to compile it properly
+
+
+make[1]: *** No rule to make target `amd64-linux/generated/MI_example/obj/PartialAddressFilter.o', needed by `amd64-linux/generated/MI_example/bin/libruby.so
+
+
diff --git a/src/mem/ruby/FakeSimicsDataTypes.hh b/src/mem/ruby/FakeSimicsDataTypes.hh
new file mode 100644
index 000000000..b6fcda95c
--- /dev/null
+++ b/src/mem/ruby/FakeSimicsDataTypes.hh
@@ -0,0 +1,63 @@
+#ifndef FAKE_SIMICS_DATA_TYPES_H
+#define FAKE_SIMICS_DATA_TYPES_H
+
+typedef struct attr_value attr_value_t;
+
+typedef enum {
+ Sim_Val_Invalid = 0,
+ Sim_Val_String = 1,
+ Sim_Val_Integer = 2,
+ Sim_Val_Floating = 3,
+ Sim_Val_List = 4,
+ Sim_Val_Data = 5,
+ Sim_Val_Nil = 6,
+ Sim_Val_Object = 7
+} attr_kind_t;
+
+typedef struct attr_list attr_list_t;
+
+struct attr_list {
+ int size;
+ struct attr_value *vector;
+};
+
+struct attr_value {
+ attr_kind_t kind;
+ union {
+ const char *string; /* Sim_Val_String */
+ unsigned long long integer; /* Sim_Val_Integer */
+ double floating; /* Sim_Val_Floating */
+ void *object; /* Sim_Val_Object */
+ attr_list_t list; /* Sim_Val_List */
+ } u;
+};
+
+typedef enum {
+Sim_Set_Ok,
+Sim_Set_Need_Integer,
+Sim_Set_Need_Floating,
+Sim_Set_Need_String,
+Sim_Set_Need_List,
+Sim_Set_Need_Data,
+Sim_Set_Need_Object,
+Sim_Set_Object_Not_Found,
+Sim_Set_Interface_Not_Found,
+Sim_Set_Illegal_Value,
+Sim_Set_Illegal_Type,
+Sim_Set_Illegal_Index,
+Sim_Set_Attribute_Not_Found,
+Sim_Set_Not_Writable,
+Sim_Set_Ignored
+} set_error_t;
+
+
+typedef attr_value_t (*get_attr_t)(void *ptr,
+ void *obj,
+ attr_value_t *idx);
+
+typedef set_error_t (*set_attr_t)(void *ptr,
+ void *obj,
+ attr_value_t *val,
+ attr_value_t *idx);
+
+#endif // #ifndef FAKE_SIMICS_DATA_TYPES_H
diff --git a/src/mem/ruby/README.debugging b/src/mem/ruby/README.debugging
new file mode 100644
index 000000000..48a5e62f0
--- /dev/null
+++ b/src/mem/ruby/README.debugging
@@ -0,0 +1,104 @@
+# ------ Debugging the Ruby Tester ------
+
+You can compile Ruby with debugging turned on.
+
+cd ruby
+[vim or emacs] Makefile
+
+Change OPT_FLAGS to "-g -O0" (the first OPT_FLAGS line). Make
+sure all the other OPT_FLAGS lines are commented out.
+
+Change DEBUG_FLAGS to "-DRUBY_DEBUG=true". (Just uncomment the
+first DEBUG_FLAGS line, and comment out the second DEBUG_FLAGS
+line.)
+
+You can choose which component or components to debug, and the
+level of verbosity. For example,
+
+"x86-linux/generated/MOSI_SMP_bcast/bin/tester.exec -l 100000 -v med -c n"
+
+gives you debugging information about the network component at
+the medium verbosity level. -v selects the verbosity, which may
+be low, med, high, or none. -c selects the component or
+components.
+
+"x86-linux/generated/MOSI_SMP_bcast/bin/tester.exec -l 100000 -v med -c nSt"
+
+debugs the network, the sequencer, and the tester.
+
+For a list of the components you can debug, just run the tester with
+no arguments, and it will display a list of valid components. The
+components are defined in ruby/common/Debug.def.
+
+The protocol debug trace is especially useful for debugging cache coherence protocols. This must be enabled at compile-time by ensuring that PROTOCOL_DEBUG_TRACE is set to true for rubyconfig.defaults (if running in Simics) or tester.defaults. You must specify the time to start tracing. The following starts the protocol trace immediately (at time 1)
+
+"x86-linux/generated/MOSI_SMP_bcast/bin/tester.exec -l 100000 -s 1"
+
+Also, if something seems to be wrong and you're not sure where to
+start looking, it may help to run the tester for a longer time,
+e.g.
+
+"x86-linux/generated/MOSI_SMP_bcast/bin/tester.exec -l 500000"
+
+This may help because some problems eventually show up as
+deadlock, but the tester has to run for a long time before a
+deadlock is detected.
+
+Once your simulator has succeeded on the tester for a certain
+number of cycles, say 1000000, you may want to set the
+RANDOMIZATION variable in ruby/config/tester.defaults to "true"
+for more thorough testing. However, RANDOMIZATION may not work in
+all situations because it overrides some of the ordering in the
+network and may break your simulator in ways you don't like. For
+example, messages are added to MessageBuffers with random
+latency.
+
+By default the tester driver is a generator that issues random store
+and load requests. This driver does a good job of stressing the
+cache coherency protocol by issuing racy store requests from multiple
+processors to a cache line then checks the stores with a single load.
+
+Other tester drivers are available. By setting the g_SYNTHETIC_DRIVER
+to true in ruby/config/tester.defaults, you enable a tester that generates
+racy lock requests for a number of locks indicated by g_synthetic_locks.
+
+Another tester driver is a series of non-racy deterministic testers. By
+setting the g_DETERMINISTIC_DRIVER in ruby/config/tester.defaults to true,
+you enable the deterministic tester. The deterministic tester can do
+different types of deterministic tests as specified by g_SpecifiedGenerator
+string. The deterministic tester works best when RANDOMIZATION is set to
+false. To easily track the queues being used with the deterministic tester,
+use the following debug flags "-v low -c nq".
+
+# ------ Debugging Ruby in Simics ------
+
+When you're running Simics, the debugging components and
+verbosity levels are the same. However, the way you communicate
+with Ruby changes.
+
+See the README.quickstart for information on compiling the Ruby
+module and loading it into Simics. Once you've got Simics
+running, with the Ruby module loaded, you can set up Ruby
+debugging.
+
+To set the debugging verbosity level, run:
+
+simics> ruby0.debug-verb med
+
+To set the debugging components, run: (see common/Debug.def for complete list
+ of component shortcuts)
+
+simics> ruby0.debug-filter n
+
+(NOTE: sometimes simics will interpret a single letter as a
+command; e.g. expanding "p" into "print". If simics gives you an
+error when setting the debug filter, try setting it like so:
+simics> ruby0.debug-filter "n")
+
+This gives the same kind of debugging information as running the
+tester with "-v med -c n".
+
+You can also send the debugging output to a file (may be a good
+idea, since there's a lot of it). To do this, run:
+
+simics> ruby0.debug-output-file <filename>
diff --git a/src/mem/ruby/buffers/MessageBuffer.cc b/src/mem/ruby/buffers/MessageBuffer.cc
new file mode 100644
index 000000000..ff2547f0f
--- /dev/null
+++ b/src/mem/ruby/buffers/MessageBuffer.cc
@@ -0,0 +1,363 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "MessageBuffer.hh"
+#include "RubyConfig.hh"
+
+MessageBuffer::MessageBuffer()
+{
+ m_msg_counter = 0;
+ m_consumer_ptr = NULL;
+ m_ordering_set = false;
+ m_strict_fifo = true;
+ m_size = 0;
+ m_max_size = -1;
+ m_last_arrival_time = 0;
+ m_randomization = true;
+ m_size_last_time_size_checked = 0;
+ m_time_last_time_size_checked = 0;
+ m_time_last_time_enqueue = 0;
+ m_time_last_time_pop = 0;
+ m_size_at_cycle_start = 0;
+ m_msgs_this_cycle = 0;
+ m_not_avail_count = 0;
+ m_priority_rank = 0;
+}
+
+MessageBuffer::MessageBuffer(const Chip* chip_ptr) // The chip_ptr is ignored, but could be used for extra debugging
+{
+ m_msg_counter = 0;
+ m_consumer_ptr = NULL;
+ m_ordering_set = false;
+ m_strict_fifo = true;
+ m_size = 0;
+ m_max_size = -1;
+ m_last_arrival_time = 0;
+ m_randomization = true;
+ m_size_last_time_size_checked = 0;
+ m_time_last_time_size_checked = 0;
+ m_time_last_time_enqueue = 0;
+ m_time_last_time_pop = 0;
+ m_size_at_cycle_start = 0;
+ m_msgs_this_cycle = 0;
+ m_not_avail_count = 0;
+ m_priority_rank = 0;
+}
+
+int MessageBuffer::getSize()
+{
+ if(m_time_last_time_size_checked == g_eventQueue_ptr->getTime()){
+ return m_size_last_time_size_checked;
+ } else {
+ m_time_last_time_size_checked = g_eventQueue_ptr->getTime();
+ m_size_last_time_size_checked = m_size;
+ return m_size;
+ }
+}
+
+bool MessageBuffer::areNSlotsAvailable(int n)
+{
+
+ // fast path when message buffers have infinite size
+ if(m_max_size == -1) {
+ return true;
+ }
+
+ // determine my correct size for the current cycle
+ // pop operations shouldn't affect the network's visible size until next cycle,
+ // but enqueue operations affect the visible size immediately
+ int current_size = max(m_size_at_cycle_start, m_size);
+ if (m_time_last_time_pop < g_eventQueue_ptr->getTime()) { // no pops this cycle - m_size is correct
+ current_size = m_size;
+ } else {
+ if (m_time_last_time_enqueue < g_eventQueue_ptr->getTime()) { // no enqueues this cycle - m_size_at_cycle_start is correct
+ current_size = m_size_at_cycle_start;
+ } else { // both pops and enqueues occurred this cycle - add new enqueued msgs to m_size_at_cycle_start
+ current_size = m_size_at_cycle_start+m_msgs_this_cycle;
+ }
+ }
+
+ // now compare the new size with our max size
+ if(current_size+n <= m_max_size){
+ return true;
+ } else {
+ DEBUG_MSG(QUEUE_COMP,MedPrio,n);
+ DEBUG_MSG(QUEUE_COMP,MedPrio,current_size);
+ DEBUG_MSG(QUEUE_COMP,MedPrio,m_size);
+ DEBUG_MSG(QUEUE_COMP,MedPrio,m_max_size);
+ m_not_avail_count++;
+ return false;
+ }
+}
+
+const MsgPtr MessageBuffer::getMsgPtrCopy() const
+{
+ assert(isReady());
+
+ MsgPtr temp_msg;
+ temp_msg = *(m_prio_heap.peekMin().m_msgptr.ref());
+ assert(temp_msg.ref() != NULL);
+ return temp_msg;
+}
+
+const Message* MessageBuffer::peekAtHeadOfQueue() const
+{
+ const Message* msg_ptr;
+ DEBUG_NEWLINE(QUEUE_COMP,MedPrio);
+
+ DEBUG_MSG(QUEUE_COMP,MedPrio,"Peeking at head of queue " + m_name + " time: "
+ + int_to_string(g_eventQueue_ptr->getTime()) + ".");
+ assert(isReady());
+
+ msg_ptr = m_prio_heap.peekMin().m_msgptr.ref();
+ assert(msg_ptr != NULL);
+
+ DEBUG_EXPR(QUEUE_COMP,MedPrio,*msg_ptr);
+ DEBUG_NEWLINE(QUEUE_COMP,MedPrio);
+ return msg_ptr;
+}
+
+// FIXME - move me somewhere else
+int random_time()
+{
+ int time = 1;
+ time += random() & 0x3; // [0...3]
+ if ((random() & 0x7) == 0) { // 1 in 8 chance
+ time += 100 + (random() % 0xf); // 100 + [1...15]
+ }
+ return time;
+}
+
+void MessageBuffer::enqueue(const MsgPtr& message, Time delta)
+{
+ DEBUG_NEWLINE(QUEUE_COMP,HighPrio);
+ DEBUG_MSG(QUEUE_COMP,HighPrio,"enqueue " + m_name + " time: "
+ + int_to_string(g_eventQueue_ptr->getTime()) + ".");
+ DEBUG_EXPR(QUEUE_COMP,MedPrio,message);
+ DEBUG_NEWLINE(QUEUE_COMP,HighPrio);
+
+ m_msg_counter++;
+ m_size++;
+
+ // record current time in case we have a pop that also adjusts my size
+ if (m_time_last_time_enqueue < g_eventQueue_ptr->getTime()) {
+ m_msgs_this_cycle = 0; // first msg this cycle
+ m_time_last_time_enqueue = g_eventQueue_ptr->getTime();
+ }
+ m_msgs_this_cycle++;
+
+ // ASSERT(m_max_size == -1 || m_size <= m_max_size + 1);
+ // the plus one is a kluge because of a SLICC issue
+
+ if (!m_ordering_set) {
+ WARN_EXPR(*this);
+ WARN_EXPR(m_name);
+ ERROR_MSG("Ordering property of this queue has not been set");
+ }
+
+ // Calculate the arrival time of the message, that is, the first
+ // cycle the message can be dequeued.
+ assert(delta>0);
+ Time current_time = g_eventQueue_ptr->getTime();
+ Time arrival_time = 0;
+ if (!RANDOMIZATION || (m_randomization == false)) {
+ // No randomization
+ arrival_time = current_time + delta;
+
+ } else {
+ // Randomization - ignore delta
+ if (m_strict_fifo) {
+ if (m_last_arrival_time < current_time) {
+ m_last_arrival_time = current_time;
+ }
+ arrival_time = m_last_arrival_time + random_time();
+ } else {
+ arrival_time = current_time + random_time();
+ }
+ }
+
+ // Check the arrival time
+ assert(arrival_time > current_time);
+ if (m_strict_fifo) {
+ if (arrival_time >= m_last_arrival_time) {
+
+ } else {
+ WARN_EXPR(*this);
+ WARN_EXPR(m_name);
+ WARN_EXPR(current_time);
+ WARN_EXPR(delta);
+ WARN_EXPR(arrival_time);
+ WARN_EXPR(m_last_arrival_time);
+ ERROR_MSG("FIFO ordering violated");
+ }
+ }
+ m_last_arrival_time = arrival_time;
+
+ // compute the delay cycles and set enqueue time
+ Message* msg_ptr = NULL;
+ msg_ptr = message.mod_ref();
+ assert(msg_ptr != NULL);
+ assert(g_eventQueue_ptr->getTime() >= msg_ptr->getLastEnqueueTime()); // ensure we aren't dequeued early
+ msg_ptr->setDelayedCycles((g_eventQueue_ptr->getTime() - msg_ptr->getLastEnqueueTime())+msg_ptr->getDelayedCycles());
+ msg_ptr->setLastEnqueueTime(arrival_time);
+
+ // Insert the message into the priority heap
+ MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
+ m_prio_heap.insert(thisNode);
+
+ DEBUG_NEWLINE(QUEUE_COMP,HighPrio);
+ DEBUG_MSG(QUEUE_COMP,HighPrio,"enqueue " + m_name
+ + " with arrival_time " + int_to_string(arrival_time)
+ + " cur_time: " + int_to_string(g_eventQueue_ptr->getTime()) + ".");
+ DEBUG_EXPR(QUEUE_COMP,MedPrio,message);
+ DEBUG_NEWLINE(QUEUE_COMP,HighPrio);
+
+ // Schedule the wakeup
+ if (m_consumer_ptr != NULL) {
+ g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
+ } else {
+ WARN_EXPR(*this);
+ WARN_EXPR(m_name);
+ ERROR_MSG("No consumer");
+ }
+}
+
+int MessageBuffer::dequeue_getDelayCycles(MsgPtr& message)
+{
+ int delay_cycles = -1; // null value
+
+ dequeue(message);
+
+ // get the delay cycles
+ delay_cycles = setAndReturnDelayCycles(message);
+
+ assert(delay_cycles >= 0);
+ return delay_cycles;
+}
+
+void MessageBuffer::dequeue(MsgPtr& message)
+{
+ DEBUG_MSG(QUEUE_COMP,MedPrio,"dequeue from " + m_name);
+ message = m_prio_heap.peekMin().m_msgptr;
+
+ pop();
+ DEBUG_EXPR(QUEUE_COMP,MedPrio,message);
+}
+
+int MessageBuffer::dequeue_getDelayCycles()
+{
+ int delay_cycles = -1; // null value
+
+ // get MsgPtr of the message about to be dequeued
+ MsgPtr message = m_prio_heap.peekMin().m_msgptr;
+
+ // get the delay cycles
+ delay_cycles = setAndReturnDelayCycles(message);
+
+ dequeue();
+
+ assert(delay_cycles >= 0);
+ return delay_cycles;
+}
+
+void MessageBuffer::pop()
+{
+ DEBUG_MSG(QUEUE_COMP,MedPrio,"pop from " + m_name);
+ assert(isReady());
+ Time ready_time = m_prio_heap.extractMin().m_time;
+ // record previous size and time so the current buffer size isn't adjusted until next cycle
+ if (m_time_last_time_pop < g_eventQueue_ptr->getTime()) {
+ m_size_at_cycle_start = m_size;
+ m_time_last_time_pop = g_eventQueue_ptr->getTime();
+ }
+ m_size--;
+}
+
+void MessageBuffer::clear()
+{
+ while(m_prio_heap.size() > 0){
+ m_prio_heap.extractMin();
+ }
+
+ ASSERT(m_prio_heap.size() == 0);
+
+ m_msg_counter = 0;
+ m_size = 0;
+ m_time_last_time_enqueue = 0;
+ m_time_last_time_pop = 0;
+ m_size_at_cycle_start = 0;
+ m_msgs_this_cycle = 0;
+}
+
+void MessageBuffer::recycle()
+{
+ // const int RECYCLE_LATENCY = 3;
+ DEBUG_MSG(QUEUE_COMP,MedPrio,"recycling " + m_name);
+ assert(isReady());
+ MessageBufferNode node = m_prio_heap.extractMin();
+ node.m_time = g_eventQueue_ptr->getTime() + RECYCLE_LATENCY;
+ m_prio_heap.insert(node);
+ g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, g_eventQueue_ptr->getTime() + RECYCLE_LATENCY);
+}
+
+int MessageBuffer::setAndReturnDelayCycles(MsgPtr& message)
+{
+ int delay_cycles = -1; // null value
+
+ // get the delay cycles of the message at the top of the queue
+ Message* msg_ptr = message.ref();
+
+ // this function should only be called on dequeue
+ // ensure the msg hasn't been enqueued
+ assert(msg_ptr->getLastEnqueueTime() <= g_eventQueue_ptr->getTime());
+ msg_ptr->setDelayedCycles((g_eventQueue_ptr->getTime() - msg_ptr->getLastEnqueueTime())+msg_ptr->getDelayedCycles());
+ delay_cycles = msg_ptr->getDelayedCycles();
+
+ assert(delay_cycles >= 0);
+ return delay_cycles;
+}
+
+void MessageBuffer::print(ostream& out) const
+{
+ out << "[MessageBuffer: ";
+ if (m_consumer_ptr != NULL) {
+ out << " consumer-yes ";
+ }
+ out << m_prio_heap << "] " << m_name << endl;
+}
+
+void MessageBuffer::printStats(ostream& out)
+{
+ out << "MessageBuffer: " << m_name << " stats - msgs:" << m_msg_counter << " full:" << m_not_avail_count << endl;
+}
+
diff --git a/src/mem/ruby/buffers/MessageBuffer.hh b/src/mem/ruby/buffers/MessageBuffer.hh
new file mode 100644
index 000000000..6851423c3
--- /dev/null
+++ b/src/mem/ruby/buffers/MessageBuffer.hh
@@ -0,0 +1,156 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: Unordered buffer of messages that can be inserted such
+ * that they can be dequeued after a given delta time has expired.
+ *
+ */
+
+#ifndef MESSAGEBUFFER_H
+#define MESSAGEBUFFER_H
+
+#include "Global.hh"
+#include "MessageBufferNode.hh"
+#include "Consumer.hh"
+#include "EventQueue.hh"
+#include "Message.hh"
+#include "PrioHeap.hh"
+#include "util.hh"
+
+class Chip;
+
+// Unordered buffer of timestamped messages held in a priority heap; a
+// message becomes dequeueable once its delta time has expired.  Optionally
+// enforces strict FIFO ordering and/or randomized latencies.
+class MessageBuffer {
+public:
+  // Constructors
+  MessageBuffer();
+  MessageBuffer(const Chip* chip_ptr); // The chip_ptr is ignored, but could be used for extra debugging
+
+  // Use Default Destructor
+  // ~MessageBuffer()
+
+  // Public Methods
+
+  static void printConfig(ostream& out) {}
+
+  // TRUE if head of queue timestamp <= SystemTime
+  bool isReady() const {
+    return ((m_prio_heap.size() > 0) &&
+            (m_prio_heap.peekMin().m_time <= g_eventQueue_ptr->getTime()));
+  }
+
+  bool areNSlotsAvailable(int n);
+  int getPriority() { return m_priority_rank; }
+  void setPriority(int rank) { m_priority_rank = rank; }
+  // A consumer may be registered only once (ASSERT guards re-registration).
+  void setConsumer(Consumer* consumer_ptr) { ASSERT(m_consumer_ptr==NULL); m_consumer_ptr = consumer_ptr; }
+  void setDescription(const string& name) { m_name = name; }
+  string getDescription() { return m_name;}
+
+  Consumer* getConsumer() { return m_consumer_ptr; }
+
+  // Peek at the head message without removing it (requires isReady()).
+  const Message* peekAtHeadOfQueue() const;
+  const Message* peek() const { return peekAtHeadOfQueue(); }
+  const MsgPtr getMsgPtrCopy() const;
+  const MsgPtr& peekMsgPtr() const { assert(isReady()); return m_prio_heap.peekMin().m_msgptr; }
+  const MsgPtr& peekMsgPtrEvenIfNotReady() const {return m_prio_heap.peekMin().m_msgptr; }
+
+  // Default enqueue uses a delta of 1 cycle.
+  void enqueue(const MsgPtr& message) { enqueue(message, 1); }
+  void enqueue(const MsgPtr& message, Time delta);
+  //  void enqueueAbsolute(const MsgPtr& message, Time absolute_time);
+  int dequeue_getDelayCycles(MsgPtr& message);  // returns delay cycles of the message
+  void dequeue(MsgPtr& message);
+  int dequeue_getDelayCycles();  // returns delay cycles of the message
+  void dequeue() { pop(); }
+  void pop();
+  void recycle();
+  bool isEmpty() const { return m_prio_heap.size() == 0; }
+
+  void setOrdering(bool order) { m_strict_fifo = order; m_ordering_set = true; }
+  void setSize(int size) {m_max_size = size;}
+  int getSize();
+  void setRandomization(bool random_flag) { m_randomization = random_flag; }
+
+  void clear();
+
+  void print(ostream& out) const;
+  void printStats(ostream& out);
+  void clearStats() { m_not_avail_count = 0; m_msg_counter = 0; }
+
+private:
+  // Private Methods
+  int setAndReturnDelayCycles(MsgPtr& message);
+
+  // Private copy constructor and assignment operator
+  MessageBuffer(const MessageBuffer& obj);
+  MessageBuffer& operator=(const MessageBuffer& obj);
+
+  // Data Members (m_ prefix)
+  Consumer* m_consumer_ptr;  // Consumer to signal a wakeup(), can be NULL
+  PrioHeap<MessageBufferNode> m_prio_heap;
+  string m_name;
+
+  int m_max_size;
+  int m_size;
+
+  Time m_time_last_time_size_checked;
+  int m_size_last_time_size_checked;
+
+  // variables used so enqueues appear to happen immediately, while pops happen the next cycle
+  Time m_time_last_time_enqueue;
+  Time m_time_last_time_pop;
+  int m_size_at_cycle_start;
+  int m_msgs_this_cycle;
+
+  int m_not_avail_count;  // count the # of times I didn't have N slots available
+  int m_msg_counter;
+  int m_priority_rank;
+  bool m_strict_fifo;
+  bool m_ordering_set;
+  bool m_randomization;
+  Time m_last_arrival_time;
+};
+
+// Output operator declaration
+//template <class TYPE>
+ostream& operator<<(ostream& out, const MessageBuffer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream-insertion operator: delegates to MessageBuffer::print and flushes.
+extern inline
+ostream& operator<<(ostream& out, const MessageBuffer& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //MESSAGEBUFFER_H
diff --git a/src/mem/ruby/buffers/MessageBufferNode.cc b/src/mem/ruby/buffers/MessageBufferNode.cc
new file mode 100644
index 000000000..c84347a38
--- /dev/null
+++ b/src/mem/ruby/buffers/MessageBufferNode.cc
@@ -0,0 +1,48 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * EventQueueNode.C
+ *
+ * Description: See EventQueueNode.h
+ *
+ * $Id: MessageBufferNode.C,v 3.1 2001/02/02 16:57:54 sorin Exp $
+ *
+ */
+
+#include "MessageBufferNode.hh"
+
+// Print the node as "[time, counter, msgptr; ]" for debugging.
+void MessageBufferNode::print(ostream& out) const
+{
+  out << "[";
+  out << m_time << ", ";
+  out << m_msg_counter << ", ";
+  out << m_msgptr << "; ";
+  out << "]";
+}
diff --git a/src/mem/ruby/buffers/MessageBufferNode.hh b/src/mem/ruby/buffers/MessageBufferNode.hh
new file mode 100644
index 000000000..c562c45eb
--- /dev/null
+++ b/src/mem/ruby/buffers/MessageBufferNode.hh
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MESSAGEBUFFERNODE_H
+#define MESSAGEBUFFERNODE_H
+
+#include "Global.hh"
+#include "Message.hh"
+
+// A single entry in a MessageBuffer's priority heap: the message pointer,
+// the time it becomes ready, and a sequence counter used to break timestamp
+// ties (see node_less_then_eq below).
+class MessageBufferNode {
+public:
+  // Constructors
+  MessageBufferNode() { m_time = 0; m_msg_counter = 0; }
+  MessageBufferNode(const Time& time, int counter, const MsgPtr& msgptr)
+    { m_time = time; m_msgptr = msgptr; m_msg_counter = counter; }
+  // Destructor
+  //~MessageBufferNode();
+
+  // Public Methods
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Default copy constructor and assignment operator
+  // MessageBufferNode(const MessageBufferNode& obj);
+  // MessageBufferNode& operator=(const MessageBufferNode& obj);
+
+  // Data Members (m_ prefix)
+public:
+  Time m_time;           // time at which the message becomes dequeueable
+  int m_msg_counter;     // FIXME, should this be a 64-bit value?
+  MsgPtr m_msgptr;       // the queued message itself
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MessageBufferNode& obj);
+
+// ******************* Definitions *******************
+
+// Heap-ordering predicate for MessageBufferNode: earlier time wins; equal
+// times are broken by the enqueue sequence counter so ordering is total.
+// (Name keeps the historical "then" spelling; renaming would break callers.)
+inline extern bool node_less_then_eq(const MessageBufferNode& n1, const MessageBufferNode& n2);
+
+inline extern
+bool node_less_then_eq(const MessageBufferNode& n1, const MessageBufferNode& n2)
+{
+  if (n1.m_time == n2.m_time) {
+    // Two nodes must never share a sequence counter.
+    assert(n1.m_msg_counter != n2.m_msg_counter);
+    return (n1.m_msg_counter <= n2.m_msg_counter);
+  } else {
+    return (n1.m_time <= n2.m_time);
+  }
+}
+
+// Output operator definition
+// Stream-insertion operator: delegates to MessageBufferNode::print and flushes.
+extern inline
+ostream& operator<<(ostream& out, const MessageBufferNode& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //MESSAGEBUFFERNODE_H
diff --git a/src/mem/ruby/common/Address.cc b/src/mem/ruby/common/Address.cc
new file mode 100644
index 000000000..5d38faae0
--- /dev/null
+++ b/src/mem/ruby/common/Address.cc
@@ -0,0 +1,68 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "Address.hh"
+
+// Write the raw address in bare hex (no "0x" prefix); must stay readable by
+// Address::input() below.
+void Address::output(ostream& out) const
+{
+  // Note: this outputs addresses in the form "ffff", not "0xffff".
+  // This code should always be able to write out addresses in a
+  // format that can be read in by the below input() method.  Please
+  // don't change this without talking to Milo first.
+  out << hex << m_address << dec;
+}
+
+// Read a bare-hex address (no "0x" prefix); inverse of Address::output().
+void Address::input(istream& in)
+{
+  // Note: this only works with addresses in the form "ffff", not
+  // "0xffff".  This code should always be able to read in addresses
+  // written out by the above output() method.  Please don't change
+  // this without talking to Milo first.
+  in >> hex >> m_address >> dec;
+}
+
+// Copy constructor.  NOTE(review): member-wise copy of the single
+// physical_address_t field -- the compiler-generated default would suffice.
+Address::Address(const Address& obj)
+{
+  m_address = obj.m_address;
+}
+
+// Copy assignment with an explicit (no-op) self-assignment guard.
+Address& Address::operator=(const Address& obj)
+{
+  if (this == &obj) {
+    // assert(false);
+  } else {
+    m_address = obj.m_address;
+  }
+  return *this;
+}
+
diff --git a/src/mem/ruby/common/Address.hh b/src/mem/ruby/common/Address.hh
new file mode 100644
index 000000000..3c3c5ad30
--- /dev/null
+++ b/src/mem/ruby/common/Address.hh
@@ -0,0 +1,255 @@
+
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * $Id$
+ */
+
+#ifndef ADDRESS_H
+#define ADDRESS_H
+
+#include <iomanip>
+#include "Global.hh"
+#include "RubyConfig.hh"
+#include "NodeID.hh"
+#include "MachineID.hh"
+
+const int ADDRESS_WIDTH = 64; // address width in bytes
+
+class Address;
+typedef Address PhysAddress;
+typedef Address VirtAddress;
+
+// Thin wrapper around a physical_address_t providing bit-field selection,
+// masking, and line/page alignment helpers driven by RubyConfig geometry.
+class Address {
+public:
+  // Constructors
+  Address() { m_address = 0; }
+  explicit Address(physical_address_t address) { m_address = address; }
+
+  Address(const Address& obj);
+  Address& operator=(const Address& obj);
+
+  // Destructor
+  //  ~Address();
+
+  // Public Methods
+
+  void setAddress(physical_address_t address) { m_address = address; }
+  physical_address_t getAddress() const {return m_address;}
+  // selects bits inclusive
+  physical_address_t bitSelect(int small, int big) const;
+  physical_address_t maskLowOrderBits(int number) const;
+  physical_address_t maskHighOrderBits(int number) const;
+  physical_address_t shiftLowOrderBits(int number) const;
+  // Cache-line-aligned portion of the address (block offset dropped).
+  physical_address_t getLineAddress() const
+    { return bitSelect(RubyConfig::dataBlockBits(), ADDRESS_WIDTH); }
+  // Offset of this address within its cache line.
+  physical_address_t getOffset() const
+    { return bitSelect(0, RubyConfig::dataBlockBits()-1); }
+
+  void makeLineAddress() { m_address = maskLowOrderBits(RubyConfig::dataBlockBits()); }
+  // returns the next stride address based on line address
+  void makeNextStrideAddress( int stride) {
+    m_address = maskLowOrderBits(RubyConfig::dataBlockBits())
+      + RubyConfig::dataBlockBytes()*stride;
+  }
+  void makePageAddress() { m_address = maskLowOrderBits(RubyConfig::pageSizeBits()); }
+  int getBankSetNum() const;
+  int getBankSetDist() const;
+
+  Index memoryModuleIndex() const;
+
+  void print(ostream& out) const;
+  void output(ostream& out) const;
+  void input(istream& in);
+
+  // Replace the block-offset bits with 'offset'.
+  void setOffset( int offset ){
+    // first, zero out the offset bits
+    makeLineAddress();
+    m_address |= (physical_address_t) offset;
+  }
+
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  //  Address(const Address& obj);
+  //  Address& operator=(const Address& obj);
+
+  // Data Members (m_ prefix)
+  physical_address_t m_address;
+};
+
+// Return a copy of 'addr' aligned down to its cache-line boundary.
+inline
+Address line_address(const Address& addr) { Address temp(addr); temp.makeLineAddress(); return temp; }
+
+// Return the line address 'stride' blocks beyond 'addr', wrapped to stay
+// within the configured memory size.
+inline
+Address next_stride_address(const Address& addr, int stride) {
+  Address temp = addr;
+  temp.makeNextStrideAddress(stride);
+  temp.setAddress(temp.maskHighOrderBits(ADDRESS_WIDTH-RubyConfig::memorySizeBits()));  // suppress wrap-around problem
+  return temp;
+}
+
+// Return a copy of 'addr' aligned down to its page boundary.
+inline
+Address page_address(const Address& addr) { Address temp(addr); temp.makePageAddress(); return temp; }
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Address& obj);
+// comparison operator declaration
+bool operator==(const Address& obj1, const Address& obj2);
+bool operator!=(const Address& obj1, const Address& obj2);
+bool operator<(const Address& obj1, const Address& obj2);
+/* Address& operator=(const physical_address_t address); */
+
+// Order addresses by their raw numeric value.
+inline
+bool operator<(const Address& obj1, const Address& obj2)
+{
+  return obj1.getAddress() < obj2.getAddress();
+}
+
+// ******************* Definitions *******************
+
+// Output operator definition: delegates to Address::print and flushes.
+inline
+ostream& operator<<(ostream& out, const Address& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+// Addresses compare equal when their raw values match exactly.
+inline
+bool operator==(const Address& obj1, const Address& obj2)
+{
+  return (obj1.getAddress() == obj2.getAddress());
+}
+
+// Inverse of operator==.
+inline
+bool operator!=(const Address& obj1, const Address& obj2)
+{
+  return (obj1.getAddress() != obj2.getAddress());
+}
+
+// Extract bits [small, big] (inclusive) of the address, shifted down so the
+// 'small' bit lands at bit 0.  big >= ADDRESS_WIDTH-1 means "to the top".
+inline
+physical_address_t Address::bitSelect(int small, int big) const // rips bits inclusive
+{
+  physical_address_t mask;
+  assert(big >= small);
+
+  if (big >= ADDRESS_WIDTH - 1) {
+    // The whole upper range is requested; no mask needed.
+    return (m_address >> small);
+  } else {
+    // Mask covers bits [0, big], then shift the selection down to bit 0.
+    mask = ~((physical_address_t)~0 << (big + 1));
+    // FIXME - this is slow to manipulate a 64-bit number using 32-bits
+    physical_address_t partial = (m_address & mask);
+    return (partial >> small);
+  }
+}
+
+// Zero the low-order 'number' bits of the address and return the result.
+inline
+physical_address_t Address::maskLowOrderBits(int number) const
+{
+  physical_address_t mask;
+
+  // BUG FIX: the guard used to fire at ADDRESS_WIDTH-1, so number == 63 (and
+  // anything larger) returned the address completely unmasked.  A shift by 63
+  // is well defined for a 64-bit type; only shifts >= ADDRESS_WIDTH would be
+  // undefined behavior, and masking off all 64 low-order bits must yield 0,
+  // not the untouched address.
+  if (number >= ADDRESS_WIDTH) {
+    mask = 0;
+  } else {
+    mask = (physical_address_t)~0 << number;
+  }
+  return (m_address & mask);
+}
+
+// Zero the high-order 'number' bits of the address and return the result.
+inline
+physical_address_t Address::maskHighOrderBits(int number) const
+{
+  physical_address_t mask;
+
+  // BUG FIX: the guard used to fire at ADDRESS_WIDTH-1, so number == 63 (and
+  // anything larger) returned the address completely unmasked.  A shift by 63
+  // is well defined for a 64-bit type; only shifts >= ADDRESS_WIDTH would be
+  // undefined behavior, and masking off all 64 high-order bits must yield 0,
+  // not the untouched address.
+  if (number >= ADDRESS_WIDTH) {
+    mask = 0;
+  } else {
+    mask = (physical_address_t)~0 >> number;
+  }
+  return (m_address & mask);
+}
+
+// Return the address shifted right by 'number' bits (drops the low bits).
+inline
+physical_address_t Address::shiftLowOrderBits(int number) const
+{
+  return (m_address >> number);
+}
+
+// Map this address to the block index within its memory module by dropping
+// the block-offset and module-interleave bits.  Emits a diagnostic before
+// asserting when the index exceeds the configured module capacity.
+// NOTE(review): the in-class declaration returns 'Index' while this
+// definition returns 'integer_t' -- presumably Index is a typedef of
+// integer_t; confirm, otherwise this will not compile.
+inline
+integer_t Address::memoryModuleIndex() const
+{
+  integer_t index = bitSelect(RubyConfig::dataBlockBits()+RubyConfig::memoryBits(), ADDRESS_WIDTH);
+  assert (index >= 0);
+  if (index >= RubyConfig::memoryModuleBlocks()) {
+    cerr << " memoryBits: " << RubyConfig::memoryBits() << " memorySizeBits: " << RubyConfig::memorySizeBits()
+         << " Address: " << "[" << hex << "0x" << m_address << "," << " line 0x" << maskLowOrderBits(RubyConfig::dataBlockBits()) << dec << "]" << flush
+         << "error: limit exceeded. " <<
+      " dataBlockBits: " << RubyConfig::dataBlockBits() <<
+      " memoryModuleBlocks: " << RubyConfig::memoryModuleBlocks() <<
+      " index: " << index << endl;
+  }
+  assert (index < RubyConfig::memoryModuleBlocks());
+  return index;
+
+  //  Index indexHighPortion = address.bitSelect(MEMORY_SIZE_BITS-1, PAGE_SIZE_BITS+NUMBER_OF_MEMORY_MODULE_BITS);
+  //  Index indexLowPortion  = address.bitSelect(DATA_BLOCK_BITS, PAGE_SIZE_BITS-1);
+
+  //Index index = indexLowPortion | (indexHighPortion << (PAGE_SIZE_BITS - DATA_BLOCK_BITS));
+
+  /*
+  Round-robin mapping of addresses, at page size granularity
+
+ADDRESS_WIDTH    MEMORY_SIZE_BITS        PAGE_SIZE_BITS  DATA_BLOCK_BITS
+  |                    |                       |               |
+ \ /                  \ /                     \ /             \ /       0
+  -----------------------------------------------------------------------
+  |       unused        |xxxxxxxxxxxxxxx|       |xxxxxxxxxxxxxxx|       |
+  |                     |xxxxxxxxxxxxxxx|       |xxxxxxxxxxxxxxx|       |
+  -----------------------------------------------------------------------
+                        indexHighPortion         indexLowPortion
+                                        <------->
+                               NUMBER_OF_MEMORY_MODULE_BITS
+  */
+}
+
+// Print as "[0xADDR, line 0xLINE]" -- the raw address plus its line address.
+inline
+void Address::print(ostream& out) const
+{
+  out << "[" << hex << "0x" << m_address << "," << " line 0x" << maskLowOrderBits(RubyConfig::dataBlockBits()) << dec << "]" << flush;
+}
+
+class Address;  // NOTE(review): redundant forward declaration -- Address is fully defined above
+// Hash support so Address can key __gnu_cxx hash containers.
+// NOTE(review): __gnu_cxx::hash is a GCC extension, not standard C++.
+namespace __gnu_cxx {
+  template <> struct hash<Address>
+  {
+    // The raw address value serves directly as the hash.
+    size_t operator()(const Address &s) const { return (size_t) s.getAddress(); }
+  };
+}
+namespace std {
+  template <> struct equal_to<Address>
+  {
+    bool operator()(const Address& s1, const Address& s2) const { return s1 == s2; }
+  };
+}
+
+#endif //ADDRESS_H
+
diff --git a/src/mem/ruby/common/BigSet.cc b/src/mem/ruby/common/BigSet.cc
new file mode 100644
index 000000000..e16284f15
--- /dev/null
+++ b/src/mem/ruby/common/BigSet.cc
@@ -0,0 +1,249 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Set.hh"
+#include "RubyConfig.hh"
+
+// Default constructor: size the set to one bit per processor, all clear.
+Set::Set()
+{
+  setSize(RubyConfig::numberOfProcessors());
+}
+
+// Construct an empty set with room for 'size' elements.
+Set::Set(int size)
+{
+  setSize(size);
+}
+
+// Mark element 'index' as present (idempotent).
+void Set::add(NodeID index)
+{
+  m_bits[index] = Present;
+}
+
+// Union 'set' into this set; both sets must have the same size.
+void Set::addSet(const Set& set)
+{
+  assert(m_bits.size() == set.getSize());
+  for (int i=0; i<m_bits.size(); i++) {
+    if(set.isElement(i)){
+      add(i);
+    }
+  }
+}
+
+// Randomly add elements to the set: successive bits of a single random()
+// draw decide each element's membership (a 0 bit means "add").
+// NOTE(review): random() supplies at most 31 random bits, so for sets larger
+// than 31 the remaining elements always see a 0 bit -- TODO confirm this
+// degradation is acceptable for the intended set sizes.
+void Set::addRandom()
+{
+  int rand = random();
+  for (int i=0; i<m_bits.size(); i++) {
+    // BUG FIX: '==' binds tighter than '&', so the original condition
+    // 'rand & 0x1 == 0' parsed as 'rand & (0x1 == 0)', which is always 0 --
+    // the branch never fired and addRandom() never added any element.
+    if((rand & 0x1) == 0) { // Look at the low order bit
+      add(i);
+    }
+    rand = (rand >> 1);  // Shift the random number to look at the next bit
+  }
+}
+
+// Mark element 'index' as absent (idempotent).
+void Set::remove(NodeID index)
+{
+  m_bits[index] = NotPresent;
+}
+
+// Subtract 'set' from this set; both sets must have the same size.
+void Set::removeSet(const Set& set)
+{
+  assert(m_bits.size() == set.getSize());
+  for (int i=0; i<m_bits.size(); i++) {
+    if(set.isElement(i)){
+      remove(i);
+    }
+  }
+}
+
+// Remove every element (size is unchanged).
+void Set::clear()
+{
+  for (int i=0; i<m_bits.size(); i++) {
+    m_bits[i] = NotPresent;
+  }
+}
+
+// Add every element (the "broadcast" set).
+void Set::broadcast()
+{
+  for (int i=0; i<m_bits.size(); i++) {
+    m_bits[i] = Present;
+  }
+}
+
+// Return the number of elements currently present (population count).
+int Set::count() const
+{
+  int counter = 0;
+  for (int i=0; i<m_bits.size(); i++) {
+    if (m_bits[i] == Present) {
+      counter++;
+    }
+  }
+  return counter;
+}
+
+// TRUE iff both sets contain exactly the same elements; sizes must match.
+// (Comparing the uint8 bit against the bool from isElement() works because
+// Present == 1 and NotPresent == 0.)
+bool Set::isEqual(const Set& set) const
+{
+  assert(m_bits.size() == set.getSize());
+  for (int i=0; i<m_bits.size(); i++) {
+    if (m_bits[i] != set.isElement(i)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Return the lowest-numbered element present; the set must be non-empty.
+// NOTE(review): if ERROR_MSG does not abort, control falls off the end of a
+// non-void function -- presumably ERROR_MSG terminates; confirm.
+NodeID Set::smallestElement() const
+{
+  assert(count() > 0);
+  for (int i=0; i<m_bits.size(); i++) {
+    if (isElement(i)) {
+      return i;
+    }
+  }
+  ERROR_MSG("No smallest element of an empty set.");
+}
+
+// Returns true iff all bits are set
+bool Set::isBroadcast() const
+{
+  for (int i=0; i<m_bits.size(); i++) {
+    if (m_bits[i] == NotPresent) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Returns true iff no bits are set
+bool Set::isEmpty() const
+{
+  for (int i=0; i<m_bits.size(); i++) {
+    if (m_bits[i] == Present) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// returns the logical OR of "this" set and orSet; sizes must match.
+// (The default-constructed 'result' is immediately resized, so its initial
+// processor-count sizing is discarded.)
+Set Set::OR(const Set& orSet) const
+{
+  Set result;
+  assert(m_bits.size() == orSet.getSize());
+  result.setSize(m_bits.size());
+  for (int i=0; i<m_bits.size(); i++) {
+    if(m_bits[i] == Present || orSet.isElement(i)){
+      result.add(i);
+    }else{
+      result.remove(i);
+    }
+  }
+
+  return result;
+
+}
+
+// returns the logical AND of "this" set and andSet; sizes must match.
+Set Set::AND(const Set& andSet) const
+{
+  Set result;
+  assert(m_bits.size() == andSet.getSize());
+  result.setSize(m_bits.size());
+  for (int i=0; i<m_bits.size(); i++) {
+    if(m_bits[i] == Present && andSet.isElement(i)){
+      result.add(i);
+    }else{
+      result.remove(i);
+    }
+  }
+  return result;
+}
+
+// Returns true if the intersection of the two sets is non-empty
+bool Set::intersectionIsNotEmpty(const Set& other_set) const
+{
+  assert(m_bits.size() == other_set.getSize());
+  for(int index=0; index < m_bits.size(); index++){
+    if(other_set.isElement(index) && isElement(index)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Returns true if the intersection of the two sets is empty
+// (the original comment said "non-empty" -- a copy-paste error; the code
+// returns false as soon as a common element is found)
+bool Set::intersectionIsEmpty(const Set& other_set) const
+{
+  assert(m_bits.size() == other_set.getSize());
+  for(int index=0; index < m_bits.size(); index++){
+    if(other_set.isElement(index) && isElement(index)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// TRUE iff every element of 'test' is also in this set; sizes must match.
+bool Set::isSuperset(const Set& test) const
+{
+  assert(m_bits.size() == test.getSize());
+  for(int index=0; index < m_bits.size(); index++){
+    if(test.isElement(index) && !isElement(index)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// TRUE iff 'element' is present in the set.
+bool Set::isElement(NodeID element) const
+{
+  return (m_bits[element] == Present);
+}
+
+// NOTE(review): despite the name, this returns a presence flag (1 if the
+// element at 'index' is present, else 0), not an element id -- the return
+// expression 'm_bits[index] == Present' is a bool.  Verify against callers
+// whether 'return index' was intended.
+NodeID Set::elementAt(int index) const
+{
+  if (m_bits[index] == Present) {
+    return m_bits[index] == Present;
+  } else {
+    return 0;
+  }
+}
+
+// Resize the backing bit vector to 'size' elements and clear the set.
+void Set::setSize(int size)
+{
+  m_bits.setSize(size);
+  clear();
+}
+
+// Print as "[Set b0 b1 ... ]" where each element shows as 0 or 1.
+void Set::print(ostream& out) const
+{
+  out << "[Set ";
+  for (int i=0; i<m_bits.size(); i++) {
+    out << (bool)m_bits[i] << " ";
+  }
+  out << "]";
+}
diff --git a/src/mem/ruby/common/BigSet.hh b/src/mem/ruby/common/BigSet.hh
new file mode 100644
index 000000000..4eae01681
--- /dev/null
+++ b/src/mem/ruby/common/BigSet.hh
@@ -0,0 +1,125 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// NOTE: Never include this file directly, this should only be
+// included from Set.h
+
+#ifndef SET_H
+#define SET_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "NodeID.hh"
+#include "RubyConfig.hh"
+
+enum PresenceBit {NotPresent, Present};
+
+// Fixed-size set of NodeIDs backed by a byte-per-element vector.
+// (This is the "BigSet" implementation selected via Set.h; it trades space
+// for arbitrary set sizes.)
+class Set {
+public:
+  // Constructors
+  // creates and empty set
+  Set();
+  Set (int size);
+
+  // used during the replay mechanism
+  //  Set(const char *str);
+
+  //  Set(const Set& obj);
+  //  Set& operator=(const Set& obj);
+
+  // Destructor
+  //  ~Set();
+
+  // Public Methods
+
+  void add(NodeID newElement);
+  void addSet(const Set& set);
+  void addRandom();
+  void remove(NodeID newElement);
+  void removeSet(const Set& set);
+  void clear();
+  void broadcast();
+  int count() const;
+  bool isEqual(const Set& set) const;
+
+  Set OR(const Set& orSet) const;  // return the logical OR of this set and orSet
+  Set AND(const Set& andSet) const;  // return the logical AND of this set and andSet
+
+  // Returns true if the intersection of the two sets is non-empty
+  bool intersectionIsNotEmpty(const Set& other_set) const;
+
+  // Returns true if the intersection of the two sets is empty
+  bool intersectionIsEmpty(const Set& other_set) const;
+
+  bool isSuperset(const Set& test) const;
+  bool isSubset(const Set& test) const { return test.isSuperset(*this); }
+  bool isElement(NodeID element) const;
+  bool isBroadcast() const;
+  bool isEmpty() const;
+
+  NodeID smallestElement() const;
+
+  //  int size() const;
+  void setSize (int size);
+
+  // get element for a index
+  NodeID elementAt(int index) const;
+  int getSize() const { return m_bits.size(); }
+
+  // DEPRECATED METHODS
+  void addToSet(NodeID newElement) { add(newElement); }  // Deprecated
+  void removeFromSet(NodeID newElement) { remove(newElement); }  // Deprecated
+  void clearSet() { clear(); }   // Deprecated
+  void setBroadcast() { broadcast(); }   // Deprecated
+  bool presentInSet(NodeID element) const { return isElement(element); }  // Deprecated
+
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Data Members (m_ prefix)
+  Vector<uint8> m_bits;  // This is an vector of uint8 to reduce the size of the set
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Set& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream-insertion operator: delegates to Set::print and flushes.
+extern inline
+ostream& operator<<(ostream& out, const Set& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //SET_H
+
diff --git a/src/mem/ruby/common/Consumer.hh b/src/mem/ruby/common/Consumer.hh
new file mode 100644
index 000000000..bd51af7ba
--- /dev/null
+++ b/src/mem/ruby/common/Consumer.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: This is the virtual base class of all classes that can
+ * be the targets of wakeup events. There are only two methods,
+ * wakeup() and print(), and no data members.
+ *
+ */
+
+#ifndef CONSUMER_H
+#define CONSUMER_H
+
+#include "Global.hh"
+#include "EventQueue.hh"
+
+class MessageBuffer;
+
+class Consumer {
+public:
+ // Constructors
+ Consumer() { m_last_scheduled_wakeup = 0; m_last_wakeup = 0; m_out_link_vec.setSize(0); }
+
+ // Destructor
+ virtual ~Consumer() { }
+
+ // Public Methods - pure virtual methods
+ void triggerWakeup() { Time time = g_eventQueue_ptr->getTime(); if (m_last_wakeup != time) { wakeup(); m_last_wakeup = time; }}
+ virtual void wakeup() = 0;
+ virtual void print(ostream& out) const = 0;
+ const Time& getLastScheduledWakeup() const { return m_last_scheduled_wakeup; }
+ void setLastScheduledWakeup(const Time& time) { m_last_scheduled_wakeup = time; }
+ Vector< Vector<MessageBuffer*> > getOutBuffers() { return m_out_link_vec; }
+
+protected:
+ Vector< Vector<MessageBuffer*> > m_out_link_vec;
+
+private:
+ // Private Methods
+
+ // Data Members (m_ prefix)
+ Time m_last_scheduled_wakeup;
+ Time m_last_wakeup;
+};
+
+// Output operator declaration
+extern inline
+ostream& operator<<(ostream& out, const Consumer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Consumer& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //CONSUMER_H
diff --git a/src/mem/ruby/common/DataBlock.cc b/src/mem/ruby/common/DataBlock.cc
new file mode 100644
index 000000000..c4d6d7a33
--- /dev/null
+++ b/src/mem/ruby/common/DataBlock.cc
@@ -0,0 +1,91 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "DataBlock.hh"
+
+DataBlock::DataBlock()
+{
+ if (DATA_BLOCK || XACT_MEMORY) {
+ m_data.setSize(RubyConfig::dataBlockBytes());
+ }
+ clear();
+}
+
+DataBlock::~DataBlock()
+{
+
+}
+
+void DataBlock::clear()
+{
+ int size = m_data.size();
+ for (int i = 0; i < size; i++) {
+ m_data[i] = 0;
+ }
+}
+
+bool DataBlock::equal(const DataBlock& obj) const
+{
+ bool value = true;
+ int size = m_data.size();
+ for (int i = 0; i < size; i++) {
+ value = value && (m_data[i] == obj.m_data[i]);
+ }
+ return value;
+}
+
+void DataBlock::print(ostream& out) const
+{
+ int size = m_data.size();
+ for (int i = 0; i < size; i+=4) {
+ out << hex << *((uint32*)(&(m_data[i]))) << " ";
+ }
+ out << dec << "]" << flush;
+}
+
+uint8 DataBlock::getByte(int whichByte) const
+{
+ if (DATA_BLOCK || XACT_MEMORY) {
+ return m_data[whichByte];
+ } else {
+ return 0;
+ }
+}
+
+void DataBlock::setByte(int whichByte, uint8 data)
+{
+ if (DATA_BLOCK || XACT_MEMORY) {
+ m_data[whichByte] = data;
+ }
+}
+
diff --git a/src/mem/ruby/common/DataBlock.hh b/src/mem/ruby/common/DataBlock.hh
new file mode 100644
index 000000000..aae364078
--- /dev/null
+++ b/src/mem/ruby/common/DataBlock.hh
@@ -0,0 +1,82 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DATABLOCK_H
+#define DATABLOCK_H
+
+#include "Global.hh"
+#include "RubyConfig.hh"
+#include "Vector.hh"
+
+class DataBlock {
+public:
+ // Constructors
+ DataBlock();
+
+ // Destructor
+ ~DataBlock();
+
+ // Public Methods
+ void clear();
+ uint8 getByte(int whichByte) const;
+ void setByte(int whichByte, uint8 data);
+ bool equal(const DataBlock& obj) const;
+ void print(ostream& out) const;
+
+private:
+ // Private Methods
+
+ // Data Members (m_ prefix)
+ Vector<uint8> m_data;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DataBlock& obj);
+
+bool operator==(const DataBlock& obj1, const DataBlock& obj2);
+
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const DataBlock& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+extern inline
+bool operator==(const DataBlock& obj1,const DataBlock& obj2)
+{
+ return (obj1.equal(obj2));
+}
+
+#endif //DATABLOCK_H
diff --git a/src/mem/ruby/common/Debug.cc b/src/mem/ruby/common/Debug.cc
new file mode 100644
index 000000000..f0319ceb8
--- /dev/null
+++ b/src/mem/ruby/common/Debug.cc
@@ -0,0 +1,369 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include <fstream>
+#include <stdarg.h>
+
+#include "Global.hh"
+#include "Debug.hh"
+#include "EventQueue.hh"
+
+class Debug;
+extern Debug* g_debug_ptr;
+std::ostream * debug_cout_ptr;
+
+// component character list
+const char DEFINE_COMP_CHAR[] =
+{
+#undef DEFINE_COMP
+#define DEFINE_COMP(component, character, description) character,
+#include "Debug.def"
+};
+
+// component description list
+const char* DEFINE_COMP_DESCRIPTION[] =
+{
+#undef DEFINE_COMP
+#define DEFINE_COMP(component, character, description) description,
+#include "Debug.def"
+};
+
+extern "C" void changeDebugVerbosity(VerbosityLevel vb);
+extern "C" void changeDebugFilter(int filter);
+
+void changeDebugVerbosity(VerbosityLevel vb)
+{
+ g_debug_ptr->setVerbosity(vb);
+}
+
+void changeDebugFilter(int filter)
+{
+ g_debug_ptr->setFilter(filter);
+}
+
+Debug::Debug( const char *filterString, const char *verboseString,
+ Time filterStartTime, const char *filename )
+{
+ m_verbosityLevel = No_Verb;
+ clearFilter();
+ debug_cout_ptr = &cout;
+
+ m_starting_cycle = filterStartTime;
+ setFilterString( filterString );
+ setVerbosityString( verboseString );
+ setDebugOutputFile( filename );
+}
+
+Debug::~Debug()
+{
+}
+
+void Debug::printVerbosity(ostream& out) const
+{
+ switch (getVerbosity()) {
+ case No_Verb:
+ out << "verbosity = No_Verb" << endl;
+ break;
+ case Low_Verb:
+ out << "verbosity = Low_Verb" << endl;
+ break;
+ case Med_Verb:
+ out << "verbosity = Med_Verb" << endl;
+ break;
+ case High_Verb:
+ out << "verbosity = High_Verb" << endl;
+ break;
+ default:
+ out << "verbosity = unknown" << endl;
+ }
+}
+
+bool Debug::validDebug(int module, PriorityLevel priority)
+{
+ int local_module = (1 << module);
+ if(m_filter & local_module) {
+ if (g_eventQueue_ptr == NULL ||
+ g_eventQueue_ptr->getTime() >= m_starting_cycle) {
+ switch(m_verbosityLevel) {
+ case No_Verb:
+ return false;
+ break;
+ case Low_Verb:
+ if(priority == HighPrio) {
+ return true;
+ }else{
+ return false;
+ }
+ break;
+ case Med_Verb:
+ if(priority == HighPrio || priority == MedPrio ) {
+ return true;
+ }else{
+ return false;
+ }
+ break;
+ case High_Verb:
+ return true;
+ break;
+ }
+ }
+ }
+ return false;
+}
+
+void Debug::setDebugTime(Time t)
+{
+ m_starting_cycle = t;
+}
+
+void Debug::setVerbosity(VerbosityLevel vb)
+{
+ m_verbosityLevel = vb;
+}
+
+void Debug::setFilter(int filter)
+{
+ m_filter = filter;
+}
+
+bool Debug::checkVerbosityString(const char *verb_str)
+{
+ if (verb_str == NULL) {
+ cerr << "Error: unrecognized verbosity (use none, low, med, high): NULL" << endl;
+ return true; // error
+ } else if ( (string(verb_str) == "none") ||
+ (string(verb_str) == "low") ||
+ (string(verb_str) == "med") ||
+ (string(verb_str) == "high") ) {
+ return false;
+ }
+  cerr << "Error: unrecognized verbosity (use none, low, med, high): " << verb_str << endl;
+ return true; // error
+}
+
+bool Debug::setVerbosityString(const char *verb_str)
+{
+ bool check_fails = checkVerbosityString(verb_str);
+ if (check_fails) {
+ return true; // error
+ }
+ if (string(verb_str) == "none") {
+ setVerbosity(No_Verb);
+ } else if (string(verb_str) == "low") {
+ setVerbosity(Low_Verb);
+ } else if (string(verb_str) == "med") {
+ setVerbosity(Med_Verb);
+ } else if (string(verb_str) == "high") {
+ setVerbosity(High_Verb);
+ } else {
+ cerr << "Error: unrecognized verbosity (use none, low, med, high): " << verb_str << endl;
+ return true; // error
+ }
+ return false; // no error
+}
+
+bool Debug::checkFilter(char ch)
+{
+ for (int i=0; i<NUMBER_OF_COMPS; i++) {
+ // Look at all components to find a character match
+ if (DEFINE_COMP_CHAR[i] == ch) {
+ // We found a match - return no error
+ return false; // no error
+ }
+ }
+ return true; // error
+}
+
+bool Debug::checkFilterString(const char *filter_str)
+{
+ if (filter_str == NULL) {
+ cerr << "Error: unrecognized component filter: NULL" << endl;
+ return true; // error
+ }
+
+ // check for default filter ("none") before reporting RUBY_DEBUG error
+ if ( (string(filter_str) == "none") ) {
+ return false; // no error
+ }
+
+ if (RUBY_DEBUG == false) {
+ cerr << "Error: User specified set of debug components, but the RUBY_DEBUG compile-time flag is false." << endl;
+ cerr << "Solution: Re-compile with RUBY_DEBUG set to true." << endl;
+ return true; // error
+ }
+
+ if ( (string(filter_str) == "all") ) {
+ return false; // no error
+ }
+
+ // scan string checking each character
+ for (unsigned int i = 0; i < strlen(filter_str); i++) {
+ bool unrecognized = checkFilter( filter_str[i] );
+ if (unrecognized == true) {
+ return true; // error
+ }
+ }
+ return false; // no error
+}
+
+bool Debug::setFilterString(const char *filter_str)
+{
+ if (checkFilterString(filter_str)) {
+ return true; // error
+ }
+
+ if (string(filter_str) == "all" ) {
+ allFilter();
+ } else if (string(filter_str) == "none") {
+ clearFilter();
+ } else {
+ // scan string adding to bit mask for each component which is present
+ for (unsigned int i = 0; i < strlen(filter_str); i++) {
+ bool error = addFilter( filter_str[i] );
+ if (error) {
+ return true; // error
+ }
+ }
+ }
+ return false; // no error
+}
+
+bool Debug::addFilter(char ch)
+{
+ for (int i=0; i<NUMBER_OF_COMPS; i++) {
+ // Look at all components to find a character match
+ if (DEFINE_COMP_CHAR[i] == ch) {
+ // We found a match - update the filter bit mask
+ cout << " Debug: Adding to filter: '" << ch << "' (" << DEFINE_COMP_DESCRIPTION[i] << ")" << endl;
+ m_filter |= (1 << i);
+ return false; // no error
+ }
+ }
+
+ // We didn't find the character
+ cerr << "Error: unrecognized component filter: " << ch << endl;
+ usageInstructions();
+ return true; // error
+}
+
+void Debug::clearFilter()
+{
+ m_filter = 0;
+}
+
+void Debug::allFilter()
+{
+ m_filter = ~0;
+}
+
+void Debug::usageInstructions(void)
+{
+ cerr << "Debug components: " << endl;
+ for (int i=0; i<NUMBER_OF_COMPS; i++) {
+ cerr << " " << DEFINE_COMP_CHAR[i] << ": " << DEFINE_COMP_DESCRIPTION[i] << endl;
+ }
+}
+
+void Debug::print(ostream& out) const
+{
+ out << "[Debug]" << endl;
+}
+
+void Debug::setDebugOutputFile (const char * filename)
+{
+ if ( (filename == NULL) ||
+ (!strcmp(filename, "none")) ) {
+ debug_cout_ptr = &cout;
+ return;
+ }
+
+ if (m_fout.is_open() ) {
+ m_fout.close ();
+ }
+ m_fout.open (filename, std::ios::out);
+ if (! m_fout.is_open() ) {
+ cerr << "setDebugOutputFile: can't open file " << filename << endl;
+ }
+ else {
+ debug_cout_ptr = &m_fout;
+ }
+}
+
+void Debug::closeDebugOutputFile ()
+{
+ if (m_fout.is_open() ) {
+ m_fout.close ();
+ debug_cout_ptr = &cout;
+ }
+}
+
+void Debug::debugMsg( const char *fmt, ... )
+{
+ va_list args;
+
+ // you could check validDebug() here before printing the message
+ va_start(args, fmt);
+ vfprintf(stdout, fmt, args);
+ va_end(args);
+}
+
+/*
+void DEBUG_OUT( const char* fmt, ...) {
+ if (RUBY_DEBUG) {
+ cout << "Debug: in fn "
+ << __PRETTY_FUNCTION__
+ << " in " << __FILE__ << ":"
+ << __LINE__ << ": ";
+ va_list args;
+ va_start(args, fmt);
+ vfprintf(stdout, fmt, args);
+ va_end(args);
+ }
+}
+
+void ERROR_OUT( const char* fmt, ... ) {
+ if (ERROR_MESSAGE_FLAG) {
+ cout << "error: in fn "
+ << __PRETTY_FUNCTION__ << " in "
+ << __FILE__ << ":"
+ << __LINE__ << ": ";
+ va_list args;
+ va_start(args, fmt);
+ vfprintf(stdout, fmt, args);
+ va_end(args);
+ }
+ assert(0);
+}
+*/
+
diff --git a/src/mem/ruby/common/Debug.def b/src/mem/ruby/common/Debug.def
new file mode 100644
index 000000000..23af06655
--- /dev/null
+++ b/src/mem/ruby/common/Debug.def
@@ -0,0 +1,17 @@
+DEFINE_COMP(SYSTEM_COMP, 's', "System")
+DEFINE_COMP(NODE_COMP, 'N', "Node")
+DEFINE_COMP(QUEUE_COMP, 'q', "Queue")
+DEFINE_COMP(EVENTQUEUE_COMP, 'e', "Event Queue")
+DEFINE_COMP(NETWORK_COMP, 'n', "Network")
+DEFINE_COMP(SEQUENCER_COMP, 'S', "Sequencer")
+DEFINE_COMP(TESTER_COMP, 't', "Tester")
+DEFINE_COMP(GENERATED_COMP, 'g', "Generated")
+DEFINE_COMP(SLICC_COMP, 'l', "SLICC")
+DEFINE_COMP(NETWORKQUEUE_COMP, 'Q', "Network Queues")
+DEFINE_COMP(TIME_COMP, 'T', "Time")
+DEFINE_COMP(NETWORK_INTERNALS_COMP, 'i', "Network Internals")
+DEFINE_COMP(STOREBUFFER_COMP, 'b', "Store Buffer")
+DEFINE_COMP(CACHE_COMP, 'c', "Cache")
+DEFINE_COMP(PREDICTOR_COMP, 'p', "Predictor")
+DEFINE_COMP(ALLOCATOR_COMP, 'a', "Allocator")
+
diff --git a/src/mem/ruby/common/Debug.hh b/src/mem/ruby/common/Debug.hh
new file mode 100644
index 000000000..afa10f57f
--- /dev/null
+++ b/src/mem/ruby/common/Debug.hh
@@ -0,0 +1,291 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include <unistd.h>
+#include <iostream>
+
+extern std::ostream * debug_cout_ptr;
+
+// component enumeration
+enum DebugComponents
+{
+#undef DEFINE_COMP
+#define DEFINE_COMP(component, character, description) component,
+#include "Debug.def"
+ NUMBER_OF_COMPS
+};
+
+enum PriorityLevel {HighPrio, MedPrio, LowPrio};
+enum VerbosityLevel {No_Verb, Low_Verb, Med_Verb, High_Verb};
+
+class Debug {
+public:
+ // Constructors
+ Debug( const char *filterString, const char *verboseString,
+ Time filterStartTime, const char *filename );
+
+ // Destructor
+ ~Debug();
+
+ // Public Methods
+ bool validDebug(int module, PriorityLevel priority);
+ void printVerbosity(ostream& out) const;
+ void setVerbosity(VerbosityLevel vb);
+ static bool checkVerbosityString(const char *verb_str);
+ bool setVerbosityString(const char *);
+ VerbosityLevel getVerbosity() const { return m_verbosityLevel; }
+ void setFilter(int);
+ static bool checkFilter( char);
+ static bool checkFilterString(const char *);
+ bool setFilterString(const char *);
+ void setDebugTime(Time);
+ Time getDebugTime() const { return m_starting_cycle; }
+ bool addFilter(char);
+ void clearFilter();
+ void allFilter();
+ void print(ostream& out) const;
+ /* old school debugging "vararg": sends messages to screen and log */
+ void debugMsg( const char *fmt, ... );
+
+ void setDebugOutputFile (const char * filename);
+ void closeDebugOutputFile ();
+ static void usageInstructions(void);
+
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ Debug(const Debug& obj);
+ Debug& operator=(const Debug& obj);
+
+ // Data Members (m_ prefix)
+ VerbosityLevel m_verbosityLevel;
+ int m_filter;
+ Time m_starting_cycle;
+
+ std::fstream m_fout;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Debug& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Debug& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+const bool ERROR_MESSAGE_FLAG = true;
+const bool WARNING_MESSAGE_FLAG = true;
+
+#ifdef RUBY_NO_ASSERT
+const bool ASSERT_FLAG = false;
+#else
+const bool ASSERT_FLAG = true;
+#endif
+
+#undef assert
+#define assert(EXPR) ASSERT(EXPR)
+#undef ASSERT
+#define ASSERT(EXPR)\
+{\
+ if (ASSERT_FLAG) {\
+ if (!(EXPR)) {\
+ cerr << "failed assertion '"\
+ << #EXPR << "' at fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << endl << flush;\
+ (* debug_cout_ptr) << "failed assertion '"\
+ << #EXPR << "' at fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << endl << flush;\
+ if(isatty(STDIN_FILENO)) {\
+      cerr << "At this point you might want to attach a debugger to ";\
+      cerr << "the running process and get to the" << endl;\
+ cerr << "crash site; otherwise press enter to continue" << endl;\
+ cerr << "PID: " << getpid();\
+ cerr << endl << flush; \
+ char c; \
+ cin.get(c); \
+ }\
+ abort();\
+ }\
+ }\
+}
+
+#define BREAK(X)\
+{\
+ cerr << "breakpoint '"\
+ << #X << "' reached at fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << endl << flush;\
+ if(isatty(STDIN_FILENO)) {\
+ cerr << "press enter to continue" << endl;\
+ cerr << "PID: " << getpid();\
+ cerr << endl << flush; \
+ char c; \
+ cin.get(c); \
+ }\
+}
+
+#define ERROR_MSG(MESSAGE)\
+{\
+ if (ERROR_MESSAGE_FLAG) {\
+ cerr << "Fatal Error: in fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << ": "\
+ << (MESSAGE) << endl << flush;\
+ (* debug_cout_ptr) << "Fatal Error: in fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << ": "\
+ << (MESSAGE) << endl << flush;\
+ abort();\
+ }\
+}
+
+#define WARN_MSG(MESSAGE)\
+{\
+ if (WARNING_MESSAGE_FLAG) {\
+ cerr << "Warning: in fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << ": "\
+ << (MESSAGE) << endl << flush;\
+ (* debug_cout_ptr) << "Warning: in fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << ": "\
+ << (MESSAGE) << endl << flush;\
+ }\
+}
+
+#define WARN_EXPR(EXPR)\
+{\
+ if (WARNING_MESSAGE_FLAG) {\
+ cerr << "Warning: in fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << ": "\
+ << #EXPR << " is "\
+ << (EXPR) << endl << flush;\
+ (* debug_cout_ptr) << "Warning: in fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << ": "\
+ << #EXPR << " is "\
+ << (EXPR) << endl << flush;\
+ }\
+}
+
+#define DEBUG_MSG(module, priority, MESSAGE)\
+{\
+ if (RUBY_DEBUG) {\
+ if (g_debug_ptr->validDebug(module, priority)) {\
+ (* debug_cout_ptr) << "Debug: in fn "\
+ << __PRETTY_FUNCTION__\
+ << " in " << __FILE__ << ":"\
+ << __LINE__ << ": "\
+ << (MESSAGE) << endl << flush;\
+ }\
+ }\
+}
+
+#define DEBUG_EXPR(module, priority, EXPR)\
+{\
+ if (RUBY_DEBUG) {\
+ if (g_debug_ptr->validDebug(module, priority)) {\
+ (* debug_cout_ptr) << "Debug: in fn "\
+ << __PRETTY_FUNCTION__\
+ << " in " << __FILE__ << ":"\
+ << __LINE__ << ": "\
+ << #EXPR << " is "\
+ << (EXPR) << endl << flush;\
+ }\
+ }\
+}
+
+#define DEBUG_NEWLINE(module, priority)\
+{\
+ if (RUBY_DEBUG) {\
+ if (g_debug_ptr->validDebug(module, priority)) {\
+ (* debug_cout_ptr) << endl << flush;\
+ }\
+ }\
+}
+
+#define DEBUG_SLICC(priority, LINE, MESSAGE)\
+{\
+ if (RUBY_DEBUG) {\
+ if (g_debug_ptr->validDebug(SLICC_COMP, priority)) {\
+ (* debug_cout_ptr) << (LINE) << (MESSAGE) << endl << flush;\
+ }\
+ }\
+}
+
+#define DEBUG_OUT( rest... ) \
+{\
+ if (RUBY_DEBUG) {\
+ cout << "Debug: in fn "\
+ << __PRETTY_FUNCTION__\
+ << " in " << __FILE__ << ":"\
+ << __LINE__ << ": "; \
+ g_debug_ptr->debugMsg(rest); \
+ }\
+}
+
+#define ERROR_OUT( rest... ) \
+{\
+ if (ERROR_MESSAGE_FLAG) {\
+ cout << "error: in fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << ": ";\
+ g_debug_ptr->debugMsg(rest); \
+ }\
+}
+
+#endif //DEBUG_H
+
diff --git a/src/mem/ruby/common/Driver.cc b/src/mem/ruby/common/Driver.cc
new file mode 100644
index 000000000..019ac6403
--- /dev/null
+++ b/src/mem/ruby/common/Driver.cc
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Driver.hh"
+
+Driver::Driver()
+{
+}
+
+// still need to be defined for subclasses
+Driver::~Driver()
+{
+}
diff --git a/src/mem/ruby/common/Driver.hh b/src/mem/ruby/common/Driver.hh
new file mode 100644
index 000000000..911cb742b
--- /dev/null
+++ b/src/mem/ruby/common/Driver.hh
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DRIVER_H
+#define DRIVER_H
+
+#include "Global.hh"
+#include "Consumer.hh"
+#include "NodeID.hh"
+#include "CacheRequestType.hh"
+
+class System;
+class SubBlock;
+class Address;
+class MachineID;
+class SimicsHypervisor;
+
+class Driver {
+public:
+ // Constructors
+ Driver();
+
+ // Destructor
+ virtual ~Driver() = 0;
+
+ // Public Methods
+ virtual void get_network_config() {}
+ virtual void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) = 0; // Called by sequencer
+ virtual void conflictCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) { assert(0); } // Called by sequencer
+ virtual integer_t getInstructionCount(int procID) const { return 1; }
+ virtual integer_t getCycleCount(int procID) const { return 1; }
+ virtual SimicsHypervisor * getHypervisor() { return NULL; }
+ virtual void notifySendNack( int procID, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id) { assert(0); }; //Called by Sequencer
+ virtual void notifyReceiveNack( int procID, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id) { assert(0); }; //Called by Sequencer
+ virtual void notifyReceiveNackFinal( int procID, const Address & addr) { assert(0); }; // Called by Sequencer
+ virtual void notifyTrapStart( int procID, const Address & handlerPC, int threadID, int smtThread ) { assert(0); } //called by Sequencer
+ virtual void notifyTrapComplete( int procID, const Address & newPC, int smtThread ) {assert(0); } // called by Sequencer
+ virtual int getOpalTransactionLevel(int procID, int thread) const {
+ cout << "Driver.hh getOpalTransactionLevel() " << endl;
+ return 0; } //called by Sequencer
+ virtual void addThreadDependency(int procID, int requestor_thread, int conflict_thread) const { assert(0);}
+ virtual uint64 getOpalTime(int procID) const{ return 0; } //called by Sequencer
+ virtual uint64 getOpalTimestamp(int procID, int thread) const{
+ cout << "Driver.hh getOpalTimestamp " << endl;
+ return 0; } // called by Sequencer
+ virtual int inTransaction(int procID, int thread ) const{
+ cout << "Driver.hh inTransaction " << endl;
+    return false; } //called by Sequencer
+ virtual void printDebug(){} //called by Sequencer
+
+ virtual void printStats(ostream& out) const = 0;
+ virtual void clearStats() = 0;
+
+ virtual void printConfig(ostream& out) const = 0;
+
+ //virtual void abortCallback(NodeID proc){}
+
+ virtual integer_t readPhysicalMemory(int procID, physical_address_t address,
+ int len ){ ASSERT(0); return 0; }
+
+ virtual void writePhysicalMemory( int procID, physical_address_t address,
+ integer_t value, int len ){ ASSERT(0); }
+
+protected:
+ // accessible by subclasses
+
+private:
+ // inaccessible by subclasses
+
+};
+
+#endif //DRIVER_H
diff --git a/src/mem/ruby/common/Global.cc b/src/mem/ruby/common/Global.cc
new file mode 100644
index 000000000..e60cd4fd3
--- /dev/null
+++ b/src/mem/ruby/common/Global.cc
@@ -0,0 +1,35 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Global.hh"
+
+// Definitions of the global simulator singletons declared in Global.hh.
+// They start out NULL; presumably allocated during simulator startup
+// elsewhere in ruby -- confirm init order before dereferencing early.
+EventQueue* g_eventQueue_ptr = NULL;
+System* g_system_ptr = NULL;
+Debug* g_debug_ptr = NULL;
+
diff --git a/src/mem/ruby/common/Global.hh b/src/mem/ruby/common/Global.hh
new file mode 100644
index 000000000..eaec05d46
--- /dev/null
+++ b/src/mem/ruby/common/Global.hh
@@ -0,0 +1,110 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * */
+
+#ifndef GLOBAL_H
+#define GLOBAL_H
+
+// Cache-hierarchy accessor macros.  The build configuration
+// (SINGLE_LEVEL_CACHE, IS_CMP) selects which generated member-variable
+// vector each logical cache level maps onto; m_version indexes the
+// per-node instance inside that vector.
+#ifdef SINGLE_LEVEL_CACHE
+const bool TWO_LEVEL_CACHE = false;
+#define L1I_CACHE_MEMBER_VARIABLE m_L1Cache_cacheMemory_vec[m_version] // currently all protocols require L1s == nodes
+#define L1D_CACHE_MEMBER_VARIABLE m_L1Cache_cacheMemory_vec[m_version] // "
+#define L2_CACHE_MEMBER_VARIABLE m_L1Cache_cacheMemory_vec[m_version] // "
+#define L2_CACHE_VARIABLE m_L1Cache_cacheMemory_vec
+#else
+const bool TWO_LEVEL_CACHE = true;
+#ifdef IS_CMP
+#define L1I_CACHE_MEMBER_VARIABLE m_L1Cache_L1IcacheMemory_vec[m_version]
+#define L1D_CACHE_MEMBER_VARIABLE m_L1Cache_L1DcacheMemory_vec[m_version]
+#define L2_CACHE_MEMBER_VARIABLE m_L2Cache_L2cacheMemory_vec[m_version]
+#define L2_CACHE_VARIABLE m_L2Cache_L2cacheMemory_vec
+#else // not IS_CMP
+#define L1I_CACHE_MEMBER_VARIABLE m_L1Cache_L1IcacheMemory_vec[m_version] // currently all protocols require L1s == nodes
+#define L1D_CACHE_MEMBER_VARIABLE m_L1Cache_L1DcacheMemory_vec[m_version] // "
+// #define L2_CACHE_MEMBER_VARIABLE m_L1Cache_L2cacheMemory_vec[m_version] // old exclusive caches don't support L2s != nodes
+#define L2_CACHE_MEMBER_VARIABLE m_L1Cache_cacheMemory_vec[m_version] // old exclusive caches don't support L2s != nodes
+#define L2_CACHE_VARIABLE m_L1Cache_L2cacheMemory_vec
+#endif // IS_CMP
+#endif //SINGLE_LEVEL_CACHE
+
+#define DIRECTORY_MEMBER_VARIABLE m_Directory_directory_vec[m_version]
+#define TBE_TABLE_MEMBER_VARIABLE m_L1Cache_TBEs_vec[m_version]
+
+// Hand-rolled fixed-width integer aliases.  NOTE(review): these predate
+// <stdint.h>/<cstdint>; uint32 == unsigned int and int64 == long long
+// are assumptions about the host ABI, not guarantees -- confirm on any
+// new target platform.
+typedef unsigned char uint8;
+typedef unsigned int uint32;
+typedef unsigned long long uint64;
+
+typedef signed char int8;
+typedef int int32;
+typedef long long int64;
+
+typedef long long integer_t;
+typedef unsigned long long uinteger_t;
+
+// Domain aliases used throughout ruby.
+typedef int64 Time;
+typedef uint64 physical_address_t;
+typedef uint64 la_t;
+typedef uint64 pa_t;
+typedef integer_t simtime_t;
+
+// external includes for all classes
+#include "std-includes.hh"
+#include "Debug.hh"
+
+// simple type declarations
+typedef Time LogicalTime;
+typedef int64 Index; // what the address bit ripper returns
+typedef int word; // one word of a cache line
+typedef unsigned int uint;
+typedef int SwitchID;
+typedef int LinkID;
+
+// Global simulator singletons; defined in Global.cc, filled in at init.
+class EventQueue;
+extern EventQueue* g_eventQueue_ptr;
+
+class System;
+extern System* g_system_ptr;
+
+class Debug;
+extern Debug* g_debug_ptr;
+
+// FIXME: this is required by the contructor of Directory_Entry.h. It can't go
+// into slicc_util.h because it opens a can of ugly worms
+// NOTE(review): 'extern inline' relies on pre-standard GNU semantics;
+// plain 'inline' is the standard C++ spelling -- confirm before changing.
+extern inline int max_tokens()
+{
+  return 1024;
+}
+
+
+#endif //GLOBAL_H
+
+
diff --git a/src/mem/ruby/common/Histogram.cc b/src/mem/ruby/common/Histogram.cc
new file mode 100644
index 000000000..9c5e8e623
--- /dev/null
+++ b/src/mem/ruby/common/Histogram.cc
@@ -0,0 +1,185 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Histogram.hh"
+
+// Construct a histogram with 'bins' buckets of width 'binsize'.
+// A binsize of -1 selects a log-base-2 bucketing scheme (see add()).
+Histogram::Histogram(int binsize, int bins)
+{
+  m_binsize = binsize;
+  m_bins = bins;
+  clear();
+}
+
+// Nothing to release explicitly; m_data (a Vector) cleans itself up.
+Histogram::~Histogram()
+{
+}
+
+// Reset the histogram, changing both the bucket width and bucket count.
+void Histogram::clear(int binsize, int bins)
+{
+  m_binsize = binsize;
+  clear(bins);
+}
+
+// Reset all counters and resize the data vector to 'bins' buckets,
+// zeroing each one.  The bucket width (m_binsize) is left unchanged.
+void Histogram::clear(int bins)
+{
+  m_bins = bins;
+  m_largest_bin = 0;
+  m_max = 0;
+  m_data.setSize(m_bins);
+  for (int i = 0; i < m_bins; i++) {
+    m_data[i] = 0;
+  }
+  m_count = 0;
+  m_max = 0;  // redundant: m_max was already reset above (harmless)
+
+  m_sumSamples = 0;
+  m_sumSquaredSamples = 0;
+}
+
+
+// Record one non-negative sample.  Updates count/max/sum/sum-of-squares,
+// then places the sample in a bucket:
+//  - log2 mode (m_binsize == -1): bucket 0 holds value 0; value v goes
+//    in bucket floor(log2(v)) + 1, clamped to the last bucket.
+//    NOTE(review): the index is computed with floating-point log(), so
+//    exact powers of two near the precision limit could round to an
+//    adjacent bucket -- confirm acceptable for stats purposes.
+//  - linear mode: if the sample exceeds the current range, repeatedly
+//    double m_binsize, folding each adjacent bucket pair into one and
+//    zeroing the upper half, until the range covers m_max.
+void Histogram::add(int64 value)
+{
+  assert(value >= 0);
+  m_max = max(m_max, value);
+  m_count++;
+
+  m_sumSamples += value;
+  m_sumSquaredSamples += (value*value);
+
+  int index;
+  if (m_binsize == -1) {
+    // This is a log base 2 histogram
+    if (value == 0) {
+      index = 0;
+    } else {
+      index = int(log(double(value))/log(2.0))+1;
+      if (index >= m_data.size()) {
+        index = m_data.size()-1;
+      }
+    }
+  } else {
+    // This is a linear histogram
+    while (m_max >= (m_bins * m_binsize)) {
+      for (int i = 0; i < m_bins/2; i++) {
+        m_data[i] = m_data[i*2] + m_data[i*2 + 1];
+      }
+      for (int i = m_bins/2; i < m_bins; i++) {
+        m_data[i] = 0;
+      }
+      m_binsize *= 2;
+    }
+    index = value/m_binsize;
+  }
+  assert(index >= 0);
+  m_data[index]++;
+  m_largest_bin = max(m_largest_bin, index);
+}
+
+// Merge another histogram into this one.  Only valid when both are
+// log2 histograms with the same bucket count (asserted).  The merge
+// re-synthesizes samples: each count in bucket i is replayed as the
+// value 1<<(i-1) (the bucket's lower bound), so per-sample detail --
+// max, sums, std deviation -- is approximate after merging.
+void Histogram::add(const Histogram& hist)
+{
+  assert(hist.getBins() == m_bins);
+  assert(hist.getBinSize() == -1); // assume log histogram
+  assert(m_binsize == -1);
+
+  for (int j = 0; j < hist.getData(0); j++) {
+    add(0);
+  }
+
+  for (int i = 1; i < m_bins; i++) {
+    for (int j = 0; j < hist.getData(i); j++) {
+      add(1<<(i-1)); // account for the + 1 index
+    }
+  }
+
+}
+
+// Computation of standard deviation of samples a1, a2, ... aN
+// variance = [SUM {ai^2} - (SUM {ai})^2/N]/(N-1)
+// std deviation equals square root of variance
+// Returns 0 when fewer than two samples have been recorded.
+// NOTE(review): m_sumSamples*m_sumSamples is an int64 product and can
+// overflow for large sample sums before the conversion to double --
+// confirm sample magnitudes stay small enough in practice.
+double Histogram::getStandardDeviation() const
+{
+  double variance;
+  if(m_count > 1){
+    variance = (double)(m_sumSquaredSamples - m_sumSamples*m_sumSamples/m_count)/(m_count - 1);
+  } else {
+    return 0;
+  }
+  return sqrt(variance);
+}
+
+// Print raw bucket counts (multiplier 1.0).
+void Histogram::print(ostream& out) const
+{
+  printWithMultiplier(out, 1.0);
+}
+
+// Print buckets as percentages of the total sample count; an empty
+// histogram prints zeros (multiplier 0.0) to avoid dividing by zero.
+void Histogram::printPercent(ostream& out) const
+{
+  if (m_count == 0) {
+    printWithMultiplier(out, 0.0);
+  } else {
+    printWithMultiplier(out, 100.0/double(m_count));
+  }
+}
+
+// Print the histogram summary (binsize, max, count, average, std dev)
+// followed by each used bucket's count scaled by 'multiplier'.
+// Buckets past m_largest_bin are omitted.  Prints NaN placeholders for
+// the statistics when no samples exist.
+void Histogram::printWithMultiplier(ostream& out, double multiplier) const
+{
+  if (m_binsize == -1) {
+    out << "[binsize: log2 ";
+  } else {
+    out << "[binsize: " << m_binsize << " ";
+  }
+  out << "max: " << m_max << " ";
+  out << "count: " << m_count << " ";
+  // out << "total: " <<  m_sumSamples << " ";
+  if (m_count == 0) {
+    out << "average: NaN |";
+    out << "standard deviation: NaN |";
+  } else {
+    out << "average: " << setw(5) << ((double) m_sumSamples)/m_count << " | ";
+    out << "standard deviation: " << getStandardDeviation() << " |";
+  }
+  // multiplier == 1.0 prints integer counts; anything else prints the
+  // scaled (floating-point) value, e.g. percentages from printPercent().
+  for (int i = 0; i < m_bins && i <= m_largest_bin; i++) {
+    if (multiplier == 1.0) {
+      out << " " << m_data[i];
+    } else {
+      out << " " << double(m_data[i]) * multiplier;
+    }
+  }
+  out << " ]";
+}
+
+// Comparator for ordering Histogram pointers by sample count.
+// NOTE(review): despite the "less_then_eq" name, this returns
+// n1->size() > n2->size(), i.e. it sorts in DESCENDING order of count.
+// Callers presumably rely on that, so the name -- not the logic -- is
+// what is misleading; confirm before renaming or "fixing".
+bool node_less_then_eq(const Histogram* n1, const Histogram* n2)
+{
+  return (n1->size() > n2->size());
+}
diff --git a/src/mem/ruby/common/Histogram.hh b/src/mem/ruby/common/Histogram.hh
new file mode 100644
index 000000000..e48efc35f
--- /dev/null
+++ b/src/mem/ruby/common/Histogram.hh
@@ -0,0 +1,104 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: The histogram class implements a simple histogram
+ *
+ */
+
+#ifndef HISTOGRAM_H
+#define HISTOGRAM_H
+
+#include "Global.hh"
+#include "Vector.hh"
+
+// A simple statistics histogram: records int64 samples into either
+// linear buckets (fixed binsize, auto-doubling when the range is
+// exceeded) or log2 buckets (binsize == -1), and tracks count, max,
+// sum, and sum-of-squares for average / standard deviation reporting.
+class Histogram {
+public:
+  // Constructors
+  // binsize == -1 selects the log2 bucketing scheme.
+  Histogram(int binsize = 1, int bins = 50);
+
+  // Destructor
+  ~Histogram();
+
+  // Public Methods
+
+  void add(int64 value);             // record one sample
+  void add(const Histogram& hist);   // merge another (log2-only) histogram
+  void clear() { clear(m_bins); }
+  void clear(int bins);
+  void clear(int binsize, int bins);
+  int64 size() const { return m_count; }
+  int getBins() const { return m_bins; }
+  int getBinSize() const { return m_binsize; }
+  int64 getTotal() const { return m_sumSamples; }
+  int64 getData(int index) const { return m_data[index]; }
+
+  void printWithMultiplier(ostream& out, double multiplier) const;
+  void printPercent(ostream& out) const;
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  // Histogram(const Histogram& obj);
+  // Histogram& operator=(const Histogram& obj);
+
+  // Data Members (m_ prefix)
+  Vector<int64> m_data;
+  int64 m_max; // the maximum value seen so far
+  int64 m_count; // the number of elements added
+  int m_binsize; // the size of each bucket
+  int m_bins; // the number of buckets
+  int m_largest_bin; // the largest bin used
+
+  int64 m_sumSamples; // the sum of all samples
+  int64 m_sumSquaredSamples; // the sum of the square of all samples
+
+  double getStandardDeviation() const;
+};
+
+// Free comparator used for sorting; NOTE(review): orders by DESCENDING
+// sample count despite the name (see Histogram.cc).
+bool node_less_then_eq(const Histogram* n1, const Histogram* n2);
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Histogram& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Streams the histogram via print() and flushes.  NOTE(review):
+// 'extern inline' is a legacy GNU idiom; plain 'inline' is the
+// standard C++ spelling.
+extern inline
+ostream& operator<<(ostream& out, const Histogram& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //HISTOGRAM_H
diff --git a/src/mem/ruby/common/Message.cc b/src/mem/ruby/common/Message.cc
new file mode 100644
index 000000000..baad8ac9b
--- /dev/null
+++ b/src/mem/ruby/common/Message.cc
@@ -0,0 +1,34 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "Message.hh"
diff --git a/src/mem/ruby/common/NetDest.cc b/src/mem/ruby/common/NetDest.cc
new file mode 100644
index 000000000..79a6078e9
--- /dev/null
+++ b/src/mem/ruby/common/NetDest.cc
@@ -0,0 +1,259 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetDest.C
+ *
+ * Description: See NetDest.h
+ *
+ * $Id$
+ *
+ */
+
+#include "NetDest.hh"
+#include "RubyConfig.hh"
+#include "Protocol.hh"
+
+// Build an empty destination set sized to the machine configuration
+// (one Set of node bits per MachineType base level).
+NetDest::NetDest()
+{
+  setSize();
+}
+
+// Set the bit for one machine: outer index selects the machine type's
+// Set, inner bit is the node number within that type.
+void NetDest::add(MachineID newElement)
+{
+  m_bits[vecIndex(newElement)].add(bitIndex(newElement.num));
+}
+
+// Union another NetDest into this one (per-machine-type set union).
+void NetDest::addNetDest(const NetDest& netDest)
+{
+  assert(m_bits.size() == netDest.getSize());
+  for (int i = 0; i < m_bits.size(); i++) {
+    m_bits[i].addSet(netDest.m_bits[i]);
+  }
+}
+
+// Add a random element of a randomly chosen machine type.
+// NOTE(review): uses the C library random() -- results depend on the
+// global seed, so runs are only reproducible if the seed is fixed.
+void NetDest::addRandom()
+{
+  int i = random()%m_bits.size();
+  m_bits[i].addRandom();
+}
+
+// Replace the destination bits for one machine type wholesale.
+void NetDest::setNetDest(MachineType machine, const Set& set)
+{
+  // assure that there is only one set of destinations for this machine
+  assert(MachineType_base_level((MachineType)(machine+1)) - MachineType_base_level(machine) == 1);
+  m_bits[MachineType_base_level(machine)] = set;
+}
+
+// Clear the bit for one machine (inverse of add()).
+void NetDest::remove(MachineID oldElement)
+{
+  m_bits[vecIndex(oldElement)].remove(bitIndex(oldElement.num));
+}
+
+// Subtract another NetDest from this one (per-machine-type set minus).
+void NetDest::removeNetDest(const NetDest& netDest)
+{
+  assert(m_bits.size() == netDest.getSize());
+  for (int i = 0; i < m_bits.size(); i++) {
+    m_bits[i].removeSet(netDest.m_bits[i]);
+
+  }
+}
+
+// Empty every machine type's destination set (sizes are preserved).
+void NetDest::clear()
+{
+  for (int i = 0; i < m_bits.size(); i++) {
+    m_bits[i].clear();
+  }
+}
+
+// Add every node of every machine type.
+void NetDest::broadcast()
+{
+  for (MachineType machine = MachineType_FIRST; machine < MachineType_NUM; ++machine) {
+    broadcast(machine);
+  }
+}
+
+// Add every node of one machine type.
+void NetDest::broadcast(MachineType machineType) {
+
+  for (int i = 0; i < MachineType_base_count(machineType); i++) {
+    MachineID mach = {machineType, i};
+    add(mach);
+  }
+}
+
+//For Princeton Network
+// Flatten the per-type bit sets into a single list of global NodeIDs,
+// offsetting each within-type index by the machine type's base number.
+Vector<NodeID> NetDest::getAllDest() {
+  Vector<NodeID> dest;
+  dest.clear();
+  for (int i=0; i<m_bits.size(); i++) {
+    for (int j=0; j<m_bits[i].getSize(); j++) {
+      if (m_bits[i].isElement(j)) {
+        dest.insertAtBottom((NodeID) (MachineType_base_number((MachineType) i) + j));
+      }
+    }
+  }
+  return dest;
+}
+
+// Total number of destination bits set across all machine types.
+int NetDest::count() const
+{
+  int counter = 0;
+  for (int i=0; i<m_bits.size(); i++) {
+    counter += m_bits[i].count();
+  }
+  return counter;
+}
+
+// Delegate to the underlying Set's elementAt for the given machine.
+NodeID NetDest::elementAt(MachineID index) {
+  return m_bits[vecIndex(index)].elementAt(bitIndex(index.num));
+}
+
+// Return the first set bit found scanning machine types in order.
+// NOTE(review): this returns j, the WITHIN-TYPE bit index, without the
+// MachineType_base_number offset that getAllDest() applies -- confirm
+// callers expect a per-type index rather than a global NodeID.
+// ERROR_MSG is presumed not to return (count() > 0 is asserted first),
+// otherwise control would fall off this non-void function.
+NodeID NetDest::smallestElement() const
+{
+  assert(count() > 0);
+  for (int i=0; i<m_bits.size(); i++) {
+    for (int j=0; j<m_bits[i].getSize(); j++) {
+      if (m_bits[i].isElement(j)) {
+        return j;
+      }
+    }
+  }
+  ERROR_MSG("No smallest element of an empty set.");
+}
+
+// Return the lowest-numbered set element of one machine type, as a
+// full MachineID.  ERROR_MSG is presumed not to return; there is no
+// emptiness assert here, unlike the no-argument overload.
+MachineID NetDest::smallestElement(MachineType machine) const
+{
+  for (int j = 0; j < m_bits[MachineType_base_level(machine)].getSize(); j++) {
+    if (m_bits[MachineType_base_level(machine)].isElement(j)) {
+      MachineID mach = {machine, j};
+      return mach;
+    }
+  }
+
+  ERROR_MSG("No smallest element of given MachineType.");
+}
+
+
+// Returns true iff all bits are set
+bool NetDest::isBroadcast() const
+{
+  for (int i=0; i<m_bits.size(); i++) {
+    if (!m_bits[i].isBroadcast()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Returns true iff no bits are set
+bool NetDest::isEmpty() const
+{
+  for (int i=0; i<m_bits.size(); i++) {
+    if (!m_bits[i].isEmpty()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// returns the logical OR of "this" set and orNetDest
+// (non-mutating; builds and returns a new NetDest)
+NetDest NetDest::OR(const NetDest& orNetDest) const
+{
+  assert(m_bits.size() == orNetDest.getSize());
+  NetDest result;
+  for (int i=0; i<m_bits.size(); i++) {
+    result.m_bits[i] = m_bits[i].OR(orNetDest.m_bits[i]);
+  }
+  return result;
+}
+
+
+// returns the logical AND of "this" set and andNetDest
+// (non-mutating; builds and returns a new NetDest)
+NetDest NetDest::AND(const NetDest& andNetDest) const
+{
+  assert(m_bits.size() == andNetDest.getSize());
+  NetDest result;
+  for (int i=0; i<m_bits.size(); i++) {
+    result.m_bits[i] = m_bits[i].AND(andNetDest.m_bits[i]);
+  }
+  return result;
+}
+
+// Returns true if the intersection of the two sets is non-empty
+bool NetDest::intersectionIsNotEmpty(const NetDest& other_netDest) const
+{
+  assert(m_bits.size() == other_netDest.getSize());
+  for (int i=0; i<m_bits.size(); i++) {
+    if (m_bits[i].intersectionIsNotEmpty(other_netDest.m_bits[i])) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// True iff every element of 'test' is also an element of this set.
+bool NetDest::isSuperset(const NetDest& test) const
+{
+  assert(m_bits.size() == test.getSize());
+
+  for (int i=0; i<m_bits.size(); i++) {
+    if (!m_bits[i].isSuperset(test.m_bits[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Membership test for a single machine.
+bool NetDest::isElement(MachineID element) const
+{
+  return ((m_bits[vecIndex(element)])).isElement(bitIndex(element.num));
+}
+
+// Size the outer vector to one Set per MachineType base level, and each
+// inner Set to that type's node count.  The assert encodes the same
+// one-level-per-type assumption checked in setNetDest().
+void NetDest::setSize()
+{
+  m_bits.setSize(MachineType_base_level(MachineType_NUM));
+  assert(m_bits.size() == MachineType_NUM);
+
+  for (int i = 0; i < m_bits.size(); i++) {
+    m_bits[i].setSize(MachineType_base_count((MachineType)i));
+  }
+}
+
+// Debug dump: one 0/1 per bit, machine types separated by " - ".
+void NetDest::print(ostream& out) const
+{
+  out << "[NetDest (" << m_bits.size() << ") ";
+
+  for (int i=0; i<m_bits.size(); i++) {
+    for (int j=0; j<m_bits[i].getSize(); j++) {
+      out << (bool) m_bits[i].isElement(j) << " ";
+    }
+    out << " - ";
+  }
+  out << "]";
+}
+
diff --git a/src/mem/ruby/common/NetDest.hh b/src/mem/ruby/common/NetDest.hh
new file mode 100644
index 000000000..04f7871f6
--- /dev/null
+++ b/src/mem/ruby/common/NetDest.hh
@@ -0,0 +1,145 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetDest.hh
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+// NetDest specifies the network destination of a NetworkMessage
+// This is backward compatible with the Set class that was previously
+// used to specify network destinations.
+// NetDest supports both node networks and component networks
+
+#ifndef NETDEST_H
+#define NETDEST_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "NodeID.hh"
+#include "MachineID.hh"
+#include "RubyConfig.hh"
+#include "Set.hh"
+#include "MachineType.hh"
+
+class Set;
+
+// NetDest: the network destination of a NetworkMessage, stored as one
+// bit-Set per machine type (outer Vector indexed by MachineType base
+// level, inner bit indexed by node number within that type).
+// NOTE(review): isEqual(), intersectionIsEmpty(), NetDest(int), and
+// operator=(const Set&) are declared here but no definitions appear in
+// NetDest.cc in this patch -- confirm they are defined elsewhere or
+// unused before calling them.
+class NetDest {
+public:
+  // Constructors
+  // creates an empty set
+  NetDest();
+  explicit NetDest(int bit_size);
+
+  NetDest& operator=(const Set& obj);
+
+  // Destructor
+  // ~NetDest();
+
+  // Public Methods
+  void add(MachineID newElement);
+  void addNetDest(const NetDest& netDest);
+  void addRandom();
+  void setNetDest(MachineType machine, const Set& set);
+  void remove(MachineID oldElement);
+  void removeNetDest(const NetDest& netDest);
+  void clear();
+  void broadcast();
+  void broadcast(MachineType machine);
+  int count() const;
+  bool isEqual(const NetDest& netDest);
+
+  NetDest OR(const NetDest& orNetDest) const; // return the logical OR of this netDest and orNetDest
+  NetDest AND(const NetDest& andNetDest) const; // return the logical AND of this netDest and andNetDest
+
+  // Returns true if the intersection of the two netDests is non-empty
+  bool intersectionIsNotEmpty(const NetDest& other_netDest) const;
+
+  // Returns true if the intersection of the two netDests is empty
+  bool intersectionIsEmpty(const NetDest& other_netDest) const;
+
+  bool isSuperset(const NetDest& test) const;
+  bool isSubset(const NetDest& test) const { return test.isSuperset(*this); }
+  bool isElement(MachineID element) const;
+  bool isBroadcast() const;
+  bool isEmpty() const;
+
+  //For Princeton Network
+  Vector<NodeID> getAllDest();
+
+  // smallestElement() returns a within-type bit index (see NetDest.cc);
+  // the MachineType overload returns a full MachineID.
+  NodeID smallestElement() const;
+  MachineID smallestElement(MachineType machine) const;
+
+  void setSize();
+  int getSize() const { return m_bits.size(); }
+
+  // get element for a index
+  NodeID elementAt(MachineID index);
+
+  void print(ostream& out) const;
+
+private:
+
+  // Private Methods
+  // returns a value >= MachineType_base_level("this machine") and < MachineType_base_level("next highest machine")
+  int vecIndex(MachineID m) const {
+    int vec_index = MachineType_base_level(m.type);
+    assert(vec_index < m_bits.size());
+    return vec_index;
+  }
+
+  // Identity mapping: node number is used directly as the bit index.
+  NodeID bitIndex(NodeID index) const {
+    return index;
+  }
+
+  // Data Members (m_ prefix)
+  Vector < Set > m_bits; // a Vector of bit vectors - i.e. Sets
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const NetDest& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Streams the NetDest via print() and flushes.  NOTE(review):
+// 'extern inline' is a legacy GNU idiom; plain 'inline' is the
+// standard C++ spelling.
+extern inline
+ostream& operator<<(ostream& out, const NetDest& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //NETDEST_H
+
diff --git a/src/mem/ruby/common/OptBigSet.cc b/src/mem/ruby/common/OptBigSet.cc
new file mode 100644
index 000000000..51214e936
--- /dev/null
+++ b/src/mem/ruby/common/OptBigSet.cc
@@ -0,0 +1,576 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Set.C
+ *
+ * Description: See Set.h
+ *
+ * $Id: BigSet.C 1.9 05/01/19 13:12:25-06:00 mikem@maya.cs.wisc.edu $
+ *
+ */
+
+// modified (rewritten) 05/20/05 by Dan Gibson to accomimdate FASTER >32 bit
+// set sizes
+
+#include "Set.hh"
+#include "RubyConfig.hh"
+
+#if __amd64__ || __LP64__
+#define __64BITS__
+#else
+#define __32BITS__
+#endif
+
+// Default constructor: sizes the set to one bit per processor.
+Set::Set()
+{
+  m_p_nArray = NULL;  // must be NULL before setSize() so it does not free garbage
+  setSize(RubyConfig::numberOfProcessors());
+}
+
+// copy constructor
+// Deep copy: setSize() gives this object its own (static or heap) word
+// array, then the source's words are copied in.  m_p_nArray is NULL-ed
+// first so setSize() does not try to free an uninitialized pointer.
+Set::Set(const Set& obj) {
+  m_p_nArray = NULL;
+  setSize(obj.m_nSize);
+
+  // copy from the host to this array
+  for(int i=0; i<m_nArrayLen; i++) {
+    m_p_nArray[i] = obj.m_p_nArray[i];
+  }
+
+}
+
+// Constructs a set of 'size' bits, all initially clear.
+Set::Set(int size)
+{
+  m_p_nArray = NULL;  // must be NULL before setSize() so it does not free garbage
+  assert(size>0);
+  setSize(size);
+}
+
+// Destructor: frees the word array only when it was heap-allocated, i.e.
+// when it does not point at the in-object static buffer.
+Set::~Set() {
+  if( (m_p_nArray != (&m_p_nArray_Static[0])) && (m_p_nArray != NULL))
+    delete [] m_p_nArray;
+  m_p_nArray = NULL;
+}
+
+
+// /*
+// * This function should set the bit corresponding to index
+// * to 1.
+// */
+
+// void Set::add(NodeID index)
+// {
+// assert(index<m_nSize && index >= 0);
+
+// #ifdef __32BITS__
+// m_p_nArray[index>>5] |= (1 << (index & 0x01F));
+// #else
+// m_p_nArray[index>>6] |= (((unsigned long) 1) << (index & 0x03F));
+// #endif // __32BITS__
+
+// }
+
+/*
+ * This function should set all the bits in the current set
+ * that are already set in the parameter set
+ */
+void Set::addSet(const Set& set)
+{
+  // Union: every bit set in 'set' becomes set here too, one word at a
+  // time.  Both sets must be the same size.
+  assert(getSize()==set.getSize());
+  int i = m_nArrayLen;
+  while (i-- > 0) {
+    m_p_nArray[i] |= set.m_p_nArray[i];
+  }
+}
+
+/*
+ * This function should randomly assign 1 to the bits in the set--
+ * it should not clear the bits bits first, though?
+ */
+void Set::addRandom()
+{
+  for(int i=0; i<m_nArrayLen; i++) {
+    // random() yields 31 random bits; XOR with a second draw shifted by 4
+    // so all 32 bits of a word are subject to random effects, as RAND_MAX
+    // typically = 0x7FFFFFFF.
+    // NOTE(review): on 64-bit words only bits 0..34 can ever be set here.
+    m_p_nArray[i] |= random() ^ (random() << 4);
+  }
+
+  // Clear any bits at or above m_nSize in the highest-order word.  Masking
+  // with (1 << (m_nSize % W)) - 1 keeps exactly the low m_nSize % W bits --
+  // equivalent to, but cheaper than, the old one-bit-at-a-time loop.
+#ifdef __32BITS__
+  if((m_nSize % 32) != 0) {
+    m_p_nArray[m_nArrayLen-1] &= ((((unsigned long) 1) << (m_nSize & 0x01F)) - 1);
+  }
+#else
+  if((m_nSize % 64) != 0) {
+    m_p_nArray[m_nArrayLen-1] &= ((((unsigned long) 1) << (m_nSize & 0x03F)) - 1);
+  }
+#endif // __32BITS__
+
+}
+
+// /*
+// * This function unsets the bit associated with index
+// */
+// void Set::remove(NodeID index)
+// {
+// assert(index<m_nSize && index>=0);
+
+// #ifdef __32BITS__
+// m_p_nArray[index>>5] &= ~(0x00000001 << (index & 0x01F));
+// #else
+// m_p_nArray[index>>6] &= ~(((unsigned long) 0x0000000000000001) << (index & 0x03F));
+// #endif // __32BITS__
+
+// }
+
+
+/*
+ * This function clears bits that are =1 in the parameter set
+ */
+void Set::removeSet(const Set& set)
+{
+  // Set difference: clear every bit of this set that is set in 'set'.
+  assert(m_nSize==set.m_nSize);
+  int i = m_nArrayLen;
+  while (i-- > 0) {
+    m_p_nArray[i] &= ~(set.m_p_nArray[i]);
+  }
+}
+
+// /*
+// * This function clears all bits in the set
+// */
+// void Set::clear()
+// {
+// for(int i=0; i<m_nArrayLen; i++) {
+// m_p_nArray[i] = 0;
+// }
+// }
+
+/*
+ * this function sets all bits in the set
+ */
+void Set::broadcast()
+{
+  for(int i=0; i<m_nArrayLen; i++) {
+    m_p_nArray[i] = -1; // note that -1 corresponds to all 1's in 2's comp.
+  }
+
+  // Clear any bits at or above m_nSize in the highest-order word with a
+  // single mask of the low m_nSize % W bits -- equivalent to, but cheaper
+  // than, the old one-bit-at-a-time clearing loop.
+#ifdef __32BITS__
+  if((m_nSize % 32) != 0) {
+    m_p_nArray[m_nArrayLen-1] &= ((((unsigned long) 1) << (m_nSize & 0x01F)) - 1);
+  }
+#else
+  if((m_nSize % 64) != 0) {
+    m_p_nArray[m_nArrayLen-1] &= ((((unsigned long) 1) << (m_nSize & 0x03F)) - 1);
+  }
+#endif // __32BITS__
+
+}
+
+/*
+ * This function returns the population count of 1's in the set
+ */
+int Set::count() const
+{
+  // Population count of the whole word array.  Kernighan's trick clears
+  // the lowest set bit each iteration, so the cost is one step per *set*
+  // bit instead of one per bit position -- this addresses the old FIXME
+  // about the per-bit loop being slow for sparsely populated words, and
+  // removes the duplicated 32/64-bit code paths.
+  int counter = 0;
+  for( int i=0; i<m_nArrayLen; i++) {
+    unsigned long word = (unsigned long) m_p_nArray[i];
+    while (word != 0) {
+      word &= (word - 1);   // clear lowest set bit
+      counter++;
+    }
+  }
+
+  return counter;
+}
+
+/*
+ * This function checks for set equality
+ */
+
+bool Set::isEqual(const Set& set) const
+{
+  // Two same-sized sets are equal iff every word of their arrays matches.
+  assert(m_nSize==set.m_nSize);
+
+  int i = 0;
+  while (i < m_nArrayLen) {
+    if (m_p_nArray[i] != set.m_p_nArray[i]) return false;
+    ++i;
+  }
+  return true;
+}
+
+/*
+ * This function returns the NodeID (int) of the
+ * least set bit
+ */
+NodeID Set::smallestElement() const
+{
+  // Returns the index of the least-significant set bit; calling this on an
+  // empty set is an error.
+  assert(count() > 0);
+  const int bitsPerWord = 8 * sizeof(long);   // 32 or 64 depending on build
+  for( int i=0; i<m_nArrayLen; i++) {
+    if(m_p_nArray[i]!=0) {
+      // the least-set bit must be in here
+      unsigned long x = (unsigned long) m_p_nArray[i];
+      for( int j=0; j<bitsPerWord; j++) {
+        if(x & 1) {
+          return bitsPerWord*i + j;
+        }
+        x = x >> 1;
+      }
+      // unreachable: a nonzero word always contains a set bit.  (The old
+      // code raised ERROR_MSG here, *inside* the nonzero-word branch,
+      // which was misplaced.)
+    }
+  }
+
+  ERROR_MSG("No smallest element of an empty set.");
+
+  return 0;
+}
+
+/*
+ * this function returns true iff all bits are set
+ */
+bool Set::isBroadcast() const
+{
+  // True iff every bit in [0, m_nSize) is set.  Fully-populated words must
+  // be all ones (-1 in two's complement); only the last word may be
+  // partially populated (exactly when m_nSize % wordsize != 0), and its
+  // low m_nSize % wordsize bits are checked individually.
+  const int bitsPerWord = 8 * sizeof(long);   // 32 or 64 depending on build
+  const int remainder = m_nSize % bitsPerWord;
+  const int nFullWords = (remainder == 0) ? m_nArrayLen : m_nArrayLen - 1;
+
+  for(int i=0; i<nFullWords; i++) {
+    if (m_p_nArray[i] != -1) {
+      return false;
+    }
+  }
+
+  long mask = 1;
+  for(int j=0; j<remainder; j++) {
+    if ((mask & m_p_nArray[m_nArrayLen-1]) == 0) {
+      return false;
+    }
+    mask = mask << 1;
+  }
+
+  return true;
+}
+
+/*
+ * this function returns true iff no bits are set
+ */
+bool Set::isEmpty() const
+{
+  // Because the "extra" bits above m_nSize are kept at zero, the set is
+  // empty iff every word is zero.
+  int i = m_nArrayLen;
+  while (i-- > 0) {
+    if (m_p_nArray[i] != 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// returns the logical OR of "this" set and orSet
+Set Set::OR(const Set& orSet) const
+{
+  // Word-wise union of two same-sized sets, returned as a new set.
+  assert(m_nSize == orSet.m_nSize);
+  Set result(m_nSize);
+  for(int i=0; i<m_nArrayLen; ++i)
+    result.m_p_nArray[i] = m_p_nArray[i] | orSet.m_p_nArray[i];
+  return result;
+}
+
+// returns the logical AND of "this" set and andSet
+Set Set::AND(const Set& andSet) const
+{
+  // Word-wise intersection of two same-sized sets, returned as a new set.
+  assert(m_nSize == andSet.m_nSize);
+  Set result(m_nSize);
+  for(int i=0; i<m_nArrayLen; ++i)
+    result.m_p_nArray[i] = m_p_nArray[i] & andSet.m_p_nArray[i];
+  return result;
+}
+
+// // Returns true if the intersection of the two sets is non-empty
+// bool Set::intersectionIsNotEmpty(const Set& other_set) const
+// {
+// assert(m_nSize == other_set.m_nSize);
+// for(int i=0; i< m_nArrayLen; i++) {
+// if(m_p_nArray[i] & other_set.m_p_nArray[i]) {
+// return true;
+// }
+// }
+
+// return false;
+
+// }
+
+// // Returns true if the intersection of the two sets is empty
+// bool Set::intersectionIsEmpty(const Set& other_set) const
+// {
+// assert(m_nSize == other_set.m_nSize);
+// for(int i=0; i< m_nArrayLen; i++) {
+// if(m_p_nArray[i] & other_set.m_p_nArray[i]) {
+// return false;
+// }
+// }
+
+// return true;
+
+// }
+
+/*
+ * Returns false if a bit is set in the parameter set that is
+ * NOT set in this set
+ */
+bool Set::isSuperset(const Set& test) const
+{
+  // Superset iff 'test' has no bit that this set lacks, i.e.
+  // (test & ~this) is zero in every word.
+  assert(m_nSize == test.m_nSize);
+
+  for(int i=0;i<m_nArrayLen;i++) {
+    if((test.m_p_nArray[i] & ~m_p_nArray[i]) != 0) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// /*
+// * Returns true iff this bit is set
+// */
+// bool Set::isElement(NodeID element) const
+// {
+// bool result;
+
+// #ifdef __32BITS__
+// result = ((m_p_nArray[element>>5] & (0x00000001 << (element & 0x01F)))!=0);
+// #else
+// result = ((m_p_nArray[element>>6] & (((unsigned long) 0x0000000000000001) << (element & 0x03F)))!=0);
+// #endif // __32BITS__
+
+// return result;
+// }
+
+/*
+ * "Supposed" to return the node id of the (n+1)th set
+ * bit, IE n=0 => returns nodeid of first set bit, BUT
+ * since BigSet.C behaves strangely, this implementation
+ * will behave strangely just for reverse compatability.
+ *
+ * Was originally implemented for the flight data recorder
+ * FDR
+ */
+
+// NodeID Set::elementAt(int n) const
+// {
+// if(isElement(n)) return (NodeID) true;
+// else return 0;
+
+// /*
+// int match = -1;
+// for(int i=0;i<m_nSize;i++) {
+// if(isElement(i)) match++;
+// if(match==n) {
+// return i;
+// }
+// }
+
+// return -1;
+// */
+// }
+
+// Resizes the set to 'size' bits and clears all of them.  Chooses between
+// the in-object static buffer and a heap array based on the word count,
+// freeing any previously heap-allocated storage either way.
+void Set::setSize(int size)
+{
+  m_nSize = size;
+
+  // number of machine words needed to hold m_nSize bits (round up)
+#ifdef __32BITS__
+  m_nArrayLen = m_nSize/32 + ((m_nSize%32==0) ? 0 : 1 );
+#else
+  m_nArrayLen = m_nSize/64 + ((m_nSize%64==0) ? 0 : 1 );
+#endif // __32BITS__
+
+  // decide whether to use dynamic or static alloction
+  if(m_nArrayLen<=NUMBER_WORDS_PER_SET) { // constant defined in RubyConfig.h
+    // its OK to use the static allocation, and it will
+    // probably be faster (as m_nArrayLen is already in the
+    // cache and they will probably share the same cache line)
+
+    // if switching from dyanamic to static allocation (which
+    // is probably rare, but why not be complete?), must delete
+    // the dynamically allocated space
+    if((m_p_nArray != NULL) && (m_p_nArray != &m_p_nArray_Static[0]))
+      delete [] m_p_nArray;
+
+    m_p_nArray = & m_p_nArray_Static[0];
+  } else {
+
+    // can't use static allocation...simply not enough room
+    // so dynamically allocate some space
+    if((m_p_nArray != NULL) && (m_p_nArray != &m_p_nArray_Static[0]))
+      delete [] m_p_nArray;
+
+    m_p_nArray = new long[m_nArrayLen];
+  }
+
+  clear();
+}
+
+Set& Set::operator=(const Set& obj) {
+  // Guard against self-assignment, then resize (setSize reallocates as
+  // needed and clears) and copy the source's words.
+  if (this != &obj) {
+    setSize(obj.getSize());
+    for (int i = 0; i < m_nArrayLen; i++) {
+      m_p_nArray[i] = obj.m_p_nArray[i];
+    }
+  }
+  return *this;
+}
+
+// Prints the set as hex words, highest-order word first.
+void Set::print(ostream& out) const
+{
+  if(m_p_nArray==NULL) {
+    out << "[Set {Empty}]";
+    return;
+  }
+  char buff[24];
+  out << "[Set 0x ";
+  for (int i=m_nArrayLen-1; i>=0; i--) {
+    // The array elements are long, so use the 'l' length modifier (the
+    // old "%08X" / "%016llX" specifiers did not match the argument type);
+    // cast to unsigned long as %X formats an unsigned value.
+#ifdef __32BITS__
+    sprintf(buff,"%08lX ",(unsigned long) m_p_nArray[i]);
+#else
+    sprintf(buff,"0x %016lX ",(unsigned long) m_p_nArray[i]);
+#endif // __32BITS__
+    out << buff;
+  }
+  out << " ]";
+
+}
+
+
diff --git a/src/mem/ruby/common/OptBigSet.hh b/src/mem/ruby/common/OptBigSet.hh
new file mode 100644
index 000000000..a57a07e13
--- /dev/null
+++ b/src/mem/ruby/common/OptBigSet.hh
@@ -0,0 +1,202 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Set.h
+ *
+ * Description:
+ *
+ * $Id: BigSet.h 1.6 05/01/19 13:12:25-06:00 mikem@maya.cs.wisc.edu $
+ *
+ */
+
+// modified by Dan Gibson on 05/20/05 to accomidate FASTER
+// >32 set lengths, using an array of ints w/ 32 bits/int
+
+// NOTE: Never include this file directly, this should only be
+// included from Set.h
+
+#ifndef SET_H
+#define SET_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "NodeID.hh"
+#include "RubyConfig.hh"
+
+// gibson 05/20/05
+// enum PresenceBit {NotPresent, Present};
+
+// Bit-set over NodeIDs stored as an array of machine words ("long": 32 or
+// 64 bits depending on build, selected by __32BITS__).  Small sets live in
+// the in-object static buffer of NUMBER_WORDS_PER_SET words; larger ones
+// are heap-allocated by setSize().  Bits at or above getSize() are kept
+// zero by the mutators in OptBigSet.cc.
+class Set {
+public:
+  // Constructors
+  // creates and empty set
+  Set();
+  Set (int size);
+
+  // used during the replay mechanism
+  // Set(const char *str);
+
+  Set(const Set& obj);
+  Set& operator=(const Set& obj);
+
+  // Destructor
+  ~Set();
+
+  // Public Methods
+
+  // Sets the bit for 'index': index>>5 (or >>6) selects the word, the low
+  // 5 (or 6) bits select the bit within that word.
+  inline void add(NodeID index)
+  {
+#ifdef __32BITS__
+    m_p_nArray[index>>5] |= (1 << (index & 0x01F));
+#else
+    m_p_nArray[index>>6] |= (((unsigned long) 1) << (index & 0x03F));
+#endif // __32BITS__
+  }
+
+  void addSet(const Set& set);
+  void addRandom();
+
+  // Clears the bit for 'index' (word/bit selection as in add()).
+  inline void remove(NodeID index)
+  {
+#ifdef __32BITS__
+    m_p_nArray[index>>5] &= ~(0x00000001 << (index & 0x01F));
+#else
+    m_p_nArray[index>>6] &= ~(((unsigned long) 0x0000000000000001) << (index & 0x03F));
+#endif // __32BITS__
+  }
+
+
+  void removeSet(const Set& set);
+
+  // Clears every word (and therefore every bit) of the set.
+  inline void clear() { for(int i=0; i<m_nArrayLen; i++) m_p_nArray[i] = 0; }
+
+  void broadcast();
+  int count() const;
+  bool isEqual(const Set& set) const;
+
+  Set OR(const Set& orSet) const; // return the logical OR of this set and orSet
+  Set AND(const Set& andSet) const; // return the logical AND of this set and andSet
+
+  // Returns true if the intersection of the two sets is non-empty
+  inline bool intersectionIsNotEmpty(const Set& other_set) const
+  {
+    for(int i=0; i< m_nArrayLen; i++) {
+      if(m_p_nArray[i] & other_set.m_p_nArray[i]) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Returns true if the intersection of the two sets is empty
+  inline bool intersectionIsEmpty(const Set& other_set) const
+  {
+    for(int i=0; i< m_nArrayLen; i++) {
+      if(m_p_nArray[i] & other_set.m_p_nArray[i]) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  bool isSuperset(const Set& test) const;
+  bool isSubset(const Set& test) const { return test.isSuperset(*this); }
+
+  // Returns true iff the bit for 'element' is set.
+  inline bool isElement(NodeID element) const
+  {
+#ifdef __32BITS__
+    return ((m_p_nArray[element>>5] & (0x00000001 << (element & 0x01F)))!=0);
+#else
+    return ((m_p_nArray[element>>6] & (((unsigned long) 0x0000000000000001) << (element & 0x03F)))!=0);
+#endif // __32BITS__
+
+  }
+
+  bool isBroadcast() const;
+  bool isEmpty() const;
+
+  NodeID smallestElement() const;
+
+  // int size() const;
+  void setSize (int size);
+
+  // get element for a index
+  // NOTE(review): despite the name this returns a 1/0 membership flag,
+  // not a node id -- kept for reverse compatibility with BigSet.C.
+  inline NodeID elementAt(int index) const
+  {
+    if(isElement(index)) return (NodeID) true;
+    else return 0;
+  }
+
+  // gibson 05/20/05
+  int getSize() const { return m_nSize; }
+
+  // DEPRECATED METHODS
+  void addToSet(NodeID newElement) { add(newElement); } // Deprecated
+  void removeFromSet(NodeID newElement) { remove(newElement); } // Deprecated
+  void clearSet() { clear(); } // Deprecated
+  void setBroadcast() { broadcast(); } // Deprecated
+  bool presentInSet(NodeID element) const { return isElement(element); } // Deprecated
+
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Data Members (m_ prefix)
+  // gibson 05/20/05
+  // Vector<uint8> m_bits; // This is an vector of uint8 to reduce the size of the set
+
+  int m_nSize; // the number of bits in this set
+  int m_nArrayLen; // the number of machine words (long) held in the array
+
+  // Changed 5/24/05 for static allocation of array
+  // note that "long" corresponds to 32 bits on a 32-bit machine,
+  // 64 bits if the -m64 parameter is passed to g++, which it is
+  // for an AMD opteron under our configuration
+
+  long * m_p_nArray; // an word array to hold the bits in the set
+  long m_p_nArray_Static[NUMBER_WORDS_PER_SET];
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Set& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Set& obj)
+{
+  // Delegate formatting to Set::print, then flush the stream.
+  obj.print(out);
+  return out << flush;
+}
+
+#endif //SET_H
+
diff --git a/src/mem/ruby/common/Set.cc b/src/mem/ruby/common/Set.cc
new file mode 100644
index 000000000..1f5b49f90
--- /dev/null
+++ b/src/mem/ruby/common/Set.cc
@@ -0,0 +1,231 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Set.C
+ *
+ * Description: See Set.h
+ *
+ * $Id$
+ *
+ */
+
+#include "Set.hh"
+#include "RubyConfig.hh"
+
+#ifdef OPTBIGSET
+#include "OptBigSet.cc"
+#else
+
+#ifdef BIGSET
+#include "BigSet.cc" // code to supports sets larger than 32
+#else
+
+// Default constructor: sizes the set to one bit per chip.
+Set::Set()
+{
+  setSize(RubyConfig::numberOfChips());
+}
+
+// Constructs a set of 'size' bits (size must be < 32), all clear.
+Set::Set(int size)
+{
+  setSize(size);
+}
+
+// Returns true iff both sets contain exactly the same elements.
+bool Set::isEqual(const Set& set)
+{
+  // Comparing sets of different sizes is a usage error; assert for
+  // consistency with addSet()/removeSet() here and with the OptBigSet
+  // implementation of isEqual().
+  assert(m_size == set.m_size);
+  return (m_bits == set.m_bits);
+}
+
+// Sets the bit for node 'index' (0 <= index < m_size).
+void Set::add(NodeID index)
+{
+  assert((m_bits & m_mask) == m_bits); // check for any bits outside the range
+  assert(index < m_size);
+  m_bits |= (1 << index);
+  assert((m_bits & m_mask) == m_bits); // check for any bits outside the range
+}
+
+// Union: sets every bit here that is set in 'set' (sizes must match).
+void Set::addSet(const Set& set)
+{
+  assert(m_size == set.m_size);
+  m_bits |= set.m_bits;
+  assert((m_bits & m_mask) == m_bits); // check for any bits outside the range
+}
+
+// ORs random bits into the set, then masks back to the valid range.
+// random() yields 31 random bits, which covers any legal size (< 32).
+void Set::addRandom()
+{
+  m_bits |= random();
+  m_bits &= m_mask;
+  assert((m_bits & m_mask) == m_bits); // check for any bits outside the range
+}
+
+// Clears the bit for node 'index' (0 <= index < m_size).
+void Set::remove(NodeID index)
+{
+  assert(index < m_size);
+  m_bits &= ~(1 << index);
+  assert((m_bits & m_mask) == m_bits); // check for any bits outside the range
+}
+
+// Set difference: clears every bit here that is set in 'set'.
+void Set::removeSet(const Set& set)
+{
+  assert(m_size == set.m_size);
+  m_bits &= ~(set.m_bits);
+  assert((m_bits & m_mask) == m_bits); // check for any bits outside the range
+}
+
+// Removes every element from the set.
+void Set::clear()
+{
+  m_bits = 0;
+}
+
+// Sets every bit in the valid range (m_mask has exactly m_size low 1s).
+void Set::broadcast()
+{
+  m_bits = m_mask;
+}
+
+int Set::count() const
+{
+  // Population count of the in-range bits, via Kernighan's trick: each
+  // step clears the lowest set bit, so the loop runs once per element.
+  uint32 bits = (m_bits & m_mask);
+  int counter = 0;
+  while (bits != 0) {
+    bits &= (bits - 1);
+    counter++;
+  }
+  return counter;
+}
+
+// Returns the node id of the (index+1)-th set bit, counting from bit 0
+// upward; asserts if the set has fewer than index+1 elements.
+NodeID Set::elementAt(int index) {
+  // count from right to left, index starts from 0
+  for (int i=0; i<m_size; i++) {
+    if ((m_bits & (1 << i)) != 0) {
+      if (index == 0) return i;
+      index --;
+    }
+  }
+  assert(0); // index out of range
+  return 0;
+}
+
+// Returns the index of the lowest set bit; calling this on an empty set
+// is an error.
+NodeID Set::smallestElement() const
+{
+  assert(count() > 0);
+  for (int i=0; i<m_size; i++) {
+    if (isElement(i)) {
+      return i;
+    }
+  }
+  // Removed the unused 'counter' local; the explicit return below keeps a
+  // value-returning function from falling off the end if ERROR_MSG does
+  // not terminate (matches the OptBigSet implementation).
+  ERROR_MSG("No smallest element of an empty set.");
+  return 0;
+}
+
+// Returns true iff all bits are set
+// Returns true iff all bits are set
+bool Set::isBroadcast() const
+{
+  assert((m_bits & m_mask) == m_bits); // check for any bits outside the range
+  return (m_mask == m_bits);
+}
+
+// Returns true iff no bits are set
+// Returns true iff no bits are set
+bool Set::isEmpty() const
+{
+  assert((m_bits & m_mask) == m_bits); // check for any bits outside the range
+  return (m_bits == 0);
+}
+
+// returns the logical OR of "this" set and orSet
+Set Set::OR(const Set& orSet) const
+{
+  assert(m_size == orSet.m_size);
+  // Build a fresh same-sized set whose bits are the bitwise union.
+  Set result(m_size);
+  result.m_bits = (m_bits | orSet.m_bits);
+  assert((result.m_bits & result.m_mask) == result.m_bits); // check for any bits outside the range
+  return result;
+}
+
+// returns the logical AND of "this" set and andSet
+Set Set::AND(const Set& andSet) const
+{
+  assert(m_size == andSet.m_size);
+  // Build a fresh same-sized set whose bits are the bitwise intersection.
+  Set result(m_size);
+  result.m_bits = (m_bits & andSet.m_bits);
+  assert((result.m_bits & result.m_mask) == result.m_bits); // check for any bits outside the range
+  return result;
+}
+
+// Returns true if the intersection of the two sets is non-empty
+// Returns true iff the two sets share at least one element.
+bool Set::intersectionIsNotEmpty(const Set& other_set) const
+{
+  assert(m_size == other_set.m_size);
+  return ((m_bits & other_set.m_bits) != 0);
+}
+
+// Returns true if the intersection of the two sets is empty
+// Returns true iff the two sets share no elements.
+bool Set::intersectionIsEmpty(const Set& other_set) const
+{
+  assert(m_size == other_set.m_size);
+  return ((m_bits & other_set.m_bits) == 0);
+}
+
+bool Set::isSuperset(const Set& test) const
+{
+  // Superset iff 'test' contains no element missing from this set.
+  assert(m_size == test.m_size);
+  return ((test.m_bits & ~m_bits) == 0);
+}
+
+// Returns true iff the bit for 'element' is set.
+bool Set::isElement(NodeID element) const
+{
+  return ((m_bits & (1 << element)) != 0);
+}
+
+// Resizes the set to 'size' bits, clearing all elements and rebuilding
+// m_mask as a run of 'size' low-order 1 bits.
+void Set::setSize(int size)
+{
+  // We're using 32 bit ints, and the 32nd bit acts strangely due to
+  // signed/unsigned, so restrict the set size to 31 bits.
+  assert(size < 32);
+  m_size = size;
+  m_bits = 0;
+  m_mask = ~((~0) << m_size);
+  assert(m_mask != 0);
+  assert((m_bits & m_mask) == m_bits); // check for any bits outside the range
+}
+
+// Prints per-bit membership from bit 0 upward, e.g. "[Set (3) 1 0 1 ]".
+void Set::print(ostream& out) const
+{
+  out << "[Set (" << m_size << ") ";
+
+  for (int i=0; i<m_size; i++) {
+    out << (bool) isElement(i) << " ";
+  }
+  out << "]";
+}
+
+#endif // BIGSET
+
+#endif // OPTBIGSET
+
diff --git a/src/mem/ruby/common/Set.hh b/src/mem/ruby/common/Set.hh
new file mode 100644
index 000000000..7c33c840d
--- /dev/null
+++ b/src/mem/ruby/common/Set.hh
@@ -0,0 +1,149 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Set.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+// Define this to use the BigSet class which is slower, but supports
+// sets of size larger than 32.
+
+// #define BIGSET
+
+#define OPTBIGSET
+
+#ifdef OPTBIGSET
+#include "OptBigSet.hh"
+#else
+
+#ifdef BIGSET
+#include "BigSet.hh" // code to supports sets larger than 32
+#else
+
+#ifndef SET_H
+#define SET_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "NodeID.hh"
+#include "RubyConfig.hh"
+
+// Simple bit-vector set held in a single uint32; setSize() restricts the
+// size to fewer than 32 bits.  m_mask always holds exactly m_size low-order
+// 1 bits, and the implementation keeps the invariant m_bits & m_mask ==
+// m_bits (no bits outside the valid range).
+class Set {
+public:
+  // Constructors
+  // creates and empty set
+  Set();
+  Set(int size);
+
+  // used during the replay mechanism
+  // Set(const char *str);
+
+  // Set(const Set& obj);
+  // Set& operator=(const Set& obj);
+
+  // Destructor
+  // ~Set();
+
+  // Public Methods
+
+  void add(NodeID newElement);
+  void addSet(const Set& set);
+  void addRandom();
+  void remove(NodeID newElement);
+  void removeSet(const Set& set);
+  void clear();
+  void broadcast();
+  int count() const;
+  bool isEqual(const Set& set);
+
+  Set OR(const Set& orSet) const; // return the logical OR of this set and orSet
+  Set AND(const Set& andSet) const; // return the logical AND of this set and andSet
+
+  // Returns true if the intersection of the two sets is non-empty
+  bool intersectionIsNotEmpty(const Set& other_set) const;
+
+  // Returns true if the intersection of the two sets is empty
+  bool intersectionIsEmpty(const Set& other_set) const;
+
+  bool isSuperset(const Set& test) const;
+  bool isSubset(const Set& test) const { return test.isSuperset(*this); }
+  bool isElement(NodeID element) const;
+  bool isBroadcast() const;
+  bool isEmpty() const;
+
+  NodeID smallestElement() const;
+
+  // int size() const;
+  void setSize (int size);
+
+  // get element for a index
+  // (returns the node id of the (index+1)-th set bit -- see Set.cc)
+  NodeID elementAt(int index);
+  int getSize() const { return m_size; }
+
+  // DEPRECATED METHODS
+  void addToSet(NodeID newElement) { add(newElement); } // Deprecated
+  void removeFromSet(NodeID newElement) { remove(newElement); } // Deprecated
+  void clearSet() { clear(); } // Deprecated
+  void setBroadcast() { broadcast(); } // Deprecated
+  bool presentInSet(NodeID element) const { return isElement(element); } // Deprecated
+
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Data Members (m_ prefix)
+  int m_size;
+  uint32 m_bits; // Set as a bit vector
+  uint32 m_mask; // a 000001111 mask where the number of 1s is equal to m_size
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Set& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Set& obj)
+{
+  // Delegate formatting to Set::print, then flush the stream.
+  obj.print(out);
+  return out << flush;
+}
+
+#endif //SET_H
+#endif //BIGSET
+#endif //OPTBIGSET
+
diff --git a/src/mem/ruby/common/SubBlock.cc b/src/mem/ruby/common/SubBlock.cc
new file mode 100644
index 000000000..f79e33d9c
--- /dev/null
+++ b/src/mem/ruby/common/SubBlock.cc
@@ -0,0 +1,81 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "SubBlock.hh"
+
+// Constructs a sub-block at physical address 'addr' spanning 'size' bytes,
+// with every data byte zeroed.
+SubBlock::SubBlock(const Address& addr, int size)
+{
+  m_address = addr;
+  setSize(size);
+  for(int i=0; i<size; i++) {
+    setByte(i, 0);
+  }
+}
+
+// Constructs a sub-block that also records the corresponding logical
+// address; data bytes start zeroed.
+SubBlock::SubBlock(const Address& addr, const Address& logicalAddress, int size)
+{
+  m_address = addr;
+  m_logicalAddress = logicalAddress;
+  setSize(size);
+  for(int i=0; i<size; i++) {
+    setByte(i, 0);
+  }
+}
+
+void SubBlock::internalMergeFrom(const DataBlock& data)
+{
+  // Copy this sub-block's bytes out of 'data', starting at the
+  // sub-block's offset within its line.
+  const int sz = getSize();
+  assert(sz > 0);
+  const int offset = m_address.getOffset();
+  for (int i = 0; i < sz; ++i) {
+    this->setByte(i, data.getByte(offset + i));
+  }
+}
+
+void SubBlock::internalMergeTo(DataBlock& data) const
+{
+  // Copy this sub-block's bytes into 'data' at the sub-block's offset;
+  // DataBlock::setByte will detect crossing a cache line boundary.
+  const int sz = getSize();
+  assert(sz > 0);
+  const int offset = m_address.getOffset();
+  for (int i = 0; i < sz; ++i) {
+    data.setByte(offset + i, this->getByte(i));
+  }
+}
+
+// Prints "[address, size, data]" for debugging.
+void SubBlock::print(ostream& out) const
+{
+  out << "[" << m_address << ", " << getSize() << ", " << m_data << "]";
+}
+
+
+
diff --git a/src/mem/ruby/common/SubBlock.hh b/src/mem/ruby/common/SubBlock.hh
new file mode 100644
index 000000000..43f91e191
--- /dev/null
+++ b/src/mem/ruby/common/SubBlock.hh
@@ -0,0 +1,105 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#ifndef SubBlock_H
+#define SubBlock_H
+
+#include "Global.hh"
+#include "Address.hh"
+#include "RubyConfig.hh"
+#include "DataBlock.hh"
+#include "Vector.hh"
+
+class SubBlock {
+public:
+ // Constructors
+ SubBlock() { }
+ SubBlock(const Address& addr, int size);
+ SubBlock(const Address& addr, const Address& logicalAddress, int size);
+
+ // Destructor
+ ~SubBlock() { }
+
+ // Public Methods
+ const Address& getAddress() const { return m_address; }
+ const Address& getLogicalAddress() const { return m_logicalAddress; }
+ void setAddress(const Address& addr) { m_address = addr; }
+ void setLogicalAddress(const Address& addr) { m_logicalAddress = addr; }
+
+ int getSize() const { return m_data.size(); }
+ void setSize(int size) { m_data.setSize(size); }
+ uint8 getByte(int offset) const { return m_data[offset]; }
+ void setByte(int offset, uint8 data) { m_data[offset] = data; }
+
+ // Shorthands
+ uint8 readByte() const { return getByte(0); }
+ void writeByte(uint8 data) { setByte(0, data); }
+
+ // Merging to and from DataBlocks - We only need to worry about
+ // updates when we are using DataBlocks
+ void mergeTo(DataBlock& data) const { if (DATA_BLOCK) { internalMergeTo(data); } }
+ void mergeFrom(const DataBlock& data) { if (DATA_BLOCK) { internalMergeFrom(data); } }
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+ // SubBlock(const SubBlock& obj);
+ // SubBlock& operator=(const SubBlock& obj);
+ // bool bytePresent(const Address& addr) { return ((addr.getAddress() >= m_address.getAddress()) && (addr.getAddress() < (m_address.getAddress()+getSize()))); }
+ // uint8 getByte(const Address& addr) { return m_data[addr.getAddress() - m_address.getAddress()]; }
+
+ void internalMergeTo(DataBlock& data) const;
+ void internalMergeFrom(const DataBlock& data);
+
+ // Data Members (m_ prefix)
+ Address m_address;
+ Address m_logicalAddress;
+ Vector<uint> m_data;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const SubBlock& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const SubBlock& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //SubBlock_H
diff --git a/src/mem/ruby/config/RubyConfig.cc b/src/mem/ruby/config/RubyConfig.cc
new file mode 100644
index 000000000..fe4e3be8f
--- /dev/null
+++ b/src/mem/ruby/config/RubyConfig.cc
@@ -0,0 +1,193 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * RubyConfig.C
+ *
+ * Description: See RubyConfig.h
+ *
+ * $Id$
+ *
+ */
+
+#include "RubyConfig.hh"
+#include "protocol_name.hh"
+#include "util.hh"
+#include "interface.hh"
+#include "Protocol.hh"
+
+#define CHECK_POWER_OF_2(N) { if (!is_power_of_2(N)) { ERROR_MSG(#N " must be a power of 2."); }}
+#define CHECK_ZERO(N) { if (N != 0) { ERROR_MSG(#N " must be zero at initialization."); }}
+#define CHECK_NON_ZERO(N) { if (N == 0) { ERROR_MSG(#N " must be non-zero."); }}
+
+
+void RubyConfig::init()
+{
+ // MemoryControl:
+ CHECK_NON_ZERO(MEM_BUS_CYCLE_MULTIPLIER);
+ CHECK_NON_ZERO(BANKS_PER_RANK);
+ CHECK_NON_ZERO(RANKS_PER_DIMM);
+ CHECK_NON_ZERO(DIMMS_PER_CHANNEL);
+ CHECK_NON_ZERO(BANK_QUEUE_SIZE);
+ CHECK_NON_ZERO(BANK_BUSY_TIME);
+ CHECK_NON_ZERO(MEM_CTL_LATENCY);
+ CHECK_NON_ZERO(REFRESH_PERIOD);
+ CHECK_NON_ZERO(BASIC_BUS_BUSY_TIME);
+
+ CHECK_POWER_OF_2(BANKS_PER_RANK);
+ CHECK_POWER_OF_2(RANKS_PER_DIMM);
+ CHECK_POWER_OF_2(DIMMS_PER_CHANNEL);
+
+ CHECK_NON_ZERO(g_MEMORY_SIZE_BYTES);
+ CHECK_NON_ZERO(g_DATA_BLOCK_BYTES);
+ CHECK_NON_ZERO(g_PAGE_SIZE_BYTES);
+ CHECK_NON_ZERO(g_NUM_PROCESSORS);
+ CHECK_NON_ZERO(g_PROCS_PER_CHIP);
+ if(g_NUM_SMT_THREADS == 0){ //defaults to single-threaded
+ g_NUM_SMT_THREADS = 1;
+ }
+ if (g_NUM_L2_BANKS == 0) { // defaults to number of ruby nodes
+ g_NUM_L2_BANKS = g_NUM_PROCESSORS;
+ }
+ if (g_NUM_MEMORIES == 0) { // defaults to number of ruby nodes
+ g_NUM_MEMORIES = g_NUM_PROCESSORS;
+ }
+
+ CHECK_ZERO(g_MEMORY_SIZE_BITS);
+ CHECK_ZERO(g_DATA_BLOCK_BITS);
+ CHECK_ZERO(g_PAGE_SIZE_BITS);
+ CHECK_ZERO(g_NUM_PROCESSORS_BITS);
+ CHECK_ZERO(g_NUM_CHIP_BITS);
+ CHECK_ZERO(g_NUM_L2_BANKS_BITS);
+ CHECK_ZERO(g_NUM_MEMORIES_BITS);
+ CHECK_ZERO(g_PROCS_PER_CHIP_BITS);
+ CHECK_ZERO(g_NUM_L2_BANKS_PER_CHIP);
+ CHECK_ZERO(g_NUM_L2_BANKS_PER_CHIP_BITS);
+ CHECK_ZERO(g_NUM_MEMORIES_BITS);
+ CHECK_ZERO(g_MEMORY_MODULE_BLOCKS);
+ CHECK_ZERO(g_MEMORY_MODULE_BITS);
+ CHECK_ZERO(g_NUM_MEMORIES_PER_CHIP);
+
+ CHECK_POWER_OF_2(g_MEMORY_SIZE_BYTES);
+ CHECK_POWER_OF_2(g_DATA_BLOCK_BYTES);
+ CHECK_POWER_OF_2(g_NUM_PROCESSORS);
+ CHECK_POWER_OF_2(g_NUM_L2_BANKS);
+ CHECK_POWER_OF_2(g_NUM_MEMORIES);
+ CHECK_POWER_OF_2(g_PROCS_PER_CHIP);
+
+ ASSERT(g_NUM_PROCESSORS >= g_PROCS_PER_CHIP); // obviously can't have less processors than procs/chip
+ g_NUM_CHIPS = g_NUM_PROCESSORS/g_PROCS_PER_CHIP;
+ ASSERT(g_NUM_L2_BANKS >= g_NUM_CHIPS); // cannot have a single L2cache across multiple chips
+
+ g_NUM_L2_BANKS_PER_CHIP = g_NUM_L2_BANKS/g_NUM_CHIPS;
+
+ ASSERT(L2_CACHE_NUM_SETS_BITS > log_int(g_NUM_L2_BANKS_PER_CHIP)); // cannot have less than one set per bank
+ L2_CACHE_NUM_SETS_BITS = L2_CACHE_NUM_SETS_BITS - log_int(g_NUM_L2_BANKS_PER_CHIP);
+
+ if (g_NUM_CHIPS > g_NUM_MEMORIES) {
+ g_NUM_MEMORIES_PER_CHIP = 1; // some chips have a memory, others don't
+ } else {
+ g_NUM_MEMORIES_PER_CHIP = g_NUM_MEMORIES/g_NUM_CHIPS;
+ }
+
+ g_NUM_CHIP_BITS = log_int(g_NUM_CHIPS);
+ g_MEMORY_SIZE_BITS = log_int(g_MEMORY_SIZE_BYTES);
+ g_DATA_BLOCK_BITS = log_int(g_DATA_BLOCK_BYTES);
+ g_PAGE_SIZE_BITS = log_int(g_PAGE_SIZE_BYTES);
+ g_NUM_PROCESSORS_BITS = log_int(g_NUM_PROCESSORS);
+ g_NUM_L2_BANKS_BITS = log_int(g_NUM_L2_BANKS);
+ g_NUM_L2_BANKS_PER_CHIP_BITS = log_int(g_NUM_L2_BANKS_PER_CHIP);
+ g_NUM_MEMORIES_BITS = log_int(g_NUM_MEMORIES);
+ g_PROCS_PER_CHIP_BITS = log_int(g_PROCS_PER_CHIP);
+
+ g_MEMORY_MODULE_BITS = g_MEMORY_SIZE_BITS - g_DATA_BLOCK_BITS - g_NUM_MEMORIES_BITS;
+ g_MEMORY_MODULE_BLOCKS = (int64(1) << g_MEMORY_MODULE_BITS);
+
+ if ((!Protocol::m_CMP) && (g_PROCS_PER_CHIP > 1)) {
+ ERROR_MSG("Non-CMP protocol should set g_PROCS_PER_CHIP to 1");
+ }
+
+ // Randomize the execution
+ srandom(g_RANDOM_SEED);
+}
+
+int RubyConfig::L1CacheNumToL2Base(NodeID L1CacheNum)
+{
+ return L1CacheNum/g_PROCS_PER_CHIP;
+}
+
+static void print_parameters(ostream& out)
+{
+
+#define PARAM(NAME) { out << #NAME << ": " << NAME << endl; }
+#define PARAM_UINT(NAME) { out << #NAME << ": " << NAME << endl; }
+#define PARAM_ULONG(NAME) { out << #NAME << ": " << NAME << endl; }
+#define PARAM_BOOL(NAME) { out << #NAME << ": " << bool_to_string(NAME) << endl; }
+#define PARAM_DOUBLE(NAME) { out << #NAME << ": " << NAME << endl; }
+#define PARAM_STRING(NAME) { assert(NAME != NULL); out << #NAME << ": " << string(NAME) << endl; }
+#define PARAM_ARRAY(PTYPE, NAME, ARRAY_SIZE) \
+ { \
+ out << #NAME << ": ("; \
+ for (int i = 0; i < ARRAY_SIZE; i++) { \
+ if (i != 0) { \
+ out << ", "; \
+ } \
+ out << NAME[i]; \
+ } \
+ out << ")" << endl; \
+ } \
+
+
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+}
+
+void RubyConfig::printConfiguration(ostream& out) {
+ out << "Ruby Configuration" << endl;
+ out << "------------------" << endl;
+
+ out << "protocol: " << CURRENT_PROTOCOL << endl;
+ SIMICS_print_version(out);
+ out << "compiled_at: " << __TIME__ << ", " << __DATE__ << endl;
+ out << "RUBY_DEBUG: " << bool_to_string(RUBY_DEBUG) << endl;
+
+ char buffer[100];
+ gethostname(buffer, 50);
+ out << "hostname: " << buffer << endl;
+
+ print_parameters(out);
+}
+
+
diff --git a/src/mem/ruby/config/RubyConfig.hh b/src/mem/ruby/config/RubyConfig.hh
new file mode 100644
index 000000000..b2cc745bc
--- /dev/null
+++ b/src/mem/ruby/config/RubyConfig.hh
@@ -0,0 +1,157 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * RubyConfig.h
+ *
+ * Description: This class has only static members and class methods,
+ * and thus should never need to be instantiated.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef RUBYCONFIG_H
+#define RUBYCONFIG_H
+
+#include "Global.hh"
+#define CONFIG_VAR_FILENAME "config.include"
+#include "vardecl.hh"
+#include "NodeID.hh"
+
+#define MEMORY_LATENCY RubyConfig::memoryResponseLatency()
+#define ABORT_DELAY m_chip_ptr->getTransactionManager(m_version)->getAbortDelay()
+
+// Set parameterization
+/*
+ * This defines the number of longs (32-bits on 32 bit machines,
+ * 64-bit on 64-bit AMD machines) to use to hold the set...
+ * the default is 4, allowing 128 or 256 different members
+ * of the set.
+ *
+ * This should never need to be changed for correctness reasons,
+ * though increasing it will increase performance for larger
+ * set sizes at the cost of a (much) larger memory footprint
+ *
+ */
+const int NUMBER_WORDS_PER_SET = 4;
+
+class RubyConfig {
+public:
+
+  // CACHE BLOCK CONFIG VARIABLES
+ static int dataBlockBits() { return g_DATA_BLOCK_BITS; }
+ static int dataBlockBytes() { return g_DATA_BLOCK_BYTES; }
+
+ // SUPPORTED PHYSICAL MEMORY CONFIG VARIABLES
+ static int pageSizeBits() { return g_PAGE_SIZE_BITS; }
+ static int pageSizeBytes() { return g_PAGE_SIZE_BYTES; }
+ static int memorySizeBits() { return g_MEMORY_SIZE_BITS; }
+ static int64 memorySizeBytes() { return g_MEMORY_SIZE_BYTES; }
+ static int memoryModuleBits() { return g_MEMORY_MODULE_BITS; }
+ static int64 memoryModuleBlocks() { return g_MEMORY_MODULE_BLOCKS; }
+
+ // returns number of SMT threads per physical processor
+ static int numberofSMTThreads() { return g_NUM_SMT_THREADS; }
+ // defines the number of simics processors (power of 2)
+ static int numberOfProcessors() { return g_NUM_PROCESSORS; }
+ static int procsPerChipBits() { return g_PROCS_PER_CHIP_BITS; }
+ static int numberOfProcsPerChip() { return g_PROCS_PER_CHIP; }
+ static int numberOfChips() { return g_NUM_CHIPS; }
+
+  // MACHINE INSTANTIATION CONFIG VARIABLES
+ // -------------------------------------
+ // L1 CACHE MACHINES
+  // defines the number of L1 banks - independent of ruby chips (power of 2)
+  // NOTE - no protocol currently supports L1s != processors, just a placeholder
+ static int L1CacheBits() { return g_NUM_PROCESSORS_BITS; }
+ static int numberOfL1Cache() { return g_NUM_PROCESSORS; }
+ static int L1CachePerChipBits() { return procsPerChipBits() ; } // L1s != processors not currently supported
+ static int numberOfL1CachePerChip() { return numberOfProcsPerChip(); } // L1s != processors not currently supported
+ static int numberOfL1CachePerChip(NodeID myNodeID) { return numberOfL1CachePerChip(); }
+ static int L1CacheTransitionsPerCycle() { return L1CACHE_TRANSITIONS_PER_RUBY_CYCLE; }
+
+ // L2 CACHE MACHINES
+  // defines the number of L2banks/L2Caches - independent of ruby chips (power of 2)
+ static int L2CacheBits() { return g_NUM_L2_BANKS_BITS; }
+ static int numberOfL2Cache() { return g_NUM_L2_BANKS; }
+ static int L1CacheNumToL2Base(NodeID L1RubyNodeID);
+ static int L2CachePerChipBits() { return g_NUM_L2_BANKS_PER_CHIP_BITS; }
+ static int numberOfL2CachePerChip() { return g_NUM_L2_BANKS_PER_CHIP; }
+ static int numberOfL2CachePerChip(NodeID myNodeID) { return numberOfL2CachePerChip(); }
+ static int L2CacheTransitionsPerCycle() { return L2CACHE_TRANSITIONS_PER_RUBY_CYCLE; }
+
+ // DIRECTORY/MEMORY MACHINES
+  // defines the number of ruby memories - independent of ruby chips (power of 2)
+ static int memoryBits() { return g_NUM_MEMORIES_BITS; }
+ static int numberOfDirectory() { return numberOfMemories(); }
+ static int numberOfMemories() { return g_NUM_MEMORIES; }
+ static int numberOfDirectoryPerChip() { return g_NUM_MEMORIES_PER_CHIP; }
+ static int numberOfDirectoryPerChip(NodeID myNodeID) { return g_NUM_MEMORIES_PER_CHIP; }
+ static int DirectoryTransitionsPerCycle() { return DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE; }
+
+ // PERSISTENT ARBITER MACHINES
+ static int numberOfPersistentArbiter() { return numberOfMemories(); }
+ static int numberOfPersistentArbiterPerChip() {return numberOfDirectoryPerChip(); }
+ static int numberOfPersistentArbiterPerChip(NodeID myNodeID) {return numberOfDirectoryPerChip(myNodeID); }
+ static int PersistentArbiterTransitionsPerCycle() { return L2CACHE_TRANSITIONS_PER_RUBY_CYCLE; }
+
+ // ---- END MACHINE SPECIFIC VARIABLES ----
+
+ // VARIABLE MEMORY RESPONSE LATENCY
+ // *** NOTE *** This is where variation is added to the simulation
+ // see Alameldeen et al. HPCA 2003 for further details
+ static int memoryResponseLatency() { return MEMORY_RESPONSE_LATENCY_MINUS_2+(random() % 5); }
+
+ static void init();
+ static void printConfiguration(ostream& out);
+
+ // Memory Controller
+ static int memBusCycleMultiplier () { return MEM_BUS_CYCLE_MULTIPLIER; }
+ static int banksPerRank () { return BANKS_PER_RANK; }
+ static int ranksPerDimm () { return RANKS_PER_DIMM; }
+ static int dimmsPerChannel () { return DIMMS_PER_CHANNEL; }
+ static int bankBit0 () { return BANK_BIT_0; }
+ static int rankBit0 () { return RANK_BIT_0; }
+ static int dimmBit0 () { return DIMM_BIT_0; }
+ static int bankQueueSize () { return BANK_QUEUE_SIZE; }
+ static int bankBusyTime () { return BANK_BUSY_TIME; }
+ static int rankRankDelay () { return RANK_RANK_DELAY; }
+ static int readWriteDelay () { return READ_WRITE_DELAY; }
+ static int basicBusBusyTime () { return BASIC_BUS_BUSY_TIME; }
+ static int memCtlLatency () { return MEM_CTL_LATENCY; }
+ static int refreshPeriod () { return REFRESH_PERIOD; }
+ static int tFaw () { return TFAW; }
+ static int memRandomArbitrate () { return MEM_RANDOM_ARBITRATE; }
+ static int memFixedDelay () { return MEM_FIXED_DELAY; }
+
+private:
+};
+
+#endif //RUBYCONFIG_H
diff --git a/src/mem/ruby/config/config.include b/src/mem/ruby/config/config.include
new file mode 100644
index 000000000..f853fb72b
--- /dev/null
+++ b/src/mem/ruby/config/config.include
@@ -0,0 +1,323 @@
+//
+// This file has been modified by Kevin Moore and Dan Nussbaum of the
+// Scalable Systems Research Group at Sun Microsystems Laboratories
+// (http://research.sun.com/scalable/) to support the Adaptive
+// Transactional Memory Test Platform (ATMTP). For information about
+// ATMTP, see the GEMS website: http://www.cs.wisc.edu/gems/.
+//
+// Please send email to atmtp-interest@sun.com with feedback, questions, or
+// to request future announcements about ATMTP.
+//
+// ----------------------------------------------------------------------
+//
+// File modification date: 2008-02-23
+//
+// ----------------------------------------------------------------------
+//
+// ATMTP is distributed as part of the GEMS software toolset and is
+// available for use and modification under the terms of version 2 of the
+// GNU General Public License. The GNU General Public License is contained
+// in the file $GEMS/LICENSE.
+//
+// Multifacet GEMS is free software; you can redistribute it and/or modify
+// it under the terms of version 2 of the GNU General Public License as
+// published by the Free Software Foundation.
+//
+// Multifacet GEMS is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with the Multifacet GEMS; if not, write to the Free Software Foundation,
+// Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
+//
+// ----------------------------------------------------------------------
+//
+
+// see rubyconfig.defaults for some explanations
+
+PARAM( g_RANDOM_SEED );
+
+// Maximum number of cycles a request can be outstanding before the
+// Sequencer or StoreBuffer declares we're in deadlock/livelock
+PARAM( g_DEADLOCK_THRESHOLD );
+PARAM_BOOL( RANDOMIZATION );
+PARAM_BOOL( g_SYNTHETIC_DRIVER );
+PARAM_BOOL( g_DETERMINISTIC_DRIVER );
+
+// FOR MOESI_CMP_token
+PARAM_BOOL( g_FILTERING_ENABLED );
+PARAM_BOOL( g_DISTRIBUTED_PERSISTENT_ENABLED );
+PARAM_BOOL( g_DYNAMIC_TIMEOUT_ENABLED );
+PARAM( g_RETRY_THRESHOLD );
+PARAM( g_FIXED_TIMEOUT_LATENCY );
+
+PARAM( g_trace_warmup_length );
+PARAM_DOUBLE( g_bash_bandwidth_adaptive_threshold );
+
+PARAM( g_tester_length );
+PARAM( g_synthetic_locks );
+PARAM( g_deterministic_addrs );
+// Specified Generator: See SpecifiedGeneratorType in external.sm for valid values
+PARAM_STRING( g_SpecifiedGenerator );
+PARAM( g_callback_counter );
+PARAM( g_NUM_COMPLETIONS_BEFORE_PASS );
+
+PARAM( g_NUM_SMT_THREADS );
+
+PARAM( g_think_time );
+PARAM( g_hold_time );
+PARAM( g_wait_time );
+
+// For debugging purposes, one can enable a trace of all the protocol
+// state machine changes. Unfortunately, the code to generate the
+// trace is protocol specific. To enable the code for some of the
+// standard protocols,
+// 1. change "PROTOCOL_DEBUG_TRACE = true"
+// 2. enable debug in Makefile
+// 3. use the "--start 1" command line parameter or
+//    "g_debug_ptr->setDebugTime(1)" to set the time at which the
+//    debugging output should begin
+//
+// this used to be ruby/common/Global.h
+
+PARAM_BOOL( PROTOCOL_DEBUG_TRACE );
+// a string for filtering debugging output (for all g_debug vars see Debug.h)
+PARAM_STRING( DEBUG_FILTER_STRING );
+// filters debugging messages based on priority (low, med, high)
+PARAM_STRING( DEBUG_VERBOSITY_STRING );
+// filters debugging messages based on a ruby time
+PARAM_ULONG( DEBUG_START_TIME );
+// sends debugging messages to a output filename
+PARAM_STRING( DEBUG_OUTPUT_FILENAME );
+
+// defines relative (integer) clock multipliers between ruby, opal, and simics
+PARAM( SIMICS_RUBY_MULTIPLIER );
+PARAM( OPAL_RUBY_MULTIPLIER );
+
+PARAM_BOOL( TRANSACTION_TRACE_ENABLED );
+PARAM_BOOL( USER_MODE_DATA_ONLY );
+PARAM_BOOL( PROFILE_HOT_LINES );
+
+// PROFILE_ALL_INSTRUCTIONS is used if you want Ruby to profile all instructions executed
+// The following need to be true for this to work correctly:
+// 1. Disable istc and dstc for this simulation run
+// 2. Add the following line to the object "sim" in the checkpoint you run from:
+// instruction_profile_line_size: 4
+// This is used to have simics report back all instruction requests
+
+// For more details on how to find out how to interpret the output physical instruction
+// address, please read the document in the simics-howto directory
+PARAM_BOOL( PROFILE_ALL_INSTRUCTIONS );
+
+// Set the following variable to true if you want a complete trace of
+// PCs (physical address of program counters, with executing processor IDs)
+// to be printed to stdout. Make sure to direct the simics output to a file.
+// Otherwise, the run will take a really long time!
+// A long run may write a file that can exceed the OS limit on file length
+PARAM_BOOL( PRINT_INSTRUCTION_TRACE );
+PARAM( g_DEBUG_CYCLE );
+
+// Don't allow any datablocks to enter the STC
+PARAM_BOOL( BLOCK_STC );
+
+// Make the entire memory system perfect
+PARAM_BOOL( PERFECT_MEMORY_SYSTEM );
+PARAM( PERFECT_MEMORY_SYSTEM_LATENCY );
+
+PARAM_BOOL( DATA_BLOCK ); // Define NO_DATA_BLOCK to make the DataBlock take zero space
+
+PARAM_BOOL( REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH );
+
+// *********************************************
+// CACHE & MEMORY PARAMETERS
+// *********************************************
+
+PARAM_BOOL( g_SIMICS );
+
+PARAM( L1_CACHE_ASSOC );
+PARAM( L1_CACHE_NUM_SETS_BITS );
+PARAM( L2_CACHE_ASSOC );
+PARAM( L2_CACHE_NUM_SETS_BITS );
+
+PARAM_ULONG( g_MEMORY_SIZE_BYTES );
+PARAM( g_DATA_BLOCK_BYTES );
+// The following page size parameter is used by the stride prefetcher
+PARAM( g_PAGE_SIZE_BYTES );
+PARAM_STRING( g_REPLACEMENT_POLICY );
+
+PARAM( g_NUM_PROCESSORS );
+PARAM( g_NUM_L2_BANKS );
+PARAM( g_NUM_MEMORIES );
+PARAM( g_PROCS_PER_CHIP );
+
+// The following group of parameters are calculated. They must
+// _always_ be left at zero.
+PARAM( g_NUM_CHIPS );
+PARAM( g_NUM_CHIP_BITS );
+PARAM( g_MEMORY_SIZE_BITS );
+PARAM( g_DATA_BLOCK_BITS );
+PARAM( g_PAGE_SIZE_BITS );
+PARAM( g_NUM_PROCESSORS_BITS );
+PARAM( g_PROCS_PER_CHIP_BITS );
+PARAM( g_NUM_L2_BANKS_BITS );
+PARAM( g_NUM_L2_BANKS_PER_CHIP_BITS );
+PARAM( g_NUM_L2_BANKS_PER_CHIP );
+PARAM( g_NUM_MEMORIES_BITS );
+PARAM( g_NUM_MEMORIES_PER_CHIP );
+PARAM( g_MEMORY_MODULE_BITS );
+PARAM_ULONG( g_MEMORY_MODULE_BLOCKS );
+
+// determines the mapping between L2 banks and sets within L2 banks
+PARAM_BOOL( MAP_L2BANKS_TO_LOWEST_BITS );
+
+// TIMING PARAMETERS
+PARAM( DIRECTORY_CACHE_LATENCY );
+
+PARAM( NULL_LATENCY );
+PARAM( ISSUE_LATENCY );
+PARAM( CACHE_RESPONSE_LATENCY );
+PARAM( L2_RESPONSE_LATENCY );
+PARAM( L2_TAG_LATENCY );
+PARAM( L1_RESPONSE_LATENCY );
+PARAM( MEMORY_RESPONSE_LATENCY_MINUS_2 );
+PARAM( DIRECTORY_LATENCY );
+PARAM( NETWORK_LINK_LATENCY );
+PARAM( COPY_HEAD_LATENCY );
+PARAM( ON_CHIP_LINK_LATENCY );
+PARAM( RECYCLE_LATENCY );
+PARAM( L2_RECYCLE_LATENCY );
+PARAM( TIMER_LATENCY );
+PARAM( TBE_RESPONSE_LATENCY );
+PARAM_BOOL( PERIODIC_TIMER_WAKEUPS );
+
+// constants used by TM protocols
+PARAM_BOOL( PROFILE_EXCEPTIONS );
+PARAM_BOOL( PROFILE_XACT );
+PARAM_BOOL( PROFILE_NONXACT );
+PARAM_BOOL( XACT_DEBUG );
+PARAM ( XACT_DEBUG_LEVEL );
+PARAM_BOOL( XACT_MEMORY );
+PARAM_BOOL( XACT_ENABLE_TOURMALINE );
+PARAM( XACT_NUM_CURRENT );
+PARAM( XACT_LAST_UPDATE );
+PARAM_BOOL( XACT_ISOLATION_CHECK );
+PARAM_BOOL( PERFECT_FILTER );
+PARAM_STRING( READ_WRITE_FILTER );
+PARAM_BOOL( PERFECT_VIRTUAL_FILTER );
+PARAM_STRING( VIRTUAL_READ_WRITE_FILTER );
+PARAM_BOOL( PERFECT_SUMMARY_FILTER );
+PARAM_STRING( SUMMARY_READ_WRITE_FILTER );
+PARAM_BOOL( XACT_EAGER_CD );
+PARAM_BOOL( XACT_LAZY_VM );
+PARAM_STRING( XACT_CONFLICT_RES );
+PARAM_BOOL( XACT_VISUALIZER );
+PARAM( XACT_COMMIT_TOKEN_LATENCY ) ;
+PARAM_BOOL( XACT_NO_BACKOFF );
+PARAM ( XACT_LOG_BUFFER_SIZE );
+PARAM ( XACT_STORE_PREDICTOR_HISTORY);
+PARAM ( XACT_STORE_PREDICTOR_ENTRIES);
+PARAM ( XACT_STORE_PREDICTOR_THRESHOLD);
+PARAM ( XACT_FIRST_ACCESS_COST );
+PARAM ( XACT_FIRST_PAGE_ACCESS_COST );
+PARAM_BOOL( ENABLE_MAGIC_WAITING );
+PARAM_BOOL( ENABLE_WATCHPOINT );
+PARAM_BOOL( XACT_ENABLE_VIRTUALIZATION_LOGTM_SE );
+
+// ATMTP
+PARAM_BOOL( ATMTP_ENABLED );
+PARAM_BOOL( ATMTP_ABORT_ON_NON_XACT_INST );
+PARAM_BOOL( ATMTP_ALLOW_SAVE_RESTORE_IN_XACT );
+PARAM( ATMTP_XACT_MAX_STORES );
+PARAM( ATMTP_DEBUG_LEVEL );
+
+// constants used by CMP protocols
+PARAM( L1_REQUEST_LATENCY );
+PARAM( L2_REQUEST_LATENCY );
+PARAM_BOOL( SINGLE_ACCESS_L2_BANKS ); // hack to simulate multi-cycle L2 bank accesses
+
+// Ruby cycles between when a sequencer issues a miss and when it
+// arrives at the L1 cache controller
+PARAM( SEQUENCER_TO_CONTROLLER_LATENCY );
+
+// Number of transitions each controller state machines can complete per cycle
+PARAM( L1CACHE_TRANSITIONS_PER_RUBY_CYCLE );
+PARAM( L2CACHE_TRANSITIONS_PER_RUBY_CYCLE );
+PARAM( DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE );
+
+// Maximum number of requests (including prefetches) outstanding from
+// the sequencer (Note: this also include items buffered in the store
+// buffer)
+PARAM( g_SEQUENCER_OUTSTANDING_REQUESTS );
+
+// Number of TBEs available for demand misses, prefetches, and replacements
+PARAM( NUMBER_OF_TBES );
+PARAM( NUMBER_OF_L1_TBES );
+PARAM( NUMBER_OF_L2_TBES );
+
+// NOTE: Finite buffering allows us to simulate a wormhole routed network
+// with idealized flow control. All message buffers within the network (i.e.
+// the switch's input and output buffers) are set to the size specified below
+// by the PROTOCOL_BUFFER_SIZE
+PARAM_BOOL( FINITE_BUFFERING );
+PARAM( FINITE_BUFFER_SIZE ); // Zero is unbounded buffers
+// Number of requests buffered between the sequencer and the L1 controller
+// This can be more accurately simulated in Opal, therefore it's set to an
+// infinite number
+// Only affects the simulation when FINITE_BUFFERING is enabled
+PARAM( PROCESSOR_BUFFER_SIZE );
+// The PROTOCOL_BUFFER_SIZE limits the size of all other buffers connecting to
+// Controllers. Controls the number of requests issued by the L2 HW Prefetcher
+PARAM( PROTOCOL_BUFFER_SIZE );
+
+// Enable the TSO (Total Store Order) memory model
+PARAM_BOOL( TSO ); // Note: This also disables the "write" STCs
+
+// NETWORK PARAMETERS
+
+// Network Topology: See TopologyType in external.sm for valid values
+PARAM_STRING( g_NETWORK_TOPOLOGY );
+
+// Cache Design specifies file prefix for topology
+PARAM_STRING( g_CACHE_DESIGN );
+
+PARAM( g_endpoint_bandwidth );
+PARAM_BOOL( g_adaptive_routing );
+PARAM( NUMBER_OF_VIRTUAL_NETWORKS );
+PARAM( FAN_OUT_DEGREE );
+PARAM_BOOL( g_PRINT_TOPOLOGY );
+
+// transactional memory
+PARAM( XACT_LENGTH );
+PARAM( XACT_SIZE );
+PARAM( ABORT_RETRY_TIME );
+
+// Princeton Network (Garnet)
+PARAM_BOOL( g_GARNET_NETWORK );
+PARAM_BOOL( g_DETAIL_NETWORK );
+PARAM_BOOL( g_NETWORK_TESTING );
+PARAM( g_FLIT_SIZE );
+PARAM( g_NUM_PIPE_STAGES );
+PARAM( g_VCS_PER_CLASS );
+PARAM( g_BUFFER_SIZE );
+
+// MemoryControl:
+PARAM( MEM_BUS_CYCLE_MULTIPLIER );
+PARAM( BANKS_PER_RANK );
+PARAM( RANKS_PER_DIMM );
+PARAM( DIMMS_PER_CHANNEL );
+PARAM( BANK_BIT_0 );
+PARAM( RANK_BIT_0 );
+PARAM( DIMM_BIT_0 );
+PARAM( BANK_QUEUE_SIZE );
+PARAM( BANK_BUSY_TIME );
+PARAM( RANK_RANK_DELAY );
+PARAM( READ_WRITE_DELAY );
+PARAM( BASIC_BUS_BUSY_TIME );
+PARAM( MEM_CTL_LATENCY );
+PARAM( REFRESH_PERIOD );
+PARAM( TFAW );
+PARAM( MEM_RANDOM_ARBITRATE );
+PARAM( MEM_FIXED_DELAY );
+
diff --git a/src/mem/ruby/config/rubyconfig.defaults b/src/mem/ruby/config/rubyconfig.defaults
new file mode 100644
index 000000000..3b86b4645
--- /dev/null
+++ b/src/mem/ruby/config/rubyconfig.defaults
@@ -0,0 +1,466 @@
+//
+// This file has been modified by Kevin Moore and Dan Nussbaum of the
+// Scalable Systems Research Group at Sun Microsystems Laboratories
+// (http://research.sun.com/scalable/) to support the Adaptive
+// Transactional Memory Test Platform (ATMTP). For information about
+// ATMTP, see the GEMS website: http://www.cs.wisc.edu/gems/.
+//
+// Please send email to atmtp-interest@sun.com with feedback, questions, or
+// to request future announcements about ATMTP.
+//
+// ----------------------------------------------------------------------
+//
+// File modification date: 2008-02-23
+//
+// ----------------------------------------------------------------------
+//
+// ATMTP is distributed as part of the GEMS software toolset and is
+// available for use and modification under the terms of version 2 of the
+// GNU General Public License. The GNU General Public License is contained
+// in the file $GEMS/LICENSE.
+//
+// Multifacet GEMS is free software; you can redistribute it and/or modify
+// it under the terms of version 2 of the GNU General Public License as
+// published by the Free Software Foundation.
+//
+// Multifacet GEMS is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with the Multifacet GEMS; if not, write to the Free Software Foundation,
+// Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
+//
+// ----------------------------------------------------------------------
+//
+
+g_RANDOM_SEED: 1
+g_SIMICS: true
+
+g_DEADLOCK_THRESHOLD: 500000
+
+// determines how many Simics cycles advance for every Ruby cycle
+// (does not apply when running Opal)
+SIMICS_RUBY_MULTIPLIER: 4
+
+// corresponding parameter when using Opal+Ruby+Simics
+OPAL_RUBY_MULTIPLIER: 1
+
+
+// Ruby cycles between when a sequencer issues a request and it arrives at
+// the L1 cache controller
+//
+// ** important ** this parameter determines the L2 hit latency when
+// using the SMP protocols with a combined L1/L2 controller (-cache.sm)
+//
+SEQUENCER_TO_CONTROLLER_LATENCY: 4
+
+
+// When set to false, the L1 cache structures are probed for a hit in Sequencer.C
+// If a request hits, it is *not* issued to the cache controller
+// When set to true, all processor data requests issue to cache controller
+//
+// ** important ** this parameter must be set to false for proper L1/L2 hit timing
+// for the SMP protocols with combined L1/L2 controllers (-cache.sm)
+//
+REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH: false
+
+
+// When running with Opal in SMT configurations, this indicates the number of threads per physical processor
+g_NUM_SMT_THREADS: 1
+
+
+// Maximum number of requests (including SW prefetches) outstanding from
+// the sequencer (Note: this also include items buffered in the store
+// buffer)
+g_SEQUENCER_OUTSTANDING_REQUESTS: 16
+
+
+PROTOCOL_DEBUG_TRACE: true
+DEBUG_FILTER_STRING: none
+DEBUG_VERBOSITY_STRING: none
+DEBUG_START_TIME: 0
+DEBUG_OUTPUT_FILENAME: none
+
+
+TRANSACTION_TRACE_ENABLED: false
+USER_MODE_DATA_ONLY: false
+PROFILE_HOT_LINES: false
+
+PROFILE_ALL_INSTRUCTIONS: false
+PRINT_INSTRUCTION_TRACE: false
+g_DEBUG_CYCLE: 0
+BLOCK_STC: false
+PERFECT_MEMORY_SYSTEM: false
+PERFECT_MEMORY_SYSTEM_LATENCY: 0
+DATA_BLOCK: false
+
+
+// *********************************************
+// CACHE & MEMORY PARAMETERS
+// *********************************************
+
+
+L1_CACHE_ASSOC: 4
+L1_CACHE_NUM_SETS_BITS: 8
+L2_CACHE_ASSOC: 4
+L2_CACHE_NUM_SETS_BITS: 16
+
+// 32 bits = 4 GB address space
+g_MEMORY_SIZE_BYTES: 4294967296
+g_DATA_BLOCK_BYTES: 64
+g_PAGE_SIZE_BYTES: 4096
+g_REPLACEMENT_POLICY: PSEDUO_LRU // currently, only other option is LRU
+
+g_PROCS_PER_CHIP: 1
+
+
+// set automatically
+g_NUM_PROCESSORS: 0
+g_NUM_L2_BANKS: 0
+g_NUM_MEMORIES: 0
+
+// The following group of parameters are calculated. They must
+// _always_ be left at zero.
+g_NUM_CHIPS: 0
+g_NUM_CHIP_BITS: 0
+g_MEMORY_SIZE_BITS: 0
+g_DATA_BLOCK_BITS: 0
+g_PAGE_SIZE_BITS: 0
+g_NUM_PROCESSORS_BITS: 0
+g_PROCS_PER_CHIP_BITS: 0
+g_NUM_L2_BANKS_BITS: 0
+g_NUM_L2_BANKS_PER_CHIP: 0
+g_NUM_L2_BANKS_PER_CHIP_BITS: 0
+g_NUM_MEMORIES_BITS: 0
+g_NUM_MEMORIES_PER_CHIP: 0
+g_MEMORY_MODULE_BITS: 0
+g_MEMORY_MODULE_BLOCKS: 0
+
+
+// For certain CMP protocols, determines whether the lowest bits of a block address
+// are used to index to a L2 cache bank or into the sets of a
+// single bank
+// lowest highest
+// true: g_DATA_BLOCK_BITS | g_NUM_L2_BANKS_PER_CHIP_BITS | L2_CACHE_NUM_SETS_BITS
+// false: g_DATA_BLOCK_BITS | L2_CACHE_NUM_SETS_BITS | g_NUM_L2_BANKS_PER_CHIP_BITS
+MAP_L2BANKS_TO_LOWEST_BITS: false
+
+
+
+// TIMING PARAMETERS -- many of these are protocol specific. See SLICC files
+// to determine where they apply
+
+MEMORY_RESPONSE_LATENCY_MINUS_2: 158 // determines memory response latency
+DIRECTORY_CACHE_LATENCY: 6
+NULL_LATENCY: 1
+ISSUE_LATENCY: 2
+CACHE_RESPONSE_LATENCY: 12
+L1_RESPONSE_LATENCY: 3
+L2_RESPONSE_LATENCY: 6
+L2_TAG_LATENCY: 6
+DIRECTORY_LATENCY: 80
+NETWORK_LINK_LATENCY: 1
+COPY_HEAD_LATENCY: 4
+ON_CHIP_LINK_LATENCY: 1
+RECYCLE_LATENCY: 10
+L2_RECYCLE_LATENCY: 5
+TIMER_LATENCY: 10000
+TBE_RESPONSE_LATENCY: 1
+PERIODIC_TIMER_WAKEUPS: true
+
+
+// constants used by CMP protocols
+// cache bank access times
+L1_REQUEST_LATENCY: 2
+L2_REQUEST_LATENCY: 4
+
+
+
+
+// Number of transitions each controller state machines can complete per cycle
+// i.e. the number of ports to each controller
+// L1cache is the sum of the L1I and L1D cache ports
+L1CACHE_TRANSITIONS_PER_RUBY_CYCLE: 32
+// Note: if SINGLE_ACCESS_L2_BANKS is enabled, this will probably enforce a
+// much greater constraint on the concurrency of a L2 cache bank
+L2CACHE_TRANSITIONS_PER_RUBY_CYCLE: 32
+DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE: 32
+
+
+// Number of TBEs available for demand misses, ALL prefetches, and replacements
+// used by one-level protocols
+NUMBER_OF_TBES: 128
+// two-level protocols
+NUMBER_OF_L1_TBES: 32
+NUMBER_OF_L2_TBES: 32
+
+// TSO is deprecated
+TSO: false
+
+
+// ** INTERCONNECT PARAMETERS **
+//
+g_PRINT_TOPOLOGY: true
+g_NETWORK_TOPOLOGY: HIERARCHICAL_SWITCH
+g_CACHE_DESIGN: NUCA // specifies file prefix for FILE_SPECIFIED topology
+FAN_OUT_DEGREE: 4 // for HIERARCHICAL SWITCH topology
+
+g_adaptive_routing: true
+NUMBER_OF_VIRTUAL_NETWORKS: 4
+
+// bandwidth unit is 1/1000 byte per cycle. the following parameter is multiplied by
+// topology specific link weights
+g_endpoint_bandwidth: 10000
+
+
+// ** finite buffering parameters
+//
+// note: Finite buffering allows us to simulate a realistic virtual cut-through
+// routed network with idealized flow control. this feature is NOT heavily tested
+FINITE_BUFFERING: false
+// All message buffers within the network (i.e. the switch's input and
+// output buffers) are set to the size specified below by the FINITE_BUFFER_SIZE
+FINITE_BUFFER_SIZE: 3
+// g_SEQUENCER_OUTSTANDING_REQUESTS (above) controls the number of demand requests
+// issued by the sequencer. The PROCESSOR_BUFFER_SIZE controls the
+// number of requests in the mandatory queue
+// Only affects the simulation when FINITE_BUFFERING is enabled
+PROCESSOR_BUFFER_SIZE: 10
+// The PROTOCOL_BUFFER_SIZE limits the size of all other buffers connecting to
+// Controllers. Controls the number of requests issued by the L2 HW Prefetcher
+PROTOCOL_BUFFER_SIZE: 32
+// ** end finite buffering parameters
+
+
+// (deprecated)
+// Allows only a single access to a multi-cycle L2 bank.
+// Ensures the cache array is only accessed once for every L2_REQUEST_LATENCY
+// number of cycles. However the TBE table can be accessed in parallel.
+SINGLE_ACCESS_L2_BANKS: true
+
+
+// constants used by TM protocols
+PROFILE_EXCEPTIONS: false
+PROFILE_XACT: true
+PROFILE_NONXACT: false
+XACT_DEBUG: true
+XACT_DEBUG_LEVEL: 1
+//XACT_MEMORY: true // set to true for TM protocols. set it HERE for lazy systems to register the proper SIMICS interfaces
+XACT_MEMORY: false
+XACT_ENABLE_TOURMALINE: false // perfect memory system
+XACT_NUM_CURRENT: 0 // must be 0
+XACT_LAST_UPDATE: 0 // must be 0
+XACT_ISOLATION_CHECK: false // Checks whether each memory access preserves transaction isolation
+PERFECT_FILTER: true // If true, use perfect physical read/write filters
+READ_WRITE_FILTER: Perfect_
+PERFECT_VIRTUAL_FILTER: true // If true, use perfect virtual read/write filters
+VIRTUAL_READ_WRITE_FILTER: Perfect_
+PERFECT_SUMMARY_FILTER: true // If true, use perfect summary read/write filters
+SUMMARY_READ_WRITE_FILTER: Perfect_
+XACT_EAGER_CD: true
+XACT_LAZY_VM: false
+XACT_CONFLICT_RES: BASE
+XACT_COMMIT_TOKEN_LATENCY: 0
+XACT_VISUALIZER: false
+XACT_NO_BACKOFF: false
+XACT_LOG_BUFFER_SIZE: 0
+XACT_STORE_PREDICTOR_ENTRIES: 256
+XACT_STORE_PREDICTOR_HISTORY: 256
+XACT_STORE_PREDICTOR_THRESHOLD: 4
+XACT_FIRST_ACCESS_COST: 0
+XACT_FIRST_PAGE_ACCESS_COST: 0
+ENABLE_MAGIC_WAITING: false
+ENABLE_WATCHPOINT: false
+XACT_ENABLE_VIRTUALIZATION_LOGTM_SE: false
+// g_NETWORK_TOPOLOGY: FILE_SPECIFIED
+// NUMBER_OF_VIRTUAL_NETWORKS: 5
+// L2_REQUEST_LATENCY: 15
+// SEQUENCER_TO_CONTROLLER_LATENCY: 3
+// L2_RESPONSE_LATENCY: 20
+// L2_TAG_LATENCY: 6
+// MEMORY_RESPONSE_LATENCY_MINUS_2: 448
+// RECYCLE_LATENCY: 1
+// g_MEMORY_SIZE_BYTES: 268435456
+// REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH: true
+
+// ATMTP
+ATMTP_ENABLED: false
+ATMTP_ABORT_ON_NON_XACT_INST: false
+ATMTP_ALLOW_SAVE_RESTORE_IN_XACT: false
+ATMTP_XACT_MAX_STORES: 32
+ATMTP_DEBUG_LEVEL: 0
+
+// MOESI_CMP_token parameters (some might be deprecated)
+g_FILTERING_ENABLED: false
+g_DISTRIBUTED_PERSISTENT_ENABLED: true
+g_RETRY_THRESHOLD: 1
+g_DYNAMIC_TIMEOUT_ENABLED: true
+g_FIXED_TIMEOUT_LATENCY: 300
+
+
+// tester parameters (overridden by testerconfig.defaults)
+//
+// injects random message delays to excite protocol races
+RANDOMIZATION: false
+g_SYNTHETIC_DRIVER: false
+g_DETERMINISTIC_DRIVER: false
+g_trace_warmup_length: 1000000
+g_bash_bandwidth_adaptive_threshold: 0.75
+
+g_tester_length: 0
+// # of synthetic locks == 16 * 128
+g_synthetic_locks: 2048
+g_deterministic_addrs: 1
+g_SpecifiedGenerator: DetermInvGenerator
+g_callback_counter: 0
+g_NUM_COMPLETIONS_BEFORE_PASS: 0
+// parameters used by locking synthetic tester
+g_think_time: 5
+g_hold_time: 5
+g_wait_time: 5
+
+// Princeton Network (Garnet)
+g_GARNET_NETWORK: false
+g_DETAIL_NETWORK: false
+g_NETWORK_TESTING: false
+g_FLIT_SIZE: 16
+g_NUM_PIPE_STAGES: 4
+g_VCS_PER_CLASS: 4
+g_BUFFER_SIZE: 4
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// MemoryControl:
+
+// Basic cycle time of the memory controller. This defines the period which is
+// used as the memory channel clock period, the address bus bit time, and the
+// memory controller cycle time.
+// Assuming a 200 MHz memory channel (DDR-400, which has 400 bits/sec data),
+// and a 2 GHz Ruby clock:
+MEM_BUS_CYCLE_MULTIPLIER: 10
+
+// How many internal banks in each DRAM chip:
+BANKS_PER_RANK: 8
+
+// How many sets of DRAM chips per DIMM.
+RANKS_PER_DIMM: 2
+
+// How many DIMMs per channel. (Currently the only thing that
+// matters is the number of ranks per channel, i.e. the product
+// of this parameter and RANKS_PER_DIMM. But if and when this is
+// expanded to do FB-DIMMs, the distinction between the two
+// will matter.)
+DIMMS_PER_CHANNEL: 2
+
+// Which bits to use to find the bank, rank, and DIMM numbers.
+// You could choose to have the bank bits, rank bits, and DIMM bits
+// in any order; here they are in that order.
+// For these defaults, we assume this format for addresses:
+// Offset within line: [5:0]
+// Memory controller #: [7:6]
+// Bank: [10:8]
+// Rank: [11]
+// DIMM: [12]
+// Row addr / Col addr: [top:13]
+// If you get these bits wrong, then some banks won't see any
+// requests; you need to check for this in the .stats output.
+BANK_BIT_0: 8
+RANK_BIT_0: 11
+DIMM_BIT_0: 12
+
+// Number of entries max in each bank queues; set to whatever you want.
+// If it is too small, you will see in the .stats file a lot of delay
+// time spent in the common input queue.
+BANK_QUEUE_SIZE: 12
+
+// Bank cycle time (tRC) measured in memory cycles:
+BANK_BUSY_TIME: 11
+
+// This is how many memory address cycles to delay between reads to
+// different ranks of DRAMs to allow for clock skew:
+RANK_RANK_DELAY: 1
+
+// This is how many memory address cycles to delay between a read
+// and a write. This is based on two things: (1) the data bus is
+// used one cycle earlier in the operation; (2) a round-trip wire
+// delay from the controller to the DIMM that did the reading.
+READ_WRITE_DELAY: 2
+
+// Basic address and data bus occupancy. If you are assuming a
+// 16-byte-wide data bus (pairs of DIMMs side-by-side), then
+// the data bus occupancy matches the address bus occupancy at
+// two cycles. But if the channel is only 8 bytes wide, you
+// need to increase this bus occupancy time to 4 cycles.
+BASIC_BUS_BUSY_TIME: 2
+
+// Latency to returning read request or writeback acknowledgement.
+// Measured in memory address cycles.
+// This equals tRCD + CL + AL + (four bit times)
+// + (round trip on channel)
+// + (memory control internal delays)
+// It's going to be an approximation, so pick what you like.
+// Note: The fact that latency is a constant, and does not depend on two
+// low-order address bits, implies that our memory controller either:
+// (a) tells the DRAM to read the critical word first, and sends the
+// critical word first back to the CPU, or (b) waits until it has
+// seen all four bit times on the data wires before sending anything
+// back. Either is plausible. If (a), remove the "four bit times"
+// term from the calculation above.
+MEM_CTL_LATENCY: 12
+
+// refresh_period is the number of memory cycles between refresh
+// of row x in bank n and refresh of row x+1 in bank n. For DDR-400,
+// this is typically 7.8 usec for commercial systems; after 8192 such
+// refreshes, this will have refreshed the whole chip in 64 msec. If
+// we have a 5 nsec memory clock, 7800 / 5 = 1560 cycles. The memory
+// controller will divide this by the total number of banks, and kick
+// off a refresh to *somebody* every time that amount is counted
+// down to zero. (There will be some rounding error there, but it
+// should have minimal effect.)
+REFRESH_PERIOD: 1560
+
+// tFAW is a DRAM chip parameter which restricts the number of
+// activates that can be done within a certain window of time.
+// The window is specified here in terms of number of memory
+// controller cycles. At most four activates may be done during
+// any such sliding window. If this number is set to be no more
+// than 4 * BASIC_BUS_BUSY_TIME, it will have no effect.
+// It is typical in real systems for tFAW to have no effect, but
+// it may be useful in throttling power. Set to zero to ignore.
+TFAW: 0
+
+// By default, the memory controller uses round-robin to arbitrate
+// between ready bank queues for use of the address bus. If you
+// wish to add randomness to the system, set this parameter to
+// one instead, and it will restart the round-robin pointer at a
+// random bank number each cycle. If you want additional
+// nondeterminism, set the parameter to some integer n >= 2, and
+// it will in addition add a n% chance each cycle that a ready bank
+// will be delayed an additional cycle. Note that if you are
+// in MEM_FIXED_DELAY mode (see below), MEM_RANDOM_ARBITRATE=1 will
+// have no effect, but MEM_RANDOM_ARBITRATE=2 or more will.
+MEM_RANDOM_ARBITRATE: 0
+
+// The following parameter, if nonzero, will disable the memory
+// controller and instead give every request a fixed latency. The
+// nonzero value specified here is measured in memory cycles and is
+// just added to MEM_CTL_LATENCY. It will also show up in the stats
+// file as a contributor to memory_delays_stalled_at_head_of_bank_queue.
+MEM_FIXED_DELAY: 0
+
+// If instead of DDR-400, you wanted DDR-800, the channel gets faster
+// but the basic operation of the DRAM core is unchanged.
+// Busy times appear to double just because they are measured
+// in smaller clock cycles. The performance advantage comes because
+// the bus busy times don't actually quite double.
+// You would use something like these values:
+//
+// MEM_BUS_CYCLE_MULTIPLIER: 5
+// BANK_BUSY_TIME: 22
+// RANK_RANK_DELAY: 2
+// READ_WRITE_DELAY: 3
+// BASIC_BUS_BUSY_TIME: 3
+// MEM_CTL_LATENCY: 20
+// REFRESH_PERIOD: 3120
diff --git a/src/mem/ruby/config/tester.defaults b/src/mem/ruby/config/tester.defaults
new file mode 100644
index 000000000..ea83a1443
--- /dev/null
+++ b/src/mem/ruby/config/tester.defaults
@@ -0,0 +1,60 @@
+
+//
+// This file contains tester specific changes to the rubyconfig.defaults
+// parameter values.
+//
+// Please: - Add new variables only to rubyconfig.defaults file.
+// - Change them here only when necessary.
+
+g_SIMICS: false
+DATA_BLOCK: true
+RANDOMIZATION: true
+g_SYNTHETIC_DRIVER: true
+g_DETERMINISTIC_DRIVER: false
+g_DEADLOCK_THRESHOLD: 500000
+g_SpecifiedGenerator: DetermGETXGenerator
+
+PROTOCOL_DEBUG_TRACE: true
+
+//
+// Generic cache parameters
+//
+
+// Cache sizes are smaller for the random tester to increase the amount
+// of false sharing.
+L1_CACHE_ASSOC: 2
+L1_CACHE_NUM_SETS_BITS: 2
+L2_CACHE_ASSOC: 2
+L2_CACHE_NUM_SETS_BITS: 5
+
+g_MEMORY_SIZE_BYTES: 1048576
+
+// XACT MEMORY
+XACT_LENGTH: 2000
+XACT_SIZE: 1000
+ABORT_RETRY_TIME: 400
+XACT_ISOLATION_CHECK: true
+L2CACHE_TRANSITIONS_PER_RUBY_CYCLE: 1000
+DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE: 1000
+PERFECT_FILTER: true // If true, use perfect read/write filters
+READ_WRITE_FILTER: Perfect_
+
+//g_NETWORK_TOPOLOGY: FILE_SPECIFIED
+RECYCLE_LATENCY: 1
+//NUMBER_OF_VIRTUAL_NETWORKS: 5
+//g_NUM_MEMORIES: 16
+L2CACHE_TRANSITIONS_PER_RUBY_CYCLE: 1000
+DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE: 1000
+//g_PROCS_PER_CHIP: 16
+//g_NUM_L2_BANKS: 16
+//g_endpoint_bandwidth: 10000
+//g_NUM_PROCESSORS: 16
+//g_NUM_SMT_THREADS: 1
+//g_GARNET_NETWORK: true
+//g_DETAIL_NETWORK: true
+//g_NETWORK_TESTING: false
+//g_FLIT_SIZE: 32
+//g_NUM_PIPE_STAGES: 5
+//g_VCS_PER_CLASS: 2
+//g_BUFFER_SIZE: 4
+
diff --git a/src/mem/ruby/eventqueue/EventQueue.cc b/src/mem/ruby/eventqueue/EventQueue.cc
new file mode 100644
index 000000000..0eef53530
--- /dev/null
+++ b/src/mem/ruby/eventqueue/EventQueue.cc
@@ -0,0 +1,120 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "EventQueue.hh"
+#include "RubyConfig.hh"
+#include "Consumer.hh"
+#include "Profiler.hh"
+#include "System.hh"
+#include "PrioHeap.hh"
+#include "EventQueueNode.hh"
+
+// Class public method definitions
+
+// Construct the event queue; all real setup is delegated to init().
+EventQueue::EventQueue()
+{
+  m_prio_heap_ptr = NULL; // cleared so init() starts from a known state
+  init();
+}
+
+// Destroy the queue, releasing the priority heap allocated by init().
+EventQueue::~EventQueue()
+{
+  delete m_prio_heap_ptr;
+}
+
+// Reset queue state: global time starts at 1 and a fresh, empty
+// priority heap of EventQueueNodes is allocated.
+// NOTE(review): any heap previously held by m_prio_heap_ptr is not
+// freed here; the constructor NULLs the pointer before calling init().
+void EventQueue::init()
+{
+  m_globalTime = 1;
+  m_timeOfLastRecovery = 1;
+  m_prio_heap_ptr = new PrioHeap<EventQueueNode>;
+  m_prio_heap_ptr->init();
+}
+
+// Returns true when no events are pending in the heap.
+bool EventQueue::isEmpty() const
+{
+  return (m_prio_heap_ptr->size() == 0);
+}
+
+// Schedule 'consumer' to be woken at absolute time 'timeAbs'.
+// A node is only inserted if the consumer does not already have a
+// wakeup recorded for that exact time (duplicate wakeups are dropped);
+// asserts that the requested time is strictly in the future.
+void EventQueue::scheduleEventAbsolute(Consumer* consumer, Time timeAbs)
+{
+  // Check to see if this is a redundant wakeup
+  //  Time time = timeDelta + m_globalTime;
+  ASSERT(consumer != NULL);
+  if (consumer->getLastScheduledWakeup() != timeAbs) {
+    // This wakeup is not redundant
+    EventQueueNode thisNode;
+    thisNode.m_consumer_ptr = consumer;
+    assert(timeAbs > m_globalTime);
+    thisNode.m_time = timeAbs;
+    m_prio_heap_ptr->insert(thisNode);
+    consumer->setLastScheduledWakeup(timeAbs); // remember to suppress duplicates
+  }
+}
+
+// Fire, in timestamp order, every queued event whose time is <= t.
+// m_globalTime is advanced to each event's own time before its
+// consumer's triggerWakeup() runs, and is left equal to t when done.
+void EventQueue::triggerEvents(Time t)
+{
+  EventQueueNode thisNode;
+
+  while(m_prio_heap_ptr->size() > 0 && m_prio_heap_ptr->peekMin().m_time <= t) {
+    m_globalTime = m_prio_heap_ptr->peekMin().m_time; // consumers see the event's time
+    thisNode = m_prio_heap_ptr->extractMin();
+    assert(thisNode.m_consumer_ptr != NULL);
+    DEBUG_EXPR(EVENTQUEUE_COMP,MedPrio,*(thisNode.m_consumer_ptr));
+    DEBUG_EXPR(EVENTQUEUE_COMP,MedPrio,thisNode.m_time);
+    thisNode.m_consumer_ptr->triggerWakeup();
+  }
+  m_globalTime = t;
+}
+
+// Drain the queue completely, firing every remaining event in
+// timestamp order. Unlike triggerEvents(), m_globalTime is left at
+// the time of the last event fired rather than at a caller-given t.
+void EventQueue::triggerAllEvents()
+{
+  // FIXME - avoid repeated code
+  EventQueueNode thisNode;
+
+  while(m_prio_heap_ptr->size() > 0) {
+    m_globalTime = m_prio_heap_ptr->peekMin().m_time;
+    thisNode = m_prio_heap_ptr->extractMin();
+    assert(thisNode.m_consumer_ptr != NULL);
+    DEBUG_EXPR(EVENTQUEUE_COMP,MedPrio,*(thisNode.m_consumer_ptr));
+    DEBUG_EXPR(EVENTQUEUE_COMP,MedPrio,thisNode.m_time);
+    thisNode.m_consumer_ptr->triggerWakeup();
+  }
+}
+
+// Class private method definitions
+
+// Dump the queue contents (delegating to the heap's stream operator).
+void
+EventQueue::print(ostream& out) const
+{
+  out << "[Event Queue: " << *m_prio_heap_ptr << "]";
+}
diff --git a/src/mem/ruby/eventqueue/EventQueue.hh b/src/mem/ruby/eventqueue/EventQueue.hh
new file mode 100644
index 000000000..476e0d24a
--- /dev/null
+++ b/src/mem/ruby/eventqueue/EventQueue.hh
@@ -0,0 +1,118 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: The EventQueue class implements an event queue which
+ * can trigger events, allowing our simulation to be event driven.
+ *
+ * Currently, the only event we support is a Consumer being signaled
+ * by calling the consumer's wakeup() routine. Adding the event to
+ * the queue does not require a virtual function call, though calling
+ * wakeup() is a virtual function call.
+ *
+ * The method triggerEvents() is called with a global time. All
+ * events which are before or at this time are triggered in timestamp
+ * order. No ordering is enforced for events scheduled to occur at
+ * the same time. Events scheduled to wakeup the same consumer at the
+ * same time are combined into a single event.
+ *
+ * The method scheduleEventAbsolute() (or its relative-time wrapper
+ * scheduleEvent()) is called with a time and a consumer pointer. The
+ * event queue will call the wakeup() method of the consumer at the
+ * appropriate time.
+ *
+ * This implementation of EventQueue uses a dynamically sized array
+ * managed as a heap. The algorithm used has O(lg n) for insert and
+ * O(lg n) for extract minimum element. (Based on chapter 7 of Cormen,
+ * Leiserson, and Rivest.) The array is dynamically sized and is
+ * automatically doubled in size when necessary.
+ *
+ */
+
+#ifndef EVENTQUEUE_H
+#define EVENTQUEUE_H
+
+#include "Global.hh"
+#include "Vector.hh"
+
+class Consumer;
+template <class TYPE> class PrioHeap;
+class EventQueueNode;
+
+// Global event queue: schedules Consumer wakeups by absolute time and
+// fires them in timestamp order via triggerEvents()/triggerAllEvents().
+class EventQueue {
+public:
+  // Constructors
+  EventQueue();
+
+  // Destructor
+  ~EventQueue();
+
+  // Public Methods
+
+  Time getTime() const { return m_globalTime; } // current global time
+  // convenience wrapper: schedule relative to the current global time
+  void scheduleEvent(Consumer* consumer, Time timeDelta) { scheduleEventAbsolute(consumer, timeDelta + m_globalTime); }
+  void scheduleEventAbsolute(Consumer* consumer, Time timeAbs);
+  void triggerEvents(Time t); // called to handle all events <= time t
+  void triggerAllEvents();
+  void print(ostream& out) const;
+  bool isEmpty() const;
+
+  Time getTimeOfLastRecovery() {return m_timeOfLastRecovery;}
+  void setTimeOfLastRecovery(Time t) {m_timeOfLastRecovery = t;}
+
+  // Private Methods
+private:
+  // Private copy constructor and assignment operator
+  // NOTE(review): init() below is the real initializer invoked by the
+  // constructor; only the next two declarations are the disabled copy ops.
+  void init();
+  EventQueue(const EventQueue& obj);
+  EventQueue& operator=(const EventQueue& obj);
+
+  // Data Members (m_ prefix)
+  PrioHeap<EventQueueNode>* m_prio_heap_ptr; // pending events, min-ordered by time
+  Time m_globalTime;         // current simulation time
+  Time m_timeOfLastRecovery; // bookkeeping exposed via get/setTimeOfLastRecovery
+};
+
+// Output operator declaration
+inline extern
+ostream& operator<<(ostream& out, const EventQueue& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream the queue via EventQueue::print(), flushing afterwards.
+inline extern
+ostream& operator<<(ostream& out, const EventQueue& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //EVENTQUEUE_H
diff --git a/src/mem/ruby/eventqueue/EventQueueNode.cc b/src/mem/ruby/eventqueue/EventQueueNode.cc
new file mode 100644
index 000000000..b0027506b
--- /dev/null
+++ b/src/mem/ruby/eventqueue/EventQueueNode.cc
@@ -0,0 +1,47 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "EventQueueNode.hh"
+
+// Print the node as "[Time=<t> Consumer=<ptr-or-NULL>]". Note the
+// consumer is printed as a pointer value, not dereferenced.
+void EventQueueNode::print(ostream& out) const
+{
+  out << "[";
+  out << "Time=" << m_time;
+  if (m_consumer_ptr != NULL) {
+    out << " Consumer=" << m_consumer_ptr;
+  } else {
+    out << " Consumer=NULL";
+  }
+  out << "]";
+}
diff --git a/src/mem/ruby/eventqueue/EventQueueNode.hh b/src/mem/ruby/eventqueue/EventQueueNode.hh
new file mode 100644
index 000000000..eff7ff37e
--- /dev/null
+++ b/src/mem/ruby/eventqueue/EventQueueNode.hh
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#ifndef EVENTQUEUENODE_H
+#define EVENTQUEUENODE_H
+
+#include "Global.hh"
+class Consumer;
+
+// A single queue entry: a wakeup time paired with the Consumer to
+// wake. Plain value type; members are public for use by the heap.
+class EventQueueNode {
+public:
+  // Constructors
+  EventQueueNode() { m_time = 0; m_consumer_ptr = NULL; }
+
+  // Destructor
+  //~EventQueueNode();
+
+  // Public Methods
+  void print(ostream& out) const;
+
+  // Assignment operator and copy constructor since the default
+  // constructors confuse purify when long longs are present.
+  EventQueueNode& operator=(const EventQueueNode& obj) {
+    m_time = obj.m_time;
+    m_consumer_ptr = obj.m_consumer_ptr;
+    return *this;
+  }
+
+  EventQueueNode(const EventQueueNode& obj) {
+    m_time = obj.m_time;
+    m_consumer_ptr = obj.m_consumer_ptr;
+  }
+private:
+  // Private Methods
+
+  // Default copy constructor and assignment operator
+  // EventQueueNode(const EventQueueNode& obj);
+
+  // Data Members (m_ prefix)
+public:
+  Time m_time;              // absolute wakeup time
+  Consumer* m_consumer_ptr; // consumer to wake (non-owning)
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const EventQueueNode& obj);
+
+// ******************* Definitions *******************
+
+inline extern bool node_less_then_eq(const EventQueueNode& n1, const EventQueueNode& n2);
+
+// Heap-ordering predicate: true when n1 is scheduled no later than n2.
+// (The historical "then" spelling is kept — callers use this name.)
+inline extern
+bool node_less_then_eq(const EventQueueNode& n1, const EventQueueNode& n2)
+{
+  return (n1.m_time <= n2.m_time);
+}
+
+// Output operator definition
+// Stream the node via EventQueueNode::print(), flushing afterwards.
+extern inline
+ostream& operator<<(ostream& out, const EventQueueNode& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //EVENTQUEUENODE_H
diff --git a/src/mem/ruby/eventqueue/EventQueue_tester.cc b/src/mem/ruby/eventqueue/EventQueue_tester.cc
new file mode 100644
index 000000000..5e54aa7e0
--- /dev/null
+++ b/src/mem/ruby/eventqueue/EventQueue_tester.cc
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "EventQueue.hh"
+#include "Consumer.hh"
+
+//static int global_counter = 0;
+
+// Test consumer flavor #1: remembers an integer tag and announces its
+// wakeup on stdout so event ordering can be inspected by hand.
+class TestConsumer1 : public Consumer {
+public:
+  TestConsumer1(int description) : m_description(description) { }
+  ~TestConsumer1() { }
+  void wakeup() { cout << "Wakeup#1: " << m_description << endl; }
+  void print(ostream& out) const { out << "1:" << m_description << endl; }
+
+private:
+  int m_description;  // tag printed on wakeup
+};
+
+// Test consumer flavor #2: identical to TestConsumer1 apart from the
+// "2:" labelling, so the two classes' wakeups are distinguishable.
+class TestConsumer2 : public Consumer {
+public:
+  TestConsumer2(int description) : m_description(description) { }
+  ~TestConsumer2() { }
+  void wakeup() { cout << "Wakeup#2: " << m_description << endl; }
+  void print(ostream& out) const { out << "2:" << m_description << endl; }
+private:
+  int m_description;  // tag printed on wakeup
+};
+
+// Standalone driver: builds SIZE consumers (alternating between the two
+// test classes), schedules each at a pseudo-random time in
+// [0, MAX_TIME), fires everything up to MAX_TIME, then cleans up.
+int main()
+{
+  EventQueue q;
+  const int SIZE = 200;
+  const int MAX_TIME = 10000;
+  int numbers[SIZE];
+  Consumer* consumers[SIZE];
+
+  // Create the consumers and pick a wakeup time for each.
+  for (int i = 0; i < SIZE; ++i) {
+    numbers[i] = random() % MAX_TIME;
+    consumers[i] = (i % 2 == 0)
+        ? static_cast<Consumer*>(new TestConsumer1(i))
+        : static_cast<Consumer*>(new TestConsumer2(i));
+  }
+
+  // Enqueue every consumer at its chosen time.
+  for (int i = 0; i < SIZE; ++i) {
+    q.scheduleEvent(consumers[i], numbers[i]);
+  }
+
+  // Fire all events scheduled up to MAX_TIME.
+  q.triggerEvents(MAX_TIME);
+
+  for (int i = 0; i < SIZE; ++i) {
+    delete consumers[i];
+  }
+}
diff --git a/src/mem/ruby/init.cc b/src/mem/ruby/init.cc
new file mode 100644
index 000000000..213d6b176
--- /dev/null
+++ b/src/mem/ruby/init.cc
@@ -0,0 +1,307 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * init.C
+ *
+ * Description: See init.h
+ *
+ * $Id$
+ *
+ */
+
+#include "Global.hh"
+#include "EventQueue.hh"
+#include "System.hh"
+#include "Debug.hh"
+#include "Profiler.hh"
+#include "Tester.hh"
+#include "OpalInterface.hh"
+#include "init.hh"
+#include "interface.hh"
+
+#ifdef CONTIGUOUS_ADDRESSES
+#include "ContiguousAddressTranslator.hh"
+
+/* Declared in interface.C */
+extern ContiguousAddressTranslator * g_p_ca_translator;
+
+#endif // #ifdef CONTIGUOUS_ADDRESSES
+
+using namespace std;
+#include <string>
+#include <map>
+#include <stdlib.h>
+
+// Maurice
+// extern "C" {
+// #include "simics/api.hh"
+// };
+
+#include "FakeSimicsDataTypes.hh"
+
+#include "confio.hh"
+#include "initvar.hh"
+
+// A generated file containing the default parameters in string form
+// The defaults are stored in the variable global_default_param
+#include "default_param.hh"
+
+// Simics-style "session" attribute getter.  Ruby exposes no readable
+// session attributes, so every query reports an invalid value.
+// NOTE(review): id, obj and idx are part of the callback signature and
+// are deliberately unused here.
+attr_value_t ruby_session_get( void *id, void *obj,
+                               attr_value_t *idx ) {
+  attr_value_t ret;
+
+  // all session attributes default to return invalid
+  ret.kind = Sim_Val_Invalid;
+  return ret;
+}
+
+// Simics-style "session" attribute setter: dispatches textual ruby
+// commands (the command name arrives via `id`, arguments via `val`).
+// The whole command table is currently compiled out (#if 0), so every
+// call falls through to Sim_Set_Illegal_Value until the commands are
+// re-enabled.
+set_error_t ruby_session_set( void *id, void *obj,
+                              attr_value_t *val, attr_value_t *idx ) {
+  const char *command = (const char *) id;
+  // Add new ruby commands to this function
+
+#if 0 // Eventually add these commands back in
+  if (!strcmp(command, "dump-stats" ) ) {
+    char* filename = (char*) val->u.string;
+    if(strcmp(filename, "")){
+      ruby_dump_stats(filename);
+    } else {
+      ruby_dump_stats(NULL);
+    }
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "dump-short-stats" ) ) {
+    char* filename = (char*) val->u.string;
+    if(strcmp(filename, "")){
+      ruby_dump_short_stats(filename);
+    } else {
+      ruby_dump_short_stats(NULL);
+    }
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "periodic-stats-file" ) ) {
+    char* filename = (char*) val->u.string;
+    ruby_set_periodic_stats_file(filename);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "periodic-stats-interval" ) ) {
+    int interval = val->u.integer;
+    ruby_set_periodic_stats_interval(interval);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "clear-stats" ) ) {
+    ruby_clear_stats();
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "debug-verb" ) ) {
+    char* new_verbosity = (char*) val->u.string;
+    ruby_change_debug_verbosity(new_verbosity);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "debug-filter" ) ) {
+    char* new_debug_filter = (char*) val->u.string;
+    ruby_change_debug_filter(new_debug_filter);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "debug-output-file" ) ) {
+    char* new_filename = (char*) val->u.string;
+    ruby_set_debug_output_file(new_filename);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "debug-start-time" ) ) {
+    char* new_start_time = (char*) val->u.string;
+    ruby_set_debug_start_time(new_start_time);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "load-caches" ) ) {
+    char* filename = (char*) val->u.string;
+    ruby_load_caches(filename);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "save-caches" ) ) {
+    char* filename = (char*) val->u.string;
+    ruby_save_caches(filename);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "dump-cache" ) ) {
+    int cpuNumber = val->u.integer;
+    ruby_dump_cache(cpuNumber);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "dump-cache-data" ) ) {
+    int cpuNumber = val->u.list.vector[0].u.integer;
+    char *filename = (char*) val->u.list.vector[1].u.string;
+    ruby_dump_cache_data( cpuNumber, filename );
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "tracer-output-file" ) ) {
+    char* new_filename = (char*) val->u.string;
+    ruby_set_tracer_output_file(new_filename);
+    return Sim_Set_Ok;
+  } else if (!strcmp(command, "xact-visualizer-file" ) ) {
+    char* new_filename = (char*) val->u.string;
+    ruby_xact_visualizer_file(new_filename);
+    return Sim_Set_Ok;
+  }
+  fprintf( stderr, "error: unrecognized command: %s\n", command );
+#endif
+  // With the table disabled, every command is rejected.
+  return Sim_Set_Illegal_Value;
+}
+
+// Allocated by init_variables(); holds the initvar package that parses
+// and stores ruby's configuration parameters.
+static initvar_t *ruby_initvar_obj = NULL;
+
+//***************************************************************************
+// Hook invoked by the initvar package after the configuration has been
+// read.  Currently a no-op: no derived parameters need recomputation.
+static void init_generate_values( void )
+{
+  /* update generated values, based on input configuration */
+}
+
+//***************************************************************************
+// Allocate the configuration ("variable initialization") package and
+// register the callbacks it drives: init_simulator() once configuration
+// is complete, init_generate_values() for derived values, and the
+// session get/set handlers defined above.
+void init_variables( void )
+{
+  // allocate the "variable initialization" package
+  ruby_initvar_obj = new initvar_t( "ruby", "../../../ruby/",
+                                    global_default_param,
+                                    &init_simulator,
+                                    &init_generate_values,
+                                    &ruby_session_get,
+                                    &ruby_session_set );
+}
+
+// Construct the global simulator state in a fixed order: processor
+// counts, RubyConfig, the Debug object, the EventQueue, then the System
+// itself.  Later steps depend on the earlier ones (e.g. System
+// construction uses RubyConfig and the event queue), so the order must
+// not be changed.  Also notifies Opal, if loaded, that ruby is up.
+void init_simulator()
+{
+  // Set things to NULL to make sure we don't de-reference them
+  // without a seg. fault.
+  g_system_ptr = NULL;
+  g_debug_ptr = NULL;
+  g_eventQueue_ptr = NULL;
+
+  cout << "Ruby Timing Mode" << endl;
+#ifndef MULTIFACET_NO_OPT_WARN
+  cerr << "Warning: optimizations not enabled." << endl;
+#endif
+
+  if (g_SIMICS) {
+    // LUKE - if we don't set the default SMT threads in condor scripts,
+    //        set it now
+    if(g_NUM_SMT_THREADS == 0){
+      g_NUM_SMT_THREADS = 1;
+    }
+    if(g_NUM_PROCESSORS == 0){
+      //only set to default if value not set in condor scripts
+      //  Account for SMT systems also
+      g_NUM_PROCESSORS = SIMICS_number_processors()/g_NUM_SMT_THREADS;
+    }
+  }
+
+  RubyConfig::init();
+
+  g_debug_ptr = new Debug( DEBUG_FILTER_STRING,
+                           DEBUG_VERBOSITY_STRING,
+                           DEBUG_START_TIME,
+                           DEBUG_OUTPUT_FILENAME );
+
+  cout << "Creating event queue..." << endl;
+  g_eventQueue_ptr = new EventQueue;
+  cout << "Creating event queue done" << endl;
+
+  cout << "Creating system..." << endl;
+  cout << "  Processors: " << RubyConfig::numberOfProcessors() << endl;
+
+  g_system_ptr = new System;
+  cout << "Creating system done" << endl;
+
+  // if opal is loaded, its static interface object (inst) will be non-null,
+  // and the opal object needs to be notified that ruby is now loaded.
+  // "1" indicates a load and should be replaced with an enumerated type.
+  if (OpalInterface::inst != NULL) {
+    OpalInterface::inst->notify( 1 );
+  }
+
+#ifdef CONTIGUOUS_ADDRESSES
+  // Optional remapping layer: only installed when the host's physical
+  // memory is not already contiguous.
+  if(g_SIMICS) {
+    cout << "Establishing Contiguous Address Space Mappings..." << flush;
+    g_p_ca_translator = new ContiguousAddressTranslator();
+    assert(g_p_ca_translator!=NULL);
+    if(g_p_ca_translator->AddressesAreContiguous()) {
+      cout << "Physical Memory Addresses are already contiguous." << endl;
+      delete g_p_ca_translator;
+      g_p_ca_translator = NULL;
+    } else {
+      cout << "Done." << endl;
+    }
+  } else {
+    g_p_ca_translator = NULL;
+  }
+#endif // #ifdef CONTIGUOUS_ADDRESSES
+
+  cout << "Ruby initialization complete" << endl;
+}
+
+// Install ruby's half of the ruby/opal API: fills `api` with pointers
+// to OpalInterface's static entry points.
+void init_opal_interface( mf_ruby_api_t *api )
+{
+  OpalInterface::installInterface( api );
+}
+
+// Decide whether the Simics "snoop" interface should be installed.
+// Returns nonzero only when ruby runs under Simics AND actually needs
+// to observe store data values (data-block tracking or transactional
+// memory support).
+int init_use_snoop()
+{
+  if (!g_SIMICS) {
+    return (0);
+  }
+  // The "snoop interface" defined by simics allows ruby to see store
+  // data values (from simics).  If DATA_BLOCK is defined, we are
+  // tracking data, so we need to install the snoop interface.
+  return ((DATA_BLOCK == true) || (XACT_MEMORY));
+}
+
+// Tear the simulator down in the reverse of the construction order used
+// by init_simulator(): System first (it may still use the event queue
+// and debug object), then the event queue, then the debug object.
+void destroy_simulator()
+{
+  cout << "Deleting system..." << endl;
+  delete g_system_ptr;
+  cout << "Deleting system done" << endl;
+
+  cout << "Deleting event queue..." << endl;
+  delete g_eventQueue_ptr;
+  cout << "Deleting event queue done" << endl;
+
+  delete g_debug_ptr;
+}
+
+/*-------------------------------------------------------------------------+
+ | DG: These are the external load and unload hooks that will be called by |
+ | M5 in phase 1 integration, and possibly afterwards, too. |
+ +-------------------------------------------------------------------------*/
+
+// M5 phase-1 integration hook: called when the ruby library is loaded.
+// Only allocates the configuration package; the simulator itself is
+// built later by OnInitRuby().
+extern "C"
+int OnLoadRuby() {
+  init_variables();
+  return 0;
+}
+
+// M5 phase-1 integration hook: builds the full simulator (event queue,
+// system, debug object).  Must follow OnLoadRuby().
+extern "C"
+int OnInitRuby() {
+  init_simulator();
+  return 0;
+}
+
+// M5 phase-1 integration hook: tears the simulator down on unload.
+extern "C"
+int OnUnloadRuby() {
+  destroy_simulator();
+  return 0;
+}
+
diff --git a/src/mem/ruby/init.hh b/src/mem/ruby/init.hh
new file mode 100644
index 000000000..36d975b3e
--- /dev/null
+++ b/src/mem/ruby/init.hh
@@ -0,0 +1,56 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * init.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef INIT_H
+#define INIT_H
+
+// C-linkage entry points for bringing the Ruby memory system up and
+// down.  NOTE(review): init_opal_interface() takes an mf_ruby_api_t*,
+// which this header does not itself declare -- includers apparently
+// must pull in mf_api.hh (or an equivalent declaration) first; confirm
+// against callers.
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void init_variables();              // allocate the config package
+extern void init_simulator();              // build queue/system/debug objects
+extern void init_opal_interface( mf_ruby_api_t *api );  // hook up Opal
+extern int init_use_snoop();               // nonzero if snoop i/f needed
+extern void destroy_simulator();           // tear down in reverse order
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif //INIT_H
diff --git a/src/mem/ruby/interfaces/OpalInterface.cc b/src/mem/ruby/interfaces/OpalInterface.cc
new file mode 100644
index 000000000..362c7bcb6
--- /dev/null
+++ b/src/mem/ruby/interfaces/OpalInterface.cc
@@ -0,0 +1,446 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+#include "OpalInterface.hh"
+#include "interface.hh"
+#include "System.hh"
+#include "SubBlock.hh"
+#include "mf_api.hh"
+#include "Chip.hh"
+#include "RubyConfig.hh"
+//#include "XactIsolationChecker.hh" //gem5:Arka for decomissioning ruby/log_tm
+// #include "TransactionInterfaceManager.hh"
+//#include "TransactionVersionManager.hh" //gem5:Arka for decomissioning ruby/log_tm
+#include "Sequencer.hh"
+
+/*------------------------------------------------------------------------*/
+/* Forward declarations */
+/*------------------------------------------------------------------------*/
+
+// Local helpers translating between Opal's memop encoding and Ruby's
+// cache request types (defined at the bottom of this file).
+static CacheRequestType get_request_type( OpalMemop_t opaltype );
+static OpalMemop_t get_opal_request_type( CacheRequestType type );
+
+/// The static opalinterface instance (singleton; set by the constructor)
+OpalInterface *OpalInterface::inst = NULL;
+
+/*------------------------------------------------------------------------*/
+/* Constructor(s) / destructor */
+/*------------------------------------------------------------------------*/
+
+//**************************************************************************
+// Singleton constructor: registers this object as the global instance.
+// The opal interface pointer stays NULL until queryOpalInterface()
+// succeeds.  NOTE(review): sys_ptr is accepted but unused here --
+// presumably kept for signature symmetry; confirm with callers.
+OpalInterface::OpalInterface(System* sys_ptr) {
+  clearStats();
+  ASSERT( inst == NULL );  // only one OpalInterface may exist
+  inst = this;
+  m_opal_intf = NULL;
+}
+
+/*------------------------------------------------------------------------*/
+/* Public methods */
+/*------------------------------------------------------------------------*/
+
+//**************************************************************************
+// Print the configuration knobs relevant to the ruby/opal link.
+void OpalInterface::printConfig(ostream& out) const {
+  out << "Opal_ruby_multiplier: " << OPAL_RUBY_MULTIPLIER << endl;
+  out << endl;
+}
+
+// Print the interface's statistics block header.  No per-interface
+// counters are tracked yet, so only the banner is emitted.
+void OpalInterface::printStats(ostream& out) const {
+  out << endl;
+  out << "Opal Interface Stats" << endl;
+  out << "----------------------" << endl;
+  out << endl;
+}
+
+//**************************************************************************
+// Reset interface statistics.  Placeholder: nothing is tracked yet.
+void OpalInterface::clearStats() {
+}
+
+//**************************************************************************
+// Returns the instruction count that Opal reports for processor procID.
+integer_t OpalInterface::getInstructionCount(int procID) const {
+  return m_opal_intf->getInstructionCount(procID);
+}
+
+//*************************************************************************
+// Returns Opal's current cycle count for processor procID.
+uint64 OpalInterface::getOpalTime(int procID) const {
+  return m_opal_intf->getOpalTime(procID);
+}
+
+/************ For WATTCH power stats ************************************/
+//*************************************************************************
+// WATTCH power accounting: report one L2 access to Opal.
+void OpalInterface::incrementL2Access(int procID) const{
+  m_opal_intf->incrementL2Access(procID);
+}
+
+//*************************************************************************
+// WATTCH power accounting: report prefetcher activity (count and
+// instruction/data flavor) to Opal.
+void OpalInterface::incrementPrefetcherAccess(int procID, int num_prefetches, int isinstr) const{
+  m_opal_intf->incrementPrefetcherAccess(procID, num_prefetches, isinstr);
+}
+/******************** END WATTCH power stats ****************************/
+
+// Notifies Opal of an L2 miss
+//*************************************************************************
+// Forward an L2 miss event (address, op type, tag-present flag) to Opal.
+void OpalInterface::notifyL2Miss(int procID, physical_address_t physicalAddr, OpalMemop_t type, int tagexists) const{
+  m_opal_intf->notifyL2Miss(procID, physicalAddr, type, tagexists);
+}
+
+/******************************************************************
+ * void hitCallback(int cpuNumber)
+ * Called by Sequencer. Calls opal.
+ ******************************************************************/
+
+//**************************************************************************
+// Called by the Sequencer when a memory transaction completes; relays
+// the completion (address plus converted request type) to Opal.
+void OpalInterface::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) {
+  m_opal_intf->hitCallback( proc, data.getAddress().getAddress(), get_opal_request_type(type), thread );
+}
+
+//**************************************************************************
+// Useful functions if Ruby needs to read/write physical memory when running with Opal
+// Thin wrapper letting ruby read simulated physical memory through the
+// Simics API when running with Opal.
+integer_t OpalInterface::readPhysicalMemory(int procID,
+                                            physical_address_t address,
+                                            int len ){
+  return SIMICS_read_physical_memory(procID, address, len);
+}
+
+//**************************************************************************
+// Thin wrapper letting ruby write simulated physical memory through the
+// Simics API when running with Opal.
+void OpalInterface::writePhysicalMemory( int procID,
+                                         physical_address_t address,
+                                         integer_t value,
+                                         int len ){
+  SIMICS_write_physical_memory(procID, address, value, len);
+}
+
+//***************************************************************
+// notifies Opal to print debug info
+// Ask Opal to dump its own debug state.
+void OpalInterface::printDebug(){
+  m_opal_intf->printDebug();
+}
+
+//***************************************************************
+
+/******************************************************************
+ * Called by opal's memory operations (memop.C)
+ * May call Sequencer.
+ ******************************************************************/
+
+//****************************************************************************
+// Total outstanding requests in the sequencer owning cpuNumber.
+int OpalInterface::getNumberOutstanding( int cpuNumber ){
+  // Map the flat cpu id onto (chip, per-chip slot).
+  int chip = cpuNumber / RubyConfig::numberOfProcsPerChip();
+  int rank = cpuNumber % RubyConfig::numberOfProcsPerChip();
+  Sequencer* seq_ptr = g_system_ptr->getChip(chip)->getSequencer(rank);
+  return seq_ptr->getNumberOutstanding();
+}
+
+//****************************************************************************
+// Outstanding demand (non-prefetch) requests for cpuNumber's sequencer.
+int OpalInterface::getNumberOutstandingDemand( int cpuNumber ){
+  int chip = cpuNumber / RubyConfig::numberOfProcsPerChip();
+  int rank = cpuNumber % RubyConfig::numberOfProcsPerChip();
+  Sequencer* seq_ptr = g_system_ptr->getChip(chip)->getSequencer(rank);
+  return seq_ptr->getNumberOutstandingDemand();
+}
+
+//****************************************************************************
+// Outstanding prefetch requests for cpuNumber's sequencer.
+int OpalInterface::getNumberOutstandingPrefetch( int cpuNumber ){
+  int chip = cpuNumber / RubyConfig::numberOfProcsPerChip();
+  int rank = cpuNumber % RubyConfig::numberOfProcsPerChip();
+  Sequencer* seq_ptr = g_system_ptr->getChip(chip)->getSequencer(rank);
+  return seq_ptr->getNumberOutstandingPrefetch();
+}
+
+//**************************************************************************
+// Ask the owning sequencer whether it can accept a new request of this
+// type/address.  A throw-away CacheMsg is built purely for the query.
+int OpalInterface::isReady( int cpuNumber, la_t logicalAddr, pa_t physicalAddr, OpalMemop_t typeOfRequest, int thread ) {
+  // Send request to sequencer
+  Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
+
+  // FIXME - some of these fields have bogus values sinced isReady()
+  // doesn't need them.  However, it would be cleaner if all of these
+  // fields were exact.
+
+  return (targetSequencer_ptr->isReady(CacheMsg(Address( physicalAddr ),
+                                                Address( physicalAddr ),
+                                                get_request_type(typeOfRequest),
+                                                Address(0),
+                                                AccessModeType_UserMode,   // User/supervisor mode
+                                                0,   // Size in bytes of request
+                                                PrefetchBit_No,  // Not a prefetch
+                                                0,              // Version number
+                                                Address(logicalAddr),   // Virtual Address
+                                                thread,              // SMT thread
+                                                0,          // TM specific - timestamp of memory request
+                                                false      // TM specific - whether request is part of escape action
+                                                )
+                                       ));
+}
+
+// FIXME: duplicated code should be avoided
+//**************************************************************************
+// Issue a demand memory request from Opal into the owning sequencer.
+// Caller should have checked isReady() first.
+// FIXME: duplicated code should be avoided
+void OpalInterface::makeRequest(int  cpuNumber, la_t logicalAddr, pa_t physicalAddr,
+                                int  requestSize, OpalMemop_t typeOfRequest,
+                                la_t virtualPC, int isPriv, int thread) {
+  // Issue the request to the sequencer.
+  // set access type (user/supervisor)
+  AccessModeType access_mode;
+  if (isPriv) {
+    access_mode = AccessModeType_SupervisorMode;
+  } else {
+    access_mode = AccessModeType_UserMode;
+  }
+
+  // Send request to sequencer
+  Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
+
+  targetSequencer_ptr->makeRequest(CacheMsg(Address( physicalAddr ),
+                                            Address( physicalAddr ),
+                                            get_request_type(typeOfRequest),
+                                            Address(virtualPC),
+                                            access_mode,   // User/supervisor mode
+                                            requestSize,   // Size in bytes of request
+                                            PrefetchBit_No, // Not a prefetch
+                                            0,              // Version number
+                                            Address(logicalAddr),   // Virtual Address
+                                            thread,              // SMT thread
+                                            0,          // TM specific - timestamp of memory request
+                                            false      // TM specific - whether request is part of escape action
+                                            )
+                                   );
+}
+
+
+//**************************************************************************
+// Issue a prefetch request from Opal into the owning sequencer.
+// Identical to makeRequest() except that PrefetchBit_Yes is set.
+void OpalInterface::makePrefetch(int  cpuNumber, la_t logicalAddr, pa_t physicalAddr,
+                                 int  requestSize, OpalMemop_t typeOfRequest,
+                                 la_t virtualPC, int isPriv, int thread) {
+  DEBUG_MSG(SEQUENCER_COMP,MedPrio,"Making prefetch request");
+
+  // Issue the request to the sequencer.
+  // set access type (user/supervisor)
+  AccessModeType access_mode;
+  if (isPriv) {
+    access_mode = AccessModeType_SupervisorMode;
+  } else {
+    access_mode = AccessModeType_UserMode;
+  }
+
+  // make the prefetch
+  Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
+  targetSequencer_ptr->makeRequest(CacheMsg(Address( physicalAddr ),
+                                            Address( physicalAddr ),
+                                            get_request_type(typeOfRequest),
+                                            Address(virtualPC),
+                                            access_mode,
+                                            requestSize,
+                                            PrefetchBit_Yes, // True means this is a prefetch
+                                            0,              // Version number
+                                            Address(logicalAddr),   // Virtual Address
+                                            thread,              // SMT thread
+                                            0,          // TM specific - timestamp of memory request
+                                            false      // TM specific - whether request is part of escape action
+                                            )
+                                   );
+  return;
+}
+
+//**************************************************************************
+// Query the cache for a stale copy of the data at physicalAddr.
+// NOTE(review): the actual cache access is commented out below, so this
+// currently always reports a miss and never touches `buffer` or
+// `requestSize`.
+int OpalInterface::staleDataRequest( int cpuNumber, pa_t physicalAddr,
+                                     int requestSize, int8 *buffer ) {
+  // Find sequencer
+  Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
+  assert(targetSequencer_ptr != NULL);
+
+  // query the cache for stale data values (if any)
+  bool hit = false;
+  //hit = targetSequencer_ptr->staleDataAccess( Address(physicalAddr),
+  //                                            requestSize, buffer );
+
+  return hit;
+}
+
+//**************************************************************************
+// Load/unload notification from Opal.  status: 0 = (unused), 1 = opal
+// loaded, 2 = opal unloading.  If the singleton does not exist yet, a
+// load notification is forwarded to the System so it can allocate the
+// interface; by the time we fall through, `inst` must be non-NULL.
+void OpalInterface::notify( int status ) {
+  if (OpalInterface::inst == NULL) {
+    if (status == 1) {
+      // notify system that opal is now loaded
+      g_system_ptr->opalLoadNotify();
+    } else {
+      return;
+    }
+  }
+
+  // opal interface must be allocated now
+  ASSERT( OpalInterface::inst != NULL );
+  if ( status == 0 ) {
+
+  } else if ( status == 1 ) {
+    // install notification: query opal for its interface
+    OpalInterface::inst->queryOpalInterface();
+    if ( OpalInterface::inst->m_opal_intf != NULL ) {
+      cout << "OpalInterface: installation successful." << endl;
+      // test: (*(OpalInterface::inst->m_opal_intf->hitCallback))( 0, 0xFFULL );
+    }
+  } else if ( status == 2 ) {
+    // unload notification
+    // NOTE: this is not tested, as we can't unload ruby or opal right now.
+    OpalInterface::inst->removeOpalInterface();
+  }
+}
+
+// advance ruby time
+//**************************************************************************
+int OpalInterface::s_advance_counter = 0;  // calls since last ruby tick
+
+// Called by Opal each Opal cycle.  Ruby advances one cycle for every
+// OPAL_RUBY_MULTIPLIER calls, keeping the two simulators' clocks in the
+// configured ratio.
+void OpalInterface::advanceTime( void ) {
+  s_advance_counter++;
+  if (s_advance_counter == OPAL_RUBY_MULTIPLIER) {
+    Time time = g_eventQueue_ptr->getTime() + 1;
+    DEBUG_EXPR(NODE_COMP, HighPrio, time);
+    g_eventQueue_ptr->triggerEvents(time);
+    s_advance_counter = 0;
+  }
+}
+
+// return ruby's time
+//**************************************************************************
+// Report ruby's current event-queue time back to Opal.
+unsigned long long OpalInterface::getTime( void ) {
+  return g_eventQueue_ptr->getTime();
+}
+
+// print's Ruby outstanding request table
+// Dump the outstanding-request table of cpuNumber's sequencer to cout.
+void OpalInterface::printProgress(int cpuNumber){
+  int chip = cpuNumber / RubyConfig::numberOfProcsPerChip();
+  int rank = cpuNumber % RubyConfig::numberOfProcsPerChip();
+  Sequencer* seq_ptr = g_system_ptr->getChip(chip)->getSequencer(rank);
+  assert(seq_ptr != NULL);
+
+  seq_ptr->printProgress(cout);
+}
+
+// Non-method helper function
+//**************************************************************************
+// Translate an Opal memory-operation code into Ruby's CacheRequestType.
+// Rewritten with early returns: previously the result variable was left
+// uninitialized on the error branch, which is undefined behavior if
+// ERROR_MSG ever returns.
+static CacheRequestType get_request_type( OpalMemop_t opaltype ) {
+  if (opaltype == OPAL_LOAD) {
+    return CacheRequestType_LD;
+  } else if (opaltype == OPAL_STORE) {
+    return CacheRequestType_ST;
+  } else if (opaltype == OPAL_IFETCH) {
+    return CacheRequestType_IFETCH;
+  } else if (opaltype == OPAL_ATOMIC) {
+    return CacheRequestType_ATOMIC;
+  }
+  ERROR_MSG("Error: Strange memory transaction type: not a LD or a ST");
+  return CacheRequestType_LD;  // unreachable if ERROR_MSG aborts
+}
+
+//**************************************************************************
+// Translate Ruby's CacheRequestType back into Opal's memop encoding.
+// Rewritten with early returns: previously the result variable was left
+// uninitialized on the error branch, which is undefined behavior if
+// ERROR_MSG ever returns.
+static OpalMemop_t get_opal_request_type( CacheRequestType type ) {
+  if (type == CacheRequestType_LD) {
+    return OPAL_LOAD;
+  } else if (type == CacheRequestType_ST) {
+    return OPAL_STORE;
+  } else if (type == CacheRequestType_IFETCH) {
+    return OPAL_IFETCH;
+  } else if (type == CacheRequestType_ATOMIC) {
+    return OPAL_ATOMIC;
+  }
+  ERROR_MSG("Error: Strange memory transaction type: not a LD or a ST");
+  return OPAL_LOAD;  // unreachable if ERROR_MSG aborts
+}
+
+//**************************************************************************
+// Opal has been unloaded: hand the Simics timing model back to ruby.
+void OpalInterface::removeOpalInterface( void ) {
+  cout << "ruby: opal uninstalled. reinstalling timing model." << endl;
+  SIMICS_install_timing_model();
+}
+
+//**************************************************************************
+// Opal is considered loaded iff we run under Simics and Opal has
+// published its mf-opal-api interface.
+bool OpalInterface::isOpalLoaded( void ) {
+  if (!g_SIMICS) {
+    return false;
+  }
+  mf_opal_api_t *opal_interface = SIMICS_get_opal_interface();
+  return (opal_interface != NULL);
+}
+
+//**************************************************************************
+// Fetch Opal's mf-opal-api interface from Simics.  On success the
+// Simics timing model is detached (Opal now drives timing) and Opal's
+// notify callback, if any, is invoked.
+void OpalInterface::queryOpalInterface( void ) {
+  m_opal_intf = SIMICS_get_opal_interface();
+  if ( m_opal_intf == NULL ) {
+    WARN_MSG("error: OpalInterface: opal does not implement mf-opal-api interface.\n");
+  } else {
+    // opal is loaded -- remove the timing_model interface
+    cout << "Ruby: ruby-opal link established. removing timing_model." << endl;
+    SIMICS_remove_timing_model();
+
+    if (m_opal_intf->notifyCallback != NULL) {
+      cout << "opalinterface: doing notify callback\n";
+      (*m_opal_intf->notifyCallback)( 1 );
+    } else {
+      // 2/27/2005, removed spurious error message (MRM)
+      // cout << "error: opalinterface: mf-opal-api has NULL notify callback.\n";
+    }
+  }
+}
+
+// install the opal interface to simics
+//**************************************************************************
+// Publish ruby's half of the ruby/opal API: fill `api` with pointers to
+// this class's static entry points so Opal can call into ruby.
+void OpalInterface::installInterface( mf_ruby_api_t *api ) {
+  // install ruby interface
+  api->isReady = &OpalInterface::isReady;
+  api->makeRequest = &OpalInterface::makeRequest;
+  api->makePrefetch = &OpalInterface::makePrefetch;
+  api->advanceTime = &OpalInterface::advanceTime;
+  api->getTime = &OpalInterface::getTime;
+  api->staleDataRequest = &OpalInterface::staleDataRequest;
+  api->notifyCallback = &OpalInterface::notify;
+  api->getNumberOutstanding = &OpalInterface::getNumberOutstanding;
+  api->getNumberOutstandingDemand = &OpalInterface::getNumberOutstandingDemand;
+  api->getNumberOutstandingPrefetch = &OpalInterface::getNumberOutstandingPrefetch;
+  api->printProgress = &OpalInterface::printProgress;
+}
diff --git a/src/mem/ruby/interfaces/OpalInterface.hh b/src/mem/ruby/interfaces/OpalInterface.hh
new file mode 100644
index 000000000..4bc63d15a
--- /dev/null
+++ b/src/mem/ruby/interfaces/OpalInterface.hh
@@ -0,0 +1,214 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef OpalInterface_H
+#define OpalInterface_H
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+#include "Global.hh"
+#include "Driver.hh"
+#include "mf_api.hh"
+#include "CacheRequestType.hh"
+
+/*------------------------------------------------------------------------*/
+/* Class declaration(s) */
+/*------------------------------------------------------------------------*/
+
+class System;
+class TransactionInterfaceManager;
+class Sequencer;
+
+/**
+ * the processor model (opal) calls these OpalInterface APIs to access
+ * the memory hierarchy (ruby).
+ * @see pseq_t
+ * @author cmauer
+ * @version $Id$
+ */
+class OpalInterface : public Driver {
+public:
+ // Constructors
+ OpalInterface(System* sys_ptr);
+
+ // Destructor
+ // ~OpalInterface();
+
+ // Driver interface: per-processor statistics and callbacks
+ integer_t getInstructionCount(int procID) const;
+ void hitCallback( NodeID proc, SubBlock& data, CacheRequestType type, int thread );
+ void printStats(ostream& out) const;
+ void clearStats();
+ void printConfig(ostream& out) const;
+ void print(ostream& out) const;
+
+ // read/write simulated physical memory on behalf of a processor
+ integer_t readPhysicalMemory(int procID, physical_address_t address,
+ int len );
+
+ void writePhysicalMemory( int procID, physical_address_t address,
+ integer_t value, int len );
+ // query opal for its current cycle count
+ uint64 getOpalTime(int procID) const;
+
+ // for WATTCH power
+ void incrementL2Access(int procID) const;
+ void incrementPrefetcherAccess(int procID, int num_prefetches, int isinstr) const;
+
+ // notifies Opal of an L2 miss
+ void notifyL2Miss(int procID, physical_address_t physicalAddr, OpalMemop_t type, int tagexists) const;
+
+ void printDebug();
+
+ /// The static opalinterface instance
+ static OpalInterface *inst;
+
+ /// static methods -- these are installed into opal's mf_ruby_api_t
+ /// (see installInterface) so the signatures must match mf_api.hh
+ static int getNumberOutstanding(int cpuNumber);
+ static int getNumberOutstandingDemand(int cpuNumber);
+ static int getNumberOutstandingPrefetch( int cpuNumber );
+
+ /* returns true if the sequencer is able to handle more requests.
+ This implements "back-pressure" by which the processor knows
+ not to issue more requests if the network or cache's limits are reached.
+ */
+ static int isReady( int cpuNumber, la_t logicalAddr, pa_t physicalAddr, OpalMemop_t typeOfRequest, int thread );
+
+ /*
+ makeRequest performs the coherence transactions necessary to get the
+ physical address in the cache with the correct permissions. More than
+ one request can be outstanding to ruby, but only one per block address.
+ The size of the cache line is defined to Intf_CacheLineSize.
+ When a request is finished (e.g. the cache contains physical address),
+ ruby calls completedRequest(). No request can be bigger than
+ Opal_CacheLineSize. It is illegal to request non-aligned memory
+ locations. A request of size 2 must be at an even byte, a size 4 must
+ be at a byte address half-word aligned, etc. Requests also can't cross a
+ cache-line boundaries.
+ */
+ static void makeRequest(int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
+ int requestSize, OpalMemop_t typeOfRequest,
+ la_t virtualPC, int isPriv, int thread);
+
+ /* prefetch a given block...
+ */
+ static void makePrefetch(int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
+ int requestSize, OpalMemop_t typeOfRequest,
+ la_t virtualPC, int isPriv, int thread);
+
+ /*
+ * request data from the cache, even if it's state is "Invalid".
+ */
+ static int staleDataRequest( int cpuNumber, pa_t physicalAddr,
+ int requestSize, int8 *buffer );
+
+ /* notify ruby of opal's status
+ */
+ static void notify( int status );
+
+ /*
+ * advance ruby one cycle
+ */
+ static void advanceTime( void );
+
+ /*
+ * return ruby's cycle count.
+ */
+ static unsigned long long getTime( void );
+
+ /* prints Ruby's outstanding request table */
+ static void printProgress(int cpuNumber);
+
+ /*
+ * initialize / install the inter-module interface
+ */
+ static void installInterface( mf_ruby_api_t *api );
+
+ /*
+ * Test if opal is loaded or not
+ */
+ static bool isOpalLoaded( void );
+
+ /*
+ * query opal for its api
+ */
+ void queryOpalInterface( void );
+
+ /*
+ * remove the opal interface (opal is unloaded).
+ */
+ void removeOpalInterface( void );
+
+ /*
+ * set the opal interface (used if stand-alone testing)
+ */
+ void setOpalInterface( mf_opal_api_t *opal_intf ) {
+ m_opal_intf = opal_intf;
+ }
+
+ /**
+ * Signal an abort
+ */
+ //void abortCallback(NodeID proc);
+
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ OpalInterface(const OpalInterface& obj);
+ OpalInterface& operator=(const OpalInterface& obj);
+
+ // Data Members (m_ prefix)
+ mf_opal_api_t *m_opal_intf; // opal's callback table; NULL until opal loads
+ Time m_simicsStartTime; // ruby cycle when the link was established
+
+ static int s_advance_counter;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const OpalInterface& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// NOTE: the call to obj.print(out) is intentionally commented out, so this
+// operator currently emits nothing and only flushes the stream.
+extern inline
+ostream& operator<<(ostream& out, const OpalInterface& obj)
+{
+// obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif // OpalInterface_H
diff --git a/src/mem/ruby/interfaces/mf_api.hh b/src/mem/ruby/interfaces/mf_api.hh
new file mode 100644
index 000000000..c04a39308
--- /dev/null
+++ b/src/mem/ruby/interfaces/mf_api.hh
@@ -0,0 +1,165 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Macro declarations */
+/*------------------------------------------------------------------------*/
+
+#ifndef _MF_MEMORY_API_H_
+#define _MF_MEMORY_API_H_
+
+#ifdef SIMICS30
+#ifndef pa_t
+typedef physical_address_t pa_t;
+typedef physical_address_t la_t;
+#endif
+#endif
+
+/**
+ * Defines types of memory requests
+ */
+typedef enum OpalMemop {
+ OPAL_LOAD, // data load
+ OPAL_STORE, // data store
+ OPAL_IFETCH, // instruction fetch
+ OPAL_ATOMIC, // atomic read-modify-write
+} OpalMemop_t;
+
+/*------------------------------------------------------------------------*/
+/* Class declaration(s) */
+/*------------------------------------------------------------------------*/
+
+/**
+* structure which provides an interface between ruby and opal.
+* Function pointers filled in by opal; ruby calls "up" into the processor
+* model through this table (see OpalInterface::queryOpalInterface).
+*/
+typedef struct mf_opal_api {
+ /**
+ * @name Methods
+ */
+ //@{
+ /**
+ * notify processor model that data from address address is available at proc
+ */
+ void (*hitCallback)( int cpuNumber, pa_t phys_address, OpalMemop_t type, int thread );
+
+ /**
+ * notify opal that ruby is loaded, or removed
+ */
+ void (*notifyCallback)( int status );
+
+ /**
+ * query for the number of instructions executed on a given processor.
+ */
+ integer_t (*getInstructionCount)( int cpuNumber );
+
+ // for printing out debug info on crash
+ void (*printDebug)();
+
+ /** query Opal for the current time */
+ uint64 (*getOpalTime)(int cpuNumber);
+
+ /** For WATTCH power stats */
+ // Called whenever L2 is accessed
+ void (*incrementL2Access)(int cpuNumber);
+ // Called whenever prefetcher is accessed
+ void (*incrementPrefetcherAccess)(int cpuNumber, int num_prefetches, int isinstr);
+
+ /* Called whenever there's an L2 miss */
+ void (*notifyL2Miss)(int cpuNumber, physical_address_t physicalAddr, OpalMemop_t type, int tagexists);
+
+ //@}
+} mf_opal_api_t;
+
+// Function pointers filled in by ruby (see OpalInterface::installInterface);
+// opal calls "down" into the memory system through this table.
+typedef struct mf_ruby_api {
+ /**
+ * @name Methods
+ */
+ //@{
+ /**
+ * Check to see if the system is ready for more requests
+ */
+ int (*isReady)( int cpuNumber, la_t logicalAddr, pa_t physicalAddr, OpalMemop_t typeOfRequest, int thread );
+
+ /**
+ * Make a 'mandatory' request to the memory hierarchy
+ */
+ void (*makeRequest)( int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
+ int requestSize, OpalMemop_t typeOfRequest,
+ la_t virtualPC, int isPriv, int thread);
+
+ /**
+ * Make a prefetch request to the memory hierarchy
+ */
+ void (*makePrefetch)( int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
+ int requestSize, OpalMemop_t typeOfRequest,
+ la_t virtualPC, int isPriv, int thread);
+
+ /**
+ * Ask the memory hierarchy for 'stale' data that can be used for speculation
+ * Returns true (1) if the tag matches, false (0) if not.
+ */
+ int (*staleDataRequest)( int cpuNumber, pa_t physicalAddr,
+ int requestSize, int8 *buffer );
+
+ /**
+ * Advance ruby's cycle time one step
+ */
+ void (*advanceTime)( void );
+
+ /**
+ * Get ruby's cycle time count.
+ */
+ uint64 (*getTime)( void );
+
+ /** prints Ruby's outstanding request table */
+ void (*printProgress)(int cpuNumber);
+
+ /**
+ * notify ruby that opal is loaded, or removed
+ */
+ void (*notifyCallback)( int status );
+
+ // Returns the number of outstanding request
+ int (*getNumberOutstanding)(int cpuNumber);
+
+ // Returns the number of outstanding demand requests
+ int (*getNumberOutstandingDemand)(int cpuNumber );
+
+ // Returns the number of outstanding prefetch request
+ int (*getNumberOutstandingPrefetch)(int cpuNumber );
+
+
+ //@}
+} mf_ruby_api_t;
+
+#endif //_MF_MEMORY_API_H_
diff --git a/src/mem/ruby/network/Network.hh b/src/mem/ruby/network/Network.hh
new file mode 100644
index 000000000..662e54e93
--- /dev/null
+++ b/src/mem/ruby/network/Network.hh
@@ -0,0 +1,148 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Network.h
+ *
+ * Description: The Network class is the base class for classes that
+ * implement the interconnection network between components
+ * (processor/cache components and memory/directory components). The
+ * interconnection network as described here is not a physical
+ * network, but a programming concept used to implement all
+ * communication between components. Thus parts of this 'network'
+ * will model the on-chip connections between cache controllers and
+ * directory controllers as well as the links between chip and network
+ * switches.
+ *
+ * $Id$
+ * */
+
+#ifndef NETWORK_H
+#define NETWORK_H
+
+#include "Global.hh"
+#include "NodeID.hh"
+#include "MessageSizeType.hh"
+
+class NetDest;
+class MessageBuffer;
+class Throttle;
+
+// Abstract base class for all interconnect implementations (see file header
+// comment). Concrete networks are created via the createNetwork factory.
+class Network {
+public:
+ // Constructors
+ Network() {}
+
+ // Destructor
+ virtual ~Network() {}
+
+ // Public Methods
+
+ // factory: builds the concrete network selected by the configuration
+ static Network* createNetwork(int nodes);
+
+ // returns the queue requested for the given component
+ virtual MessageBuffer* getToNetQueue(NodeID id, bool ordered, int netNumber) = 0;
+ virtual MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int netNumber) = 0;
+ // default: no throttles exposed; subclasses may override
+ virtual const Vector<Throttle*>* getThrottles(NodeID id) const { return NULL; }
+
+ virtual int getNumNodes() {return 1;}
+
+ // topology construction hooks, called by Topology::createLinks
+ virtual void makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration) = 0;
+ virtual void makeInLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration) = 0;
+ virtual void makeInternalLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration) = 0;
+
+ virtual void reset() = 0;
+
+ virtual void printStats(ostream& out) const = 0;
+ virtual void clearStats() = 0;
+ virtual void printConfig(ostream& out) const = 0;
+ virtual void print(ostream& out) const = 0;
+
+private:
+
+ // Private Methods
+ // Private copy constructor and assignment operator
+ Network(const Network& obj);
+ Network& operator=(const Network& obj);
+
+ // Data Members (m_ prefix)
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Network& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition: delegates to the virtual print() hook
+extern inline
+ostream& operator<<(ostream& out, const Network& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+// Code to map network message size types to an integer number of bytes
+const int CONTROL_MESSAGE_SIZE = 8;
+const int DATA_MESSAGE_SIZE = (64+8); // cache line (64B) + control header (8B)
+
+// Returns the size in bytes of a message of the given type; aborts the
+// simulation (ERROR_MSG) on Undefined or out-of-range types.
+extern inline
+int MessageSizeType_to_int(MessageSizeType size_type)
+{
+ switch(size_type) {
+ case MessageSizeType_Undefined:
+ ERROR_MSG("Can't convert Undefined MessageSizeType to integer");
+ break;
+ case MessageSizeType_Control:
+ case MessageSizeType_Request_Control:
+ case MessageSizeType_Reissue_Control:
+ case MessageSizeType_Response_Control:
+ case MessageSizeType_Writeback_Control:
+ case MessageSizeType_Forwarded_Control:
+ case MessageSizeType_Invalidate_Control:
+ case MessageSizeType_Unblock_Control:
+ case MessageSizeType_Persistent_Control:
+ case MessageSizeType_Completion_Control:
+ return CONTROL_MESSAGE_SIZE;
+ break; // unreachable (follows return); kept for symmetry
+ case MessageSizeType_Data:
+ case MessageSizeType_Response_Data:
+ case MessageSizeType_ResponseLocal_Data:
+ case MessageSizeType_ResponseL2hit_Data:
+ case MessageSizeType_Writeback_Data:
+ return DATA_MESSAGE_SIZE;
+ break; // unreachable (follows return); kept for symmetry
+ default:
+ ERROR_MSG("Invalid range for type MessageSizeType");
+ break;
+ }
+ return 0;
+}
+
+#endif //NETWORK_H
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh
new file mode 100644
index 000000000..e3a9b7d2d
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh
@@ -0,0 +1,17 @@
+/*
+ * CreditLink_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+#ifndef CREDIT_LINK_D_H
+#define CREDIT_LINK_D_H
+
+#include "NetworkLink_d.hh"
+
+// A credit link is just a network link used in the reverse direction to
+// carry VC buffer credits; it adds no state beyond NetworkLink_d.
+class CreditLink_d : public NetworkLink_d {
+public:
+ CreditLink_d(int id):NetworkLink_d(id) {}
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.cc
new file mode 100644
index 000000000..43f9a31bd
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.cc
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * GarnetNetwork_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "GarnetNetwork_d.hh"
+#include "MachineType.hh"
+#include "NetworkInterface_d.hh"
+#include "MessageBuffer.hh"
+#include "Router_d.hh"
+#include "Topology.hh"
+#include "NetworkLink_d.hh"
+#include "CreditLink_d.hh"
+#include "NetDest.hh"
+
+// Build the whole network: per-node message buffers, routers (one per
+// topology switch), network interfaces, and the links between them.
+// NOTE: the 'nodes' parameter is ignored; the node count is taken from
+// MachineType_base_number(MachineType_NUM) instead.
+GarnetNetwork_d::GarnetNetwork_d(int nodes)
+{
+ m_nodes = MachineType_base_number(MachineType_NUM); // Total nodes in network
+ m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS; // Number of virtual networks = number of message classes in the coherence protocol
+ m_ruby_start = 0;
+ m_flits_recieved = 0;
+ m_flits_injected = 0;
+ m_network_latency = 0.0;
+ m_queueing_latency = 0.0;
+
+ m_router_ptr_vector.clear();
+
+ // Allocate to and from queues
+ m_toNetQueues.setSize(m_nodes); // Queues that are getting messages from protocol
+ m_fromNetQueues.setSize(m_nodes); // Queues that are feeding the protocol
+ m_in_use.setSize(m_virtual_networks);
+ m_ordered.setSize(m_virtual_networks);
+ for (int i = 0; i < m_virtual_networks; i++)
+ {
+ m_in_use[i] = false;
+ m_ordered[i] = false;
+ }
+
+ for (int node = 0; node < m_nodes; node++)
+ {
+ //Setting how many virtual message buffers will there be per Network Queue
+ m_toNetQueues[node].setSize(m_virtual_networks);
+ m_fromNetQueues[node].setSize(m_virtual_networks);
+
+ for (int j = 0; j < m_virtual_networks; j++)
+ {
+ m_toNetQueues[node][j] = new MessageBuffer(); // Instantiating the Message Buffers that interact with the coherence protocol
+ m_fromNetQueues[node][j] = new MessageBuffer();
+ }
+ }
+
+ // Setup the network switches
+ m_topology_ptr = new Topology(this, m_nodes);
+
+ int number_of_routers = m_topology_ptr->numSwitches();
+ for (int i=0; i<number_of_routers; i++) {
+ m_router_ptr_vector.insertAtBottom(new Router_d(i, this));
+ }
+
+ for (int i=0; i < m_nodes; i++) {
+ NetworkInterface_d *ni = new NetworkInterface_d(i, m_virtual_networks, this);
+ ni->addNode(m_toNetQueues[i], m_fromNetQueues[i]);
+ m_ni_ptr_vector.insertAtBottom(ni);
+ }
+ // createLinks calls back into makeInLink/makeOutLink/makeInternalLink below
+ m_topology_ptr->createLinks(false); // false because this isn't a reconfiguration
+ for(int i = 0; i < m_router_ptr_vector.size(); i++)
+ {
+ m_router_ptr_vector[i]->init();
+ }
+}
+
+// Tear down everything the constructor allocated: message buffers,
+// routers, NIs, links, credit links, and the topology object.
+GarnetNetwork_d::~GarnetNetwork_d()
+{
+ for (int i = 0; i < m_nodes; i++)
+ {
+ m_toNetQueues[i].deletePointers();
+ m_fromNetQueues[i].deletePointers();
+ }
+ m_router_ptr_vector.deletePointers();
+ m_ni_ptr_vector.deletePointers();
+ m_link_ptr_vector.deletePointers();
+ m_creditlink_ptr_vector.deletePointers();
+ delete m_topology_ptr;
+}
+
+// Drain all protocol-facing message buffers (both directions, every node
+// and virtual network). Does not touch in-flight router/link state.
+void GarnetNetwork_d::reset()
+{
+ for (int node = 0; node < m_nodes; node++)
+ {
+ for (int j = 0; j < m_virtual_networks; j++)
+ {
+ m_toNetQueues[node][j]->clear();
+ m_fromNetQueues[node][j]->clear();
+ }
+ }
+}
+
+/*
+ * This function creates a link from the Network Interface (NI) into the Network.
+ * It creates a Network Link from the NI to a Router and a Credit Link from
+ * the Router to the NI
+*/
+
+// NI -> router direction: data flows over net_link into the router, and
+// credits flow back over credit_link to the NI. Reconfiguration is not
+// supported by the garnet fixed pipeline and aborts the simulation.
+void GarnetNetwork_d::makeInLink(NodeID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration)
+{
+ assert(src < m_nodes);
+
+ if(!isReconfiguration)
+ {
+ // link ids are just the current vector sizes, so they stay unique
+ NetworkLink_d *net_link = new NetworkLink_d(m_link_ptr_vector.size(), link_latency, this);
+ CreditLink_d *credit_link = new CreditLink_d(m_creditlink_ptr_vector.size());
+ m_link_ptr_vector.insertAtBottom(net_link);
+ m_creditlink_ptr_vector.insertAtBottom(credit_link);
+
+ m_router_ptr_vector[dest]->addInPort(net_link, credit_link);
+ m_ni_ptr_vector[src]->addOutPort(net_link, credit_link);
+ }
+ else
+ {
+ ERROR_MSG("Fatal Error:: Reconfiguration not allowed here");
+ // do nothing
+ }
+}
+
+/*
+ * This function creates a link from the Network to a NI.
+ * It creates a Network Link from a Router to the NI and
+ * a Credit Link from NI to the Router
+*/
+
+// Router -> NI direction: data flows over net_link to the NI, and credits
+// flow back over credit_link to the router. The routing_table_entry and
+// link_weight configure the router's output port.
+void GarnetNetwork_d::makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+{
+ assert(dest < m_nodes);
+ assert(src < m_router_ptr_vector.size());
+ assert(m_router_ptr_vector[src] != NULL);
+
+ if(!isReconfiguration)
+ {
+ // link ids are just the current vector sizes, so they stay unique
+ NetworkLink_d *net_link = new NetworkLink_d(m_link_ptr_vector.size(), link_latency, this);
+ CreditLink_d *credit_link = new CreditLink_d(m_creditlink_ptr_vector.size());
+ m_link_ptr_vector.insertAtBottom(net_link);
+ m_creditlink_ptr_vector.insertAtBottom(credit_link);
+
+ m_router_ptr_vector[src]->addOutPort(net_link, routing_table_entry, link_weight, credit_link);
+ m_ni_ptr_vector[dest]->addInPort(net_link, credit_link);
+ }
+ else
+ {
+ ERROR_MSG("Fatal Error:: Reconfiguration not allowed here");
+ //do nothing
+ }
+}
+
+/*
+ * This function creates an internal network link between two routers
+*/
+
+// Router -> router direction: data link from src's output port to dest's
+// input port, plus the matching credit link in the reverse direction.
+void GarnetNetwork_d::makeInternalLink(SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+{
+ if(!isReconfiguration)
+ {
+ // link ids are just the current vector sizes, so they stay unique
+ NetworkLink_d *net_link = new NetworkLink_d(m_link_ptr_vector.size(), link_latency, this);
+ CreditLink_d *credit_link = new CreditLink_d(m_creditlink_ptr_vector.size());
+ m_link_ptr_vector.insertAtBottom(net_link);
+ m_creditlink_ptr_vector.insertAtBottom(credit_link);
+
+ m_router_ptr_vector[dest]->addInPort(net_link, credit_link);
+ m_router_ptr_vector[src]->addOutPort(net_link, routing_table_entry, link_weight, credit_link);
+ }
+ else
+ {
+ ERROR_MSG("Fatal Error:: Reconfiguration not allowed here");
+ // do nothing
+ }
+}
+
+// Validate the (node, vnet) pair and record that this virtual network is
+// in use (and whether it must preserve message ordering).
+void GarnetNetwork_d::checkNetworkAllocation(NodeID id, bool ordered, int network_num)
+{
+ ASSERT(id < m_nodes);
+ ASSERT(network_num < m_virtual_networks);
+
+ if (ordered)
+ {
+ m_ordered[network_num] = true;
+ }
+ m_in_use[network_num] = true;
+}
+
+// Return the protocol-to-network buffer for (node, vnet), marking the
+// virtual network as allocated as a side effect.
+MessageBuffer* GarnetNetwork_d::getToNetQueue(NodeID id, bool ordered, int network_num)
+{
+ checkNetworkAllocation(id, ordered, network_num);
+ return m_toNetQueues[id][network_num];
+}
+
+// Return the network-to-protocol buffer for (node, vnet), marking the
+// virtual network as allocated as a side effect.
+MessageBuffer* GarnetNetwork_d::getFromNetQueue(NodeID id, bool ordered, int network_num)
+{
+ checkNetworkAllocation(id, ordered, network_num);
+ return m_fromNetQueues[id][network_num];
+}
+
+// Restart the stats window: subsequent utilization/load averages in
+// printStats are computed relative to this timestamp.
+void GarnetNetwork_d::clearStats()
+{
+ m_ruby_start = g_eventQueue_ptr->getTime();
+}
+
+// Start of the current stats window (set by clearStats, 0 initially).
+Time GarnetNetwork_d::getRubyStartTime()
+{
+ return m_ruby_start;
+}
+
+// Dump link utilization, per-VC load, average network latency, and power
+// estimates. NOTE: averages divide by (current time - m_ruby_start) and by
+// m_flits_recieved; call only after some simulated time/traffic has elapsed.
+void GarnetNetwork_d::printStats(ostream& out) const
+{ double average_link_utilization = 0;
+ Vector<double > average_vc_load;
+ average_vc_load.setSize(m_virtual_networks*NetworkConfig::getVCsPerClass());
+
+ for(int i = 0; i < m_virtual_networks*NetworkConfig::getVCsPerClass(); i++)
+ {
+ average_vc_load[i] = 0;
+ }
+
+ out << endl;
+ out << "Network Stats" << endl;
+ out << "-------------" << endl;
+ out << endl;
+ for(int i = 0; i < m_link_ptr_vector.size(); i++)
+ {
+ // utilization is flits carried per elapsed cycle, per link
+ average_link_utilization += (double(m_link_ptr_vector[i]->getLinkUtilization())) / (double(g_eventQueue_ptr->getTime()-m_ruby_start));
+
+ Vector<int > vc_load = m_link_ptr_vector[i]->getVcLoad();
+ for(int j = 0; j < vc_load.size(); j++)
+ {
+ assert(vc_load.size() == NetworkConfig::getVCsPerClass()*m_virtual_networks);
+ average_vc_load[j] += vc_load[j];
+ }
+ }
+ average_link_utilization = average_link_utilization/m_link_ptr_vector.size();
+ out << "Average Link Utilization :: " << average_link_utilization << " flits/cycle" << endl;
+ out << "-------------" << endl;
+
+ for(int i = 0; i < NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ {
+ average_vc_load[i] = (double(average_vc_load[i]) / (double(g_eventQueue_ptr->getTime()) - m_ruby_start));
+ out << "Average VC Load [" << i << "] = " << average_vc_load[i] << " flits/cycle " << endl;
+ }
+ out << "-------------" << endl;
+
+ // out << "Total flits injected = " << m_flits_injected << endl;
+ // out << "Total flits recieved = " << m_flits_recieved << endl;
+ out << "Average network latency = " << ((double) m_network_latency/ (double) m_flits_recieved)<< endl;
+ // out << "Average queueing latency = " << ((double) m_queueing_latency/ (double) m_flits_recieved)<< endl;
+ // out << "Average latency = " << ((double) (m_queueing_latency + m_network_latency) / (double) m_flits_recieved)<< endl;
+ out << "-------------" << endl;
+
+ // Orion-style power model: sum per-link and per-router estimates
+ double m_total_link_power = 0.0;
+ double m_total_router_power = 0.0;
+
+ for(int i = 0; i < m_link_ptr_vector.size(); i++)
+ {
+ m_total_link_power += m_link_ptr_vector[i]->calculate_power();
+ }
+
+ for(int i = 0; i < m_router_ptr_vector.size(); i++)
+ {
+ m_total_router_power += m_router_ptr_vector[i]->calculate_power();
+ }
+ out << "Total Link Power = " << m_total_link_power << " W " << endl;
+ out << "Total Router Power = " << m_total_router_power << " W " <<endl;
+ out << "-------------" << endl;
+}
+
+// Dump the network configuration: per-vnet activation/ordering status,
+// then delegate to each NI, each router, and (optionally) the topology.
+void GarnetNetwork_d::printConfig(ostream& out) const
+{
+ out << endl;
+ out << "Network Configuration" << endl;
+ out << "---------------------" << endl;
+ out << "network: GarnetNetwork_d" << endl;
+ out << "topology: " << g_NETWORK_TOPOLOGY << endl;
+ out << endl;
+
+ for (int i = 0; i < m_virtual_networks; i++)
+ {
+ out << "virtual_net_" << i << ": ";
+ if (m_in_use[i])
+ {
+ out << "active, ";
+ if (m_ordered[i])
+ {
+ out << "ordered" << endl;
+ }
+ else
+ {
+ out << "unordered" << endl;
+ }
+ }
+ else
+ {
+ out << "inactive" << endl;
+ }
+ }
+ out << endl;
+
+ for(int i = 0; i < m_ni_ptr_vector.size(); i++)
+ {
+ m_ni_ptr_vector[i]->printConfig(out);
+ }
+ for(int i = 0; i < m_router_ptr_vector.size(); i++)
+ {
+ m_router_ptr_vector[i]->printConfig(out);
+ }
+ if (g_PRINT_TOPOLOGY)
+ {
+ m_topology_ptr->printConfig(out);
+ }
+}
+
+// Short identifying tag used by the stream output operator.
+void GarnetNetwork_d::print(ostream& out) const
+{
+ out << "[GarnetNetwork_d]";
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh
new file mode 100644
index 000000000..34486eab8
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * GarnetNetwork_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef GARNETNETWORK_D_H
+#define GARNETNETWORK_D_H
+
+#include "NetworkHeader.hh"
+#include "Vector.hh"
+#include "NetworkConfig.hh"
+#include "Network.hh"
+
+class NetworkInterface_d;
+class MessageBuffer;
+class Router_d;
+class Topology;
+class NetDest;
+class NetworkLink_d;
+class CreditLink_d;
+
+class GarnetNetwork_d : public Network{ // Garnet fixed-pipeline (detailed) network model
+public:
+ GarnetNetwork_d(int nodes);
+
+ ~GarnetNetwork_d();
+
+ int getNumNodes(){ return m_nodes;} // number of protocol nodes attached to this network
+
+ // returns the queue requested for the given component
+ MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num);
+ MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num);
+
+ void clearStats();
+ void printStats(ostream& out) const;
+ void printConfig(ostream& out) const;
+ void print(ostream& out) const;
+
+ inline void increment_injected_flits() // stats hook: a flit entered the network at an NI
+ {
+ m_flits_injected++;
+ }
+ inline void increment_recieved_flits() // stats hook: a flit left the network ('recieved' spelling is part of the API)
+ {
+ m_flits_recieved++;
+ }
+ inline void increment_network_latency(Time latency) // accumulate per-flit in-network delay
+ {
+ m_network_latency += latency;
+ }
+ inline void increment_queueing_latency(Time latency) // accumulate per-flit pre-injection delay
+ {
+ m_queueing_latency += latency;
+ }
+
+ bool isVNetOrdered(int vnet) // true if vnet must preserve message order
+ {
+ return m_ordered[vnet];
+ }
+ bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
+
+ Time getRubyStartTime();
+
+ void reset();
+
+ // Methods used by Topology to setup the network
+ void makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration);
+ void makeInLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration);
+ void makeInternalLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration);
+
+private:
+ void checkNetworkAllocation(NodeID id, bool ordered, int network_num);
+
+// Private copy constructor and assignment operator (declared, never defined: copying is forbidden)
+ GarnetNetwork_d(const GarnetNetwork_d& obj);
+ GarnetNetwork_d& operator=(const GarnetNetwork_d& obj);
+
+/***********Data Members*************/
+ int m_virtual_networks;
+ int m_nodes;
+ int m_flits_recieved, m_flits_injected; // flit counters for the stats hooks above
+ double m_network_latency, m_queueing_latency; // latency accumulators (averaged in printStats)
+
+ Vector<bool> m_in_use; // per-vnet: is this virtual network active?
+ Vector<bool> m_ordered; // per-vnet: must ordering be preserved?
+
+ Vector<Vector<MessageBuffer*> > m_toNetQueues;
+ Vector<Vector<MessageBuffer*> > m_fromNetQueues;
+
+ Vector<Router_d *> m_router_ptr_vector; // All Routers in Network
+ Vector<NetworkLink_d *> m_link_ptr_vector; // All data links in the network
+ Vector<CreditLink_d *> m_creditlink_ptr_vector; // All credit links in the network
+ Vector<NetworkInterface_d *> m_ni_ptr_vector; // All NI's in Network
+
+ Topology* m_topology_ptr;
+ Time m_ruby_start;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const GarnetNetwork_d& obj);
+
+// ******************* Definitions *******************
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const GarnetNetwork_d& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //GARNETNETWORK_D_H
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/InputUnit_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/InputUnit_d.cc
new file mode 100644
index 000000000..bedd801d5
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/InputUnit_d.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * InputUnit_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "InputUnit_d.hh"
+#include "Router_d.hh"
+
+InputUnit_d::InputUnit_d(int id, Router_d *router)
+{
+ m_id = id;
+ m_router = router;
+ m_num_vcs = m_router->get_num_vcs();
+
+ m_num_buffer_reads = 0;
+ m_num_buffer_writes = 0;
+
+ creditQueue = new flitBuffer_d();
+ // Instantiating the virtual channels
+ m_vcs.setSize(m_num_vcs);
+ for(int i=0; i < m_num_vcs; i++)
+ {
+ m_vcs[i] = new VirtualChannel_d(i);
+ }
+}
+
+InputUnit_d::~InputUnit_d()
+{
+ delete creditQueue; // allocated in the constructor
+ m_vcs.deletePointers(); // frees every VirtualChannel_d allocated in the constructor
+}
+
+void InputUnit_d::wakeup()
+{
+ flit_d *t_flit;
+ if(m_in_link->isReady())
+ {
+ t_flit = m_in_link->consumeLink();
+ int vc = t_flit->get_vc();
+ if((t_flit->get_type() == HEAD_) || (t_flit->get_type() == HEAD_TAIL_))
+ {
+ assert(m_vcs[vc]->get_state() == IDLE_);
+ m_router->route_req(t_flit, this, vc); // Do the route computation for this vc
+ m_vcs[vc]->set_enqueue_time(g_eventQueue_ptr->getTime());
+ }
+ else
+ {
+ t_flit->advance_stage(SA_);
+ m_router->swarb_req();
+ }
+ m_vcs[vc]->insertFlit(t_flit); // write flit into input buffer
+ m_num_buffer_writes++;
+ m_num_buffer_reads++; // same as read because any flit that is written will be read only once
+ }
+}
+
+
+void InputUnit_d::printConfig(ostream& out)
+{
+ out << endl;
+ out << "InputUnit Configuration" << endl;
+ out << "---------------------" << endl;
+ out << "id = " << m_id << endl;
+ out << "In link is " << m_in_link->get_id() << endl;
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/InputUnit_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/InputUnit_d.hh
new file mode 100644
index 000000000..c22363fb1
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/InputUnit_d.hh
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * InputUnit_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef INPUT_UNIT_D_H
+#define INPUT_UNIT_D_H
+
+#include "NetworkHeader.hh"
+#include "flitBuffer_d.hh"
+#include "Consumer.hh"
+#include "Vector.hh"
+#include "VirtualChannel_d.hh"
+#include "NetworkLink_d.hh"
+#include "CreditLink_d.hh"
+
+class Router_d;
+
+class InputUnit_d : public Consumer { // one input port of a Router_d; owns the per-VC flit buffers
+public:
+ InputUnit_d(int id, Router_d *router);
+ ~InputUnit_d();
+
+ void wakeup();
+ void printConfig(ostream& out);
+ flitBuffer_d* getCreditQueue() { return creditQueue; }
+ void print(ostream& out) const {}; // intentionally empty (trailing ';' is a harmless empty declaration)
+
+ inline int get_inlink_id()
+ {
+ return m_in_link->get_id();
+ }
+
+ inline void set_vc_state(VC_state_type state, int vc) // force VC 'vc' into 'state'
+ {
+ m_vcs[vc]->set_state(state);
+ }
+ inline void set_enqueue_time(int invc, Time time)
+ {
+ m_vcs[invc]->set_enqueue_time(time);
+ }
+ inline Time get_enqueue_time(int invc)
+ {
+ return m_vcs[invc]->get_enqueue_time();
+ }
+ inline void update_credit(int in_vc, int credit)
+ {
+ m_vcs[in_vc]->update_credit(credit);
+ }
+
+ inline bool has_credits(int vc)
+ {
+ return m_vcs[vc]->has_credits();
+ }
+
+ inline void increment_credit(int in_vc, bool free_signal) // queue a credit flit back upstream; free_signal additionally marks the VC as freed
+ {
+ flit_d *t_flit = new flit_d(in_vc, free_signal);
+ creditQueue->insert(t_flit);
+ g_eventQueue_ptr->scheduleEvent(m_credit_link, 1); // credit leaves next cycle
+ }
+
+ inline int get_outvc(int invc)
+ {
+ return m_vcs[invc]->get_outvc();
+ }
+
+ inline void updateRoute(int vc, int outport) // record route result; VC_AB_ presumably = route done, awaiting output-VC allocation
+ {
+ m_vcs[vc]->set_outport(outport);
+ m_vcs[vc]->set_state(VC_AB_);
+ }
+
+ inline void grant_vc(int in_vc, int out_vc)
+ {
+ m_vcs[in_vc]->grant_vc(out_vc);
+ }
+
+ inline flit_d* peekTopFlit(int vc) // inspect without removing
+ {
+ return m_vcs[vc]->peekTopFlit();
+ }
+
+ inline flit_d* getTopFlit(int vc) // remove and return
+ {
+ return m_vcs[vc]->getTopFlit();
+ }
+
+ inline bool need_stage(int vc, VC_state_type state, flit_stage stage)
+ {
+ return m_vcs[vc]->need_stage(state, stage);
+ }
+
+ inline bool need_stage_nextcycle(int vc, VC_state_type state, flit_stage stage)
+ {
+ return m_vcs[vc]->need_stage_nextcycle(state, stage);
+ }
+
+ inline bool isReady(int invc)
+ {
+ return m_vcs[invc]->isReady();
+ }
+
+ inline int get_route(int vc)
+ {
+ return m_vcs[vc]->get_route();
+ }
+ inline void set_in_link(NetworkLink_d *link)
+ {
+ m_in_link = link;
+ }
+
+ inline void set_credit_link(CreditLink_d *credit_link)
+ {
+ m_credit_link = credit_link;
+ }
+
+ inline double get_buf_read_count() // stats accessor (see wakeup())
+ {
+ return m_num_buffer_reads;
+ }
+
+ inline double get_buf_write_count() // stats accessor (see wakeup())
+ {
+ return m_num_buffer_writes;
+ }
+
+private:
+ int m_id;
+ int m_num_vcs;
+ double m_num_buffer_writes, m_num_buffer_reads; // doubles so averages divide cleanly in stats code
+
+ Router_d *m_router;
+ NetworkLink_d *m_in_link; // link delivering flits to this port
+ CreditLink_d *m_credit_link; // link carrying our credits back upstream
+ flitBuffer_d *creditQueue; // source queue feeding m_credit_link
+
+ // Virtual channels
+ Vector<VirtualChannel_d *> m_vcs; // [vc]
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkHeader.hh b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkHeader.hh
new file mode 100644
index 000000000..6a212ce99
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkHeader.hh
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkHeader.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef NETWORK_HEADER_H
+#define NETWORK_HEADER_H
+
+#include "Global.hh"
+#include "NodeID.hh"
+
+using namespace std; // NOTE(review): using-directives at header scope leak into every includer; kept for GEMS compatibility
+using namespace __gnu_cxx; // NOTE(review): gcc-specific namespace -- confirm still needed
+
+enum flit_type {HEAD_, BODY_, TAIL_, HEAD_TAIL_, NUM_FLIT_TYPE_}; // flit position within a packet; HEAD_TAIL_ = single-flit packet
+enum VC_state_type {IDLE_, VC_AB_, ACTIVE_, NUM_VC_STATE_TYPE_}; // VC_AB_ presumably = route computed, awaiting output-VC allocation
+enum flit_stage {I_, VA_, SA_, ST_, LT_, NUM_FLIT_STAGE_}; // SA_ = switch arbitration (see InputUnit_d::wakeup); others likely VC alloc / switch+link traversal
+
+#define NETCONFIG_DEFAULTS "netconfig.defaults"
+
+#define INFINITE_ 10000 // sentinel enqueue time meaning "no pending flit" (see NetworkInterface_d)
+
+#endif
+
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.cc
new file mode 100644
index 000000000..edf2d4b95
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.cc
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkInterface_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "NetworkInterface_d.hh"
+#include "MessageBuffer.hh"
+#include "flitBuffer_d.hh"
+#include "NetworkMessage.hh"
+
+NetworkInterface_d::NetworkInterface_d(int id, int virtual_networks, GarnetNetwork_d *network_ptr)
+{
+ m_id = id;
+ m_net_ptr = network_ptr;
+ m_virtual_networks = virtual_networks;
+ m_vc_per_vnet = NetworkConfig::getVCsPerClass();
+ m_num_vcs = m_vc_per_vnet*m_virtual_networks;
+
+ m_vc_round_robin = 0;
+ m_ni_buffers.setSize(m_num_vcs);
+ m_ni_enqueue_time.setSize(m_num_vcs);
+ inNode_ptr.setSize(m_virtual_networks);
+ outNode_ptr.setSize(m_virtual_networks);
+ creditQueue = new flitBuffer_d();
+
+ for(int i =0; i < m_num_vcs; i++)
+ {
+ m_ni_buffers[i] = new flitBuffer_d(); // instantiating the NI flit buffers
+ m_ni_enqueue_time[i] = INFINITE_;
+ }
+ m_vc_allocator.setSize(m_virtual_networks); // 1 allocator per virtual net
+ for(int i = 0; i < m_virtual_networks; i++)
+ {
+ m_vc_allocator[i] = 0;
+ }
+
+ for(int i = 0; i < m_num_vcs; i++)
+ {
+ m_out_vc_state.insertAtBottom(new OutVcState_d(i));
+ m_out_vc_state[i]->setState(IDLE_, g_eventQueue_ptr->getTime());
+ }
+}
+
+NetworkInterface_d::~NetworkInterface_d()
+{
+ m_out_vc_state.deletePointers(); // frees the OutVcState_d objects from the constructor
+ m_ni_buffers.deletePointers(); // frees the per-VC staging buffers
+ delete creditQueue; // allocated in the constructor
+ delete outSrcQueue; // NOTE(review): only allocated in addOutPort(); if that was never called this pointer is uninitialized -- confirm construction order
+}
+
+void NetworkInterface_d::addInPort(NetworkLink_d *in_link, CreditLink_d *credit_link)
+{
+ inNetLink = in_link;
+ in_link->setLinkConsumer(this);
+ m_ni_credit_link = credit_link;
+ credit_link->setSourceQueue(creditQueue);
+}
+
+void NetworkInterface_d::addOutPort(NetworkLink_d *out_link, CreditLink_d *credit_link)
+{
+ m_credit_link = credit_link;
+ credit_link->setLinkConsumer(this);
+
+ outNetLink = out_link;
+ outSrcQueue = new flitBuffer_d();
+ out_link->setSourceQueue(outSrcQueue);
+}
+
+void NetworkInterface_d::addNode(Vector<MessageBuffer*>& in, Vector<MessageBuffer*>& out)
+{
+ ASSERT(in.size() == m_virtual_networks);
+ inNode_ptr = in;
+ outNode_ptr = out;
+ for (int j = 0; j < m_virtual_networks; j++)
+ {
+ inNode_ptr[j]->setConsumer(this); // So that protocol injects messages into the NI
+ }
+}
+
+bool NetworkInterface_d::flitisizeMessage(MsgPtr msg_ptr, int vnet) // break a protocol message into flits; false if out of free output VCs
+{
+ NetworkMessage *net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
+ NetDest net_msg_dest = net_msg_ptr->getInternalDestination();
+ Vector<NodeID> dest_nodes = net_msg_dest.getAllDest(); // gets all the destinations associated with this message.
+
+ int num_flits = (int) ceil((double) MessageSizeType_to_int(net_msg_ptr->getMessageSize())/NetworkConfig::getFlitSize() ); // Number of flits is dependent on the link bandwidth available. This is expressed in terms of bytes/cycle or the flit size
+
+ for(int ctr = 0; ctr < dest_nodes.size(); ctr++) // loop because we will be converting all multicast messages into unicast messages
+ {
+ int vc = calculateVC(vnet); // this will return a free output virtual channel
+ if(vc == -1)
+ {
+ return false ; // no free VC: caller retries later; destinations already handled were stripped from the original message below
+ }
+ MsgPtr new_msg_ptr = *(msg_ptr.ref()); // per-destination copy of the message
+ NodeID destID = dest_nodes[ctr];
+
+ NetworkMessage *new_net_msg_ptr = dynamic_cast<NetworkMessage*>(new_msg_ptr.ref());
+ if(dest_nodes.size() > 1) // multicast: narrow the copy's destination to just destID
+ {
+ NetDest personal_dest;
+ for(int m = 0; m < (int) MachineType_NUM; m++) // find which machine type's ID range destID falls in
+ {
+ if((destID >= MachineType_base_number((MachineType) m)) && destID < MachineType_base_number((MachineType) (m+1)))
+ {
+ // calculating the NetDest associated with this destination ID
+ personal_dest.clear();
+ personal_dest.add((MachineID) {(MachineType) m, (destID - MachineType_base_number((MachineType) m))});
+ new_net_msg_ptr->getInternalDestination() = personal_dest;
+ break;
+ }
+ }
+ net_msg_dest.removeNetDest(personal_dest);
+ net_msg_ptr->getInternalDestination().removeNetDest(personal_dest); // removing the destination from the original message to reflect that a message with this particular destination has been flitisized and an output vc is acquired
+ }
+ for(int i = 0; i < num_flits; i++) // emit flit i of num_flits into the chosen VC's staging buffer
+ {
+ m_net_ptr->increment_injected_flits();
+ flit_d *fl = new flit_d(i, vc, vnet, num_flits, new_msg_ptr);
+ fl->set_delay(g_eventQueue_ptr->getTime() - (msg_ptr.ref())->getTime()); // queueing delay accrued before injection
+ m_ni_buffers[vc]->insert(fl);
+ }
+ m_ni_enqueue_time[vc] = g_eventQueue_ptr->getTime(); // VC now has a pending packet (was INFINITE_)
+ m_out_vc_state[vc]->setState(ACTIVE_, g_eventQueue_ptr->getTime());
+ }
+ return true ;
+}
+
+// Looking for a free output vc
+int NetworkInterface_d::calculateVC(int vnet)
+{
+ for(int i = 0; i < m_vc_per_vnet; i++)
+ {
+ int delta = m_vc_allocator[vnet];
+ m_vc_allocator[vnet]++;
+ if(m_vc_allocator[vnet] == m_vc_per_vnet)
+ m_vc_allocator[vnet] = 0;
+
+ if(m_out_vc_state[(vnet*m_vc_per_vnet) + delta]->isInState(IDLE_, g_eventQueue_ptr->getTime()))
+ {
+ return ((vnet*m_vc_per_vnet) + delta);
+ }
+ }
+ return -1;
+}
+
+/*
+ * The NI wakeup checks whether there are any ready messages in the protocol buffer. If yes,
+ * it picks that up, flitisizes it into a number of flits and puts it into an output buffer
+ * and schedules the output link. On a wakeup it also checks whether there are flits in the
+ * input link. If yes, it picks them up and if the flit is a tail, the NI inserts the
+ * corresponding message into the protocol buffer. It also checks for credits being sent
+ * by the downstream router.
+ */
+
+void NetworkInterface_d::wakeup()
+{
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, m_id);
+ DEBUG_MSG(NETWORK_COMP, MedPrio, "NI WOKE UP");
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, g_eventQueue_ptr->getTime());
+
+ MsgPtr msg_ptr;
+
+ //Checking for messages coming from the protocol
+ for (int vnet = 0; vnet < m_virtual_networks; vnet++) // can pick up a message/cycle for each virtual net
+ {
+ while(inNode_ptr[vnet]->isReady()) // Is there a message waiting
+ {
+ msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
+ if(flitisizeMessage(msg_ptr, vnet)) // pop only once fully flitisized; otherwise retry this message later
+ {
+ inNode_ptr[vnet]->pop();
+ }
+ else
+ {
+ break; // no free output VC for this vnet right now
+ }
+ }
+ }
+
+ scheduleOutputLink(); // pick at most one staged flit to inject this cycle
+ checkReschedule(); // re-arm ourselves if any work remains
+
+/*********** Picking messages destined for this NI **********/
+
+ if(inNetLink->isReady())
+ {
+ flit_d *t_flit = inNetLink->consumeLink();
+ bool free_signal = false;
+ if(t_flit->get_type() == TAIL_ || t_flit->get_type() == HEAD_TAIL_) // last flit of a packet: the upstream VC can be freed
+ {
+ free_signal = true;
+ if(!NetworkConfig::isNetworkTesting()) // When we are doing network only testing, the messages do not have to be buffered into the message buffers
+ {
+ outNode_ptr[t_flit->get_vnet()]->enqueue(t_flit->get_msg_ptr(), 1); // enqueueing for protocol buffer. This is not required when doing network only testing
+ }
+ }
+ flit_d *credit_flit = new flit_d(t_flit->get_vc(), free_signal); // Simply send a credit back since we are not buffering this flit in the NI
+ creditQueue->insert(credit_flit);
+ g_eventQueue_ptr->scheduleEvent(m_ni_credit_link, 1);
+
+ m_net_ptr->increment_recieved_flits();
+ int network_delay = g_eventQueue_ptr->getTime() - t_flit->get_enqueue_time(); // time spent inside the network
+ int queueing_delay = t_flit->get_delay(); // time spent waiting before injection (set in flitisizeMessage)
+ m_net_ptr->increment_network_latency(network_delay);
+ m_net_ptr->increment_queueing_latency(queueing_delay);
+ delete t_flit;
+ }
+
+ /****************** Checking for credit link *******/
+
+ if(m_credit_link->isReady())
+ {
+ flit_d *t_flit = m_credit_link->consumeLink(); // a credit from the downstream router
+ m_out_vc_state[t_flit->get_vc()]->increment_credit();
+ if(t_flit->is_free_signal()) // downstream freed the whole VC, not just one buffer slot
+ {
+ m_out_vc_state[t_flit->get_vc()]->setState(IDLE_, g_eventQueue_ptr->getTime());
+ }
+ delete t_flit;
+ }
+}
+
+// This function look at the NI buffers and if some buffer has flits which are ready to traverse the link in the next cycle and also the downstream output vc associated with this flit has buffers left, the link is scheduled for the next cycle
+
+void NetworkInterface_d::scheduleOutputLink()
+{
+ int vc = m_vc_round_robin; // rotate the starting VC so every VC gets a fair chance across cycles
+ m_vc_round_robin++;
+ if(m_vc_round_robin == m_num_vcs)
+ m_vc_round_robin = 0;
+
+ for(int i = 0; i < m_num_vcs; i++)
+ {
+ vc++;
+ if(vc == m_num_vcs)
+ vc = 0;
+ if(m_ni_buffers[vc]->isReady() && m_out_vc_state[vc]->has_credits()) // models buffer backpressure
+ {
+ bool is_candidate_vc = true;
+ int t_vnet = get_vnet(vc);
+ int vc_base = t_vnet * m_vc_per_vnet;
+
+ if(m_net_ptr->isVNetOrdered(t_vnet)) // ordered vnet: only the VC holding the OLDEST pending packet may inject
+ {
+ for (int vc_offset = 0; vc_offset < m_vc_per_vnet; vc_offset++)
+ {
+ int t_vc = vc_base + vc_offset;
+ if(m_ni_buffers[t_vc]->isReady())
+ {
+ if(m_ni_enqueue_time[t_vc] < m_ni_enqueue_time[vc]) // some sibling VC has an older packet
+ {
+ is_candidate_vc = false;
+ break;
+ }
+ }
+ }
+ }
+ if(!is_candidate_vc)
+ continue;
+
+ m_out_vc_state[vc]->decrement_credit(); // consume one downstream buffer slot
+ flit_d *t_flit = m_ni_buffers[vc]->getTopFlit(); // Just removing the flit
+ t_flit->set_time(g_eventQueue_ptr->getTime() + 1);
+ outSrcQueue->insert(t_flit);
+ g_eventQueue_ptr->scheduleEvent(outNetLink, 1); // schedule the out link
+
+ if(t_flit->get_type() == TAIL_ || t_flit->get_type() == HEAD_TAIL_) // packet fully drained from this VC
+ {
+ m_ni_enqueue_time[vc] = INFINITE_; // reset the "oldest packet" timestamp sentinel
+ }
+ return; // at most one flit injected per cycle
+ }
+ }
+}
+
+int NetworkInterface_d::get_vnet(int vc)
+{
+ for(int i = 0; i < NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ {
+ if(vc >= (i*m_vc_per_vnet) && vc < ((i+1)*m_vc_per_vnet))
+ {
+ return i;
+ }
+ }
+ ERROR_MSG("Could not determine vc");
+ return -1;
+}
+
+void NetworkInterface_d::checkReschedule()
+{
+ for(int vnet = 0; vnet < m_virtual_networks; vnet++)
+ {
+ if(inNode_ptr[vnet]->isReady()) // Is there a message waiting
+ {
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+ return;
+ }
+ }
+ for(int vc = 0; vc < m_num_vcs; vc++)
+ {
+ if(m_ni_buffers[vc]->isReadyForNext())
+ {
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+ return;
+ }
+ }
+}
+
+void NetworkInterface_d::printConfig(ostream& out) const
+{
+ out << "[Network Interface " << m_id << "] - ";
+ out << "[inLink " << inNetLink->get_id() << "] - ";
+ out << "[outLink " << outNetLink->get_id() << "]" << endl;
+}
+
+void NetworkInterface_d::print(ostream& out) const
+{
+ out << "[Network Interface]";
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.hh
new file mode 100644
index 000000000..c776d343c
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.hh
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkInterface_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef NET_INTERFACE_D_H
+#define NET_INTERFACE_D_H
+
+#include "NetworkHeader.hh"
+#include "GarnetNetwork_d.hh"
+#include "Vector.hh"
+#include "Consumer.hh"
+#include "Message.hh"
+#include "NetworkLink_d.hh"
+#include "CreditLink_d.hh"
+#include "OutVcState_d.hh"
+
+class NetworkMessage;
+class MessageBuffer;
+class flitBuffer_d;
+
+class NetworkInterface_d : public Consumer { // bridge between a protocol node's MessageBuffers and the Garnet network
+public:
+ NetworkInterface_d(int id, int virtual_networks, GarnetNetwork_d* network_ptr);
+
+ ~NetworkInterface_d();
+
+ void addInPort(NetworkLink_d *in_link, CreditLink_d *credit_link); // link delivering flits to this NI + its return-credit link
+ void addOutPort(NetworkLink_d *out_link, CreditLink_d *credit_link); // link this NI injects on + link carrying credits back
+
+ void wakeup();
+ void addNode(Vector<MessageBuffer *> &inNode, Vector<MessageBuffer *> &outNode);
+ void printConfig(ostream& out) const;
+ void print(ostream& out) const;
+ int get_vnet(int vc); // map a global VC index to its virtual network
+
+private:
+/**************Data Members*************/
+ GarnetNetwork_d *m_net_ptr;
+ int m_virtual_networks, m_num_vcs, m_vc_per_vnet; // m_num_vcs == m_vc_per_vnet * m_virtual_networks
+ NodeID m_id;
+ Vector<OutVcState_d *> m_out_vc_state; // per-output-VC state/credit tracking, [vc]
+ Vector<int > m_vc_allocator; // per-vnet round-robin pointer for calculateVC()
+ int m_vc_round_robin; // For round robin scheduling in scheduleOutputLink()
+ flitBuffer_d *outSrcQueue; // For modelling link contention; allocated in addOutPort()
+ flitBuffer_d *creditQueue; // source queue feeding m_ni_credit_link
+
+ NetworkLink_d *inNetLink; // flits arriving at this NI
+ NetworkLink_d *outNetLink; // flits injected by this NI
+ CreditLink_d *m_credit_link; // credits arriving from downstream (consumed in wakeup)
+ CreditLink_d *m_ni_credit_link; // credits this NI sends back upstream
+
+ // Input Flit Buffers
+ Vector<flitBuffer_d *> m_ni_buffers; // The per-VC flit buffers which will serve the Consumer
+ Vector<Time > m_ni_enqueue_time; // per-VC injection timestamp; INFINITE_ when the VC has no pending packet
+
+ Vector<MessageBuffer *> inNode_ptr; // The Message buffers that take messages from the protocol
+ Vector<MessageBuffer *> outNode_ptr; // The Message buffers that provide messages to the protocol
+
+ bool flitisizeMessage(MsgPtr msg_ptr, int vnet); // split a message into flits; false if no free output VC
+ int calculateVC(int vnet); // round-robin free-VC search within a vnet
+ void scheduleOutputLink(); // inject at most one staged flit per cycle
+ void checkReschedule(); // re-arm wakeup if work remains
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.cc
new file mode 100644
index 000000000..26794ebf2
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkLink_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "NetworkLink_d.hh"
+#include "NetworkConfig.hh"
+#include "GarnetNetwork_d.hh"
+
+// Default-latency constructor (1-cycle link). FIX: also null out m_net_ptr,
+// which this overload previously left uninitialized (the 3-arg overload
+// assigns it); reading an indeterminate pointer later would be UB.
+NetworkLink_d::NetworkLink_d(int id)
+{
+ m_id = id;
+ m_latency = 1;
+ m_net_ptr = NULL; // FIX: was uninitialized in this constructor
+ m_flit_width = NetworkConfig::getFlitSize();
+
+ linkBuffer = new flitBuffer_d();
+ m_link_utilized = 0;
+ // one load counter per virtual channel across all virtual networks
+ m_vc_load.setSize(NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS);
+
+ for(int i = 0; i < NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ m_vc_load[i] = 0;
+}
+
+// Full constructor with explicit link latency and owning network pointer.
+// FIX: also initialize m_flit_width, which this overload previously left
+// uninitialized (the 1-arg overload sets it from NetworkConfig).
+NetworkLink_d::NetworkLink_d(int id, int link_latency, GarnetNetwork_d *net_ptr)
+{
+ m_net_ptr = net_ptr;
+ m_id = id;
+ m_latency = link_latency;
+ m_flit_width = NetworkConfig::getFlitSize(); // FIX: was uninitialized here
+ linkBuffer = new flitBuffer_d();
+ m_link_utilized = 0;
+ // one load counter per virtual channel across all virtual networks
+ m_vc_load.setSize(NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS);
+
+ for(int i = 0; i < NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ m_vc_load[i] = 0;
+}
+
+NetworkLink_d::~NetworkLink_d()
+{
+ // The link owns its pipeline buffer; release it on teardown.
+ delete linkBuffer;
+}
+
+// Register the object (input unit / NI) to be woken when a flit arrives.
+void NetworkLink_d::setLinkConsumer(Consumer *consumer)
+{
+ link_consumer = consumer;
+}
+
+// Register the sender-side staging queue that wakeup() drains flits from.
+void NetworkLink_d::setSourceQueue(flitBuffer_d *srcQueue)
+{
+ link_srcQueue = srcQueue;
+}
+
+// Move one ready flit from the source queue onto the link: time-stamp it with
+// the traversal latency, schedule the consumer to pick it up, and update the
+// utilization and per-VC load statistics.
+void NetworkLink_d::wakeup()
+{
+ if(link_srcQueue->isReady())
+ {
+ flit_d *t_flit = link_srcQueue->getTopFlit();
+ // the flit becomes visible at the far end m_latency cycles from now
+ t_flit->set_time(g_eventQueue_ptr->getTime() + m_latency);
+ linkBuffer->insert(t_flit);
+ g_eventQueue_ptr->scheduleEvent(link_consumer, m_latency);
+ m_link_utilized++;
+ m_vc_load[t_flit->get_vc()]++;
+ }
+}
+
+// Returns a copy of the per-virtual-channel flit counts carried by this link.
+Vector<int> NetworkLink_d::getVcLoad()
+{
+ return m_vc_load;
+}
+
+// Returns the total number of flits this link has carried.
+int NetworkLink_d::getLinkUtilization()
+{
+ return m_link_utilized;
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.hh
new file mode 100644
index 000000000..40695331e
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.hh
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkLink_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef NETWORK_LINK_D_H
+#define NETWORK_LINK_D_H
+
+#include "NetworkHeader.hh"
+#include "Consumer.hh"
+#include "flitBuffer_d.hh"
+#include "PrioHeap.hh"
+#include "power_bus.hh"
+
+class GarnetNetwork_d;
+
+// A unidirectional pipelined link between two network components. Flits are
+// staged in a sender-side source queue, traverse the link in m_latency
+// cycles, and are consumed at the far end via the inline accessors below.
+class NetworkLink_d : public Consumer {
+public:
+ NetworkLink_d(int id);
+ ~NetworkLink_d();
+
+ NetworkLink_d(int id, int link_latency, GarnetNetwork_d *net_ptr);
+ void setLinkConsumer(Consumer *consumer);
+ void setSourceQueue(flitBuffer_d *srcQueue);
+ void print(ostream& out) const{}
+ int getLinkUtilization(); // total flits carried
+ Vector<int> getVcLoad(); // per-VC flit counts
+ int get_id(){return m_id;}
+ void wakeup(); // moves one ready flit from the source queue onto the link
+
+ double calculate_offline_power(power_bus*);
+ double calculate_power();
+
+ // Receiver-side accessors into the link's pipeline buffer.
+ inline bool isReady()
+ {
+ return linkBuffer->isReady();
+ }
+ inline flit_d* peekLink()
+ {
+ return linkBuffer->peekTopFlit();
+ }
+ inline flit_d* consumeLink()
+ {
+ return linkBuffer->getTopFlit();
+ }
+
+protected:
+ int m_id;
+ int m_latency; // link traversal latency in cycles
+ GarnetNetwork_d *m_net_ptr;
+
+ flitBuffer_d *linkBuffer; // flits in flight on this link
+ Consumer *link_consumer; // woken when a flit arrives
+ flitBuffer_d *link_srcQueue; // sender-side staging queue
+ int m_link_utilized;
+ Vector<int > m_vc_load;
+ int m_flit_width;
+};
+
+#endif
+
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.cc
new file mode 100644
index 000000000..32cf8a51d
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * OutVCState_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "OutVcState_d.hh"
+#include "NetworkConfig.hh"
+#include "EventQueue.hh"
+
+OutVcState_d::OutVcState_d(int id)
+{
+ m_id = id;
+ // A fresh VC starts idle, valid from now, with a full complement of
+ // downstream buffer credits.
+ m_vc_state = IDLE_;
+ m_time = g_eventQueue_ptr->getTime();
+ m_credit_count = NetworkConfig::getBufferSize();
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.hh
new file mode 100644
index 000000000..8c07ec796
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.hh
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * OutVCState_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef OUT_VC_STATE_D_H
+#define OUT_VC_STATE_D_H
+
+#include "NetworkHeader.hh"
+
+// Bookkeeping for one virtual channel at an output port: which input
+// (port, vc) currently holds it, its allocation state, and how many free
+// buffer slots (credits) remain at the downstream router.
+class OutVcState_d {
+public:
+ OutVcState_d(int id);
+
+ int get_inport() {return m_in_port; }
+ int get_invc() { return m_in_vc; }
+ int get_credit_count() {return m_credit_count; }
+ void set_inport(int port) {m_in_port = port; }
+ void set_invc(int vc) {m_in_vc = vc; }
+
+ // True when the VC is in 'state' and that state took effect no later
+ // than request_time.
+ inline bool isInState(VC_state_type state, Time request_time)
+ {
+ return ((m_vc_state == state) && (request_time >= m_time) );
+ }
+
+ // Record a state transition effective from 'time'.
+ inline void setState(VC_state_type state, Time time)
+ {
+ m_vc_state = state;
+ m_time = time;
+ }
+
+ inline bool has_credits()
+ {
+ return (m_credit_count > 0);
+ }
+
+ inline void increment_credit()
+ {
+ m_credit_count++;
+ }
+
+ inline void decrement_credit()
+ {
+ m_credit_count--;
+ }
+
+private:
+ int m_id ;
+ Time m_time; // cycle from which m_vc_state is valid
+ VC_state_type m_vc_state;
+ int m_in_port; // input port currently routed to this output VC
+ int m_in_vc; // input VC currently routed to this output VC
+ int m_credit_count; // free buffer slots at the downstream router
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.cc
new file mode 100644
index 000000000..43096c314
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * OutputUnit_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "OutputUnit_d.hh"
+#include "Router_d.hh"
+#include "NetworkConfig.hh"
+
+OutputUnit_d::OutputUnit_d(int id, Router_d *router)
+{
+ m_id = id;
+ m_router = router;
+ m_num_vcs = m_router->get_num_vcs();
+ m_out_buffer = new flitBuffer_d();
+
+ // one state tracker per output virtual channel
+ for(int i = 0; i < m_num_vcs; i++)
+ {
+ m_outvc_state.insertAtBottom(new OutVcState_d(i));
+ }
+}
+
+OutputUnit_d::~OutputUnit_d()
+{
+ // free the output buffer and every per-VC state object
+ delete m_out_buffer;
+ m_outvc_state.deletePointers();
+}
+
+// A flit was sent on out_vc: consume one downstream credit and propagate the
+// new count back to the input unit currently holding this VC.
+void OutputUnit_d::decrement_credit(int out_vc)
+{
+ m_outvc_state[out_vc]->decrement_credit();
+ m_router->update_incredit(m_outvc_state[out_vc]->get_inport(), m_outvc_state[out_vc]->get_invc(), m_outvc_state[out_vc]->get_credit_count());
+}
+
+// Consumer callback for the credit link: absorb one arriving credit, bump the
+// credit count of the corresponding VC, propagate the new count to the input
+// unit, and return the VC to IDLE_ if the credit carried the free signal.
+void OutputUnit_d::wakeup()
+{
+ if(m_credit_link->isReady())
+ {
+ flit_d *t_flit = m_credit_link->consumeLink();
+ int out_vc = t_flit->get_vc();
+ m_outvc_state[out_vc]->increment_credit();
+ m_router->update_incredit(m_outvc_state[out_vc]->get_inport(), m_outvc_state[out_vc]->get_invc(), m_outvc_state[out_vc]->get_credit_count());
+
+ if(t_flit->is_free_signal())
+ set_vc_state(IDLE_, out_vc);
+
+ // the credit flit has served its purpose
+ delete t_flit;
+ }
+}
+
+// Expose the output buffer so the outgoing link can use it as source queue.
+flitBuffer_d* OutputUnit_d::getOutQueue()
+{
+ return m_out_buffer;
+}
+
+// Attach the outgoing data link for this output port.
+void OutputUnit_d::set_out_link(NetworkLink_d *link)
+{
+ m_out_link = link;
+}
+
+// Attach the incoming credit link for this output port.
+void OutputUnit_d::set_credit_link(CreditLink_d *credit_link)
+{
+ m_credit_link = credit_link;
+}
+
+// Allocate output VC 'vc' to input (in_port, in_vc): mark it active from the
+// next cycle and seed the input unit with the VC's current credit count.
+void OutputUnit_d::update_vc(int vc, int in_port, int in_vc)
+{
+ m_outvc_state[vc]->setState(ACTIVE_, g_eventQueue_ptr->getTime() + 1);
+ m_outvc_state[vc]->set_inport(in_port);
+ m_outvc_state[vc]->set_invc(in_vc);
+ m_router->update_incredit(in_port, in_vc, m_outvc_state[vc]->get_credit_count());
+}
+
+// Dump this output unit's id and attached link for configuration logs.
+void OutputUnit_d::printConfig(ostream& out)
+{
+ out << endl;
+ out << "OutputUnit Configuration" << endl;
+ out << "---------------------" << endl;
+ out << "id = " << m_id << endl;
+ out << "Out link is " << m_out_link->get_id() << endl;
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.hh
new file mode 100644
index 000000000..f23c06559
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.hh
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * OutputUnit_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef OUTPUT_UNIT_D_H
+#define OUTPUT_UNIT_D_H
+
+#include "NetworkHeader.hh"
+#include "Consumer.hh"
+#include "flitBuffer_d.hh"
+#include "OutVcState_d.hh"
+#include "NetworkLink_d.hh"
+#include "CreditLink_d.hh"
+
+class Router_d;
+
+// One output port of a router: buffers flits bound for the outgoing link,
+// tracks the allocation state and credits of each output VC, and consumes
+// credits arriving on the credit link.
+class OutputUnit_d : public Consumer {
+public:
+ OutputUnit_d(int id, Router_d *router);
+ ~OutputUnit_d();
+ void set_out_link(NetworkLink_d *link);
+ void set_credit_link(CreditLink_d *credit_link);
+ void wakeup(); // processes one credit from the credit link
+ flitBuffer_d* getOutQueue();
+ void printConfig(ostream& out);
+ void update_vc(int vc, int in_port, int in_vc); // allocate VC to an input
+ void print(ostream& out) const {};
+ int get_credit_cnt(int vc) { return m_outvc_state[vc]->get_credit_count(); }
+ void decrement_credit(int out_vc);
+
+ inline int get_outlink_id()
+ {
+ return m_out_link->get_id();
+ }
+
+ // State change takes effect from the next cycle.
+ inline void set_vc_state(VC_state_type state, int vc)
+ {
+ m_outvc_state[vc]->setState(state, g_eventQueue_ptr->getTime() + 1);
+ }
+ inline bool is_vc_idle(int vc)
+ {
+ return (m_outvc_state[vc]->isInState(IDLE_, g_eventQueue_ptr->getTime()) );
+ }
+ // Queue a flit for the link and wake the link next cycle.
+ inline void insert_flit(flit_d *t_flit)
+ {
+ m_out_buffer->insert(t_flit);
+ g_eventQueue_ptr->scheduleEvent(m_out_link, 1);
+ }
+
+
+
+private:
+ int m_id;
+ int m_num_vcs;
+ Router_d *m_router;
+ NetworkLink_d *m_out_link;
+ CreditLink_d *m_credit_link;
+
+ flitBuffer_d *m_out_buffer; // This is for the network link to consume
+ Vector<OutVcState_d *> m_outvc_state; // vc state of downstream router
+
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.cc
new file mode 100644
index 000000000..4f61487c7
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.cc
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Router_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "Router_d.hh"
+#include "GarnetNetwork_d.hh"
+#include "NetworkLink_d.hh"
+#include "CreditLink_d.hh"
+#include "InputUnit_d.hh"
+#include "OutputUnit_d.hh"
+#include "RoutingUnit_d.hh"
+#include "VCallocator_d.hh"
+#include "SWallocator_d.hh"
+#include "Switch_d.hh"
+
+Router_d::Router_d(int id, GarnetNetwork_d *network_ptr)
+{
+ m_id = id;
+ m_network_ptr = network_ptr;
+ m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS;
+ m_vc_per_vnet = NetworkConfig::getVCsPerClass();
+ m_num_vcs = m_virtual_networks*m_vc_per_vnet;
+ m_flit_width = NetworkConfig::getFlitSize();
+
+ // Pipeline stages: route computation, VC allocation, switch allocation,
+ // and the crossbar itself.
+ m_routing_unit = new RoutingUnit_d(this);
+ m_vc_alloc = new VCallocator_d(this);
+ m_sw_alloc = new SWallocator_d(this);
+ m_switch = new Switch_d(this);
+
+ // ports are added later via addInPort()/addOutPort()
+ m_input_unit.clear();
+ m_output_unit.clear();
+
+ // activity counters used for performance/power reporting
+ buf_read_count = 0;
+ buf_write_count = 0;
+ crossbar_count = 0;
+ vc_local_arbit_count = 0;
+ vc_global_arbit_count = 0;
+ sw_local_arbit_count = 0;
+ sw_global_arbit_count = 0;
+}
+
+Router_d::~Router_d()
+{
+ // the router owns its port units and pipeline-stage objects
+ m_input_unit.deletePointers();
+ m_output_unit.deletePointers();
+ delete m_routing_unit;
+ delete m_vc_alloc;
+ delete m_sw_alloc;
+ delete m_switch;
+}
+
+// Second-phase initialization of the allocators and switch (invoked after
+// construction; presumably once all ports have been added — confirm caller).
+void Router_d::init()
+{
+ m_vc_alloc->init();
+ m_sw_alloc->init();
+ m_switch->init();
+}
+
+// Create a new input port wired to the given data and credit links. The port
+// number is simply the next free slot.
+void Router_d::addInPort(NetworkLink_d *in_link, CreditLink_d *credit_link)
+{
+ int port_num = m_input_unit.size();
+ InputUnit_d *input_unit = new InputUnit_d(port_num, this);
+
+ input_unit->set_in_link(in_link);
+ input_unit->set_credit_link(credit_link);
+ // arriving flits wake the input unit; credits flow back out of its queue
+ in_link->setLinkConsumer(input_unit);
+ credit_link->setSourceQueue(input_unit->getCreditQueue());
+
+ m_input_unit.insertAtBottom(input_unit);
+}
+
+// Create a new output port wired to the given links and register its routing
+// table entry and link weight with the routing unit.
+void Router_d::addOutPort(NetworkLink_d *out_link, const NetDest& routing_table_entry, int link_weight, CreditLink_d *credit_link)
+{
+ int port_num = m_output_unit.size();
+ OutputUnit_d *output_unit = new OutputUnit_d(port_num, this);
+
+ output_unit->set_out_link(out_link);
+ output_unit->set_credit_link(credit_link);
+ // returning credits wake the output unit; the link drains its out queue
+ credit_link->setLinkConsumer(output_unit);
+ out_link->setSourceQueue(output_unit->getOutQueue());
+
+ m_output_unit.insertAtBottom(output_unit);
+
+ // routing table index parallels the output port number
+ m_routing_unit->addRoute(routing_table_entry);
+ m_routing_unit->addWeight(link_weight);
+}
+
+// Forward a route-computation request for a flit to the routing unit.
+void Router_d::route_req(flit_d *t_flit, InputUnit_d *in_unit, int invc)
+{
+ m_routing_unit->RC_stage(t_flit, in_unit, invc);
+}
+// Schedule the VC allocator to run next cycle.
+void Router_d::vcarb_req()
+{
+ g_eventQueue_ptr->scheduleEvent(m_vc_alloc, 1);
+}
+// Schedule the switch allocator to run next cycle.
+void Router_d::swarb_req()
+{
+ g_eventQueue_ptr->scheduleEvent(m_sw_alloc, 1);
+}
+// Relay an updated downstream credit count to the owning input VC.
+void Router_d::update_incredit(int in_port, int in_vc, int credit)
+{
+ m_input_unit[in_port]->update_credit(in_vc, credit);
+}
+// Record a switch-allocation winner for 'inport' and wake the crossbar next
+// cycle to move the flit.
+void Router_d::update_sw_winner(int inport, flit_d *t_flit)
+{
+ m_switch->update_sw_winner(inport, t_flit);
+ g_eventQueue_ptr->scheduleEvent(m_switch, 1);
+}
+
+// Aggregate activity counters from the router's sub-units into the router's
+// own statistics (used for performance/power reporting).
+void Router_d::calculate_performance_numbers()
+{
+ for(int i = 0; i < m_input_unit.size(); i++)
+ {
+ buf_read_count += m_input_unit[i]->get_buf_read_count();
+ buf_write_count += m_input_unit[i]->get_buf_write_count();
+ }
+ crossbar_count = m_switch->get_crossbar_count();
+ vc_local_arbit_count = m_vc_alloc->get_local_arbit_count();
+ vc_global_arbit_count = m_vc_alloc->get_global_arbit_count();
+ sw_local_arbit_count = m_sw_alloc->get_local_arbit_count();
+ sw_global_arbit_count = m_sw_alloc->get_global_arbit_count();
+}
+
+// Dump the router id and the link ids attached to each port.
+void Router_d::printConfig(ostream& out)
+{
+ out << "[Router " << m_id << "] :: " << endl;
+ out << "[inLink - ";
+ for(int i = 0;i < m_input_unit.size(); i++)
+ out << m_input_unit[i]->get_inlink_id() << " - ";
+ out << "]" << endl;
+ out << "[outLink - ";
+ for(int i = 0;i < m_output_unit.size(); i++)
+ out << m_output_unit[i]->get_outlink_id() << " - ";
+ out << "]" << endl;
+}
+
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.hh
new file mode 100644
index 000000000..215055ea6
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.hh
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Router_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+#ifndef ROUTER_D_H
+#define ROUTER_D_H
+
+#include "NetworkHeader.hh"
+#include "Vector.hh"
+#include "flit_d.hh"
+#include "NetDest.hh"
+#include "power_router_init.hh"
+
+class GarnetNetwork_d;
+class NetworkLink_d;
+class CreditLink_d;
+class InputUnit_d;
+class OutputUnit_d;
+class RoutingUnit_d;
+class VCallocator_d;
+class SWallocator_d;
+class Switch_d;
+
+// A router in the Garnet fixed-pipeline network: owns its input/output port
+// units and the four pipeline-stage objects (routing unit, VC allocator,
+// switch allocator, crossbar), and aggregates activity statistics.
+class Router_d {
+public:
+ Router_d(int id, GarnetNetwork_d *network_ptr);
+
+ ~Router_d();
+
+ void init(); // second-phase init of allocators and switch
+ void addInPort(NetworkLink_d *link, CreditLink_d *credit_link);
+ void addOutPort(NetworkLink_d *link, const NetDest& routing_table_entry, int link_weight, CreditLink_d *credit_link);
+ int get_num_vcs() { return m_num_vcs; }
+ int get_vc_per_vnet() {return m_vc_per_vnet; }
+ int get_num_inports() { return m_input_unit.size(); }
+ int get_num_outports() { return m_output_unit.size(); }
+ void printConfig(ostream& out);
+ int get_id() { return m_id; }
+ GarnetNetwork_d* get_net_ptr() { return m_network_ptr; }
+
+ Vector<InputUnit_d *>& get_inputUnit_ref() { return m_input_unit; }
+ Vector<OutputUnit_d *>& get_outputUnit_ref() { return m_output_unit; }
+
+ // Hooks used by the pipeline stages to drive each other.
+ void update_sw_winner(int inport, flit_d *t_flit);
+ void update_incredit(int in_port, int in_vc, int credit);
+ void route_req(flit_d *t_flit, InputUnit_d* in_unit, int invc);
+ void vcarb_req();
+ void swarb_req();
+
+ void power_router_initialize(power_router *router, power_router_info *info);
+ double calculate_power();
+ double calculate_offline_power(power_router*, power_router_info*);
+ void calculate_performance_numbers();
+
+private:
+ int m_id;
+ int m_virtual_networks, m_num_vcs, m_vc_per_vnet;
+ GarnetNetwork_d *m_network_ptr;
+ int m_flit_width;
+
+ // activity counters aggregated by calculate_performance_numbers()
+ double buf_read_count, buf_write_count, crossbar_count, vc_local_arbit_count, vc_global_arbit_count, sw_local_arbit_count, sw_global_arbit_count;
+
+ Vector<InputUnit_d *> m_input_unit;
+ Vector<OutputUnit_d *> m_output_unit;
+ RoutingUnit_d *m_routing_unit;
+ VCallocator_d *m_vc_alloc;
+ SWallocator_d *m_sw_alloc;
+ Switch_d *m_switch;
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/RoutingUnit_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/RoutingUnit_d.cc
new file mode 100644
index 000000000..ccced1d05
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/RoutingUnit_d.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Routingunit_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "RoutingUnit_d.hh"
+#include "Router_d.hh"
+#include "InputUnit_d.hh"
+#include "NetworkMessage.hh"
+
+RoutingUnit_d::RoutingUnit_d(Router_d *router)
+{
+ m_router = router;
+ // routes and weights are appended later, one per output port
+ m_routing_table.clear();
+ m_weight_table.clear();
+}
+
+// Append the destination set reachable through the next output port.
+void RoutingUnit_d::addRoute(const NetDest& routing_table_entry)
+{
+ m_routing_table.insertAtBottom(routing_table_entry);
+}
+
+// Append the weight for the next output port (lower weight is preferred).
+void RoutingUnit_d::addWeight(int link_weight)
+{
+ m_weight_table.insertAtBottom(link_weight);
+}
+
+// Route-computation pipeline stage: compute the output port for the flit,
+// record it in the input VC, advance the flit to the VA stage, and request
+// VC allocation from the router.
+void RoutingUnit_d::RC_stage(flit_d *t_flit, InputUnit_d *in_unit, int invc)
+{
+ int outport = routeCompute(t_flit);
+ in_unit->updateRoute(invc, outport);
+ t_flit->advance_stage(VA_);
+ m_router->vcarb_req();
+}
+
+// Table-based route computation: among the output links whose reachable
+// destination set intersects this message's destination, pick the one with
+// the strictly minimum weight. Aborts the simulation if no route exists.
+// FIX: exit(0) reported success to the OS on a fatal routing error; use
+// exit(1) so the failure is observable to scripts/CI.
+int RoutingUnit_d::routeCompute(flit_d *t_flit)
+{
+ MsgPtr msg_ptr = t_flit->get_msg_ptr();
+ NetworkMessage* net_msg_ptr = NULL;
+ net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
+ NetDest msg_destination = net_msg_ptr->getInternalDestination();
+
+ int output_link = -1;
+ int min_weight = INFINITE_;
+
+ for(int link = 0; link < m_routing_table.size(); link++)
+ {
+ if (msg_destination.intersectionIsNotEmpty(m_routing_table[link]))
+ {
+ // keep only strictly better links; earlier link wins ties
+ if(m_weight_table[link] >= min_weight)
+ continue;
+ output_link = link;
+ min_weight = m_weight_table[link];
+ }
+ }
+ if(output_link == -1)
+ {
+ ERROR_MSG("Fatal Error:: No Route exists from this Router.");
+ exit(1); // FIX: was exit(0), which signalled success on a fatal error
+ }
+ return output_link;
+
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/RoutingUnit_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/RoutingUnit_d.hh
new file mode 100644
index 000000000..f6eb9f906
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/RoutingUnit_d.hh
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * RoutingUnit_d.hh
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef ROUTING_UNIT_D_H
+#define ROUTING_UNIT_D_H
+
+#include "NetworkHeader.hh"
+#include "Consumer.hh"
+#include "flit_d.hh"
+#include "NetDest.hh"
+
+class InputUnit_d;
+class Router_d;
+
+// Routing unit of a garnet fixed-pipeline router. Holds a
+// per-output-link routing table (destination sets) and matching link
+// weights, and performs the route-compute (RC) pipeline stage.
+class RoutingUnit_d {
+public:
+	RoutingUnit_d(Router_d *router);
+	void addRoute(const NetDest& routing_table_entry);	// register destinations of next output link
+	int routeCompute(flit_d *t_flit);	// min-weight link containing the flit's destination
+	void addWeight(int link_weight);	// register weight of next output link
+	void RC_stage(flit_d *t_flit, InputUnit_d *in_unit, int invc);	// RC pipeline stage
+
+private:
+	Router_d *m_router;
+	Vector<NetDest > m_routing_table;	// entry i = destinations reachable via link i
+	Vector<int > m_weight_table;	// entry i = weight of link i
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.cc
new file mode 100644
index 000000000..7f6507047
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.cc
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * SWallocator_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "SWallocator_d.hh"
+#include "Router_d.hh"
+#include "InputUnit_d.hh"
+#include "OutputUnit_d.hh"
+#include "GarnetNetwork_d.hh"
+
+// Construct the switch allocator for the given router and zero its
+// arbitration activity counters; table sizing happens later in init().
+SWallocator_d::SWallocator_d(Router_d *router)
+	: m_router(router)
+{
+	m_num_vcs = router->get_num_vcs();
+	m_vc_per_vnet = router->get_vc_per_vnet();
+	m_local_arbiter_activity = 0;
+	m_global_arbiter_activity = 0;
+}
+
+// Late initialisation, run once the router knows its final port
+// counts: cache the input/output units, size the per-port round-robin
+// pointers and the [outport][inport] request/winner matrices, and
+// clear all pending requests.
+void SWallocator_d::init()
+{
+	m_input_unit = m_router->get_inputUnit_ref();
+	m_output_unit = m_router->get_outputUnit_ref();
+
+	m_num_inports = m_router->get_num_inports();
+	m_num_outports = m_router->get_num_outports();
+	m_round_robin_outport.setSize(m_num_outports);
+	m_round_robin_inport.setSize(m_num_inports);
+	m_port_req.setSize(m_num_outports);
+	m_vc_winners.setSize(m_num_outports);
+
+	for(int i = 0; i < m_num_inports; i++)
+	{
+		m_round_robin_inport[i] = 0;
+	}
+
+	for(int i = 0; i < m_num_outports; i++)
+	{
+		m_port_req[i].setSize(m_num_inports);
+		m_vc_winners[i].setSize(m_num_inports);
+
+		m_round_robin_outport[i] = 0;
+
+		for(int j = 0; j < m_num_inports; j++)
+		{
+			m_port_req[i][j] = false; // [outport][inport]
+		}
+	}
+}
+
+// One switch-allocation cycle: stage 1 picks a winning VC per input
+// port, stage 2 picks a winning input per output port; the request
+// matrix is then cleared and the allocator reschedules itself if more
+// work is pending for the next cycle.
+void SWallocator_d::wakeup()
+{
+	arbitrate_inports(); // First stage of allocation
+	arbitrate_outports(); // Second stage of allocation
+
+	clear_request_vector();
+	check_for_wakeup();
+
+}
+
+// Stage 1 (input arbitration): for each input port, scan its VCs in
+// round-robin order starting one past last cycle's pointer, and pick
+// the first VC that is in the SA stage, has a downstream credit, and
+// passes the ordered-vnet check; record a request for its outport.
+void SWallocator_d::arbitrate_inports()
+{
+	// First I will do round robin arbitration on a set of input vc requests
+	for(int inport = 0; inport < m_num_inports; inport++)
+	{
+		int invc = m_round_robin_inport[inport];
+		m_round_robin_inport[inport]++;
+
+		// advance the round-robin pointer for next cycle (wraps at m_num_vcs)
+		if(m_round_robin_inport[inport] >= m_num_vcs)
+			m_round_robin_inport[inport] = 0;
+		for(int j = 0; j < m_num_vcs; j++)
+		{
+			invc++;
+			if(invc >= m_num_vcs)
+				invc = 0;
+			if(m_input_unit[inport]->need_stage(invc, ACTIVE_, SA_) && m_input_unit[inport]->has_credits(invc))
+			{
+				if(is_candidate_inport(inport, invc))
+				{
+					int outport = m_input_unit[inport]->get_route(invc);
+					m_local_arbiter_activity++;
+					m_port_req[outport][inport] = true;
+					m_vc_winners[outport][inport]= invc;
+					break; // got one vc winner for this port
+				}
+			}
+		}
+	}
+}
+
+// Decide whether (inport, invc) may bid in switch allocation this
+// cycle. On an ordered vnet, a VC must yield to any other VC of the
+// same vnet that targets the same outport and was enqueued earlier,
+// so flits leave the router in arrival order.
+bool SWallocator_d::is_candidate_inport(int inport, int invc)
+{
+	int outport = m_input_unit[inport]->get_route(invc);
+	int t_enqueue_time = m_input_unit[inport]->get_enqueue_time(invc);
+	int t_vnet = get_vnet(invc);
+	int vc_base = t_vnet*m_vc_per_vnet;
+	if((m_router->get_net_ptr())->isVNetOrdered(t_vnet))
+	{
+		for(int vc_offset = 0; vc_offset < m_vc_per_vnet; vc_offset++)
+		{
+			int temp_vc = vc_base + vc_offset;
+			if(m_input_unit[inport]->need_stage(temp_vc, ACTIVE_, SA_) && (m_input_unit[inport]->get_route(temp_vc) == outport) && (m_input_unit[inport]->get_enqueue_time(temp_vc) < t_enqueue_time))
+			{
+				// an older flit for the same outport must go first
+				return false; // (removed unreachable "break;" that followed this return)
+			}
+		}
+	}
+	return true;
+}
+
+
+// Stage 2 (output arbitration): each output port scans the input
+// ports round-robin and takes the first pending request. The winning
+// flit is removed from its input VC, advanced to switch traversal
+// (ST) for next cycle, an output credit is consumed, and a credit is
+// returned upstream -- marking the input VC idle on TAIL/HEAD_TAIL.
+void SWallocator_d::arbitrate_outports()
+{
+// now I have a set of input vc requests for output vcs. Again do round robin arbitration on these requests
+	for(int outport = 0; outport < m_num_outports; outport++)
+	{
+		int in_port = m_round_robin_outport[outport];
+		m_round_robin_outport[outport]++;
+
+		// NOTE(review): the round-robin pointer wraps against
+		// m_num_outports although it indexes input ports; benign
+		// whenever m_num_outports >= m_num_inports -- confirm.
+		if(m_round_robin_outport[outport] >= m_num_outports)
+			m_round_robin_outport[outport] = 0;
+
+		for(int inport = 0; inport < m_num_inports; inport++)
+		{
+			in_port++;
+			if(in_port >= m_num_inports)
+				in_port = 0;
+			if(m_port_req[outport][in_port]) // This Inport has a request this cycle for this port
+			{
+				m_port_req[outport][in_port] = false;
+				int invc = m_vc_winners[outport][in_port];
+				int outvc = m_input_unit[in_port]->get_outvc(invc);
+				flit_d *t_flit = m_input_unit[in_port]->getTopFlit(invc); // removes flit from Input Unit
+				t_flit->advance_stage(ST_);
+				t_flit->set_vc(outvc);
+				t_flit->set_outport(outport);
+				t_flit->set_time(g_eventQueue_ptr->getTime() + 1);
+				m_output_unit[outport]->decrement_credit(outvc);
+				m_router->update_sw_winner(in_port, t_flit);
+				m_global_arbiter_activity++;
+
+				if((t_flit->get_type() == TAIL_) || t_flit->get_type() == HEAD_TAIL_)
+				{
+					m_input_unit[in_port]->increment_credit(invc, true); // Send a credit back along with the information that this VC is not idle
+					assert(m_input_unit[in_port]->isReady(invc) == false); // This Input VC should now be empty
+
+					m_input_unit[in_port]->set_vc_state(IDLE_, invc);
+					m_input_unit[in_port]->set_enqueue_time(invc, INFINITE_);
+				}
+				else
+				{
+					m_input_unit[in_port]->increment_credit(invc, false); // Send a credit back but do not indicate that the VC is idle
+				}
+				break; // got a in request for this outport
+			}
+		}
+	}
+}
+
+// If any input VC will still be in the switch-allocation (SA) stage
+// next cycle, schedule this allocator to wake up again in one cycle.
+void SWallocator_d::check_for_wakeup()
+{
+	for(int inport = 0; inport < m_num_inports; inport++)
+	{
+		for(int vc = 0; vc < m_num_vcs; vc++)
+		{
+			if(!m_input_unit[inport]->need_stage_nextcycle(vc, ACTIVE_, SA_))
+				continue;
+			g_eventQueue_ptr->scheduleEvent(this, 1);
+			return;
+		}
+	}
+}
+
+// Map an input VC index to its virtual network. VCs are grouped in
+// contiguous blocks of m_vc_per_vnet per vnet, so the vnet is simply
+// the block index -- computed in O(1) instead of the old linear scan.
+// Out-of-range VCs are an error, exactly as before.
+int SWallocator_d::get_vnet(int invc)
+{
+	int vnet = invc/m_vc_per_vnet;
+	if(invc < 0 || vnet >= NUMBER_OF_VIRTUAL_NETWORKS)
+	{
+		ERROR_MSG("Could not determine vc");
+		return -1;
+	}
+	return vnet;
+}
+
+// Drop every pending switch request; run at the end of each
+// allocation cycle so stale requests never carry over.
+void SWallocator_d::clear_request_vector()
+{
+	for(int outport = 0; outport < m_num_outports; outport++)
+	{
+		for(int inport = 0; inport < m_num_inports; inport++)
+			m_port_req[outport][inport] = false;
+	}
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.hh
new file mode 100644
index 000000000..ed8389650
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.hh
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * SWallocator_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef SW_ALLOCATOR_D_H
+#define SW_ALLOCATOR_D_H
+
+#include "NetworkHeader.hh"
+#include "Consumer.hh"
+
+class Router_d;
+class InputUnit_d;
+class OutputUnit_d;
+
+// Two-stage switch (crossbar) allocator for a garnet router.
+// Stage 1: each input port picks one requesting VC round-robin.
+// Stage 2: each output port picks one requesting input round-robin;
+// the winner's flit is handed to the switch and a credit is returned.
+class SWallocator_d : public Consumer {
+public:
+	SWallocator_d(Router_d *router);
+	void wakeup();	// run both allocation stages for this cycle
+	void init();	// late init once router port counts are known
+	void clear_request_vector();	// drop this cycle's requests
+	void check_for_wakeup();	// reschedule if work remains next cycle
+	int get_vnet (int invc);	// vnet that VC <invc> belongs to
+	void print(ostream& out) const {};
+	void arbitrate_inports();	// stage 1
+	void arbitrate_outports();	// stage 2
+	bool is_candidate_inport(int inport, int invc);	// ordered-vnet check
+	inline double get_local_arbit_count()
+	{
+		return m_local_arbiter_activity;
+	}
+	inline double get_global_arbit_count()
+	{
+		return m_global_arbiter_activity;
+	}
+
+private:
+	int m_num_inports, m_num_outports;
+	int m_num_vcs, m_vc_per_vnet;
+
+	double m_local_arbiter_activity, m_global_arbiter_activity;
+
+	Router_d *m_router;
+	Vector<int > m_round_robin_outport;	// per-outport round-robin pointer (stage 2)
+	Vector<int > m_round_robin_inport;	// per-inport round-robin pointer (stage 1)
+	Vector<Vector<bool > > m_port_req;	// [outport][inport] request matrix
+	Vector<Vector<int > > m_vc_winners; // a list for each outport
+	Vector<InputUnit_d *> m_input_unit;
+	Vector<OutputUnit_d *> m_output_unit;
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/Switch_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/Switch_d.cc
new file mode 100644
index 000000000..be5469696
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/Switch_d.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Switch_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "Switch_d.hh"
+#include "Router_d.hh"
+#include "OutputUnit_d.hh"
+
+// Construct the crossbar switch for the given router; the crossbar
+// activity counter starts at zero and buffers are allocated in init().
+Switch_d::Switch_d(Router_d *router)
+	: m_router(router)
+{
+	m_num_vcs = router->get_num_vcs();
+	m_crossbar_activity = 0;
+}
+
+// Free the per-inport switch buffers allocated in init().
+Switch_d::~Switch_d()
+{
+	m_switch_buffer.deletePointers();
+}
+
+// Late initialisation (after the router knows its port counts):
+// cache the output units and allocate one flit buffer per inport.
+void Switch_d::init()
+{
+	m_output_unit = m_router->get_outputUnit_ref();
+	m_num_inports = m_router->get_num_inports();
+
+	m_switch_buffer.setSize(m_num_inports);
+	for(int inport = 0; inport < m_num_inports; inport++)
+		m_switch_buffer[inport] = new flitBuffer_d();
+}
+
+// Switch-traversal (ST) stage: for every inport buffer with a ready
+// flit in the ST stage, move the flit across the crossbar into its
+// output unit (which wakes the outgoing link) stamped for next cycle,
+// then reschedule if more flits will be ready.
+void Switch_d::wakeup()
+{
+	DEBUG_MSG(NETWORK_COMP, HighPrio, "Switch woke up");
+	DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime());
+
+	for(int inport = 0; inport < m_num_inports; inport++)
+	{
+		if(!m_switch_buffer[inport]->isReady())
+			continue;
+		flit_d *t_flit = m_switch_buffer[inport]->peekTopFlit();
+		if(t_flit->is_stage(ST_))
+		{
+			int outport = t_flit->get_outport();
+			t_flit->advance_stage(LT_);	// next stage: link traversal
+			t_flit->set_time(g_eventQueue_ptr->getTime() + 1);
+			m_output_unit[outport]->insert_flit(t_flit); // This will take care of waking up the Network Link
+			m_switch_buffer[inport]->getTopFlit();	// now actually dequeue the flit we peeked
+			m_crossbar_activity++;
+		}
+	}
+	check_for_wakeup();
+}
+
+// Reschedule the switch for next cycle if any inport buffer will
+// have a flit ready by then.
+void Switch_d::check_for_wakeup()
+{
+	for(int inport = 0; inport < m_num_inports; inport++)
+	{
+		if(!m_switch_buffer[inport]->isReadyForNext())
+			continue;
+		g_eventQueue_ptr->scheduleEvent(this, 1);
+		break;
+	}
+}
+
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/Switch_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/Switch_d.hh
new file mode 100644
index 000000000..63f3995f5
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/Switch_d.hh
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Switch_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef SWITCH_D_H
+#define SWITCH_D_H
+
+#include "NetworkHeader.hh"
+#include "Consumer.hh"
+#include "flitBuffer_d.hh"
+
+class Router_d;
+class OutputUnit_d;
+
+// Crossbar switch of a garnet router: buffers switch-allocation
+// winners per inport and moves them to their output units during the
+// switch-traversal (ST) stage.
+class Switch_d : public Consumer {
+public:
+	Switch_d(Router_d *router);
+	~Switch_d();
+	void wakeup();	// perform ST for all ready flits this cycle
+	void init();	// late init: allocate per-inport buffers
+	void check_for_wakeup();	// reschedule if flits will be ready next cycle
+	void print(ostream& out) const {};
+	// Called by the switch allocator to queue a stage-2 winner.
+	inline void update_sw_winner(int inport, flit_d *t_flit)
+	{
+		m_switch_buffer[inport]->insert(t_flit);
+	}
+	inline double get_crossbar_count()
+	{
+		return m_crossbar_activity;
+	}
+
+
+private:
+	int m_num_vcs;
+	int m_num_inports;
+	double m_crossbar_activity;	// number of crossbar traversals (stats)
+	Router_d *m_router;
+	Vector<flitBuffer_d *> m_switch_buffer;	// one buffer per inport
+	Vector<OutputUnit_d *> m_output_unit;
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.cc
new file mode 100644
index 000000000..b444ebc02
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.cc
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * VCallocator_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "NetworkConfig.hh"
+#include "VCallocator_d.hh"
+#include "Router_d.hh"
+#include "InputUnit_d.hh"
+#include "OutputUnit_d.hh"
+#include "GarnetNetwork_d.hh"
+
+// Construct the VC allocator for the given router and zero its
+// arbitration activity counters; table sizing happens later in init().
+VCallocator_d::VCallocator_d(Router_d *router)
+	: m_router(router)
+{
+	m_num_vcs = router->get_num_vcs();
+	m_vc_per_vnet = router->get_vc_per_vnet();
+	m_local_arbiter_activity = 0;
+	m_global_arbiter_activity = 0;
+}
+
+// Late initialisation, run once the router knows its final port
+// counts: cache the input/output units, size the per-(inport,invc)
+// and per-(outport,outvc) round-robin state plus the 4-D request
+// matrix [outport][outvc][inport][invc], and clear all requests.
+void VCallocator_d::init()
+{
+	m_input_unit = m_router->get_inputUnit_ref();
+	m_output_unit = m_router->get_outputUnit_ref();
+
+	m_num_inports = m_router->get_num_inports();
+	m_num_outports = m_router->get_num_outports();
+	m_round_robin_invc.setSize(m_num_inports);
+	m_round_robin_outvc.setSize(m_num_outports);
+	m_outvc_req.setSize(m_num_outports);
+	m_outvc_is_req.setSize(m_num_outports);
+
+	for(int i = 0; i < m_num_inports; i++)
+	{
+		m_round_robin_invc[i].setSize(m_num_vcs);
+
+		for(int j = 0; j < m_num_vcs; j++)
+		{
+			m_round_robin_invc[i][j] = 0;
+		}
+	}
+
+	for(int i = 0; i < m_num_outports; i++)
+	{
+		m_round_robin_outvc[i].setSize(m_num_vcs);
+		m_outvc_req[i].setSize(m_num_vcs);
+		m_outvc_is_req[i].setSize(m_num_vcs);
+
+		for(int j = 0; j < m_num_vcs; j++)
+		{
+			// round-robin pointer is an (inport, invc offset) pair
+			m_round_robin_outvc[i][j].first = 0;
+			m_round_robin_outvc[i][j].second = 0;
+			m_outvc_is_req[i][j] = false;
+
+			m_outvc_req[i][j].setSize(m_num_inports);
+
+			for(int k = 0; k < m_num_inports; k++)
+			{
+				m_outvc_req[i][j][k].setSize(m_num_vcs);
+				for(int l = 0; l < m_num_vcs; l++)
+				{
+					m_outvc_req[i][j][k][l] = false;
+				}
+			}
+		}
+	}
+}
+
+// Clear all output-VC requests recorded this cycle. The
+// m_outvc_is_req flag lets us skip output VCs that received no
+// request at all, avoiding the inner double loop.
+void VCallocator_d::clear_request_vector()
+{
+	for(int outport = 0; outport < m_num_outports; outport++)
+	{
+		for(int outvc = 0; outvc < m_num_vcs; outvc++)
+		{
+			if(!m_outvc_is_req[outport][outvc])
+				continue;
+			m_outvc_is_req[outport][outvc] = false;
+			for(int inport = 0; inport < m_num_inports; inport++)
+			{
+				for(int invc = 0; invc < m_num_vcs; invc++)
+					m_outvc_req[outport][outvc][inport][invc] = false;
+			}
+		}
+	}
+}
+
+// One VC-allocation cycle: stage 1 lets each waiting input VC request
+// an output VC, stage 2 lets each requested output VC grant one
+// requester; then this cycle's requests are cleared and the allocator
+// reschedules itself if more work is pending.
+void VCallocator_d::wakeup()
+{
+	arbitrate_invcs(); // First stage of allocation
+	arbitrate_outvcs(); // Second stage of allocation
+
+	clear_request_vector();
+	check_for_wakeup();
+}
+
+// Decide whether (inport_iter, invc_iter) may request an output VC
+// this cycle. On an ordered vnet, a VC must wait for any same-vnet VC
+// at the same inport that targets the same outport and was enqueued
+// earlier, preserving arrival order.
+bool VCallocator_d::is_invc_candidate(int inport_iter, int invc_iter)
+{
+	int outport = m_input_unit[inport_iter]->get_route(invc_iter);
+	int vnet = get_vnet(invc_iter);
+	int t_enqueue_time = m_input_unit[inport_iter]->get_enqueue_time(invc_iter);
+
+	int invc_base = vnet*m_vc_per_vnet;
+
+	if((m_router->get_net_ptr())->isVNetOrdered(vnet))
+	{
+		for(int vc_offset = 0; vc_offset < m_vc_per_vnet; vc_offset++)
+		{
+			int temp_vc = invc_base + vc_offset;
+			if(m_input_unit[inport_iter]->need_stage(temp_vc, VC_AB_, VA_) && (m_input_unit[inport_iter]->get_route(temp_vc) == outport) && (m_input_unit[inport_iter]->get_enqueue_time(temp_vc) < t_enqueue_time))
+			{
+				return false;	// an older flit for the same outport goes first
+			}
+		}
+	}
+	return true;
+}
+
+// Stage 1 body: starting one past this input VC's round-robin
+// pointer, scan the output VCs of its vnet at the routed outport and
+// request the first idle one. At most one request is lodged per input
+// VC per cycle; the grant happens later in arbitrate_outvcs().
+void VCallocator_d::select_outvc(int inport_iter, int invc_iter)
+{
+	int outport = m_input_unit[inport_iter]->get_route(invc_iter);
+	int vnet = get_vnet(invc_iter);
+	int outvc_base = vnet*m_vc_per_vnet;
+	int num_vcs_per_vnet = m_vc_per_vnet;
+
+	int outvc_offset = m_round_robin_invc[inport_iter][invc_iter];
+	m_round_robin_invc[inport_iter][invc_iter]++;
+
+	// advance the round-robin pointer for next time (wraps per vnet)
+	if(m_round_robin_invc[inport_iter][invc_iter] >= num_vcs_per_vnet)
+		m_round_robin_invc[inport_iter][invc_iter] = 0;
+
+	for(int outvc_offset_iter = 0; outvc_offset_iter < num_vcs_per_vnet; outvc_offset_iter++)
+	{
+		outvc_offset++;
+		if(outvc_offset >= num_vcs_per_vnet)
+			outvc_offset = 0;
+		int outvc = outvc_base + outvc_offset;
+		if(m_output_unit[outport]->is_vc_idle(outvc))
+		{
+			m_local_arbiter_activity++;
+			m_outvc_req[outport][outvc][inport_iter][invc_iter] = true;
+			if(!m_outvc_is_req[outport][outvc])
+				m_outvc_is_req[outport][outvc] = true;
+			return; // out vc acquired
+		}
+	}
+}
+
+// Stage 1 of VC allocation: every input VC waiting in the VA stage
+// that passes the ordered-vnet check picks an output VC to request
+// via select_outvc().
+void VCallocator_d::arbitrate_invcs()
+{
+	for(int inport = 0; inport < m_num_inports; inport++)
+	{
+		for(int invc = 0; invc < m_num_vcs; invc++)
+		{
+			if(!m_input_unit[inport]->need_stage(invc, VC_AB_, VA_))
+				continue;
+			if(is_invc_candidate(inport, invc))
+				select_outvc(inport, invc);
+		}
+	}
+}
+
+// Stage 2: each output VC that received requests picks one winning
+// (inport, invc) in round-robin order -- the pointer is an (inport,
+// invc-offset) pair advanced every cycle -- grants the output VC to
+// that input VC, marks it active, and asks the router to run switch
+// allocation.
+void VCallocator_d::arbitrate_outvcs()
+{
+	for(int outport_iter = 0; outport_iter < m_num_outports; outport_iter++)
+	{
+		for(int outvc_iter = 0; outvc_iter < m_num_vcs; outvc_iter++)
+		{
+			if(!m_outvc_is_req[outport_iter][outvc_iter]) // No requests for this outvc in this cycle
+				continue;
+
+			int inport = m_round_robin_outvc[outport_iter][outvc_iter].first;
+			int invc_offset = m_round_robin_outvc[outport_iter][outvc_iter].second;
+			int vnet = get_vnet(outvc_iter);
+			int invc_base = vnet*m_vc_per_vnet;
+			int num_vcs_per_vnet = m_vc_per_vnet;
+
+			// advance the (inport, offset) round-robin pointer for next cycle
+			m_round_robin_outvc[outport_iter][outvc_iter].second++;
+			if(m_round_robin_outvc[outport_iter][outvc_iter].second >= num_vcs_per_vnet)
+			{
+				m_round_robin_outvc[outport_iter][outvc_iter].second = 0;
+				m_round_robin_outvc[outport_iter][outvc_iter].first++;
+				if(m_round_robin_outvc[outport_iter][outvc_iter].first >= m_num_inports)
+					m_round_robin_outvc[outport_iter][outvc_iter].first = 0;
+			}
+			// scan every (inport, invc-of-this-vnet) pair once, starting
+			// one past the saved pointer
+			for(int in_iter = 0; in_iter < m_num_inports*num_vcs_per_vnet; in_iter++)
+			{
+				invc_offset++;
+				if(invc_offset >= num_vcs_per_vnet)
+				{
+					invc_offset = 0;
+					inport++;
+					if(inport >= m_num_inports)
+						inport = 0;
+				}
+				int invc = invc_base + invc_offset;
+				if(m_outvc_req[outport_iter][outvc_iter][inport][invc])
+				{
+					m_global_arbiter_activity++;
+					m_input_unit[inport]->grant_vc(invc, outvc_iter);
+					m_output_unit[outport_iter]->update_vc(outvc_iter, inport, invc);
+					m_router->swarb_req();
+					break;
+				}
+			}
+		}
+	}
+}
+
+// Map a VC index to its virtual network. VCs are grouped in
+// contiguous blocks of m_vc_per_vnet per vnet, so the vnet is simply
+// the block index -- computed in O(1) instead of the old linear scan
+// (kept consistent with SWallocator_d::get_vnet). Out-of-range VCs
+// are an error, exactly as before.
+int VCallocator_d::get_vnet(int invc)
+{
+	int vnet = invc/m_vc_per_vnet;
+	if(invc < 0 || vnet >= NUMBER_OF_VIRTUAL_NETWORKS)
+	{
+		ERROR_MSG("Could not determine vc");
+		return -1;
+	}
+	return vnet;
+}
+
+// If any input VC will still be waiting for VC allocation (VA stage)
+// next cycle, schedule this allocator to wake up again in one cycle.
+void VCallocator_d::check_for_wakeup()
+{
+	for(int inport = 0; inport < m_num_inports; inport++)
+	{
+		for(int vc = 0; vc < m_num_vcs; vc++)
+		{
+			if(!m_input_unit[inport]->need_stage_nextcycle(vc, VC_AB_, VA_))
+				continue;
+			g_eventQueue_ptr->scheduleEvent(this, 1);
+			return;
+		}
+	}
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.hh
new file mode 100644
index 000000000..fc1bf1340
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.hh
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * VCallocator_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef VC_ALLOCATOR_D_H
+#define VC_ALLOCATOR_D_H
+
+#include "NetworkHeader.hh"
+#include "Consumer.hh"
+
+class Router_d;
+class InputUnit_d;
+class OutputUnit_d;
+
+// Two-stage virtual-channel allocator for a garnet router.
+// Stage 1: each waiting input VC picks an output VC to request.
+// Stage 2: each requested output VC grants one requester round-robin.
+class VCallocator_d : public Consumer {
+public:
+	VCallocator_d(Router_d *router);
+	void init();	// late init once router port counts are known
+	void wakeup();	// run both allocation stages for this cycle
+	void check_for_wakeup();	// reschedule if work remains next cycle
+	void clear_request_vector();	// drop this cycle's requests
+	int get_vnet(int invc);	// vnet that VC <invc> belongs to
+	void print(ostream& out) const {};
+	void arbitrate_invcs();	// stage 1
+	void arbitrate_outvcs();	// stage 2
+	bool is_invc_candidate(int inport_iter, int invc_iter);	// ordered-vnet check
+	void select_outvc(int inport_iter, int invc_iter);	// stage 1 body
+	inline double get_local_arbit_count()
+	{
+		return m_local_arbiter_activity;
+	}
+	inline double get_global_arbit_count()
+	{
+		return m_global_arbiter_activity;
+	}
+
+private:
+	int m_num_vcs, m_vc_per_vnet;
+	int m_num_inports;
+	int m_num_outports;
+
+	double m_local_arbiter_activity, m_global_arbiter_activity;
+
+	Router_d *m_router;
+	Vector<Vector <int > > m_round_robin_invc; // first stage of arbitration where all VCs select an output VC to contend for
+	Vector<Vector <pair<int, int> > > m_round_robin_outvc; // (inport, invc offset) arbiter pointer for every output vc
+	Vector<Vector<Vector<Vector<bool > > > > m_outvc_req; // [outport][outvc][inport][invc]; set true in the first phase of allocation
+	Vector<Vector<bool > > m_outvc_is_req;	// true iff the outvc has any request this cycle
+
+	Vector<InputUnit_d *> m_input_unit ;
+	Vector<OutputUnit_d *> m_output_unit ;
+
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/VirtualChannel_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/VirtualChannel_d.cc
new file mode 100644
index 000000000..60c6bef2a
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/VirtualChannel_d.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * VirtualChannel_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "VirtualChannel_d.hh"
+
+// Construct VC <id>: default (unbounded) input buffer, IDLE state
+// stamped with the current time, and the INFINITE_ sentinel marking
+// "no flit enqueued yet".
+VirtualChannel_d::VirtualChannel_d(int id)
+{
+ m_id = id;
+ m_input_buffer = new flitBuffer_d();
+ m_vc_state.first = IDLE_;
+ m_vc_state.second = g_eventQueue_ptr->getTime();
+ m_enqueue_time = INFINITE_;
+}
+
+// Release the input buffer this VC owns.  NOTE(review): flits still
+// queued inside the buffer are not freed here — confirm they are owned
+// elsewhere.
+VirtualChannel_d::~VirtualChannel_d()
+{
+ delete m_input_buffer;
+}
+
+// Record the output port chosen by route computation for this VC.
+void VirtualChannel_d::set_outport(int outport)
+{
+ route = outport;
+}
+
+// An output VC was granted to this input VC: go ACTIVE from next cycle
+// and advance the head flit to the switch-allocation (SA_) stage.
+// NOTE(review): assumes the input buffer holds at least one flit —
+// peekTopFlit() on an empty buffer would be invalid.
+void VirtualChannel_d::grant_vc(int out_vc)
+{
+ m_output_vc = out_vc;
+ m_vc_state.first = ACTIVE_;
+ m_vc_state.second = g_eventQueue_ptr->getTime() + 1;
+ flit_d *t_flit = m_input_buffer->peekTopFlit();
+ t_flit->advance_stage(SA_);
+}
+
+// True when this VC is in <state> (and the state is already in effect),
+// and its head flit is ready this cycle and sitting in pipeline <stage>.
+bool VirtualChannel_d::need_stage(VC_state_type state, flit_stage stage)
+{
+ if((m_vc_state.first == state) && (g_eventQueue_ptr->getTime() >= m_vc_state.second))
+ {
+ if(m_input_buffer->isReady())
+ {
+ flit_d *t_flit = m_input_buffer->peekTopFlit();
+ return(t_flit->is_stage(stage)) ;
+ }
+ }
+ return false;
+
+}
+
+// Same test as need_stage(), but evaluated one cycle ahead — used to
+// decide whether a unit must be scheduled to wake up next cycle.
+bool VirtualChannel_d::need_stage_nextcycle(VC_state_type state, flit_stage stage)
+{
+ if((m_vc_state.first == state) && ((g_eventQueue_ptr->getTime()+1) >= m_vc_state.second))
+ {
+ if(m_input_buffer->isReadyForNext())
+ {
+ flit_d *t_flit = m_input_buffer->peekTopFlit();
+ return(t_flit->is_next_stage(stage)) ;
+ }
+ }
+ return false;
+}
+
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/VirtualChannel_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/VirtualChannel_d.hh
new file mode 100644
index 000000000..111837122
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/VirtualChannel_d.hh
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * VirtualChannel_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef VIRTUAL_CHANNEL_D_H
+#define VIRTUAL_CHANNEL_D_H
+
+#include "NetworkHeader.hh"
+#include "flitBuffer_d.hh"
+
+// One virtual channel of a router input port in the garnet fixed
+// pipeline.  Tracks the VC state machine (state + the time it takes
+// effect), the buffered flits, the chosen route / output VC, and the
+// credit count for the downstream buffer.
+class VirtualChannel_d {
+public:
+ VirtualChannel_d(int id);
+ ~VirtualChannel_d();
+
+ bool need_stage(VC_state_type state, flit_stage stage);
+ bool need_stage_nextcycle(VC_state_type state, flit_stage stage);
+ void set_outport(int outport);
+ void grant_vc(int out_vc);
+
+ inline Time get_enqueue_time()
+ {
+ return m_enqueue_time;
+ }
+
+ inline void set_enqueue_time(Time time)
+ {
+ m_enqueue_time = time;
+ }
+
+ inline VC_state_type get_state()
+ {
+ return m_vc_state.first;
+ }
+ inline int get_outvc()
+ {
+ return m_output_vc;
+ }
+ inline bool isReady()
+ {
+ return m_input_buffer->isReady();
+ }
+ inline bool has_credits()
+ {
+ return (m_credit_count > 0);
+ }
+ inline int get_route()
+ {
+ return route;
+ }
+ // NOTE: despite the name, this overwrites the credit count with
+ // <credit>; it does not adjust it incrementally.
+ inline void update_credit(int credit)
+ {
+ m_credit_count = credit;
+ }
+ inline void increment_credit()
+ {
+ m_credit_count++;
+ }
+ inline void insertFlit(flit_d *t_flit)
+ {
+ m_input_buffer->insert(t_flit);
+ }
+ // Transition the state machine; the new state takes effect next cycle.
+ inline void set_state(VC_state_type m_state)
+ {
+ m_vc_state.first = m_state;
+ m_vc_state.second = g_eventQueue_ptr->getTime() + 1;
+ }
+
+ inline flit_d* peekTopFlit()
+ {
+ return m_input_buffer->peekTopFlit();
+ }
+
+ inline flit_d* getTopFlit()
+ {
+ return m_input_buffer->getTopFlit();
+ }
+
+private:
+ int m_id; // VC index within the input port
+ flitBuffer_d *m_input_buffer; // owned; deleted in the destructor
+ pair<VC_state_type, Time> m_vc_state; // I/R/V/A/C state and the time it becomes valid
+ int route; // output port chosen for this packet
+ Time m_enqueue_time;
+ int m_output_vc; // output VC granted by the VC allocator
+ int m_credit_count; // free slots in the downstream buffer
+};
+#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/flitBuffer_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/flitBuffer_d.cc
new file mode 100644
index 000000000..e5e7226a2
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/flitBuffer_d.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * flitBuffer_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "flitBuffer_d.hh"
+
+// Default buffer: effectively unbounded (INFINITE_ sentinel capacity).
+flitBuffer_d::flitBuffer_d()
+{
+ max_size = INFINITE_;
+}
+
+// Buffer with an explicit capacity, checked only via isFull().
+flitBuffer_d::flitBuffer_d(int maximum_size)
+{
+ max_size = maximum_size;
+}
+
+// True when no flits are queued.
+bool flitBuffer_d::isEmpty()
+{
+ return (m_buffer.size() == 0);
+}
+
+// True when the buffer is non-empty and the earliest flit's timestamp
+// has been reached, i.e. it may be processed this cycle.
+bool flitBuffer_d::isReady()
+{
+ if(m_buffer.size() != 0 )
+ {
+ flit_d *t_flit = m_buffer.peekMin();
+ if(t_flit->get_time() <= g_eventQueue_ptr->getTime())
+ return true;
+ }
+ return false;
+}
+
+// Same as isReady(), but looking one cycle ahead — used for scheduling
+// wakeups for the next cycle.
+bool flitBuffer_d::isReadyForNext()
+{
+ if(m_buffer.size() != 0 )
+ {
+ flit_d *t_flit = m_buffer.peekMin();
+ if(t_flit->get_time() <= (g_eventQueue_ptr->getTime() + 1))
+ return true;
+ }
+ return false;
+}
+
+// Print only the current occupancy; individual flits are not dumped.
+void flitBuffer_d::print(ostream& out) const
+{
+ out << "[flitBuffer: ";
+ out << m_buffer.size() << "] " << endl;
+}
+
+// Capacity check against the (possibly INFINITE_) configured maximum.
+bool flitBuffer_d::isFull()
+{
+ return (m_buffer.size() >= max_size);
+}
+// Reconfigure the capacity; does not shrink or drop queued flits.
+void flitBuffer_d::setMaxSize(int maximum)
+{
+ max_size = maximum;
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/flitBuffer_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/flitBuffer_d.hh
new file mode 100644
index 000000000..7e7d07a5a
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/flitBuffer_d.hh
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * flitBuffer_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef FLIT_BUFFER_D_H
+#define FLIT_BUFFER_D_H
+
+#include "NetworkHeader.hh"
+#include "PrioHeap.hh"
+#include "flit_d.hh"
+
+// Time-ordered queue of flit pointers for the fixed pipeline, backed by
+// a PrioHeap (ordering presumably via node_less_then_eq on flit time —
+// see flit_d.hh).  Capacity is advisory: insert() does not check it,
+// callers are expected to consult isFull().
+class flitBuffer_d {
+public:
+ flitBuffer_d();
+ flitBuffer_d(int maximum_size);
+
+ bool isReady();
+ bool isReadyForNext();
+ bool isEmpty();
+ void print(ostream& out) const;
+ bool isFull();
+ void setMaxSize(int maximum);
+
+ inline flit_d* getTopFlit()
+ {
+ return m_buffer.extractMin();
+ }
+ inline flit_d* peekTopFlit()
+ {
+ return m_buffer.peekMin();
+ }
+ inline void insert(flit_d *flt)
+ {
+ m_buffer.insert(flt);
+ }
+ /**********Data Members*********/
+private:
+ PrioHeap <flit_d *> m_buffer;
+ // NOTE(review): `size` appears unused in this file — candidate for
+ // removal if nothing else references it.
+ int size, max_size;
+};
+
+// Stream-insertion helper: delegates to flitBuffer_d::print().
+ostream& operator<<(ostream& out, const flitBuffer_d& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const flitBuffer_d& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif
+
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/flit_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/flit_d.cc
new file mode 100644
index 000000000..e049c5537
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/flit_d.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * flit_d.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "flit_d.hh"
+
+// Construct data flit <id> of a <size>-flit packet carrying <msg_ptr>
+// on virtual network <vnet> / VC <vc>.  The flit starts in the I_
+// pipeline stage at the current time; its type is derived from its
+// position in the packet.
+flit_d::flit_d(int id, int vc, int vnet, int size, MsgPtr msg_ptr)
+{
+ m_size = size;
+ m_msg_ptr = msg_ptr;
+ m_enqueue_time = g_eventQueue_ptr->getTime();
+ m_time = g_eventQueue_ptr->getTime();
+ m_id = id;
+ m_vnet = vnet;
+ m_vc = vc;
+ m_stage.first = I_;
+ m_stage.second = m_time;
+
+ // Single-flit packet: head and tail at once.
+ if(size == 1)
+ {
+ m_type = HEAD_TAIL_;
+ return;
+ }
+ if(id == 0)
+ m_type = HEAD_;
+ else if(id == (size - 1))
+ m_type = TAIL_;
+ else
+ m_type = BODY_;
+}
+
+// Construct a credit / VC-free-signal flit for virtual channel <vc>.
+// Previously only m_id, m_vc, m_is_free_signal and m_time were set,
+// leaving the remaining members uninitialized; reading them (e.g. via
+// print()) was undefined behavior.  Give every member a deterministic
+// default instead — credit flits never use these fields for routing.
+flit_d::flit_d(int vc, bool is_free_signal)
+{
+ m_id = 0;
+ m_vc = vc;
+ m_is_free_signal = is_free_signal;
+ m_time = g_eventQueue_ptr->getTime();
+
+ // Deterministic defaults for fields unused by credit flits.
+ m_size = 1;
+ m_vnet = -1;
+ m_enqueue_time = m_time;
+ m_type = HEAD_TAIL_;
+ m_outport = -1;
+ src_delay = 0;
+ m_stage.first = I_;
+ m_stage.second = m_time;
+}
+/*
+int flit_d::get_size()
+{
+ return m_size;
+}
+Time flit_d::get_enqueue_time()
+{
+ return m_enqueue_time;
+}
+int flit_d::get_id()
+{
+ return m_id;
+}
+Time flit_d::get_time()
+{
+ return m_time;
+}
+void flit_d::set_time(Time time)
+{
+ m_time = time;
+}
+int flit_d::get_vnet()
+{
+ return m_vnet;
+}
+int flit_d::get_vc()
+{
+ return m_vc;
+}
+void flit_d::set_vc(int vc)
+{
+ m_vc = vc;
+}
+MsgPtr& flit_d::get_msg_ptr()
+{
+ return m_msg_ptr;
+}
+flit_type flit_d::get_type()
+{
+ return m_type;
+}
+bool flit_d::is_stage(flit_stage t_stage)
+{
+ return ((m_stage.first == t_stage) && (g_eventQueue_ptr->getTime() >= m_stage.second));
+}
+bool flit_d::is_next_stage(flit_stage t_stage)
+{
+ return ((m_stage.first == t_stage) && ((g_eventQueue_ptr->getTime()+1) >= m_stage.second));
+}
+void flit_d::advance_stage(flit_stage t_stage)
+{
+ m_stage.first = t_stage;
+ m_stage.second = g_eventQueue_ptr->getTime() + 1;
+}
+*/
+// Dump the flit's identity and position; m_time / stage are omitted.
+void flit_d::print(ostream& out) const
+{
+ out << "[flit:: ";
+ out << "Id=" << m_id << " ";
+ out << "Type=" << m_type << " ";
+ out << "Vnet=" << m_vnet << " ";
+ out << "VC=" << m_vc << " ";
+ out << "Enqueue Time=" << m_enqueue_time << " ";
+ out << "]";
+}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/flit_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/flit_d.hh
new file mode 100644
index 000000000..9a47e964f
--- /dev/null
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/flit_d.hh
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * flit_d.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+
+#ifndef FLIT_D_H
+#define FLIT_D_H
+
+#include "NetworkHeader.hh"
+#include "Message.hh"
+
+// One flit (flow-control unit) of a packet in the garnet fixed
+// pipeline.  Carries its packet position (id/size/type), network
+// coordinates (vnet, vc, outport), the wrapped protocol message, and
+// its current pipeline stage together with the time that stage becomes
+// valid.  A second constructor builds credit / VC-free signalling
+// flits.
+class flit_d {
+public:
+ flit_d(int id, int vc, int vnet, int size, MsgPtr msg_ptr);
+ flit_d(int vc, bool is_free_signal);
+ void set_outport(int port) { m_outport = port; }
+ int get_outport() {return m_outport; }
+ void print(ostream& out) const;
+ bool is_free_signal()
+ {
+ return m_is_free_signal;
+ }
+
+ inline int get_size()
+ {
+ return m_size;
+ }
+ inline Time get_enqueue_time()
+ {
+ return m_enqueue_time;
+ }
+ inline int get_id()
+ {
+ return m_id;
+ }
+ inline Time get_time()
+ {
+ return m_time;
+ }
+ inline void set_time(Time time)
+ {
+ m_time = time;
+ }
+ inline int get_vnet()
+ {
+ return m_vnet;
+ }
+ inline int get_vc()
+ {
+ return m_vc;
+ }
+ inline void set_vc(int vc)
+ {
+ m_vc = vc;
+ }
+ inline MsgPtr& get_msg_ptr()
+ {
+ return m_msg_ptr;
+ }
+ inline flit_type get_type()
+ {
+ return m_type;
+ }
+ // True if the flit is in <t_stage> and that stage is valid this cycle.
+ inline bool is_stage(flit_stage t_stage)
+ {
+ return ((m_stage.first == t_stage) && (g_eventQueue_ptr->getTime() >= m_stage.second));
+ }
+ // Same test evaluated one cycle ahead (for wakeup scheduling).
+ inline bool is_next_stage(flit_stage t_stage)
+ {
+ return ((m_stage.first == t_stage) && ((g_eventQueue_ptr->getTime()+1) >= m_stage.second));
+ }
+ // Move to <t_stage>; the new stage takes effect next cycle.
+ inline void advance_stage(flit_stage t_stage)
+ {
+ m_stage.first = t_stage;
+ m_stage.second = g_eventQueue_ptr->getTime() + 1;
+ }
+ inline pair<flit_stage, Time> get_stage()
+ {
+ return m_stage;
+ }
+ inline void set_delay(int delay)
+ {
+ src_delay = delay;
+ }
+
+ inline int get_delay()
+ {
+ return src_delay;
+ }
+
+
+private:
+ /************Data Members*************/
+ int m_id; // flit position within the packet (0-based)
+ int m_vnet; // virtual network
+ int m_vc; // virtual channel
+ int m_size; // packet size in flits
+ bool m_is_free_signal; // true for credit/VC-free flits
+ Time m_enqueue_time, m_time;
+ flit_type m_type; // HEAD_/BODY_/TAIL_/HEAD_TAIL_
+ MsgPtr m_msg_ptr; // protocol message carried by the packet
+ int m_outport;
+ int src_delay;
+ pair<flit_stage, Time> m_stage; // current stage and when it becomes valid
+
+};
+
+// Ordering predicate (presumably used by PrioHeap<flit_d*> — confirm):
+// earlier m_time first, flit id as the tie-breaker so flits of the same
+// packet dequeue in order.
+inline extern bool node_less_then_eq(flit_d* n1, flit_d* n2);
+
+inline extern
+bool node_less_then_eq(flit_d* n1, flit_d* n2)
+{
+ if (n1->get_time() == n2->get_time()) {
+// ASSERT(n1->flit_id != n2->flit_id);
+ return (n1->get_id() <= n2->get_id());
+ } else {
+ return (n1->get_time() <= n2->get_time());
+ }
+}
+
+// Output operator declaration: delegates to flit_d::print().
+ostream& operator<<(ostream& out, const flit_d& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const flit_d& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/FlexibleConsumer.hh b/src/mem/ruby/network/garnet-flexible-pipeline/FlexibleConsumer.hh
new file mode 100644
index 000000000..773d00323
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/FlexibleConsumer.hh
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * FlexibleConsumer.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+#ifndef FLEXIBLE_CONSUMER_H
+#define FLEXIBLE_CONSUMER_H
+
+#include "Consumer.hh"
+#include "NetworkHeader.hh"
+#include "NetDest.hh"
+
+// Consumer specialization for the flexible-pipeline network, adding
+// no-op default hooks for VC request/grant/release and a buffer
+// occupancy query that subclasses (routers, interfaces) override.
+class FlexibleConsumer : public Consumer {
+public:
+ // Virtual destructor: instances are held and deleted polymorphically.
+ // (Harmless even if Consumer already declares one — confirm.)
+ virtual ~FlexibleConsumer() {}
+ virtual bool isBufferNotFull(int vc, int inport) {return true;}
+ virtual void grant_vc(int out_port, int vc, Time grant_time) {}
+ virtual void release_vc(int out_port, int vc, Time release_time) {}
+ virtual void request_vc(int vc, int in_port, NetDest destination, Time request_time) {}
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.cc b/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.cc
new file mode 100644
index 000000000..3d7c555b5
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.cc
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * GarnetNetwork.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "GarnetNetwork.hh"
+#include "MachineType.hh"
+#include "NetworkInterface.hh"
+#include "MessageBuffer.hh"
+#include "Router.hh"
+#include "Topology.hh"
+#include "SimpleNetwork.hh"
+#include "GarnetNetwork_d.hh"
+#include "NetworkLink.hh"
+#include "NetDest.hh"
+
+// calls new to abstract away from the network
+// Factory for the configured network model: garnet detailed
+// (fixed pipeline), garnet flexible, or the simple network.  Also
+// triggers the one-time read of the network configuration.
+Network* Network::createNetwork(int nodes)
+{
+ NetworkConfig::readNetConfig();
+ // Instantiate a network depending on what kind of network is requested
+ if(NetworkConfig::isGarnetNetwork())
+ {
+ if(NetworkConfig::isDetailNetwork())
+ return new GarnetNetwork_d(nodes);
+ else
+ return new GarnetNetwork(nodes);
+ }
+ else
+ return new SimpleNetwork(nodes);
+}
+
+// Build the flexible-pipeline garnet network: per-node/per-vnet message
+// buffers toward and from the protocol, one Router per topology switch,
+// one NetworkInterface per node, then the links described by the
+// topology.  NOTE(review): the <nodes> parameter is ignored; the node
+// count is re-derived from MachineType_base_number — confirm intended.
+GarnetNetwork::GarnetNetwork(int nodes)
+{
+ m_nodes = MachineType_base_number(MachineType_NUM); // Total nodes in network
+ m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS; // Number of virtual networks = number of message classes in the coherence protocol
+ m_ruby_start = 0;
+
+ // Allocate to and from queues
+ m_toNetQueues.setSize(m_nodes); // Queues that are getting messages from protocol
+ m_fromNetQueues.setSize(m_nodes); // Queues that are feeding the protocol
+ m_in_use.setSize(m_virtual_networks);
+ m_ordered.setSize(m_virtual_networks);
+ for (int i = 0; i < m_virtual_networks; i++)
+ {
+ m_in_use[i] = false;
+ m_ordered[i] = false;
+ }
+
+ for (int node = 0; node < m_nodes; node++)
+ {
+ //Setting how many vitual message buffers will there be per Network Queue
+ m_toNetQueues[node].setSize(m_virtual_networks);
+ m_fromNetQueues[node].setSize(m_virtual_networks);
+
+ for (int j = 0; j < m_virtual_networks; j++)
+ {
+ m_toNetQueues[node][j] = new MessageBuffer(); // Instantiating the Message Buffers that interact with the coherence protocol
+ m_fromNetQueues[node][j] = new MessageBuffer();
+ }
+ }
+
+ // Setup the network switches
+ m_topology_ptr = new Topology(this, m_nodes);
+
+ int number_of_routers = m_topology_ptr->numSwitches();
+ for (int i=0; i<number_of_routers; i++) {
+ m_router_ptr_vector.insertAtBottom(new Router(i, this));
+ }
+
+ for (int i=0; i < m_nodes; i++) {
+ NetworkInterface *ni = new NetworkInterface(i, m_virtual_networks, this);
+ ni->addNode(m_toNetQueues[i], m_fromNetQueues[i]);
+ m_ni_ptr_vector.insertAtBottom(ni);
+ }
+ m_topology_ptr->createLinks(false); // false because this isn't a reconfiguration
+}
+
+// Free all owned components: message buffers, routers, network
+// interfaces, links, and the topology.
+GarnetNetwork::~GarnetNetwork()
+{
+ for (int i = 0; i < m_nodes; i++)
+ {
+ m_toNetQueues[i].deletePointers();
+ m_fromNetQueues[i].deletePointers();
+ }
+ m_router_ptr_vector.deletePointers();
+ m_ni_ptr_vector.deletePointers();
+ m_link_ptr_vector.deletePointers();
+ delete m_topology_ptr;
+}
+
+// Drain every protocol-facing message buffer; in-network state
+// (routers, links) is not touched.
+void GarnetNetwork::reset()
+{
+ for (int node = 0; node < m_nodes; node++)
+ {
+ for (int j = 0; j < m_virtual_networks; j++)
+ {
+ m_toNetQueues[node][j]->clear();
+ m_fromNetQueues[node][j]->clear();
+ }
+ }
+}
+
+// Create the node -> switch link (node's NI output feeds the router's
+// input).  routing_table_entry and bw_multiplier are accepted for
+// interface compatibility but unused here; reconfiguration is not
+// supported by garnet and is treated as a fatal error.
+void GarnetNetwork::makeInLink(NodeID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration)
+{
+ assert(src < m_nodes);
+
+ if(!isReconfiguration)
+ {
+ NetworkLink *net_link = new NetworkLink(m_link_ptr_vector.size(), link_latency, this);
+ m_link_ptr_vector.insertAtBottom(net_link);
+ m_router_ptr_vector[dest]->addInPort(net_link);
+ m_ni_ptr_vector[src]->addOutPort(net_link);
+ }
+ else
+ {
+ ERROR_MSG("Fatal Error:: Reconfiguration not allowed here");
+ // do nothing
+ }
+}
+
+// Create the switch -> node link (router output feeds the node's NI
+// input); the router gets the routing table entry and link weight.
+// bw_multiplier is unused; reconfiguration is a fatal error.
+void GarnetNetwork::makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+{
+ assert(dest < m_nodes);
+ assert(src < m_router_ptr_vector.size());
+ assert(m_router_ptr_vector[src] != NULL);
+
+ if(!isReconfiguration)
+ {
+ NetworkLink *net_link = new NetworkLink(m_link_ptr_vector.size(), link_latency, this);
+ m_link_ptr_vector.insertAtBottom(net_link);
+ m_router_ptr_vector[src]->addOutPort(net_link, routing_table_entry, link_weight);
+ m_ni_ptr_vector[dest]->addInPort(net_link);
+ }
+ else
+ {
+ ERROR_MSG("Fatal Error:: Reconfiguration not allowed here");
+ //do nothing
+ }
+}
+
+// Create a switch -> switch link.  bw_multiplier is unused;
+// reconfiguration is a fatal error, as in the other link makers.
+void GarnetNetwork::makeInternalLink(SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+{
+ if(!isReconfiguration)
+ {
+ NetworkLink *net_link = new NetworkLink(m_link_ptr_vector.size(), link_latency, this);
+ m_link_ptr_vector.insertAtBottom(net_link);
+ m_router_ptr_vector[dest]->addInPort(net_link);
+ m_router_ptr_vector[src]->addOutPort(net_link, routing_table_entry, link_weight);
+ }
+ else
+ {
+ ERROR_MSG("Fatal Error:: Reconfiguration not allowed here");
+ // do nothing
+ }
+
+}
+
+// Validate the (node, vnet) pair and record that the virtual network is
+// in use (and whether it must preserve message order).
+void GarnetNetwork::checkNetworkAllocation(NodeID id, bool ordered, int network_num)
+{
+ ASSERT(id < m_nodes);
+ ASSERT(network_num < m_virtual_networks);
+
+ if (ordered)
+ {
+ m_ordered[network_num] = true;
+ }
+ m_in_use[network_num] = true;
+}
+
+// Protocol-side handle to the buffer feeding the network for (node,
+// vnet); also marks the vnet as allocated.
+MessageBuffer* GarnetNetwork::getToNetQueue(NodeID id, bool ordered, int network_num)
+{
+ checkNetworkAllocation(id, ordered, network_num);
+ return m_toNetQueues[id][network_num];
+}
+
+// Protocol-side handle to the buffer delivering from the network for
+// (node, vnet); also marks the vnet as allocated.
+MessageBuffer* GarnetNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num)
+{
+ checkNetworkAllocation(id, ordered, network_num);
+ return m_fromNetQueues[id][network_num];
+}
+
+// Restart the statistics window: per-link counters are averaged over
+// the interval since this timestamp (see printStats()).
+void GarnetNetwork::clearStats()
+{
+ m_ruby_start = g_eventQueue_ptr->getTime();
+}
+
+// Start of the current statistics window.
+Time GarnetNetwork::getRubyStartTime()
+{
+ return m_ruby_start;
+}
+
+// Print average link utilization and per-VC load accumulated since the
+// last clearStats().  Fixes: guard the two divisions (no links / zero
+// elapsed cycles previously divided by zero), hoist the loop-invariant
+// assert out of the inner loop, and untangle the declaration that was
+// jammed onto the opening brace.
+void GarnetNetwork::printStats(ostream& out) const
+{
+ double average_link_utilization = 0;
+ Vector<double > average_vc_load;
+ average_vc_load.setSize(m_virtual_networks*NetworkConfig::getVCsPerClass());
+
+ for(int i = 0; i < m_virtual_networks*NetworkConfig::getVCsPerClass(); i++)
+ {
+ average_vc_load[i] = 0;
+ }
+
+ out << endl;
+ out << "Network Stats" << endl;
+ out << "-------------" << endl;
+ out << endl;
+ for(int i = 0; i < m_link_ptr_vector.size(); i++)
+ {
+ average_link_utilization += m_link_ptr_vector[i]->getLinkUtilization();
+ Vector<int > vc_load = m_link_ptr_vector[i]->getVcLoad();
+ // Every link must report one counter per (vnet, vc) pair.
+ assert(vc_load.size() == NetworkConfig::getVCsPerClass()*m_virtual_networks);
+ for(int j = 0; j < vc_load.size(); j++)
+ {
+ average_vc_load[j] += vc_load[j];
+ }
+ }
+ // Avoid division by zero when the topology has no links.
+ if(m_link_ptr_vector.size() > 0)
+ average_link_utilization = average_link_utilization/m_link_ptr_vector.size();
+ out << "Average Link Utilization :: " << average_link_utilization << " flits/cycle" <<endl;
+ out << "-------------" << endl;
+
+ // Avoid division by zero when no cycles have elapsed in this window.
+ double elapsed_cycles = double(g_eventQueue_ptr->getTime()) - m_ruby_start;
+ for(int i = 0; i < NetworkConfig::getVCsPerClass()*m_virtual_networks; i++)
+ {
+ if(elapsed_cycles > 0)
+ average_vc_load[i] = (double(average_vc_load[i]) / elapsed_cycles);
+ out << "Average VC Load [" << i << "] = " << average_vc_load[i] << " flits/cycle" << endl;
+ }
+ out << "-------------" << endl;
+}
+
+// Dump the network configuration: per-vnet activity/ordering, then each
+// NI, each router, and (optionally) the topology.
+void GarnetNetwork::printConfig(ostream& out) const
+{
+ out << endl;
+ out << "Network Configuration" << endl;
+ out << "---------------------" << endl;
+ out << "network: GARNET_NETWORK" << endl;
+ out << "topology: " << g_NETWORK_TOPOLOGY << endl;
+ out << endl;
+
+ for (int i = 0; i < m_virtual_networks; i++)
+ {
+ out << "virtual_net_" << i << ": ";
+ if (m_in_use[i])
+ {
+ out << "active, ";
+ if (m_ordered[i])
+ {
+ out << "ordered" << endl;
+ }
+ else
+ {
+ out << "unordered" << endl;
+ }
+ }
+ else
+ {
+ out << "inactive" << endl;
+ }
+ }
+ out << endl;
+
+ for(int i = 0; i < m_ni_ptr_vector.size(); i++)
+ {
+ m_ni_ptr_vector[i]->printConfig(out);
+ }
+ for(int i = 0; i < m_router_ptr_vector.size(); i++)
+ {
+ m_router_ptr_vector[i]->printConfig(out);
+ }
+ if (g_PRINT_TOPOLOGY)
+ {
+ m_topology_ptr->printConfig(out);
+ }
+}
+
+// Minimal identity print used by the generic ostream operator.
+void GarnetNetwork::print(ostream& out) const
+{
+ out << "[GarnetNetwork]";
+}
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh b/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh
new file mode 100644
index 000000000..cb9a8908a
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * GarnetNetwork.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef GARNET_NETWORK_H
+#define GARNET_NETWORK_H
+
+#include "NetworkHeader.hh"
+#include "Vector.hh"
+#include "NetworkConfig.hh"
+#include "Network.hh"
+
+class NetworkInterface;
+class MessageBuffer;
+class Router;
+class Topology;
+class NetDest;
+class NetworkLink;
+
+// Top-level Garnet (flexible-pipeline) network: owns the routers, links
+// and network interfaces, and exposes the protocol-facing message queues.
+class GarnetNetwork : public Network{
+public:
+    GarnetNetwork(int nodes);
+
+    ~GarnetNetwork();
+
+    // returns the queue requested for the given component
+    MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num);
+    MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num);
+
+    void clearStats();
+    void printStats(ostream& out) const;
+    void printConfig(ostream& out) const;
+    void print(ostream& out) const;
+
+    // Per-vnet properties, indexed by virtual network number.
+    bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
+    bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
+
+    Time getRubyStartTime();
+    int getNumNodes(){ return m_nodes; }
+
+    void reset();
+
+    // Methods used by Topology to setup the network
+    void makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration);
+    void makeInLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration);
+    void makeInternalLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration);
+
+private:
+    void checkNetworkAllocation(NodeID id, bool ordered, int network_num);
+
+// Private copy constructor and assignment operator (network is not copyable)
+    GarnetNetwork(const GarnetNetwork& obj);
+    GarnetNetwork& operator=(const GarnetNetwork& obj);
+
+/***********Data Members*************/
+    int m_virtual_networks;
+    int m_nodes;
+
+    Vector<bool> m_in_use;  // which vnets are active
+    Vector<bool> m_ordered; // which vnets must preserve message order
+
+    Vector<Vector<MessageBuffer*> > m_toNetQueues;   // injection queues
+    Vector<Vector<MessageBuffer*> > m_fromNetQueues; // ejection queues
+
+    Vector<Router *> m_router_ptr_vector; // All Routers in Network
+    Vector<NetworkLink *> m_link_ptr_vector; // All links in the network
+    Vector<NetworkInterface *> m_ni_ptr_vector; // All NI's in Network
+
+    Topology* m_topology_ptr;
+    Time m_ruby_start; // stats epoch start (printStats averages over now - m_ruby_start)
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const GarnetNetwork& obj);
+
+// ******************* Definitions *******************
+// Output operator definition: delegates to print() and flushes the stream.
+extern inline
+ostream& operator<<(ostream& out, const GarnetNetwork& obj)
+{
+    obj.print(out);
+    out << flush;
+    return out;
+}
+
+#endif // GARNET_NETWORK_H
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/InVcState.cc b/src/mem/ruby/network/garnet-flexible-pipeline/InVcState.cc
new file mode 100644
index 000000000..52090cb2b
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/InVcState.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * InVCState.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "InVcState.hh"
+
+// Construct the per-input-VC state; the VC starts out idle.
+InVcState::InVcState(int id)
+{
+    m_id = id;
+    m_vc_state = IDLE_;
+    // Initialize route/output-VC to sentinels so a read before
+    // setRoute()/grant_vc() yields a deterministic value instead of
+    // indeterminate garbage.
+    m_route = -1;
+    m_output_vc = -1;
+}
+
+// True when this VC is in 'state' and that state took effect no later
+// than request_time.
+bool InVcState::isInState(VC_state_type state, Time request_time)
+{
+    if (m_vc_state != state)
+        return false;
+    return request_time >= m_time;
+}
+
+// Record the output port (route) this input VC forwards to.
+void InVcState::setRoute(int route) { m_route = route; }
+
+// Move the VC into 'state', effective at 'time'.
+void InVcState::setState(VC_state_type state, Time time)
+{
+    m_time = time;
+    m_vc_state = state;
+}
+
+// An output VC was granted: activate this VC and remember the grant.
+void InVcState::grant_vc(int out_vc, Time grant_time)
+{
+    m_output_vc = out_vc;
+    m_time = grant_time;
+    m_vc_state = ACTIVE_;
+}
+
+// Output port assigned via setRoute().
+int InVcState::get_outport() { return m_route; }
+
+// Output VC assigned via grant_vc().
+int InVcState::get_outvc() { return m_output_vc; }
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/InVcState.hh b/src/mem/ruby/network/garnet-flexible-pipeline/InVcState.hh
new file mode 100644
index 000000000..efbd0d513
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/InVcState.hh
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ *
+ * InVCState.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef IN_VC_STATE_H
+#define IN_VC_STATE_H
+
+#include "NetworkHeader.hh"
+
+// State of one input virtual channel at a router/NI input port: its
+// current allocation state, the output port (route) and output VC it has
+// been assigned, and the time the state last changed.
+class InVcState {
+public:
+    InVcState(int id);
+
+    void setRoute(int route);
+    void setState(VC_state_type state, Time time);
+    int get_outport();
+    int get_outvc();
+    void grant_vc(int out_vc, Time grant_time);
+    bool isInState(VC_state_type state, Time time);
+
+private:
+    int m_id;          // VC index
+    int m_route;       // assigned output port
+    int m_output_vc;   // assigned VC at the output
+    VC_state_type m_vc_state;
+    Time m_time;       // when m_vc_state took effect
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh
new file mode 100644
index 000000000..53dd67563
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkConfig.h
+ *
+ * Description: This header file is used to define all configuration parameters required by the interconnection network.
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef NETWORKCONFIG_H
+#define NETWORKCONFIG_H
+
+#include "NetworkHeader.hh"
+#include "util.hh"
+#include "RubyConfig.hh"
+
+// Static accessors for the Garnet network configuration globals.
+// All values come from the g_* globals set by the Ruby config machinery.
+class NetworkConfig {
+  public:
+    static bool isGarnetNetwork() {return g_GARNET_NETWORK; }
+    static bool isDetailNetwork() {return g_DETAIL_NETWORK; }
+    static int isNetworkTesting() {return g_NETWORK_TESTING; }
+    static int getFlitSize() {return g_FLIT_SIZE; }
+    static int getNumPipeStages() {return g_NUM_PIPE_STAGES; }
+    static int getVCsPerClass() {return g_VCS_PER_CLASS; }
+    static int getBufferSize() {return g_BUFFER_SIZE; }
+    // This is no longer used. See config/rubyconfig.defaults to set Garnet
+    // parameters.  The legacy file-parsing implementation is kept below
+    // (commented out) for reference only.
+    static void readNetConfig()
+    {
+      /*
+      string filename = "network/garnet-flexible-pipeline/";
+      filename += NETCONFIG_DEFAULTS;
+
+      if (g_SIMICS) {
+        filename = "../../../ruby/"+filename;
+      }
+      ifstream NetconfigFile( filename.c_str(), ios::in);
+      if(!NetconfigFile.is_open())
+      {
+        cout << filename << endl;
+        cerr << "Network Configuration file cannot be opened\n";
+        exit(1);
+      }
+
+      string line = "";
+
+      while(!NetconfigFile.eof())
+      {
+        getline(NetconfigFile, line, '\n');
+        string var = string_split(line, ':');
+
+        if(!var.compare("g_GARNET_NETWORK"))
+        {
+          if(!line.compare("true"))
+            g_GARNET_NETWORK = true;
+          else
+            g_GARNET_NETWORK = false;
+        }
+        if(!var.compare("g_DETAIL_NETWORK"))
+        {
+          if(!line.compare("true"))
+            g_DETAIL_NETWORK = true;
+          else
+            g_DETAIL_NETWORK = false;
+        }
+        if(!var.compare("g_NETWORK_TESTING"))
+        {
+          if(!line.compare("true"))
+            g_NETWORK_TESTING = true;
+          else
+            g_NETWORK_TESTING = false;
+        }
+        if(!var.compare("g_FLIT_SIZE"))
+          g_FLIT_SIZE = atoi(line.c_str());
+        if(!var.compare("g_NUM_PIPE_STAGES"))
+          g_NUM_PIPE_STAGES = atoi(line.c_str());
+        if(!var.compare("g_VCS_PER_CLASS"))
+          g_VCS_PER_CLASS = atoi(line.c_str());
+        if(!var.compare("g_BUFFER_SIZE"))
+          g_BUFFER_SIZE = atoi(line.c_str());
+      }
+      NetconfigFile.close();
+      */
+      /*
+      cout << "g_GARNET_NETWORK = " << g_GARNET_NETWORK << endl;
+      cout << "g_DETAIL_NETWORK = " << g_DETAIL_NETWORK << endl;
+      cout << "g_NETWORK_TESTING = " << g_NETWORK_TESTING << endl;
+      cout << "g_FLIT_SIZE = " << g_FLIT_SIZE << endl;
+      cout << "g_NUM_PIPE_STAGES = " << g_NUM_PIPE_STAGES << endl;
+      cout << "g_VCS_PER_CLASS= " << g_VCS_PER_CLASS << endl;
+      cout << "g_BUFFER_SIZE = " << g_BUFFER_SIZE << endl;
+      */
+    }
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.cc
new file mode 100644
index 000000000..3dfb5b4b9
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.cc
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkInterface.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "NetworkInterface.hh"
+#include "MessageBuffer.hh"
+#include "flitBuffer.hh"
+#include "NetworkMessage.hh"
+
+// Build the NI: allocate one flit buffer per VC, size the per-vnet
+// protocol buffer vectors, zero the per-vnet VC allocators, and create
+// the output VC state objects (all output VCs start IDLE_).
+NetworkInterface::NetworkInterface(int id, int virtual_networks, GarnetNetwork *network_ptr)
+{
+    m_id = id;
+    m_net_ptr = network_ptr;
+    m_virtual_networks = virtual_networks;
+    m_vc_per_vnet = NetworkConfig::getVCsPerClass();
+    m_num_vcs = m_vc_per_vnet*m_virtual_networks;
+
+    m_vc_round_robin = 0;
+    m_ni_buffers.setSize(m_num_vcs);
+    inNode_ptr.setSize(m_virtual_networks);
+    outNode_ptr.setSize(m_virtual_networks);
+
+    for(int i =0; i < m_num_vcs; i++)
+        m_ni_buffers[i] = new flitBuffer(); // instantiating the NI flit buffers
+
+    m_vc_allocator.setSize(m_virtual_networks);
+    for(int i = 0; i < m_virtual_networks; i++)
+    {
+        m_vc_allocator[i] = 0;
+    }
+
+    for(int i = 0; i < m_num_vcs; i++)
+    {
+        m_out_vc_state.insertAtBottom(new OutVcState(i));
+        m_out_vc_state[i]->setState(IDLE_, g_eventQueue_ptr->getTime());
+    }
+}
+
+// Release NI-owned state.  The network links are not deleted here —
+// presumably owned by the network; confirm.  NOTE(review): outSrcQueue is
+// only allocated in addOutPort(), so this assumes addOutPort() was called.
+NetworkInterface::~NetworkInterface()
+{
+    m_out_vc_state.deletePointers();
+    m_ni_buffers.deletePointers();
+    delete outSrcQueue;
+}
+
+// Attach the link that feeds flits into this NI; the NI consumes it.
+void NetworkInterface::addInPort(NetworkLink *in_link)
+{
+    in_link->setLinkConsumer(this);
+    inNetLink = in_link;
+}
+
+// Attach the outgoing link and create the source queue that models
+// contention on it.
+void NetworkInterface::addOutPort(NetworkLink *out_link)
+{
+    outSrcQueue = new flitBuffer();
+    outNetLink = out_link;
+    out_link->setSourceQueue(outSrcQueue);
+    out_link->setSource(this);
+}
+
+// Attach the protocol-side message buffers (one per vnet) and register
+// this NI as consumer of the injection buffers so the protocol wakes it.
+void NetworkInterface::addNode(Vector<MessageBuffer*>& in, Vector<MessageBuffer*>& out)
+{
+    ASSERT(in.size() == m_virtual_networks);
+    inNode_ptr = in;
+    outNode_ptr = out;
+    for (int j = 0; j < m_virtual_networks; j++)
+    {
+        inNode_ptr[j]->setConsumer(this); // So that protocol injects messages into the NI
+    }
+}
+
+// A VC request arriving at an NI (ejection point) is granted immediately;
+// in_port and destination are unused here.
+void NetworkInterface::request_vc(int in_vc, int in_port, NetDest destination, Time request_time)
+{
+    inNetLink->grant_vc_link(in_vc, request_time);
+}
+
+// Break one protocol message into flits and stage them in the per-VC NI
+// buffers.  A multicast is converted into one unicast copy per destination
+// node; each copy needs its own free output VC.  Returns false (leaving
+// the message in the protocol buffer) as soon as no free VC is available.
+bool NetworkInterface::flitisizeMessage(MsgPtr msg_ptr, int vnet)
+{
+    NetworkMessage *net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
+    NetDest net_msg_dest = net_msg_ptr->getInternalDestination();
+    // All destinations associated with this (possibly multicast) message.
+    Vector<NodeID> dest_nodes = net_msg_dest.getAllDest();
+    // Number of flits depends on the link bandwidth available, expressed
+    // as the flit size in bytes/cycle.
+    int num_flits = (int) ceil((double) MessageSizeType_to_int(net_msg_ptr->getMessageSize())/NetworkConfig::getFlitSize() );
+
+    // Loop because all multicast messages are converted into unicasts.
+    for(int ctr = 0; ctr < dest_nodes.size(); ctr++)
+    {
+        int vc = calculateVC(vnet); // this will return a free output virtual channel
+
+        if(vc == -1)
+        {
+            // did not find a free output vc
+            return false ;
+        }
+        MsgPtr new_msg_ptr = *(msg_ptr.ref());
+        NodeID destID = dest_nodes[ctr];
+
+        NetworkMessage *new_net_msg_ptr = dynamic_cast<NetworkMessage*>(new_msg_ptr.ref());
+        if(dest_nodes.size() > 1)
+        {
+            // Multicast: narrow this copy's destination to the single node.
+            NetDest personal_dest;
+            for(int m = 0; m < (int) MachineType_NUM; m++)
+            {
+                if((destID >= MachineType_base_number((MachineType) m)) && destID < MachineType_base_number((MachineType) (m+1)))
+                {
+                    // calculating the NetDest associated with this destination ID
+                    personal_dest.clear();
+                    personal_dest.add((MachineID) {(MachineType) m, (destID - MachineType_base_number((MachineType) m))});
+                    new_net_msg_ptr->getInternalDestination() = personal_dest;
+                    break;
+                }
+            }
+            // Remove this destination from the original message, reflecting
+            // that a copy for it has been flitisized and holds an output VC.
+            net_msg_dest.removeNetDest(personal_dest);
+            net_msg_ptr->getInternalDestination().removeNetDest(personal_dest);
+        }
+        for(int i = 0; i < num_flits; i++)
+        {
+            flit *fl = new flit(i, vc, vnet, num_flits, new_msg_ptr);
+            m_ni_buffers[vc]->insert(fl);
+        }
+
+        // Request an output VC at the next hop; the flits may traverse the
+        // link only once that VC has been granted.
+        m_out_vc_state[vc]->setState(VC_AB_, g_eventQueue_ptr->getTime());
+        outNetLink->request_vc_link(vc, new_net_msg_ptr->getInternalDestination(), g_eventQueue_ptr->getTime());
+    }
+
+    return true ;
+}
+
+// An output VC has been granted at the next hop: move the VC from
+// VC_AB_ (allocation-in-progress) to active, and wake up next cycle so
+// buffered flits can be scheduled onto the link.
+void NetworkInterface::grant_vc(int out_port, int vc, Time grant_time)
+{
+    assert(m_out_vc_state[vc]->isInState(VC_AB_, grant_time));
+    m_out_vc_state[vc]->grant_vc(grant_time);
+    g_eventQueue_ptr->scheduleEvent(this, 1);
+}
+
+// The tail flit for this VC has been buffered at the next hop, so the VC
+// is free again: mark it idle and wake up next cycle to reuse it.
+void NetworkInterface::release_vc(int out_port, int vc, Time release_time)
+{
+    assert(m_out_vc_state[vc]->isInState(ACTIVE_, release_time));
+    m_out_vc_state[vc]->setState(IDLE_, release_time);
+    g_eventQueue_ptr->scheduleEvent(this, 1);
+}
+
+// Round-robin search for a free (IDLE_) output VC within this vnet's VC
+// range.  Ordered vnets are restricted to a single VC so message order is
+// preserved.  Returns the global VC index, or -1 if none is free.
+int NetworkInterface::calculateVC(int vnet)
+{
+    int vc_per_vnet;
+    if(m_net_ptr->isVNetOrdered(vnet))
+        vc_per_vnet = 1;
+    else
+        vc_per_vnet = m_vc_per_vnet;
+
+    for(int i = 0; i < vc_per_vnet; i++)
+    {
+        // Advance the per-vnet round-robin pointer (with wrap-around).
+        int delta = m_vc_allocator[vnet];
+        m_vc_allocator[vnet]++;
+        if(m_vc_allocator[vnet] == vc_per_vnet)
+            m_vc_allocator[vnet] = 0;
+
+        if(m_out_vc_state[(vnet*m_vc_per_vnet) + delta]->isInState(IDLE_, g_eventQueue_ptr->getTime()))
+        {
+            return ((vnet*m_vc_per_vnet) + delta);
+        }
+    }
+    return -1;
+}
+
+/*
+ * NI wakeup:
+ *  (1) for each vnet, drain ready protocol messages, flitisizing them into
+ *      the per-VC output buffers (stopping at the first message that
+ *      cannot obtain an output VC);
+ *  (2) try to schedule one flit onto the output link, and re-schedule this
+ *      NI if work remains;
+ *  (3) consume one flit from the input link; on a tail flit, deliver the
+ *      message to the protocol buffer and release the upstream VC.
+ */
+
+void NetworkInterface::wakeup()
+{
+    MsgPtr msg_ptr;
+
+    // Checking for messages coming from the protocol.
+    for (int vnet = 0; vnet < m_virtual_networks; vnet++)
+    {
+        while(inNode_ptr[vnet]->isReady()) // Is there a message waiting
+        {
+            msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
+            if(flitisizeMessage(msg_ptr, vnet))
+            {
+                inNode_ptr[vnet]->pop();
+            }
+            else
+            {
+                break; // no free output VC; retry on a later wakeup
+            }
+        }
+    }
+
+    scheduleOutputLink();
+    checkReschedule();
+
+/*********** Picking messages destined for this NI **********/
+
+    if(inNetLink->isReady())
+    {
+        flit *t_flit = inNetLink->consumeLink();
+        if(t_flit->get_type() == TAIL_ || t_flit->get_type() == HEAD_TAIL_)
+        {
+            DEBUG_EXPR(NETWORK_COMP, HighPrio, m_id);
+            DEBUG_MSG(NETWORK_COMP, HighPrio, "Message got delivered");
+            DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime());
+            // In network-only testing mode, messages are not buffered back
+            // into the protocol message buffers.
+            if(!NetworkConfig::isNetworkTesting())
+            {
+                outNode_ptr[t_flit->get_vnet()]->enqueue(t_flit->get_msg_ptr(), 1);
+            }
+            // Signal the upstream router that this VC can be freed now.
+            inNetLink->release_vc_link(t_flit->get_vc(), g_eventQueue_ptr->getTime() + 1);
+        }
+        delete t_flit;
+    }
+}
+
+// Round-robin over the NI flit buffers; send the first ready flit whose
+// output VC is active and whose downstream input VC has buffer space,
+// scheduling the output link for the next cycle.  At most one flit is
+// sent per wakeup.
+void NetworkInterface::scheduleOutputLink()
+{
+    int vc = m_vc_round_robin;
+    m_vc_round_robin++;
+    if(m_vc_round_robin == m_num_vcs)
+        m_vc_round_robin = 0;
+
+    for(int i = 0; i < m_num_vcs; i++)
+    {
+        vc++;
+        if(vc == m_num_vcs)
+            vc = 0;
+        if(m_ni_buffers[vc]->isReady())
+        {
+            // isBufferNotFull_link models buffer backpressure downstream.
+            if(m_out_vc_state[vc]->isInState(ACTIVE_, g_eventQueue_ptr->getTime()) && outNetLink->isBufferNotFull_link(vc))
+            {
+                flit *t_flit = m_ni_buffers[vc]->getTopFlit(); // Just removing the flit
+                t_flit->set_time(g_eventQueue_ptr->getTime() + 1);
+                outSrcQueue->insert(t_flit);
+                g_eventQueue_ptr->scheduleEvent(outNetLink, 1); // schedule the out link
+                return;
+            }
+        }
+    }
+}
+
+// Re-schedule this NI for the next cycle if any protocol injection buffer
+// or any NI flit buffer still has pending work.
+void NetworkInterface::checkReschedule()
+{
+    bool pending = false;
+    for(int vnet = 0; !pending && vnet < m_virtual_networks; vnet++)
+        pending = inNode_ptr[vnet]->isReady(); // message waiting?
+    for(int vc = 0; !pending && vc < m_num_vcs; vc++)
+        pending = m_ni_buffers[vc]->isReadyForNext();
+    if(pending)
+        g_eventQueue_ptr->scheduleEvent(this, 1);
+}
+
+// One-line summary of this NI and the links it is attached to.
+void NetworkInterface::printConfig(ostream& out) const
+{
+    out << "[Network Interface " << m_id << "] - "
+        << "[inLink " << inNetLink->get_id() << "] - "
+        << "[outLink " << outNetLink->get_id() << "]" << endl;
+}
+
+// Short identifier for debug printing.
+void NetworkInterface::print(ostream& out) const
+{
+    out << "[Network Interface]";
+}
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.hh b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.hh
new file mode 100644
index 000000000..d7932432b
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.hh
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkInterface.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+#ifndef NET_INTERFACE_H
+#define NET_INTERFACE_H
+
+#include "NetworkHeader.hh"
+#include "GarnetNetwork.hh"
+#include "Vector.hh"
+#include "FlexibleConsumer.hh"
+#include "Message.hh"
+#include "NetworkLink.hh"
+#include "OutVcState.hh"
+
+class NetworkMessage;
+class MessageBuffer;
+class flitBuffer;
+
+// Network interface (NI): glue between the protocol's MessageBuffers and
+// the Garnet network.  Flitisizes outgoing messages into per-VC buffers
+// and reassembles incoming flits back into protocol messages.
+class NetworkInterface : public FlexibleConsumer {
+public:
+    NetworkInterface(int id, int virtual_networks, GarnetNetwork* network_ptr);
+
+    ~NetworkInterface();
+
+    void addInPort(NetworkLink *in_link);
+    void addOutPort(NetworkLink *out_link);
+
+    void wakeup();
+    void addNode(Vector<MessageBuffer *> &inNode, Vector<MessageBuffer *> &outNode);
+    void grant_vc(int out_port, int vc, Time grant_time);
+    void release_vc(int out_port, int vc, Time release_time);
+    // NI-side buffering never backpressures: always reports room.
+    bool isBufferNotFull(int vc, int inport)
+    {
+        return true;
+    }
+    void request_vc(int in_vc, int in_port, NetDest destination, Time request_time);
+
+    void printConfig(ostream& out) const;
+    void print(ostream& out) const;
+
+private:
+/**************Data Members*************/
+    GarnetNetwork *m_net_ptr;
+    int m_virtual_networks, m_num_vcs, m_vc_per_vnet;
+    NodeID m_id;
+
+    Vector<OutVcState *> m_out_vc_state; // state of each output VC at the next hop
+    Vector<int > m_vc_allocator;         // per-vnet round-robin pointer (see calculateVC)
+    int m_vc_round_robin; // For round robin scheduling
+    flitBuffer *outSrcQueue; // For modelling link contention
+
+    NetworkLink *inNetLink;
+    NetworkLink *outNetLink;
+
+    // Input Flit Buffers
+    Vector<flitBuffer *> m_ni_buffers; // The flit buffers which will serve the Consumer
+
+    Vector<MessageBuffer *> inNode_ptr;  // Message buffers that take messages from the protocol
+    Vector<MessageBuffer *> outNode_ptr; // Message buffers that provide messages to the protocol
+
+    bool flitisizeMessage(MsgPtr msg_ptr, int vnet);
+    int calculateVC(int vnet);
+    void scheduleOutputLink();
+    void checkReschedule();
+};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.cc b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.cc
new file mode 100644
index 000000000..e586ece9e
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkLink.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "NetworkLink.hh"
+#include "NetworkConfig.hh"
+#include "GarnetNetwork.hh"
+
+// Construct a link: empty flit buffer, zeroed utilization counter and
+// per-VC load counters.  NOTE(review): the VC-load vector is sized from
+// the global NUMBER_OF_VIRTUAL_NETWORKS rather than the owning network's
+// vnet count — assumed equal; confirm.
+NetworkLink::NetworkLink(int id, int latency, GarnetNetwork *net_ptr)
+{
+    m_id = id;
+    linkBuffer = new flitBuffer();
+    m_in_port = 0;
+    m_out_port = 0;
+    m_link_utilized = 0;
+    m_net_ptr = net_ptr;
+    m_latency = latency;
+    int num_net = NUMBER_OF_VIRTUAL_NETWORKS;
+    int num_vc = NetworkConfig::getVCsPerClass();
+    m_vc_load.setSize(num_net*num_vc);
+
+    for(int i = 0; i < num_net*num_vc; i++)
+        m_vc_load[i] = 0;
+}
+
+// Release the link's internal flit buffer.
+NetworkLink::~NetworkLink()
+{
+    delete linkBuffer;
+}
+
+// Unique link identifier assigned at construction.
+int NetworkLink::get_id() { return m_id; }
+
+// Register the component that consumes flits at the link's far end.
+void NetworkLink::setLinkConsumer(FlexibleConsumer *consumer)
+{
+    link_consumer = consumer;
+}
+
+// Register the queue from which this link pulls flits on wakeup().
+void NetworkLink::setSourceQueue(flitBuffer *srcQueue)
+{
+    link_srcQueue = srcQueue;
+}
+
+// Register the component feeding this link (receives VC grant/release).
+void NetworkLink::setSource(FlexibleConsumer *source)
+{
+    link_source = source;
+}
+// Forward a VC request across the link to the downstream consumer.
+void NetworkLink::request_vc_link(int vc, NetDest destination, Time request_time)
+{
+    link_consumer->request_vc(vc, m_in_port, destination, request_time);
+}
+// Ask the downstream consumer whether its input VC has buffer space.
+bool NetworkLink::isBufferNotFull_link(int vc)
+{
+    return link_consumer->isBufferNotFull(vc, m_in_port);
+}
+
+// Forward a VC grant back across the link to the upstream source.
+void NetworkLink::grant_vc_link(int vc, Time grant_time)
+{
+    link_source->grant_vc(m_out_port, vc, grant_time);
+}
+
+// Forward a VC release back across the link to the upstream source.
+void NetworkLink::release_vc_link(int vc, Time release_time)
+{
+    link_source->release_vc(m_out_port, vc, release_time);
+}
+
+// Per-VC count of flits carried by this link (copied out for stats).
+Vector<int> NetworkLink::getVcLoad() { return m_vc_load; }
+
+// Flits carried per cycle since the network's stats epoch began.
+double NetworkLink::getLinkUtilization()
+{
+    Time ruby_start = m_net_ptr->getRubyStartTime();
+    return double(m_link_utilized) / double(g_eventQueue_ptr->getTime() - ruby_start);
+}
+
+// True when a flit has arrived and is ready to be consumed.
+bool NetworkLink::isReady() { return linkBuffer->isReady(); }
+
+// Input-port index at the downstream node.
+void NetworkLink::setInPort(int port) { m_in_port = port; }
+
+// Output-port index at the upstream node.
+void NetworkLink::setOutPort(int port) { m_out_port = port; }
+
+// Move one ready flit from the source queue onto the link (it arrives
+// after m_latency cycles), schedule the consumer accordingly, and update
+// the utilization and per-VC load statistics.
+void NetworkLink::wakeup()
+{
+    if(link_srcQueue->isReady())
+    {
+        flit *t_flit = link_srcQueue->getTopFlit();
+        t_flit->set_time(g_eventQueue_ptr->getTime() + m_latency);
+        linkBuffer->insert(t_flit);
+        g_eventQueue_ptr->scheduleEvent(link_consumer, m_latency);
+        m_link_utilized++;
+        m_vc_load[t_flit->get_vc()]++;
+    }
+}
+
+// Look at the next flit without removing it.
+flit* NetworkLink::peekLink() { return linkBuffer->peekTopFlit(); }
+
+// Remove and return the next flit from the link.
+flit* NetworkLink::consumeLink() { return linkBuffer->getTopFlit(); }
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.hh b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.hh
new file mode 100644
index 000000000..cdea89d79
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.hh
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkLink.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+#ifndef NETWORK_LINK_H
+#define NETWORK_LINK_H
+
+#include "NetworkHeader.hh"
+#include "FlexibleConsumer.hh"
+#include "flitBuffer.hh"
+#include "PrioHeap.hh"
+#include "NetDest.hh"
+
+class GarnetNetwork;
+
/*
 * A NetworkLink is a fixed-latency point-to-point connection between
 * two FlexibleConsumers (routers / network interfaces).  On wakeup()
 * it pulls one flit from link_srcQueue, delays it by m_latency, and
 * makes it available to link_consumer via peekLink()/consumeLink().
 * VC request/grant/release calls are relayed between the two ends.
 */
class NetworkLink : public FlexibleConsumer {
public:
    NetworkLink();
    NetworkLink(int id, int latency, GarnetNetwork *net_ptr);
    ~NetworkLink();

    // Topology wiring, called while the network is being built.
    void setLinkConsumer(FlexibleConsumer *consumer);
    void setSourceQueue(flitBuffer *srcQueue);
    flit* peekLink();      // next deliverable flit, not dequeued
    flit* consumeLink();   // next deliverable flit, dequeued

    void print(ostream& out) const {}

    bool is_vc_ready(flit *t_flit);

    int get_id();
    void setInPort(int port);    // consumer-side port index
    void setOutPort(int port);   // source-side port index
    void wakeup();               // move one flit from source queue onto link
    bool isReady();
    // VC flow-control relays between source and consumer.
    void grant_vc_link(int vc, Time grant_time);
    void release_vc_link(int vc, Time release_time);
    void request_vc_link(int vc, NetDest destination, Time request_time);
    bool isBufferNotFull_link(int vc);
    void setSource(FlexibleConsumer *source);
    double getLinkUtilization();   // fraction of cycles carrying a flit
    Vector<int> getVcLoad();       // per-VC flit counts (stats)

protected:
    int m_id, m_latency;
    int m_in_port, m_out_port;
    int m_link_utilized;            // flits carried so far
    Vector<int > m_vc_load;         // flits carried per VC
    GarnetNetwork *m_net_ptr;

    flitBuffer *linkBuffer;              // in-flight flits
    FlexibleConsumer *link_consumer;     // downstream end
    FlexibleConsumer *link_source;       // upstream end
    flitBuffer *link_srcQueue;           // upstream staging queue
};
+
+#endif
+
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/OutVcState.cc b/src/mem/ruby/network/garnet-flexible-pipeline/OutVcState.cc
new file mode 100644
index 000000000..5d43a7821
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/OutVcState.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * OutVCState.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "OutVcState.hh"
+
+OutVcState::OutVcState(int id)
+{
+ m_id = id;
+ m_vc_state = IDLE_;
+}
+
+bool OutVcState::isInState(VC_state_type state, Time request_time)
+{
+ return ((m_vc_state == state) && (request_time >= m_time) );
+}
+
+void OutVcState::grant_vc(Time grant_time)
+{
+ m_time = grant_time;
+ m_vc_state = ACTIVE_;
+}
+
+void OutVcState::setState(VC_state_type state, Time time)
+{
+ m_vc_state = state;
+ m_time = time;
+}
+
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/OutVcState.hh b/src/mem/ruby/network/garnet-flexible-pipeline/OutVcState.hh
new file mode 100644
index 000000000..81120ee8c
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/OutVcState.hh
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * OutVCState.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef OUT_VC_STATE_H
+#define OUT_VC_STATE_H
+
+#include "NetworkHeader.hh"
+
// Tracks the allocation state of one output virtual channel of a
// router, together with the time the state last changed.
class OutVcState {
public:
    OutVcState(int id);

    // True when the VC is in `state` and the change took effect no
    // later than request_time.
    bool isInState(VC_state_type state, Time request_time);
    void setState(VC_state_type state, Time time);
    void grant_vc(Time grant_time);   // moves the VC to ACTIVE_

private:
    int m_id;
    Time m_time;                 // time of the last state change
    VC_state_type m_vc_state;    // e.g. IDLE_, VC_AB_, ACTIVE_
};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/Router.cc b/src/mem/ruby/network/garnet-flexible-pipeline/Router.cc
new file mode 100644
index 000000000..4809d43ed
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/Router.cc
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Router.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "Router.hh"
+#include "NetworkMessage.hh"
+#include "InVcState.hh"
+#include "OutVcState.hh"
+#include "VCarbiter.hh"
+
// Constructs a router node for the flexible-pipeline Garnet network.
// VC counts come from the network configuration; the round-robin
// pointers used for inport scheduling and arbitration start at 0.
Router::Router(int id, GarnetNetwork *network_ptr)
{
    m_id = id;
    m_net_ptr = network_ptr;
    m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS;
    m_vc_per_vnet = NetworkConfig::getVCsPerClass();
    m_round_robin_inport = 0;
    m_round_robin_start = 0;
    m_num_vcs = m_vc_per_vnet*m_virtual_networks;
    // Helper event that re-runs vc_arbitrate() when scheduled.
    m_vc_arbiter = new VCarbiter(this);
}
+
// Tears down the per-port state allocated in addInPort()/addOutPort().
// NOTE(review): the NetworkLink objects in m_in_link/m_out_link are not
// deleted here -- presumably the network owns them; confirm.
Router::~Router()
{
    for (int i = 0; i < m_in_link.size(); i++)
    {
        m_in_vc_state[i].deletePointers();
    }
    for (int i = 0; i < m_out_link.size(); i++)
    {
        m_out_vc_state[i].deletePointers();
        m_router_buffers[i].deletePointers();
    }
    m_out_src_queue.deletePointers();
    delete m_vc_arbiter;
}
+
// Attaches an input link: allocates one InVcState per VC (all starting
// IDLE_), registers this router as the link's consumer, and seeds the
// round-robin invc pointer for the new inport at 0.
void Router::addInPort(NetworkLink *in_link)
{
    int port = m_in_link.size();   // new inport index
    Vector<InVcState *> in_vc_vector;
    for(int i = 0; i < m_num_vcs; i++)
    {
        in_vc_vector.insertAtBottom(new InVcState(i));
        in_vc_vector[i]->setState(IDLE_, g_eventQueue_ptr->getTime());
    }
    m_in_vc_state.insertAtBottom(in_vc_vector);
    m_in_link.insertAtBottom(in_link);
    in_link->setLinkConsumer(this);
    in_link->setInPort(port);

    int start = 0;
    m_round_robin_invc.insertAtBottom(start);
}
+
// Attaches an output link: allocates the per-VC output buffers, the
// source queue feeding the link, per-VC output state (all IDLE_), and
// records the routing-table entry and link weight used by getRoute().
void Router::addOutPort(NetworkLink *out_link, const NetDest& routing_table_entry, int link_weight)
{
    int port = m_out_link.size();   // new outport index
    out_link->setOutPort(port);
    int start = 0;
    m_vc_round_robin.insertAtBottom(start);

    m_out_src_queue.insertAtBottom(new flitBuffer());

    m_out_link.insertAtBottom(out_link);
    m_routing_table.insertAtBottom(routing_table_entry);
    out_link->setSourceQueue(m_out_src_queue[port]);
    out_link->setSource(this);

    // Finite-capacity intermediate buffers model back-pressure.
    Vector<flitBuffer *> intermediateQueues;
    for(int i = 0; i < m_num_vcs; i++)
    {
        intermediateQueues.insertAtBottom(new flitBuffer(NetworkConfig::getBufferSize()));
    }
    m_router_buffers.insertAtBottom(intermediateQueues);

    Vector<OutVcState *> out_vc_vector;
    for(int i = 0; i < m_num_vcs; i++)
    {
        out_vc_vector.insertAtBottom(new OutVcState(i));
        out_vc_vector[i]->setState(IDLE_, g_eventQueue_ptr->getTime());
    }
    m_out_vc_state.insertAtBottom(out_vc_vector);
    m_link_weights.insertAtBottom(link_weight);
}
+
+bool Router::isBufferNotFull(int vc, int inport)
+{
+ int outport = m_in_vc_state[inport][vc]->get_outport();
+ int outvc = m_in_vc_state[inport][vc]->get_outvc();
+
+ return (!m_router_buffers[outport][outvc]->isFull());
+}
+
// A request for an output vc has been placed by an upstream Router/NI.
// Records the computed route, moves the input VC into the arbitration
// state (VC_AB_), and runs arbitration -- immediately if the request is
// for the current cycle, otherwise via the VC-arbiter event at
// request_time.
void Router::request_vc(int in_vc, int in_port, NetDest destination, Time request_time)
{
    assert(m_in_vc_state[in_port][in_vc]->isInState(IDLE_, request_time));

    int outport = getRoute(destination);
    m_in_vc_state[in_port][in_vc]->setRoute(outport);
    m_in_vc_state[in_port][in_vc]->setState(VC_AB_, request_time);
    assert(request_time >= g_eventQueue_ptr->getTime());
    if(request_time > g_eventQueue_ptr->getTime())
        g_eventQueue_ptr->scheduleEventAbsolute(m_vc_arbiter, request_time);
    else
        vc_arbitrate();
}
+
// Round-robin VC arbitration: starting from rotating inport and invc
// pointers, grants each input VC waiting in VC_AB_ the first idle
// output VC that is valid for its virtual network.
void Router::vc_arbitrate()
{
    // Advance the inport round-robin pointer for next time.
    int inport = m_round_robin_inport;
    m_round_robin_inport++;
    if(m_round_robin_inport == m_in_link.size())
        m_round_robin_inport = 0;

    for(int port_iter = 0; port_iter < m_in_link.size(); port_iter++)
    {
        inport++;
        if(inport >= m_in_link.size())
            inport = 0;
        // Per-inport round-robin over input VCs.
        int invc = m_round_robin_invc[inport];
        m_round_robin_invc[inport]++;

        if(m_round_robin_invc[inport] >= m_num_vcs)
            m_round_robin_invc[inport] = 0;
        for(int vc_iter = 0; vc_iter < m_num_vcs; vc_iter++)
        {
            invc++;
            if(invc >= m_num_vcs)
                invc = 0;
            InVcState *in_vc_state = m_in_vc_state[inport][invc];

            if(in_vc_state->isInState(VC_AB_, g_eventQueue_ptr->getTime()))
            {
                // This input VC is waiting for an output VC grant.
                int outport = in_vc_state->get_outport();
                Vector<int > valid_vcs = get_valid_vcs(invc);
                for(int valid_vc_iter = 0; valid_vc_iter < valid_vcs.size(); valid_vc_iter++)
                {
                    if(m_out_vc_state[outport][valid_vcs[valid_vc_iter]]->isInState(IDLE_, g_eventQueue_ptr->getTime()))
                    {
                        // Grant: notify the upstream link and reserve
                        // the output VC (VC_AB_ until the downstream
                        // grant arrives).
                        in_vc_state->grant_vc(valid_vcs[valid_vc_iter], g_eventQueue_ptr->getTime());
                        m_in_link[inport]->grant_vc_link(invc, g_eventQueue_ptr->getTime());
                        m_out_vc_state[outport][valid_vcs[valid_vc_iter]]->setState(VC_AB_, g_eventQueue_ptr->getTime());
                        break;
                    }
                }
            }
        }
    }
}
+
+Vector<int > Router::get_valid_vcs(int invc)
+{
+ Vector<int > vc_list;
+
+ for(int vnet = 0; vnet < m_virtual_networks; vnet++)
+ {
+ if(invc >= (vnet*m_vc_per_vnet) && invc < ((vnet+1)*m_vc_per_vnet))
+ {
+ int base = vnet*m_vc_per_vnet;
+ int vc_per_vnet;
+ if(m_net_ptr->isVNetOrdered(vnet))
+ vc_per_vnet = 1;
+ else
+ vc_per_vnet = m_vc_per_vnet;
+
+ for(int offset = 0; offset < vc_per_vnet; offset++)
+ {
+ vc_list.insertAtBottom(base+offset);
+ }
+ break;
+ }
+ }
+ return vc_list;
+}
+
+void Router::grant_vc(int out_port, int vc, Time grant_time)
+{
+ assert(m_out_vc_state[out_port][vc]->isInState(VC_AB_, grant_time));
+ m_out_vc_state[out_port][vc]->grant_vc(grant_time);
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+}
+
+void Router::release_vc(int out_port, int vc, Time release_time)
+{
+ assert(m_out_vc_state[out_port][vc]->isInState(ACTIVE_, release_time));
+ m_out_vc_state[out_port][vc]->setState(IDLE_, release_time);
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+}
+
+// This function calculated the output port for a particular destination.
+int Router::getRoute(NetDest destination)
+{
+ int output_link = -1;
+ int min_weight = INFINITE_;
+ for(int link = 0; link < m_routing_table.size(); link++)
+ {
+ if (destination.intersectionIsNotEmpty(m_routing_table[link]))
+ {
+ if((m_link_weights[link] >= min_weight))
+ continue;
+ output_link = link;
+ min_weight = m_link_weights[link];
+ }
+ }
+ return output_link;
+}
+
// Route computation for one flit arriving on `inport`: moves it into
// the router buffer of its (already arbitrated) output port/VC.  Head
// flits additionally request an output VC at the next hop; tail flits
// free the input VC.
void Router::routeCompute(flit *m_flit, int inport)
{
    int invc = m_flit->get_vc();
    int outport = m_in_vc_state[inport][invc]->get_outport();
    int outvc = m_in_vc_state[inport][invc]->get_outvc();

    assert(NetworkConfig::getNumPipeStages() >= 1);
    // One pipeline cycle is consumed by scheduling the output link.
    m_flit->set_time(g_eventQueue_ptr->getTime() + (NetworkConfig::getNumPipeStages() - 1));
    m_flit->set_vc(outvc);
    m_router_buffers[outport][outvc]->insert(m_flit);

    if(NetworkConfig::getNumPipeStages() > 1)
        g_eventQueue_ptr->scheduleEvent(this, NetworkConfig::getNumPipeStages() -1 );
    if((m_flit->get_type() == HEAD_) || (m_flit->get_type() == HEAD_TAIL_))
    {
        // Head flit: reserve the downstream VC for this packet's
        // destination set (one cycle later in a multi-stage pipeline).
        NetDest destination = dynamic_cast<NetworkMessage*>(m_flit->get_msg_ptr().ref())->getInternalDestination();
        if(NetworkConfig::getNumPipeStages() > 1)
        {
            m_out_vc_state[outport][outvc]->setState(VC_AB_, g_eventQueue_ptr->getTime() + 1);
            m_out_link[outport]->request_vc_link(outvc, destination, g_eventQueue_ptr->getTime() + 1);
        }
        else
        {
            m_out_vc_state[outport][outvc]->setState(VC_AB_, g_eventQueue_ptr->getTime());
            m_out_link[outport]->request_vc_link(outvc, destination, g_eventQueue_ptr->getTime());
        }
    }
    if((m_flit->get_type() == TAIL_) || (m_flit->get_type() == HEAD_TAIL_))
    {
        // Tail flit: the input VC becomes free again next cycle.
        m_in_vc_state[inport][invc]->setState(IDLE_, g_eventQueue_ptr->getTime() + 1);
        m_in_link[inport]->release_vc_link(invc, g_eventQueue_ptr->getTime() + 1);
    }
}
+
// Main per-cycle router event: routes one flit from every ready input
// link (round-robin over inports), schedules the output links, and
// re-runs / re-schedules arbitration as needed.
void Router::wakeup()
{
    flit *t_flit;

    int incoming_port = m_round_robin_start; // This is for round-robin scheduling of incoming ports
    m_round_robin_start++;
    if (m_round_robin_start >= m_in_link.size()) {
        m_round_robin_start = 0;
    }

    for(int port = 0; port < m_in_link.size(); port++)
    {
        // Round robin scheduling
        incoming_port++;
        if(incoming_port >= m_in_link.size())
            incoming_port = 0;
        if(m_in_link[incoming_port]->isReady()) // checking the incoming link
        {
            DEBUG_EXPR(NETWORK_COMP, HighPrio, m_id);
            DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime());
            // Peek first: routeCompute() re-buffers the flit, then the
            // link slot is consumed.
            t_flit = m_in_link[incoming_port]->peekLink();
            routeCompute(t_flit, incoming_port);
            m_in_link[incoming_port]->consumeLink();
        }
    }
    scheduleOutputLinks();
    checkReschedule(); // This is for flits lying in the router buffers
    vc_arbitrate();
    check_arbiter_reschedule();
}
+
// For each output port, picks (round-robin over VCs) at most one
// buffered flit whose output VC is ACTIVE_ and whose downstream buffer
// has room, and hands it to the output link for the next cycle.
void Router::scheduleOutputLinks()
{
    for(int port = 0; port < m_out_link.size(); port++)
    {
        int vc_tolookat = m_vc_round_robin[port];
        m_vc_round_robin[port]++;
        if(m_vc_round_robin[port] == m_num_vcs)
            m_vc_round_robin[port] = 0;

        for(int i = 0; i < m_num_vcs; i++)
        {
            vc_tolookat++;
            if(vc_tolookat == m_num_vcs)
                vc_tolookat = 0;

            if(m_router_buffers[port][vc_tolookat]->isReady())
            {
                // models buffer backpressure
                if(m_out_vc_state[port][vc_tolookat]->isInState(ACTIVE_, g_eventQueue_ptr->getTime()) && m_out_link[port]->isBufferNotFull_link(vc_tolookat))
                {
                    flit *t_flit = m_router_buffers[port][vc_tolookat]->getTopFlit();
                    t_flit->set_time(g_eventQueue_ptr->getTime() + 1 );
                    m_out_src_queue[port]->insert(t_flit);
                    g_eventQueue_ptr->scheduleEvent(m_out_link[port], 1);
                    break; // done for this port: one flit per link per cycle
                }
            }
        }
    }
}
+
+void Router::checkReschedule()
+{
+ for(int port = 0; port < m_out_link.size(); port++)
+ {
+ for(int vc = 0; vc < m_num_vcs; vc++)
+ {
+ if(m_router_buffers[port][vc]->isReadyForNext())
+ {
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+ return;
+ }
+ }
+ }
+}
+
+void Router::check_arbiter_reschedule()
+{
+ for(int port = 0; port < m_in_link.size(); port++)
+ {
+ for(int vc = 0; vc < m_num_vcs; vc++)
+ {
+ if(m_in_vc_state[port][vc]->isInState(VC_AB_, g_eventQueue_ptr->getTime() + 1))
+ {
+ g_eventQueue_ptr->scheduleEvent(m_vc_arbiter, 1);
+ return;
+ }
+ }
+ }
+}
+
+void Router::printConfig(ostream& out) const
+{
+ out << "[Router " << m_id << "] :: " << endl;
+ out << "[inLink - ";
+ for(int i = 0;i < m_in_link.size(); i++)
+ out << m_in_link[i]->get_id() << " - ";
+ out << "]" << endl;
+ out << "[outLink - ";
+ for(int i = 0;i < m_out_link.size(); i++)
+ out << m_out_link[i]->get_id() << " - ";
+ out << "]" << endl;
+/* out << "---------- routing table -------------" << endl;
+ for(int i =0; i < m_routing_table.size(); i++)
+ out << m_routing_table[i] << endl;
+*/
+}
+
+void Router::print(ostream& out) const
+{
+ out << "[Router]";
+}
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/Router.hh b/src/mem/ruby/network/garnet-flexible-pipeline/Router.hh
new file mode 100644
index 000000000..c0d91e0dd
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/Router.hh
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Router.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef ROUTER_H
+#define ROUTER_H
+
+#include "NetworkHeader.hh"
+#include "GarnetNetwork.hh"
+#include "FlexibleConsumer.hh"
+#include "PrioHeap.hh"
+#include "NetworkLink.hh"
+#include "NetDest.hh"
+#include "flitBuffer.hh"
+#include "InVcState.hh"
+#include "OutVcState.hh"
+
+class VCarbiter;
+
// A router in the flexible-pipeline Garnet network.  Each wakeup() it
// routes flits from its input links into per-output-VC buffers,
// schedules the output links, and performs round-robin VC arbitration
// (optionally re-triggered later via the VCarbiter helper event).
class Router : public FlexibleConsumer {
public:
    Router(int id, GarnetNetwork *network_ptr);

    ~Router();

    // Topology wiring, called during network construction.
    void addInPort(NetworkLink *in_link);
    void addOutPort(NetworkLink *out_link, const NetDest& routing_table_entry, int link_weight);
    void wakeup();   // main per-cycle event
    // VC flow control, invoked by the attached links.
    void request_vc(int in_vc, int in_port, NetDest destination, Time request_time);
    bool isBufferNotFull(int vc, int inport);
    void grant_vc(int out_port, int vc, Time grant_time);
    void release_vc(int out_port, int vc, Time release_time);
    void vc_arbitrate();

    void printConfig(ostream& out) const;
    void print(ostream& out) const;

private:
/***************Data Members******************/
    int m_id;
    int m_virtual_networks, m_num_vcs, m_vc_per_vnet;
    GarnetNetwork *m_net_ptr;
    Vector<int > m_vc_round_robin; // For scheduling of out source queues
    int m_round_robin_inport, m_round_robin_start; // for vc arbitration
    Vector<int > m_round_robin_invc; // Per inport, for vc arbitration

    Vector<Vector<flitBuffer *> > m_router_buffers; // Output buffers, indexed [outport][vc]
    Vector<flitBuffer *> m_out_src_queue; // Source queues feeding the output links
    Vector<NetworkLink *> m_in_link;
    Vector<NetworkLink *> m_out_link;
    Vector<Vector<InVcState * > > m_in_vc_state;   // [inport][vc]
    Vector<Vector<OutVcState * > > m_out_vc_state; // [outport][vc]
    Vector<NetDest> m_routing_table;   // destinations reachable per outport
    Vector<int > m_link_weights;       // routing cost per outport
    VCarbiter *m_vc_arbiter;           // event helper re-running vc_arbitrate()

/*********** Private methods *************/
    int getRoute(NetDest destination);        // min-weight outport for a destination
    Vector<int > get_valid_vcs(int invc);     // candidate output VCs for an input VC
    void routeCompute(flit *m_flit, int inport);
    void checkReschedule();
    void check_arbiter_reschedule();
    void scheduleOutputLinks();
};
+
+#endif
+
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/VCarbiter.cc b/src/mem/ruby/network/garnet-flexible-pipeline/VCarbiter.cc
new file mode 100644
index 000000000..7ebd83de3
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/VCarbiter.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * VCarbiter.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "VCarbiter.hh"
+#include "Router.hh"
+
+VCarbiter::VCarbiter(Router *router)
+{
+ m_router = router;
+}
+
+void VCarbiter::wakeup()
+{
+ m_router->vc_arbitrate();
+}
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/VCarbiter.hh b/src/mem/ruby/network/garnet-flexible-pipeline/VCarbiter.hh
new file mode 100644
index 000000000..10368f2b4
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/VCarbiter.hh
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * VCarbiter.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef VC_ARBITER_H
+#define VC_ARBITER_H
+
+#include "NetworkHeader.hh"
+#include "Consumer.hh"
+
+class Router;
+
// Helper consumer scheduled by a Router to re-run VC arbitration at a
// later cycle (see Router::request_vc / check_arbiter_reschedule).
class VCarbiter : public Consumer{
public:
    VCarbiter(Router *router);
    ~VCarbiter() {}

    void print(ostream& out) const {}
    void wakeup();   // delegates to Router::vc_arbitrate()

private:
    Router *m_router;   // router we arbitrate for (not owned)
};
+
+#endif
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/flit.cc b/src/mem/ruby/network/garnet-flexible-pipeline/flit.cc
new file mode 100644
index 000000000..f3cba2035
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/flit.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * flit.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "flit.hh"
+
+flit::flit(int id, int vc, int vnet, int size, MsgPtr msg_ptr)
+{
+ m_size = size;
+ m_msg_ptr = msg_ptr;
+ m_enqueue_time = g_eventQueue_ptr->getTime();
+ m_time = g_eventQueue_ptr->getTime();
+ m_id = id;
+ m_vnet = vnet;
+ m_vc = vc;
+
+ if(size == 1)
+ {
+ m_type = HEAD_TAIL_;
+ return;
+ }
+ if(id == 0)
+ m_type = HEAD_;
+ else if(id == (size - 1))
+ m_type = TAIL_;
+ else
+ m_type = BODY_;
+}
+
+int flit::get_size()
+{
+ return m_size;
+}
+int flit::get_id()
+{
+ return m_id;
+}
+Time flit::get_time()
+{
+ return m_time;
+}
+
+Time flit::get_enqueue_time()
+{
+ return m_enqueue_time;
+}
+void flit::set_time(Time time)
+{
+ m_time = time;
+}
+
+int flit::get_vnet()
+{
+ return m_vnet;
+}
+int flit::get_vc()
+{
+ return m_vc;
+}
+void flit::set_vc(int vc)
+{
+ m_vc = vc;
+}
+MsgPtr& flit::get_msg_ptr()
+{
+ return m_msg_ptr;
+}
+flit_type flit::get_type()
+{
+ return m_type;
+}
+
+void flit::print(ostream& out) const
+{
+ out << "[flit:: ";
+ out << "Id=" << m_id << " ";
+ out << "Type=" << m_type << " ";
+ out << "Vnet=" << m_vnet << " ";
+ out << "VC=" << m_vc << " ";
+ out << "Enqueue Time=" << m_enqueue_time << " ";
+ out << "]";
+}
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/flit.hh b/src/mem/ruby/network/garnet-flexible-pipeline/flit.hh
new file mode 100644
index 000000000..fc8042cfc
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/flit.hh
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * flit.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef FLIT_H
+#define FLIT_H
+
+// The include guard now encloses the #includes as well; previously they
+// sat outside the guard, so the guard did not protect them on
+// re-inclusion.
+#include "NetworkHeader.hh"
+#include "Message.hh"
+
+// A flit (flow-control unit): the atomic transfer unit of the garnet
+// flexible-pipeline network.  A packet of m_size flits carries one
+// MsgPtr; each flit knows its position (m_id), type, VC, vnet and
+// creation/ready timestamps.
+class flit {
+public:
+  flit(int id, int vc, int vnet, int size, MsgPtr msg_ptr);
+
+  int get_size();
+  int get_id();
+  Time get_time();
+  Time get_enqueue_time();
+  void set_time(Time time);
+  int get_vnet();
+  int get_vc();
+  void set_vc(int vc);
+  MsgPtr& get_msg_ptr();
+  flit_type get_type();
+  void print(ostream&out) const;
+
+private:
+/************Data Members*************/
+  int m_id;                     // position within the packet (0-based)
+  int m_vnet;                   // virtual network
+  int m_vc;                     // current virtual channel
+  int m_size;                   // flits per packet
+  Time m_enqueue_time, m_time;  // creation time / next-ready time
+  flit_type m_type;             // HEAD_/BODY_/TAIL_/HEAD_TAIL_
+  MsgPtr m_msg_ptr;             // message carried by the packet
+
+};
+
+// Heap-ordering predicate used by PrioHeap<flit*>: earlier ready time
+// first, ties broken by flit id.  (Function name keeps the historical
+// spelling "then".)  Plain "inline" is the correct C++ spelling for a
+// header-defined function; the previous "inline extern" declaration /
+// definition pair was redundant.
+inline bool node_less_then_eq(flit* n1, flit* n2)
+{
+  if (n1->get_time() == n2->get_time()) {
+//  ASSERT(n1->flit_id != n2->flit_id);
+    return (n1->get_id() <= n2->get_id());
+  } else {
+    return (n1->get_time() <= n2->get_time());
+  }
+}
+
+// Output operator definition
+inline ostream& operator<<(ostream& out, const flit& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/flitBuffer.cc b/src/mem/ruby/network/garnet-flexible-pipeline/flitBuffer.cc
new file mode 100644
index 000000000..e0fb26e0a
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/flitBuffer.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * flitBuffer.C
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#include "flitBuffer.hh"
+
+// Default: effectively unbounded buffer (capacity INFINITE_).
+flitBuffer::flitBuffer()
+{
+ max_size = INFINITE_;
+}
+
+// Bounded buffer holding at most maximum_size flits.
+flitBuffer::flitBuffer(int maximum_size)
+{
+ max_size = maximum_size;
+}
+
+bool flitBuffer::isEmpty()
+{
+ return (m_buffer.size() == 0);
+}
+
+// True if the earliest (heap-minimum) flit is ready at the current cycle.
+bool flitBuffer::isReady()
+{
+ if(m_buffer.size() != 0 )
+ {
+ flit *t_flit = m_buffer.peekMin();
+ if(t_flit->get_time() <= g_eventQueue_ptr->getTime())
+ return true;
+ }
+ return false;
+}
+
+// True if the earliest flit will be ready by the NEXT cycle — one-cycle
+// lookahead used by the pipeline to arbitrate ahead of time.
+bool flitBuffer::isReadyForNext()
+{
+ if(m_buffer.size() != 0 )
+ {
+ flit *t_flit = m_buffer.peekMin();
+ if(t_flit->get_time() <= (g_eventQueue_ptr->getTime() + 1))
+ return true;
+ }
+ return false;
+}
+
+bool flitBuffer::isFull()
+{
+ return (m_buffer.size() >= max_size);
+}
+
+void flitBuffer::setMaxSize(int maximum)
+{
+ max_size = maximum;
+}
+
+// Remove and return the earliest flit (heap minimum).
+flit* flitBuffer:: getTopFlit()
+{
+ return m_buffer.extractMin();
+}
+
+// Return the earliest flit without removing it.
+flit* flitBuffer::peekTopFlit()
+{
+ return m_buffer.peekMin();
+}
+
+// Insert a flit; ordering is determined by node_less_then_eq (time, id).
+void flitBuffer::insert(flit *flt)
+{
+ m_buffer.insert(flt);
+}
+
+void flitBuffer::print(ostream& out) const
+{
+ out << "[flitBuffer: ";
+ out << m_buffer.size() << "] " << endl;
+}
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/flitBuffer.hh b/src/mem/ruby/network/garnet-flexible-pipeline/flitBuffer.hh
new file mode 100644
index 000000000..1eb122a51
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/flitBuffer.hh
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * flitBuffer.h
+ *
+ * Niket Agarwal, Princeton University
+ *
+ * */
+
+#ifndef FLIT_BUFFER_H
+#define FLIT_BUFFER_H
+
+#include "NetworkHeader.hh"
+#include "PrioHeap.hh"
+#include "flit.hh"
+
+// Priority-heap backed flit queue: flits come out in (ready-time, id)
+// order as defined by node_less_then_eq.  Capacity is bounded by
+// max_size (INFINITE_ for the default constructor).
+class flitBuffer {
+public:
+  flitBuffer();                  // unbounded (max_size = INFINITE_)
+  flitBuffer(int maximum_size);  // bounded
+
+  bool isReady();                // min flit ready this cycle?
+  bool isReadyForNext();         // min flit ready by next cycle?
+  bool isFull();                 // occupancy >= max_size?
+  bool isEmpty();
+  void setMaxSize(int maximum);
+  flit *getTopFlit();            // extract heap minimum
+  flit *peekTopFlit();           // inspect heap minimum
+  void insert(flit *flt);
+  void print(ostream& out) const;
+
+/**********Data Members*********/
+private:
+  PrioHeap <flit *> m_buffer;
+  // NOTE: the former "size" member was removed — it was never
+  // initialized by either constructor and never used; occupancy comes
+  // from m_buffer.size().
+  int max_size;
+};
+
+ostream& operator<<(ostream& out, const flitBuffer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+inline ostream& operator<<(ostream& out, const flitBuffer& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif
+
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/netconfig.defaults b/src/mem/ruby/network/garnet-flexible-pipeline/netconfig.defaults
new file mode 100644
index 000000000..e60f1921d
--- /dev/null
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/netconfig.defaults
@@ -0,0 +1,8 @@
+// Note: You should set Garnet's parameters in config/rubyconfig.defaults and not here
+g_GARNET_NETWORK:true
+g_DETAIL_NETWORK:true
+g_NETWORK_TESTING:false
+g_FLIT_SIZE:16
+g_NUM_PIPE_STAGES:4
+g_VCS_PER_CLASS:4
+g_BUFFER_SIZE:4
diff --git a/src/mem/ruby/network/orion/NetworkPower.cc b/src/mem/ruby/network/orion/NetworkPower.cc
new file mode 100644
index 000000000..6e5994071
--- /dev/null
+++ b/src/mem/ruby/network/orion/NetworkPower.cc
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <math.h>
+
+#include "power_router_init.hh"
+#include "power_array.hh"
+#include "power_crossbar.hh"
+#include "power_arbiter.hh"
+#include "power_bus.hh"
+#include "NetworkPower.hh"
+#include "Router_d.hh"
+#include "NetworkLink_d.hh"
+#include "GarnetNetwork_d.hh"
+#include "SIM_port.hh"
+#include "parm_technology.hh"
+
+/* --------- Static energy calculation functions ------------ */
+
+//Input buffer
+// Average per-cycle energy of one register/SRAM array (e.g. a router
+// input buffer), given per-cycle read (n_read) and write (n_write)
+// activity factors.  Sums dynamic energy of decoder, wordlines,
+// bitlines, memory cells, sense amps and output drivers, plus one
+// cycle's leakage.  Structure and switching-probability assumptions
+// follow the Orion power model.
+double SIM_reg_stat_energy(power_array_info *info, power_array *arr, double n_read, double n_write)
+{
+ double Eavg = 0, Eatomic, Estruct, Estatic;
+
+
+ /* decoder */
+ if (info->row_dec_model) {
+ //row decoder
+ Estruct = 0;
+ /* assume switch probability 0.5 for address bits */
+ //input
+ Eatomic = arr->row_dec.e_chg_addr * arr->row_dec.n_bits * SWITCHING_FACTOR * (n_read + n_write);
+ Estruct += Eatomic;
+
+ //output
+ Eatomic = arr->row_dec.e_chg_output * (n_read + n_write);
+ Estruct += Eatomic;
+
+ /* assume all 1st-level decoders change output */
+ //internal node
+ Eatomic = arr->row_dec.e_chg_l1 * arr->row_dec.n_in_2nd * (n_read + n_write);
+ Estruct += Eatomic;
+
+ Eavg += Estruct;
+ }
+
+ /* wordline */
+ Estruct = 0;
+ //read
+ Eatomic = arr->data_wordline.e_read * n_read;
+ Estruct += Eatomic;
+ //write
+ Eatomic = arr->data_wordline.e_write * n_write;
+ Estruct += Eatomic;
+
+ Eavg += Estruct;
+
+ /* bitlines */
+ Estruct = 0;
+ //read
+ // end == 2 means double-ended (differential) bitlines: both columns
+ // swing on every read, so no switching-probability discount.
+ if (arr->data_bitline.end == 2) {
+ Eatomic = arr->data_bitline.e_col_read * info->eff_data_cols * n_read;
+ }
+ else {
+ /* assume switch probability 0.5 for single-ended bitlines */
+ Eatomic = arr->data_bitline.e_col_read * info->eff_data_cols * SWITCHING_FACTOR * n_read;
+ }
+
+ Estruct += Eatomic;
+ //write
+ /* assume switch probability 0.5 for write bitlines */
+ Eatomic = arr->data_bitline.e_col_write * info->data_width * SWITCHING_FACTOR * n_write;
+ Estruct += Eatomic;
+ //precharge
+ Eatomic = arr->data_bitline_pre.e_charge * info->eff_data_cols * n_read;
+ Estruct += Eatomic;
+
+ Eavg += Estruct;
+
+ /* memory cells */
+ Estruct = 0;
+
+ /* assume switch probability 0.5 for memory cells */
+ Eatomic = arr->data_mem.e_switch * info->data_width * SWITCHING_FACTOR * n_write;
+ Estruct += Eatomic;
+
+ Eavg += Estruct;
+
+ /* sense amplifier */
+ // Only present with double-ended bitlines (data_end == 2).
+ if (info->data_end == 2) {
+ Estruct = 0;
+
+ Eatomic = arr->data_amp.e_access * info->eff_data_cols * n_read;
+ Estruct += Eatomic;
+
+ Eavg += Estruct;
+ }
+
+ /* output driver */
+ if (info->outdrv_model) {
+ Estruct = 0;
+ //enable
+ Eatomic = arr->outdrv.e_select * n_read;
+ Estruct += Eatomic;
+ //data
+ /* same switch probability as bitlines */
+ Eatomic = arr->outdrv.e_chg_data * arr->outdrv.item_width * SWITCHING_FACTOR * info->n_item * info->assoc * n_read;
+ Estruct += Eatomic;
+ //output 1
+ /* assume 1 and 0 are uniformly distributed */
+ // Only the more expensive of the two drive directions is charged.
+ if (arr->outdrv.e_out_1 >= arr->outdrv.e_out_0 ) {
+ Eatomic = arr->outdrv.e_out_1 * arr->outdrv.item_width * SWITCHING_FACTOR * n_read;
+ Estruct += Eatomic;
+ }
+ //output 0
+ if (arr->outdrv.e_out_1 < arr->outdrv.e_out_0) {
+ Eatomic = arr->outdrv.e_out_0 * arr->outdrv.item_width * SWITCHING_FACTOR * n_read;
+ Estruct += Eatomic;
+ }
+
+ Eavg += Estruct;
+ }
+
+ /* static power */
+ // Leakage current integrated over one clock period, scaled to joules.
+ Estatic = arr->i_leakage * Vdd * Period * SCALE_S;
+
+ //static energy
+ Eavg += Estatic;
+
+ return Eavg;
+}
+
+//crossbar
+// Crossbar traversal energy for n_data transfers this cycle, clamped to
+// the number of output ports.  Charges input, output and control lines;
+// tree crossbars deeper than one level also charge internal nodes.
+// Assumes 0.5 switching probability on data lines.
+double SIM_crossbar_stat_energy(power_crossbar *crsbar, double n_data)
+{
+ double Eavg = 0, Eatomic, Estatic;
+
+ // At most one transfer per output port can happen in a cycle.
+ if (n_data > crsbar->n_out) {
+ n_data = crsbar->n_out;
+ }
+
+
+ switch (crsbar->model) {
+ case MATRIX_CROSSBAR:
+ case CUT_THRU_CROSSBAR:
+ case MULTREE_CROSSBAR:
+ /* assume 0.5 data switch probability */
+ //input
+ Eatomic = crsbar->e_chg_in * crsbar->data_width * SWITCHING_FACTOR * n_data;
+ Eavg += Eatomic;
+
+ //output
+ Eatomic = crsbar->e_chg_out * crsbar->data_width * SWITCHING_FACTOR * n_data;
+ Eavg += Eatomic;
+
+ //control
+ Eatomic = crsbar->e_chg_ctr * n_data;
+ Eavg += Eatomic;
+
+ if (crsbar->model == MULTREE_CROSSBAR && crsbar->depth > 1) {
+ //internal node
+ Eatomic = crsbar->e_chg_int * crsbar->data_width * (crsbar->depth - 1) * SWITCHING_FACTOR * n_data;
+ Eavg += Eatomic;
+ }
+ break;
+
+ default: break;/* some error handler */
+ }
+
+ return Eavg;
+}
+
+//arbiter
+/* stat over one cycle */
+/* info is only used by queuing arbiter */
+// Per-cycle arbiter energy for n_req requests.  Supports round-robin,
+// matrix and queueing arbiters; |info| is consulted only for the
+// queueing arbiter (its state is a register array).  n_req is clamped
+// to the request width; n_grant models at most one grant per cycle.
+double SIM_arbiter_stat_energy(power_arbiter *arb, power_array_info *info, double n_req)
+{
+ double Eavg = 0, Estruct, Eatomic;
+ double total_pri, n_chg_pri, n_grant;
+
+ /* energy cycle distribution */
+ if (n_req > arb->req_width) {
+ n_req = arb->req_width;
+ }
+ // With fractional request rates, a grant happens once every
+ // ceil(1/n_req) cycles on average.
+ if (n_req >= 1) n_grant = 1;
+ else n_grant = 1.0 / ceil(1.0 / n_req);
+
+ switch (arb->model) {
+ case RR_ARBITER:
+ /* FIXME: we may overestimate request switch */
+ //request
+ Eatomic = arb->e_chg_req * n_req;
+ Eavg += Eatomic;
+
+ //grant
+ Eatomic = arb->e_chg_grant * n_grant;
+ Eavg += Eatomic;
+
+ /* assume carry signal propagates half length in average case */
+ /* carry does not propagate in maximum case, i.e. all carrys go down */
+ //carry
+ Eatomic = arb->e_chg_carry * arb->req_width * SWITCHING_FACTOR * n_grant;
+ Eavg += Eatomic;
+
+ //internal carry
+ Eatomic = arb->e_chg_carry_in * (arb->req_width * SWITCHING_FACTOR - 1) * n_grant;
+ Eavg += Eatomic;
+
+ /* priority registers */
+ Estruct = 0;
+ //priority
+
+ // A grant rotates the priority: exactly two flip-flops toggle.
+ //switch
+ Eatomic = arb->pri_ff.e_switch * 2 * n_grant;
+ Estruct += Eatomic;
+
+ //keep 0
+ Eatomic = arb->pri_ff.e_keep_0 * (arb->req_width - 2 * n_grant);
+ Estruct += Eatomic;
+
+ //clock
+ Eatomic = arb->pri_ff.e_clock * arb->req_width;
+ Estruct += Eatomic;
+
+ Eavg += Estruct;
+ break;
+
+ case MATRIX_ARBITER:
+ // Upper-triangular priority matrix: n*(n-1)/2 bits of state.
+ total_pri = arb->req_width * (arb->req_width - 1) * 0.5;
+ /* assume switch probability 0.5 for priorities */
+ n_chg_pri = (arb->req_width - 1) * SWITCHING_FACTOR;
+
+ /* FIXME: we may overestimate request switch */
+ //request
+ Eatomic = arb->e_chg_req * n_req;
+ Eavg += Eatomic;
+
+ //grant
+ Eatomic = arb->e_chg_grant * n_grant;
+ Eavg += Eatomic;
+
+ /* priority registers */
+ Estruct = 0;
+ //priority
+
+ //switch
+ Eatomic = arb->pri_ff.e_switch * n_chg_pri * n_grant;
+ Estruct += Eatomic;
+
+ /* assume 1 and 0 are uniformly distributed */
+ // Only the more expensive of keep-0/keep-1 is charged.
+ //keep 0
+ if (arb->pri_ff.e_keep_0 >= arb->pri_ff.e_keep_1) {
+ Eatomic = arb->pri_ff.e_keep_0 * (total_pri - n_chg_pri * n_grant) * SWITCHING_FACTOR;
+ Estruct += Eatomic;
+ }
+
+ //keep 1
+ if (arb->pri_ff.e_keep_0 < arb->pri_ff.e_keep_1) {
+ Eatomic = arb->pri_ff.e_keep_1 * (total_pri - n_chg_pri * n_grant) * SWITCHING_FACTOR;
+ Estruct += Eatomic;
+ }
+
+ //clock
+ Eatomic = arb->pri_ff.e_clock * total_pri;
+ Estruct += Eatomic;
+
+ Eavg += Estruct;
+
+ /* based on above assumptions */
+ //internal node
+ /* p(n-1)/2 + (n-1)/2 */
+ Eatomic = arb->e_chg_mint * (n_req + 1) * (arb->req_width - 1) * 0.5;
+ Eavg += Eatomic;
+ break;
+
+ case QUEUE_ARBITER:
+ /* FIXME: what if n_req > 1? */
+ // Queueing arbiter state is a register array: delegate to the
+ // array model with reads = requests, writes = grants.
+ Eavg = SIM_reg_stat_energy(info, &arb->queue, n_req, n_grant);
+ break;
+
+ default: break;/* some error handler */
+ }
+
+
+ return Eavg;
+}
+
+// Link/bus dynamic energy: per-bit switch energy x activity factor
+// (e_link = traversals per cycle) x 0.5 switching probability x width.
+double SIM_bus_stat_energy(power_bus *bus, double e_link)
+{
+ double Ebus;
+ Ebus = bus->e_switch * e_link * SWITCHING_FACTOR * bus->bit_width;
+
+ return (Ebus);
+}
+
+// Offline (post-simulation) router energy from Orion's analytical
+// models, driven by the activity counters accumulated by the
+// performance model.  Returns average energy per cycle; per-component
+// power values are computed alongside for inspection/debugging.
+double Router_d::calculate_offline_power(power_router *router, power_router_info *info)
+{
+  double Eavg = 0;
+  // Zero-initialize every per-component term: previously E_* was read
+  // uninitialized whenever the corresponding model flag in |info| was
+  // disabled (undefined behavior).
+  double P_in_buf = 0, P_xbar = 0, P_vc_in_arb = 0, P_vc_out_arb = 0,
+         P_sw_in_arb = 0, P_sw_out_arb = 0, P_leakage = 0, P_total = 0;
+
+  double E_in_buf = 0, E_xbar = 0, E_vc_in_arb = 0, E_vc_out_arb = 0,
+         E_sw_in_arb = 0, E_sw_out_arb = 0, E_leakage = 0;
+  double e_in_buf_read, e_in_buf_write, e_crossbar, e_vc_local_arb,
+         e_vc_global_arb, e_sw_local_arb, e_sw_global_arb;
+  double sim_cycles;
+
+  sim_cycles = g_eventQueue_ptr->getTime() - m_network_ptr->getRubyStartTime();
+
+  calculate_performance_numbers();
+  // Per-cycle activity factors from the performance simulator's counters.
+  e_in_buf_read = (double )(buf_read_count/sim_cycles);
+  e_in_buf_write = (double )(buf_write_count/sim_cycles);
+  e_crossbar = (double )(crossbar_count/sim_cycles);
+  e_vc_local_arb = (double)(vc_local_arbit_count/sim_cycles);
+  e_vc_global_arb = (double)(vc_global_arbit_count/sim_cycles);
+  e_sw_local_arb = (double )(sw_local_arbit_count/sim_cycles);
+  e_sw_global_arb = (double )(sw_global_arbit_count/sim_cycles);
+  // e_link = (double )(link_traversal_count/sim_cycles);
+
+  /* input buffers */
+  if (info->in_buf) {
+    E_in_buf = SIM_reg_stat_energy(&info->in_buf_info, &router->in_buf, e_in_buf_read, e_in_buf_write);
+    P_in_buf = E_in_buf * PARM_Freq;
+    Eavg += E_in_buf;
+  }
+
+  /* main crossbar */
+  if (info->crossbar_model) {
+    E_xbar = SIM_crossbar_stat_energy(&router->crossbar, e_crossbar);
+    P_xbar = E_xbar * PARM_Freq;
+    Eavg += E_xbar;
+  }
+
+  /* vc input (local) arbiter */
+  if (info->vc_in_arb_model) {
+    // Fixed: this was driven by e_sw_local_arb while e_vc_local_arb was
+    // computed and discarded — the VC arbiters should use the VC
+    // arbitration activity factors.
+    E_vc_in_arb = SIM_arbiter_stat_energy(&router->vc_in_arb, &info->vc_in_arb_queue_info, e_vc_local_arb);
+    P_vc_in_arb = E_vc_in_arb * PARM_Freq;
+    Eavg += E_vc_in_arb;
+  }
+
+  /* vc output (global) arbiter */
+  if (info->vc_out_arb_model) {
+    // Fixed likewise: was e_sw_global_arb.
+    E_vc_out_arb = SIM_arbiter_stat_energy(&router->vc_out_arb, &info->vc_out_arb_queue_info, e_vc_global_arb);
+    P_vc_out_arb = E_vc_out_arb * PARM_Freq;
+    Eavg += E_vc_out_arb;
+  }
+
+  /* sw input (local) arbiter */
+  if (info->sw_in_arb_model) {
+    E_sw_in_arb = SIM_arbiter_stat_energy(&router->sw_in_arb, &info->sw_in_arb_queue_info, e_sw_local_arb);
+    P_sw_in_arb = E_sw_in_arb * PARM_Freq;
+    Eavg += E_sw_in_arb;
+  }
+
+  /* sw output (global) arbiter */
+  if (info->sw_out_arb_model) {
+    E_sw_out_arb = SIM_arbiter_stat_energy(&router->sw_out_arb, &info->sw_out_arb_queue_info, e_sw_global_arb);
+    P_sw_out_arb = E_sw_out_arb * PARM_Freq;
+    Eavg += E_sw_out_arb;
+  }
+
+  /* static power */
+  E_leakage = router->i_leakage * Vdd * Period * SCALE_S;
+  P_leakage = E_leakage * PARM_Freq;
+  Eavg += E_leakage;
+
+  P_total = Eavg * PARM_Freq;
+  // The per-component power values are kept for debugging; silence
+  // unused-variable warnings.
+  (void) P_in_buf; (void) P_xbar; (void) P_vc_in_arb; (void) P_vc_out_arb;
+  (void) P_sw_in_arb; (void) P_sw_out_arb; (void) P_leakage; (void) P_total;
+
+  return Eavg;
+}
+
+// Average link power: activity factor = flit traversals per simulated
+// cycle, fed to the Orion bus energy model and scaled by frequency.
+double NetworkLink_d::calculate_offline_power(power_bus* bus)
+{
+ double sim_cycles = (double) (g_eventQueue_ptr->getTime() - m_net_ptr->getRubyStartTime());
+ double e_link = (double) (m_link_utilized)/ sim_cycles;
+ double E_link = SIM_bus_stat_energy(bus, e_link);
+ double P_link = E_link * PARM_Freq;
+ return P_link;
+}
+
+// Build a generic bus model sized to the flit width and link length,
+// then report this link's offline power.
+double NetworkLink_d::calculate_power()
+{
+ power_bus bus;
+ power_bus_init(&bus, GENERIC_BUS, IDENT_ENC, PARM_flit_width, 0, 1, 1, PARM_link_length, 0);
+ double total_power = calculate_offline_power(&bus);
+ return total_power;
+}
+
+// Populate the Orion router-info struct from this router's configuration
+// (port counts, flit width, VCs per vnet, number of vnets).
+// NOTE(review): the |router| argument is not touched here; it is filled
+// in afterwards by power_router_init() — presumably kept in the
+// signature for symmetry.
+void Router_d::power_router_initialize(power_router *router, power_router_info *info)
+{
+ info->n_in = m_input_unit.size();
+ info->n_out = m_output_unit.size();
+ info->flit_width = PARM_flit_width;
+
+ info->n_v_channel = m_num_vcs;
+ info->n_v_class = m_virtual_networks;
+
+}
+
+// Convenience wrapper: set up the Orion structures for this router,
+// compute per-cycle energy offline, and convert to power (energy x
+// frequency).
+double Router_d::calculate_power()
+{
+ power_router router;
+ power_router_info router_info;
+ double total_energy, total_power;
+
+ power_router_initialize(&router, &router_info);
+ power_router_init(&router, &router_info);
+
+ total_energy = calculate_offline_power(&router, &router_info);
+ total_power = total_energy * PARM_Freq;
+ return total_power;
+}
diff --git a/src/mem/ruby/network/orion/NetworkPower.hh b/src/mem/ruby/network/orion/NetworkPower.hh
new file mode 100644
index 000000000..560d58376
--- /dev/null
+++ b/src/mem/ruby/network/orion/NetworkPower.hh
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NETWORK_POWER_H
+#define _NETWORK_POWER_H
+
+#endif
diff --git a/src/mem/ruby/network/orion/SIM_port.hh b/src/mem/ruby/network/orion/SIM_port.hh
new file mode 100644
index 000000000..96d26daad
--- /dev/null
+++ b/src/mem/ruby/network/orion/SIM_port.hh
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SIM_PORT_H
+#define _SIM_PORT_H
+
+#define PARM_POWER_STATS 1
+
+/* RF module parameters */
+#define PARM_read_port 1
+#define PARM_write_port 1
+#define PARM_n_regs 64
+#define PARM_reg_width 32
+
+#define PARM_ndwl 1
+#define PARM_ndbl 1
+#define PARM_nspd 1
+
+//Niket
+
+#define PARM_vc_in_arb_model QUEUE_ARBITER
+#define PARM_vc_out_arb_model QUEUE_ARBITER
+#define PARM_vc_in_arb_ff_model NEG_DFF
+#define PARM_vc_out_arb_ff_model NEG_DFF
+#define PARM_sw_in_arb_model QUEUE_ARBITER
+#define PARM_sw_out_arb_model QUEUE_ARBITER
+#define PARM_sw_in_arb_ff_model NEG_DFF
+#define PARM_sw_out_arb_ff_model NEG_DFF
+#define PARM_VC_per_MC 4
+
+//Niket
+
+//#define PARM_wordline_model CACHE_RW_WORDLINE
+//#define PARM_bitline_model RW_BITLINE
+//#define PARM_mem_model NORMAL_MEM
+//#define PARM_row_dec_model SIM_NO_MODEL
+//#define PARM_row_dec_pre_model SIM_NO_MODEL
+//#define PARM_col_dec_model SIM_NO_MODEL
+//#define PARM_col_dec_pre_model SIM_NO_MODEL
+//#define PARM_mux_model SIM_NO_MODEL
+//#define PARM_outdrv_model SIM_NO_MODEL
+
+/* these 3 should be changed together */
+//#define PARM_data_end 2
+//#define PARM_amp_model GENERIC_AMP
+//#define PARM_bitline_pre_model EQU_BITLINE
+//#define PARM_data_end 1
+//#define PARM_amp_model SIM_NO_MODEL
+//#define PARM_bitline_pre_model SINGLE_OTHER
+
+
+/* router module parameters */
+/* general parameters */
+#define PARM_in_port 9
+#define PARM_cache_in_port 0 /* # of cache input ports */
+#define PARM_mc_in_port 0 /* # of memory controller input ports */
+#define PARM_io_in_port 0 /* # of I/O device input ports */
+#define PARM_out_port 9
+#define PARM_cache_out_port 0 /* # of cache output ports */
+#define PARM_mc_out_port 0 /* # of memory controller output ports */
+#define PARM_io_out_port 0 /* # of I/O device output ports */
+#define PARM_flit_width 128 /* flit width in bits */
+
+/* virtual channel parameters */
+#define PARM_v_channel 1 /* # of network port virtual channels */
+#define PARM_v_class 0 /* # of total virtual classes */
+#define PARM_cache_class 0 /* # of cache port virtual classes */
+#define PARM_mc_class 0 /* # of memory controller port virtual classes */
+#define PARM_io_class 0 /* # of I/O device port virtual classes */
+/* ?? */
+#define PARM_in_share_buf 0 /* do input virtual channels physically share buffers? */
+#define PARM_out_share_buf 0 /* do output virtual channels physically share buffers? */
+/* ?? */
+#define PARM_in_share_switch 1 /* do input virtual channels share crossbar input ports? */
+#define PARM_out_share_switch 1 /* do output virtual channels share crossbar output ports? */
+
+/* crossbar parameters */
+#define PARM_crossbar_model MATRIX_CROSSBAR /* crossbar model type */
+#define PARM_crsbar_degree 4 /* crossbar mux degree */
+#define PARM_connect_type TRISTATE_GATE /* crossbar connector type */
+#define PARM_trans_type NP_GATE /* crossbar transmission gate type */
+#define PARM_crossbar_in_len 0 /* crossbar input line length, if known */
+#define PARM_crossbar_out_len 0 /* crossbar output line length, if known */
+#define PARM_xb_in_seg 0
+#define PARM_xb_out_seg 0
+/* HACK HACK HACK */
+#define PARM_exp_xb_model MATRIX_CROSSBAR
+#define PARM_exp_in_seg 2
+#define PARM_exp_out_seg 2
+
+/* input buffer parameters */
+#define PARM_in_buf 1 /* have input buffer? */
+#define PARM_in_buf_set 4
+#define PARM_in_buf_rport 1 /* # of read ports */
+
+#define PARM_cache_in_buf 0
+#define PARM_cache_in_buf_set 0
+#define PARM_cache_in_buf_rport 0
+
+#define PARM_mc_in_buf 0
+#define PARM_mc_in_buf_set 0
+#define PARM_mc_in_buf_rport 0
+
+#define PARM_io_in_buf 0
+#define PARM_io_in_buf_set 0
+#define PARM_io_in_buf_rport 0
+
+/* output buffer parameters */
+#define PARM_out_buf 0
+#define PARM_out_buf_set 4
+#define PARM_out_buf_wport 1
+
+/* central buffer parameters */
+#define PARM_central_buf 0 /* have central buffer? */
+#define PARM_cbuf_set 2560 /* # of rows */
+#define PARM_cbuf_rport 2 /* # of read ports */
+#define PARM_cbuf_wport 2 /* # of write ports */
+#define PARM_cbuf_width 4 /* # of flits in one row */
+#define PARM_pipe_depth 4 /* # of banks */
+
+/* array parameters shared by various buffers */
+#define PARM_wordline_model CACHE_RW_WORDLINE
+#define PARM_bitline_model RW_BITLINE
+#define PARM_mem_model NORMAL_MEM
+#define PARM_row_dec_model GENERIC_DEC
+#define PARM_row_dec_pre_model SINGLE_OTHER
+#define PARM_col_dec_model SIM_NO_MODEL
+#define PARM_col_dec_pre_model SIM_NO_MODEL
+#define PARM_mux_model SIM_NO_MODEL
+#define PARM_outdrv_model REG_OUTDRV
+
+/* these 3 should be changed together */
+/* use double-ended bitline because the array is too large */
+#define PARM_data_end 2
+#define PARM_amp_model GENERIC_AMP
+#define PARM_bitline_pre_model EQU_BITLINE
+//#define PARM_data_end 1
+//#define PARM_amp_model SIM_NO_MODEL
+//#define PARM_bitline_pre_model SINGLE_OTHER
+
+/* arbiter parameters */
+#define PARM_in_arb_model MATRIX_ARBITER /* input side arbiter model type */
+#define PARM_in_arb_ff_model NEG_DFF /* input side arbiter flip-flop model type */
+#define PARM_out_arb_model MATRIX_ARBITER /* output side arbiter model type */
+#define PARM_out_arb_ff_model NEG_DFF /* output side arbiter flip-flop model type */
+
+#endif /* _SIM_PORT_H */
diff --git a/src/mem/ruby/network/orion/SIM_power.hh b/src/mem/ruby/network/orion/SIM_power.hh
new file mode 100644
index 000000000..1f0ddd36c
--- /dev/null
+++ b/src/mem/ruby/network/orion/SIM_power.hh
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SIM_POWER_H
+#define _SIM_POWER_H
+
+#include <sys/types.h>
+#include "SIM_power_test.hh"
+
+#define SIM_NO_MODEL 0
+
+#define MAX_ENERGY 1
+#define AVG_ENERGY 0
+#ifndef MAX
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#endif
+#ifndef MIN
+#define MIN(a,b) (((a)>(b))?(b):(a))
+#endif
+
+#define NEXT_DEPTH(d) ((d) > 0 ? (d) - 1 : (d))
+
+#define BIGNUM 1e30
+#define BIGONE ((LIB_Type_max_uint)1)
+#define BIGNONE ((LIB_Type_max_uint)-1)
+#define HAMM_MASK(w) ((unsigned int)(w) < (sizeof(LIB_Type_max_uint) << 3) ? (BIGONE << (w)) - 1 : BIGNONE) /* mask of the w low-order bits (all-ones if w >= word width); (w) parenthesized so expression args, e.g. HAMM_MASK(a+b), are cast and shifted as a whole */
+
+/* Used to communicate with the horowitz model */
+#define RISE 1
+#define FALL 0
+#define NCH 1
+#define PCH 0
+
+/*
+ * Cache layout parameters and process parameters
+ * Thanks to Glenn Reinman for the technology scaling factors
+ */
+#if ( PARM(TECH_POINT) == 10 )
+#define CSCALE (84.2172) /* wire capacitance scaling factor */
+ /* linear: 51.7172, predicted: 84.2172 */
+#define RSCALE (80.0000) /* wire resistance scaling factor */
+#define LSCALE 0.1250 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.38 /* voltage scaling factor */
+#define VTSCALE 0.49 /* threshold voltage scaling factor */
+#define SSCALE 0.80 /* sense voltage scaling factor */
+/* FIXME: borrowed from 0.11u technology */
+#define MCSCALE 5.2277 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 3 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.5 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (1/PARM(GEN_POWER_FACTOR))
+/* copied from TECH_POINT 10 except LSCALE */
+#elif ( PARM(TECH_POINT) == 11 )
+#define CSCALE (84.2172) /* wire capacitance scaling factor */
+#define RSCALE (80.0000) /* wire resistance scaling factor */
+#define LSCALE 0.1375 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.38 /* voltage scaling factor */
+#define VTSCALE 0.49 /* threshold voltage scaling factor */
+#define SSCALE 0.80 /* sense voltage scaling factor */
+#define MCSCALE 5.2277 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 3 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.5 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (1/PARM(GEN_POWER_FACTOR))
+#elif ( PARM(TECH_POINT) == 18 )
+#define CSCALE (19.7172) /* wire capacitance scaling factor */
+#define RSCALE (20.0000) /* wire resistance scaling factor */
+#define LSCALE 0.2250 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.4 /* voltage scaling factor */
+#define VTSCALE 0.5046 /* threshold voltage scaling factor */
+#define SSCALE 0.85 /* sense voltage scaling factor */
+#define MCSCALE 4.1250 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 2.4444 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.2 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE 1
+#elif ( PARM(TECH_POINT) == 25 )
+#define CSCALE (10.2197) /* wire capacitance scaling factor */
+#define RSCALE (10.2571) /* wire resistance scaling factor */
+#define LSCALE 0.3571 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.45 /* voltage scaling factor */
+#define VTSCALE 0.5596 /* threshold voltage scaling factor */
+#define SSCALE 0.90 /* sense voltage scaling factor */
+#define MCSCALE 1.0 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 1.0 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.0 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE PARM(GEN_POWER_FACTOR)
+#elif ( PARM(TECH_POINT) == 35 )
+#define CSCALE (5.2197) /* wire capacitance scaling factor */
+#define RSCALE (5.2571) /* wire resistance scaling factor */
+#define LSCALE 0.4375 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.5 /* voltage scaling factor */
+#define VTSCALE 0.6147 /* threshold voltage scaling factor */
+#define SSCALE 0.95 /* sense voltage scaling factor */
+#define MCSCALE 1.0 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 1.0 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.0 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (PARM(GEN_POWER_FACTOR)*PARM(GEN_POWER_FACTOR))
+#elif ( PARM(TECH_POINT) == 40 )
+#define CSCALE 1.0 /* wire capacitance scaling factor */
+#define RSCALE 1.0 /* wire resistance scaling factor */
+#define LSCALE 0.5 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 1.0 /* voltage scaling factor */
+#define VTSCALE 1.0 /* threshold voltage scaling factor */
+#define SSCALE 1.0 /* sense voltage scaling factor */
+#define MCSCALE 1.0 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 1.0 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.0 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (PARM(GEN_POWER_FACTOR)*PARM(GEN_POWER_FACTOR)*PARM(GEN_POWER_FACTOR))
+#else /* ( PARM(TECH_POINT) == 80 ) */
+#define CSCALE 1.0 /* wire capacitance scaling factor */
+#define RSCALE 1.0 /* wire resistance scaling factor */
+#define LSCALE 1.0 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 1.0 /* voltage scaling factor */
+#define VTSCALE 1.0 /* threshold voltage scaling factor */
+#define SSCALE 1.0 /* sense voltage scaling factor */
+#define MCSCALE 1.0 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 1.0 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.0 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (PARM(GEN_POWER_FACTOR)*PARM(GEN_POWER_FACTOR)*PARM(GEN_POWER_FACTOR)*PARM(GEN_POWER_FACTOR))
+#endif
+
+#define MSCALE (LSCALE * .624 / .2250)
+
+/*
+ * CMOS 0.8um model parameters
+ * - from Appendix II of Cacti tech report
+ */
+/* corresponds to 8um of m3 @ 225ff/um */
+#define Cwordmetal (1.8e-15 * (CSCALE * ASCALE) * SCALE_M)
+
+/* corresponds to 16um of m2 @ 275ff/um */
+#define Cbitmetal (4.4e-15 * (CSCALE * ASCALE) * SCALE_M)
+
+/* corresponds to 1um of m2 @ 275ff/um */
+#define Cmetal (Cbitmetal/16)
+#define CM2metal (Cbitmetal/16)
+#define CM3metal (Cbitmetal/16)
+
+/* minimal spacing metal cap per unit length */
+#define CCmetal (Cmetal * MCSCALE)
+#define CCM2metal (CM2metal * MCSCALE)
+#define CCM3metal (CM3metal * MCSCALE)
+/* 2x minimal spacing metal cap per unit length */
+#define CC2metal (Cmetal * MCSCALE2)
+#define CC2M2metal (CM2metal * MCSCALE2)
+#define CC2M3metal (CM3metal * MCSCALE2)
+/* 3x minimal spacing metal cap per unit length */
+#define CC3metal (Cmetal * MCSCALE3)
+#define CC3M2metal (CM2metal * MCSCALE3)
+#define CC3M3metal (CM3metal * MCSCALE3)
+
+/* um */
+#define Leff (0.8 * LSCALE)
+/* length unit in um */
+#define Lamda (Leff * 0.5)
+
+/* fF/um */
+#define Cpolywire (0.25e-15 * CSCALE * LSCALE)
+
+/* ohms*um of channel width */
+#define Rnchannelstatic (25800 * LSCALE)
+
+/* ohms*um of channel width */
+#define Rpchannelstatic (61200 * LSCALE)
+
+#define Rnchannelon (9723 * LSCALE)
+
+#define Rpchannelon (22400 * LSCALE)
+
+/* corresponds to 16um of m2 @ 48mO/sq */
+#define Rbitmetal (0.320 * (RSCALE * ASCALE))
+
+/* corresponds to 8um of m3 @ 24mO/sq */
+#define Rwordmetal (0.080 * (RSCALE * ASCALE))
+
+#ifndef Vdd
+#define Vdd (5 * VSCALE)
+#endif /* Vdd */
+
+/* other stuff (from tech report, appendix 1) */
+#define Period ((double)1/(double)PARM(Freq))
+
+#define krise (0.4e-9 * LSCALE)
+#define tsensedata (5.8e-10 * LSCALE)
+#define tsensetag (2.6e-10 * LSCALE)
+#define tfalldata (7e-10 * LSCALE)
+#define tfalltag (7e-10 * LSCALE)
+#define Vbitpre (3.3 * SSCALE)
+#define Vt (1.09 * VTSCALE)
+#define Vbitsense (0.10 * SSCALE)
+
+#define Powerfactor ((PARM(Freq))*Vdd*Vdd) /* dynamic-power scale f*Vdd^2; outer parens added so e.g. x/Powerfactor groups correctly */
+#define EnergyFactor (Vdd*Vdd)
+
+#define SensePowerfactor3 ((PARM(Freq))*(Vbitsense)*(Vbitsense)) /* outer parens added, consistent with EnergyFactor above */
+#define SensePowerfactor2 ((PARM(Freq))*(Vbitpre-Vbitsense)*(Vbitpre-Vbitsense))
+#define SensePowerfactor ((PARM(Freq))*Vdd*(Vdd/2))
+#define SenseEnergyFactor (Vdd*Vdd/2)
+
+/* transistor widths in um (as described in tech report, appendix 1) */
+#define Wdecdrivep (57.0 * LSCALE)
+#define Wdecdriven (40.0 * LSCALE)
+#define Wdec3to8n (14.4 * LSCALE)
+#define Wdec3to8p (14.4 * LSCALE)
+#define WdecNORn (5.4 * LSCALE)
+#define WdecNORp (30.5 * LSCALE)
+#define Wdecinvn (5.0 * LSCALE)
+#define Wdecinvp (10.0 * LSCALE)
+
+#define Wworddrivemax (100.0 * LSCALE)
+#define Wmemcella (2.4 * LSCALE)
+#define Wmemcellr (4.0 * LSCALE)
+#define Wmemcellw (2.1 * LSCALE)
+#define Wmemcellbscale 2 /* means 2x bigger than Wmemcella */
+#define Wbitpreequ (10.0 * LSCALE)
+
+#define Wbitmuxn (10.0 * LSCALE)
+#define WsenseQ1to4 (4.0 * LSCALE)
+#define Wcompinvp1 (10.0 * LSCALE)
+#define Wcompinvn1 (6.0 * LSCALE)
+#define Wcompinvp2 (20.0 * LSCALE)
+#define Wcompinvn2 (12.0 * LSCALE)
+#define Wcompinvp3 (40.0 * LSCALE)
+#define Wcompinvn3 (24.0 * LSCALE)
+#define Wevalinvp (20.0 * LSCALE)
+#define Wevalinvn (80.0 * LSCALE)
+
+#define Wcompn (20.0 * LSCALE)
+#define Wcompp (30.0 * LSCALE)
+#define Wcomppreequ (40.0 * LSCALE)
+#define Wmuxdrv12n (30.0 * LSCALE)
+#define Wmuxdrv12p (50.0 * LSCALE)
+#define WmuxdrvNANDn (20.0 * LSCALE)
+#define WmuxdrvNANDp (80.0 * LSCALE)
+#define WmuxdrvNORn (60.0 * LSCALE)
+#define WmuxdrvNORp (80.0 * LSCALE)
+#define Wmuxdrv3n (200.0 * LSCALE)
+#define Wmuxdrv3p (480.0 * LSCALE)
+#define Woutdrvseln (12.0 * LSCALE)
+#define Woutdrvselp (20.0 * LSCALE)
+#define Woutdrvnandn (24.0 * LSCALE)
+#define Woutdrvnandp (10.0 * LSCALE)
+#define Woutdrvnorn (6.0 * LSCALE)
+#define Woutdrvnorp (40.0 * LSCALE)
+#define Woutdrivern (48.0 * LSCALE)
+#define Woutdriverp (80.0 * LSCALE)
+#define Wbusdrvn (48.0 * LSCALE)
+#define Wbusdrvp (80.0 * LSCALE)
+
+#define Wcompcellpd2 (2.4 * LSCALE)
+#define Wcompdrivern (400.0 * LSCALE)
+#define Wcompdriverp (800.0 * LSCALE)
+#define Wcomparen2 (40.0 * LSCALE)
+#define Wcomparen1 (20.0 * LSCALE)
+#define Wmatchpchg (10.0 * LSCALE)
+#define Wmatchinvn (10.0 * LSCALE)
+#define Wmatchinvp (20.0 * LSCALE)
+#define Wmatchnandn (20.0 * LSCALE)
+#define Wmatchnandp (10.0 * LSCALE)
+#define Wmatchnorn (20.0 * LSCALE)
+#define Wmatchnorp (10.0 * LSCALE)
+
+#define WSelORn (10.0 * LSCALE)
+#define WSelORprequ (40.0 * LSCALE)
+#define WSelPn (10.0 * LSCALE)
+#define WSelPp (15.0 * LSCALE)
+#define WSelEnn (5.0 * LSCALE)
+#define WSelEnp (10.0 * LSCALE)
+
+#define Wsenseextdrv1p (40.0*LSCALE)
+#define Wsenseextdrv1n (24.0*LSCALE)
+#define Wsenseextdrv2p (200.0*LSCALE)
+#define Wsenseextdrv2n (120.0*LSCALE)
+
+/* bit width of RAM cell in um */
+#define BitWidth (16.0 * LSCALE)
+
+/* bit height of RAM cell in um */
+#define BitHeight (16.0 * LSCALE)
+
+#define Cout (0.5e-12 * LSCALE)
+
+/* Sizing of cells and spacings */
+#define RatCellHeight (40.0 * LSCALE)
+#define RatCellWidth (70.0 * LSCALE)
+#define RatShiftRegWidth (120.0 * LSCALE)
+#define RatNumShift 4
+#define BitlineSpacing (6.0 * LSCALE)
+#define WordlineSpacing (6.0 * LSCALE)
+
+#define RegCellHeight (16.0 * LSCALE)
+#define RegCellWidth (8.0 * LSCALE)
+
+#define CamCellHeight (40.0 * LSCALE)
+#define CamCellWidth (25.0 * LSCALE)
+#define MatchlineSpacing (6.0 * LSCALE)
+#define TaglineSpacing (6.0 * LSCALE)
+
+#define CrsbarCellHeight (6.0 * LSCALE)
+#define CrsbarCellWidth (6.0 * LSCALE)
+
+/*===================================================================*/
+
+/* ALU POWER NUMBERS for .18um 733Mhz */
+/* normalize .18um cap to other gen's cap, then xPowerfactor */
+#define POWER_SCALE (GEN_POWER_SCALE * PARM(NORMALIZE_SCALE) * Powerfactor)
+#define I_ADD ((.37 - .091)*POWER_SCALE)
+#define I_ADD32 (((.37 - .091)/2)*POWER_SCALE)
+#define I_MULT16 ((.31-.095)*POWER_SCALE)
+#define I_SHIFT ((.21-.089)*POWER_SCALE)
+#define I_LOGIC ((.04-.015)*POWER_SCALE)
+#define F_ADD ((1.307-.452)*POWER_SCALE)
+#define F_MULT ((1.307-.452)*POWER_SCALE)
+
+#define I_ADD_CLOCK (.091*POWER_SCALE)
+#define I_MULT_CLOCK (.095*POWER_SCALE)
+#define I_SHIFT_CLOCK (.089*POWER_SCALE)
+#define I_LOGIC_CLOCK (.015*POWER_SCALE)
+#define F_ADD_CLOCK (.452*POWER_SCALE)
+#define F_MULT_CLOCK (.452*POWER_SCALE)
+
+/*
+
+ transmission gate type
+typedef enum {
+ N_GATE,
+ NP_GATE
+} SIM_power_trans_t;
+
+*/
+/* some utility routines */
+extern unsigned int SIM_power_logtwo(LIB_Type_max_uint x);
+//extern int SIM_power_squarify(int rows, int cols);
+extern double SIM_power_driver_size(double driving_cap, double desiredrisetime);
+
+/* functions from cacti */
+extern double SIM_power_gatecap(double width, double wirelength);
+extern double SIM_power_gatecappass(double width, double wirelength);
+extern double SIM_power_draincap(double width, int nchannel, int stack);
+extern double SIM_power_restowidth(double res, int nchannel);
+
+extern int SIM_power_init(void);
+
+extern unsigned int SIM_power_Hamming(LIB_Type_max_uint old_val, LIB_Type_max_uint new_val, LIB_Type_max_uint mask);
+extern unsigned int SIM_power_Hamming_group(LIB_Type_max_uint d1_new, LIB_Type_max_uint d1_old, LIB_Type_max_uint d2_new, LIB_Type_max_uint d2_old, u_int width, u_int n_grp);
+
+/* statistical functions */
+//extern int SIM_print_stat_energy(char *path, double Energy, int print_flag);
+//extern u_int SIM_power_strlen(char *s);
+//extern char *SIM_power_strcat(char *dest, char *src);
+//extern int SIM_power_res_path(char *path, u_int id);
+//extern int SIM_power_dump_tech_para(void);
+
+#endif /* _SIM_POWER_H */
diff --git a/src/mem/ruby/network/orion/SIM_power_test.hh b/src/mem/ruby/network/orion/SIM_power_test.hh
new file mode 100644
index 000000000..95b304042
--- /dev/null
+++ b/src/mem/ruby/network/orion/SIM_power_test.hh
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* all needed to perform computation out of Liberty */
+#ifndef _SIM_POWER_TEST_H
+#define _SIM_POWER_TEST_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#define LIB_Type_max_uint unsigned long int
+#define LIB_Type_max_int long int
+
+#define __INSTANCE__ mainpe__power
+#define GLOBDEF(t,n) t mainpe__power___ ## n
+#define GLOB(n) mainpe__power___ ## n
+#define FUNC(n, args...) mainpe__power___ ## n (args)
+#define FUNCPTR(n) mainpe__power___ ## n
+#define PARM(x) PARM_ ## x
+
+#undef PARM_AF
+#undef PARM_MAXN
+#undef PARM_MAXSUBARRAYS
+#undef PARM_MAXSPD
+#undef PARM_VTHSENSEEXTDRV
+#undef PARM_VTHOUTDRNOR
+#undef PARM_res_fpalu
+#undef PARM_VTHCOMPINV
+#undef PARM_MD_NUM_IREGS
+#undef PARM_die_length
+#undef PARM_BITOUT
+#undef PARM_Cndiffside
+#undef PARM_ruu_decode_width
+#undef PARM_ruu_issue_width
+#undef PARM_amp_Idsat
+#undef PARM_AF_TYPE
+#undef PARM_VSINV
+#undef PARM_Cpdiffovlp
+#undef PARM_data_width
+#undef PARM_Cgatepass
+#undef PARM_Cpdiffarea
+#undef PARM_GEN_POWER_FACTOR
+#undef PARM_res_memport
+#undef PARM_VTHNAND60x90
+#undef PARM_Cpdiffside
+#undef PARM_Cpoxideovlp
+#undef PARM_opcode_length
+#undef PARM_MD_NUM_FREGS
+#undef PARM_FUDGEFACTOR
+#undef PARM_ruu_commit_width
+#undef PARM_Cndiffovlp
+#undef PARM_VTHOUTDRIVE
+#undef PARM_Cndiffarea
+#undef PARM_VTHMUXDRV1
+#undef PARM_inst_length
+#undef PARM_VTHMUXDRV2
+#undef PARM_NORMALIZE_SCALE
+#undef PARM_ras_size
+#undef PARM_VTHMUXDRV3
+#undef PARM_ADDRESS_BITS
+#undef PARM_RUU_size
+#undef PARM_Cgate
+#undef PARM_VTHNOR12x4x1
+#undef PARM_VTHNOR12x4x2
+#undef PARM_VTHOUTDRINV
+#undef PARM_VTHNOR12x4x3
+#undef PARM_VTHEVALINV
+#undef PARM_crossover_scaling
+#undef PARM_VTHNOR12x4x4
+#undef PARM_turnoff_factor
+#undef PARM_res_ialu
+#undef PARM_Cnoxideovlp
+#undef PARM_VTHOUTDRNAND
+#undef PARM_VTHINV100x60
+#undef PARM_LSQ_size
+
+#ifndef PARM_AF
+#define PARM_AF (5.000000e-01)
+#endif /* PARM_AF */
+#ifndef PARM_MAXN
+#define PARM_MAXN (8)
+#endif /* PARM_MAXN */
+#ifndef PARM_MAXSUBARRAYS
+#define PARM_MAXSUBARRAYS (8)
+#endif /* PARM_MAXSUBARRAYS */
+#ifndef PARM_MAXSPD
+#define PARM_MAXSPD (8)
+#endif /* PARM_MAXSPD */
+#ifndef PARM_VTHSENSEEXTDRV
+#define PARM_VTHSENSEEXTDRV (4.370000e-01)
+#endif /* PARM_VTHSENSEEXTDRV */
+#ifndef PARM_VTHOUTDRNOR
+#define PARM_VTHOUTDRNOR (4.310000e-01)
+#endif /* PARM_VTHOUTDRNOR */
+#ifndef PARM_res_fpalu
+#define PARM_res_fpalu (4)
+#endif /* PARM_res_fpalu */
+#ifndef PARM_VTHCOMPINV
+#define PARM_VTHCOMPINV (4.370000e-01)
+#endif /* PARM_VTHCOMPINV */
+#ifndef PARM_MD_NUM_IREGS
+#define PARM_MD_NUM_IREGS (32)
+#endif /* PARM_MD_NUM_IREGS */
+#ifndef PARM_die_length
+#define PARM_die_length (1.800000e-02)
+#endif /* PARM_die_length */
+#ifndef PARM_BITOUT
+#define PARM_BITOUT (64)
+#endif /* PARM_BITOUT */
+#ifndef PARM_Cndiffside
+#define PARM_Cndiffside (2.750000e-16)
+#endif /* PARM_Cndiffside */
+#ifndef PARM_ruu_decode_width
+#define PARM_ruu_decode_width (4)
+#endif /* PARM_ruu_decode_width */
+#ifndef PARM_ruu_issue_width
+#define PARM_ruu_issue_width (4)
+#endif /* PARM_ruu_issue_width */
+#ifndef PARM_amp_Idsat
+#define PARM_amp_Idsat (5.000000e-04)
+#endif /* PARM_amp_Idsat */
+#ifndef PARM_AF_TYPE
+#define PARM_AF_TYPE (1)
+#endif /* PARM_AF_TYPE */
+#ifndef PARM_VSINV
+#define PARM_VSINV (4.560000e-01)
+#endif /* PARM_VSINV */
+#ifndef PARM_Cpdiffovlp
+#define PARM_Cpdiffovlp (1.380000e-16)
+#endif /* PARM_Cpdiffovlp */
+#ifndef PARM_Cgatepass
+#define PARM_Cgatepass (1.450000e-15)
+#endif /* PARM_Cgatepass */
+#ifndef PARM_Cpdiffarea
+#define PARM_Cpdiffarea (3.430000e-16)
+#endif /* PARM_Cpdiffarea */
+#ifndef PARM_GEN_POWER_FACTOR
+#define PARM_GEN_POWER_FACTOR (1.310000e+00)
+#endif /* PARM_GEN_POWER_FACTOR */
+#ifndef PARM_res_memport
+#define PARM_res_memport (2)
+#endif /* PARM_res_memport */
+#ifndef PARM_VTHNAND60x90
+#define PARM_VTHNAND60x90 (5.610000e-01)
+#endif /* PARM_VTHNAND60x90 */
+#ifndef PARM_Cpdiffside
+#define PARM_Cpdiffside (2.750000e-16)
+#endif /* PARM_Cpdiffside */
+#ifndef PARM_Cpoxideovlp
+#define PARM_Cpoxideovlp (3.380000e-16)
+#endif /* PARM_Cpoxideovlp */
+#ifndef PARM_opcode_length
+#define PARM_opcode_length (8)
+#endif /* PARM_opcode_length */
+#ifndef PARM_MD_NUM_FREGS
+#define PARM_MD_NUM_FREGS (32)
+#endif /* PARM_MD_NUM_FREGS */
+#ifndef PARM_FUDGEFACTOR
+#define PARM_FUDGEFACTOR (1.000000e+00)
+#endif /* PARM_FUDGEFACTOR */
+#ifndef PARM_ruu_commit_width
+#define PARM_ruu_commit_width (4)
+#endif /* PARM_ruu_commit_width */
+#ifndef PARM_Cndiffovlp
+#define PARM_Cndiffovlp (1.380000e-16)
+#endif /* PARM_Cndiffovlp */
+#ifndef PARM_VTHOUTDRIVE
+#define PARM_VTHOUTDRIVE (4.250000e-01)
+#endif /* PARM_VTHOUTDRIVE */
+#ifndef PARM_Cndiffarea
+#define PARM_Cndiffarea (1.370000e-16)
+#endif /* PARM_Cndiffarea */
+#ifndef PARM_VTHMUXDRV1
+#define PARM_VTHMUXDRV1 (4.370000e-01)
+#endif /* PARM_VTHMUXDRV1 */
+#ifndef PARM_inst_length
+#define PARM_inst_length (32)
+#endif /* PARM_inst_length */
+#ifndef PARM_VTHMUXDRV2
+#define PARM_VTHMUXDRV2 (4.860000e-01)
+#endif /* PARM_VTHMUXDRV2 */
+#ifndef PARM_NORMALIZE_SCALE
+#define PARM_NORMALIZE_SCALE (6.488730e-10)
+#endif /* PARM_NORMALIZE_SCALE */
+#ifndef PARM_ras_size
+#define PARM_ras_size (8)
+#endif /* PARM_ras_size */
+#ifndef PARM_VTHMUXDRV3
+#define PARM_VTHMUXDRV3 (4.370000e-01)
+#endif /* PARM_VTHMUXDRV3 */
+#ifndef PARM_ADDRESS_BITS
+#define PARM_ADDRESS_BITS (64)
+#endif /* PARM_ADDRESS_BITS */
+#ifndef PARM_RUU_size
+#define PARM_RUU_size (16)
+#endif /* PARM_RUU_size */
+#ifndef PARM_Cgate
+#define PARM_Cgate (1.950000e-15)
+#endif /* PARM_Cgate */
+#ifndef PARM_VTHNOR12x4x1
+#define PARM_VTHNOR12x4x1 (5.030000e-01)
+#endif /* PARM_VTHNOR12x4x1 */
+#ifndef PARM_VTHNOR12x4x2
+#define PARM_VTHNOR12x4x2 (4.520000e-01)
+#endif /* PARM_VTHNOR12x4x2 */
+#ifndef PARM_VTHOUTDRINV
+#define PARM_VTHOUTDRINV (4.370000e-01)
+#endif /* PARM_VTHOUTDRINV */
+#ifndef PARM_VTHNOR12x4x3
+#define PARM_VTHNOR12x4x3 (4.170000e-01)
+#endif /* PARM_VTHNOR12x4x3 */
+#ifndef PARM_VTHEVALINV
+#define PARM_VTHEVALINV (2.670000e-01)
+#endif /* PARM_VTHEVALINV */
+#ifndef PARM_crossover_scaling
+#define PARM_crossover_scaling (1.200000e+00)
+#endif /* PARM_crossover_scaling */
+#ifndef PARM_VTHNOR12x4x4
+#define PARM_VTHNOR12x4x4 (3.900000e-01)
+#endif /* PARM_VTHNOR12x4x4 */
+#ifndef PARM_turnoff_factor
+#define PARM_turnoff_factor (1.000000e-01)
+#endif /* PARM_turnoff_factor */
+#ifndef PARM_res_ialu
+#define PARM_res_ialu (4)
+#endif /* PARM_res_ialu */
+#ifndef PARM_Cnoxideovlp
+#define PARM_Cnoxideovlp (2.630000e-16)
+#endif /* PARM_Cnoxideovlp */
+#ifndef PARM_VTHOUTDRNAND
+#define PARM_VTHOUTDRNAND (4.410000e-01)
+#endif /* PARM_VTHOUTDRNAND */
+#ifndef PARM_VTHINV100x60
+#define PARM_VTHINV100x60 (4.380000e-01)
+#endif /* PARM_VTHINV100x60 */
+#ifndef PARM_LSQ_size
+#define PARM_LSQ_size (8)
+#endif /* PARM_LSQ_size */
+
+#define TEST_LENGTH (100)
+/* scaling factors from 0.1u to 0.07u, 0.05u and 0.035u */
+#if (TEST_LENGTH == 70)
+#define SCALE_T (0.5489156157)
+#define SCALE_M (0.6566502462)
+#define SCALE_S (1.4088071075)
+#elif (TEST_LENGTH == 50)
+#define SCALE_T (0.3251012552)
+#define SCALE_M (0.4426460239)
+#define SCALE_S (2.8667111607)
+#elif (TEST_LENGTH == 35)
+#define SCALE_T (0.2016627474)
+#define SCALE_M (0.2489788586)
+#define SCALE_S (8.7726826878)
+#else
+#define SCALE_T (1)
+#define SCALE_M (1)
+#define SCALE_S (1)
+#endif /* TEST_LENGTH */
+
+#endif /* _SIM_POWER_TEST_H */
diff --git a/src/mem/ruby/network/orion/parm_technology.hh b/src/mem/ruby/network/orion/parm_technology.hh
new file mode 100644
index 000000000..87049d4cd
--- /dev/null
+++ b/src/mem/ruby/network/orion/parm_technology.hh
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARM_TECHNOLOGY_H
+#define _PARM_TECHNOLOGY_H
+
+/******* LEAKAGE current values for different tech points are in power_static.c *******/
+/******* for LINKS: metal cap is assumed to be CC2metal and repeater size is assumed to be n_size=Lambda*10, p_size=n_size*2 *******/
+
+#define SWITCHING_FACTOR 0.5 /*used for offline calculations*/
+
+/********************* Technology point **********/
+#define PARM_TECH_POINT 10 /* 100nm */
+#define PARM_Freq 3e9
+/*************************************************/
+
+
+/*********** Transistor parasitics ******************/
+#define PARM_Cndiffside (2.750000e-16)
+#define PARM_Cpdiffovlp (1.380000e-16)
+#define PARM_Cgatepass (1.450000e-15)
+#define PARM_Cpdiffarea (3.430000e-16)
+#define PARM_Cpdiffside (2.750000e-16)
+#define PARM_Cpoxideovlp (3.380000e-16)
+#define PARM_Cndiffovlp (1.380000e-16)
+#define PARM_Cndiffarea (1.370000e-16)
+#define PARM_Cgate (1.950000e-15)
+#define PARM_Cnoxideovlp (2.630000e-16)
+/*************************************************/
+
+
+
+/************* Scaling factors for different technology points ******************/
+#if ( PARM_TECH_POINT == 10 ) /* 100nm */
+#define CSCALE (84.2172) /* wire capacitance scaling factor */
+ /* linear: 51.7172, predicted: 84.2172 */
+#define RSCALE (80.0000) /* wire resistance scaling factor */
+#define LSCALE 0.1250 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.38 /* voltage scaling factor */
+#define VTSCALE 0.49 /* threshold voltage scaling factor */
+#define SSCALE 0.80 /* sense voltage scaling factor */
+/* FIXME: borrowed from 0.11u technology */
+#define MCSCALE 5.2277 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 3 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.5 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (1/PARM_GEN_POWER_FACTOR)
+/* copied from TECH_POINT 10 except LSCALE */
+#elif ( PARM_TECH_POINT == 11 ) /* 110nm */
+#define CSCALE (84.2172) /* wire capacitance scaling factor */
+#define RSCALE (80.0000) /* wire resistance scaling factor */
+#define LSCALE 0.1375 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.38 /* voltage scaling factor */
+#define VTSCALE 0.49 /* threshold voltage scaling factor */
+#define SSCALE 0.80 /* sense voltage scaling factor */
+#define MCSCALE 5.2277 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 3 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.5 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (1/PARM_GEN_POWER_FACTOR)
+#elif ( PARM_TECH_POINT == 18 ) /* 180nm */
+#define CSCALE (19.7172) /* wire capacitance scaling factor */
+#define RSCALE (20.0000) /* wire resistance scaling factor */
+#define LSCALE 0.2250 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.4 /* voltage scaling factor */
+#define VTSCALE 0.5046 /* threshold voltage scaling factor */
+#define SSCALE 0.85 /* sense voltage scaling factor */
+#define MCSCALE 4.1250 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 2.4444 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.2 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE 1
+#elif ( PARM_TECH_POINT == 25 ) /* 250nm */
+#define CSCALE (10.2197) /* wire capacitance scaling factor */
+#define RSCALE (10.2571) /* wire resistance scaling factor */
+#define LSCALE 0.3571 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.45 /* voltage scaling factor */
+#define VTSCALE 0.5596 /* threshold voltage scaling factor */
+#define SSCALE 0.90 /* sense voltage scaling factor */
+#define MCSCALE 1.0 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 1.0 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.0 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE PARM_GEN_POWER_FACTOR
+#elif ( PARM_TECH_POINT == 35 ) /* 350nm */
+#define CSCALE (5.2197) /* wire capacitance scaling factor */
+#define RSCALE (5.2571) /* wire resistance scaling factor */
+#define LSCALE 0.4375 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 0.5 /* voltage scaling factor */
+#define VTSCALE 0.6147 /* threshold voltage scaling factor */
+#define SSCALE 0.95 /* sense voltage scaling factor */
+#define MCSCALE 1.0 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 1.0 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.0 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (PARM_GEN_POWER_FACTOR*PARM_GEN_POWER_FACTOR)
+#elif ( PARM_TECH_POINT == 40 ) /* 400nm */
+#define CSCALE 1.0 /* wire capacitance scaling factor */
+#define RSCALE 1.0 /* wire resistance scaling factor */
+#define LSCALE 0.5 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 1.0 /* voltage scaling factor */
+#define VTSCALE 1.0 /* threshold voltage scaling factor */
+#define SSCALE 1.0 /* sense voltage scaling factor */
+#define MCSCALE 1.0 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 1.0 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.0 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (PARM_GEN_POWER_FACTOR*PARM_GEN_POWER_FACTOR*PARM_GEN_POWER_FACTOR)
+#else /* ( PARM_TECH_POINT == 80 ) */ /* 800nm */
+#define CSCALE 1.0 /* wire capacitance scaling factor */
+#define RSCALE 1.0 /* wire resistance scaling factor */
+#define LSCALE 1.0 /* length (feature) scaling factor */
+#define ASCALE (LSCALE*LSCALE) /* area scaling factor */
+#define VSCALE 1.0 /* voltage scaling factor */
+#define VTSCALE 1.0 /* threshold voltage scaling factor */
+#define SSCALE 1.0 /* sense voltage scaling factor */
+#define MCSCALE 1.0 /* metal coupling capacitance scaling factor */
+#define MCSCALE2 1.0 /* metal coupling capacitance scaling factor (2X) */
+#define MCSCALE3 1.0 /* metal coupling capacitance scaling factor (3X) */
+#define GEN_POWER_SCALE (PARM_GEN_POWER_FACTOR*PARM_GEN_POWER_FACTOR*PARM_GEN_POWER_FACTOR*PARM_GEN_POWER_FACTOR)
+#endif
+
+#define MSCALE (LSCALE * .624 / .2250)
+#define PARM_NORMALIZE_SCALE (6.488730e-10)
+
+/* scaling factors from 0.1u to 0.07u, 0.05u and 0.035u */
+/* if technology point is 0.07u, o.05u or 0.035u set PARM_TECH_POINT to 10 and
+ * set TEST_LENGTH to 70, 50 or 35 respectively(TEST_LENGTH =100 for all other technologies) */
+#define TEST_LENGTH (70)
+
+#if (TEST_LENGTH == 70)
+#define SCALE_T (0.5489156157)
+#define SCALE_M (0.6566502462)
+#define SCALE_S (1.4088071075)
+#elif (TEST_LENGTH == 50)
+#define SCALE_T (0.3251012552)
+#define SCALE_M (0.4426460239)
+#define SCALE_S (2.8667111607)
+#elif (TEST_LENGTH == 35)
+#define SCALE_T (0.2016627474)
+#define SCALE_M (0.2489788586)
+#define SCALE_S (8.7726826878)
+#else
+#define SCALE_T (1)
+#define SCALE_M (1)
+#define SCALE_S (1)
+#endif /* TEST_LENGTH */
+/*************************************************/
+
+
+
+/********************** technology point related ***************************/
+/* um */
+#define Leff (0.8 * LSCALE)
+/* length unit in um */
+#define Lamda (Leff * 0.5)
+
+#ifndef Vdd
+#define Vdd (5 * VSCALE)
+#endif /* Vdd */
+
+#define Period ((double)1/(double)PARM_Freq)
+/*************************************************/
+
+
+/**** for SRAM - decoder, mem cell, wordline, bitline, output driver ****/
+/*
+ * CMOS 0.8um model parameters
+ * - from Appendix II of Cacti tech report
+ */
+/* corresponds to 8um of m3 @ 225ff/um */
+#define Cwordmetal (1.8e-15 * (CSCALE * ASCALE) * SCALE_M)
+
+/* corresponds to 16um of m2 @ 275ff/um */
+#define Cbitmetal (4.4e-15 * (CSCALE * ASCALE) * SCALE_M)
+
+/* corresponds to 1um of m2 @ 275ff/um */
+#define Cmetal (Cbitmetal/16)
+#define CM2metal (Cbitmetal/16)
+#define CM3metal (Cbitmetal/16)
+
+/* minimal spacing metal cap per unit length */
+#define CCmetal (Cmetal * MCSCALE)
+#define CCM2metal (CM2metal * MCSCALE)
+#define CCM3metal (CM3metal * MCSCALE)
+/* 2x minimal spacing metal cap per unit length */
+#define CC2metal (Cmetal * MCSCALE2)
+#define CC2M2metal (CM2metal * MCSCALE2)
+#define CC2M3metal (CM3metal * MCSCALE2)
+/* 3x minimal spacing metal cap per unit length */
+#define CC3metal (Cmetal * MCSCALE3)
+#define CC3M2metal (CM2metal * MCSCALE3)
+#define CC3M3metal (CM3metal * MCSCALE3)
+
+/* fF/um */
+#define Cpolywire (0.25e-15 * CSCALE * LSCALE)
+
+/* ohms*um of channel width */
+#define Rnchannelstatic (25800 * LSCALE)
+
+/* ohms*um of channel width */
+#define Rpchannelstatic (61200 * LSCALE)
+
+#define Rnchannelon (9723 * LSCALE)
+
+#define Rpchannelon (22400 * LSCALE)
+
+/* corresponds to 16um of m2 @ 48mO/sq */
+#define Rbitmetal (0.320 * (RSCALE * ASCALE))
+
+/* corresponds to 8um of m3 @ 24mO/sq */
+#define Rwordmetal (0.080 * (RSCALE * ASCALE))
+
/* NOTE(review): these factor macros were previously defined without an
 * outer set of parentheses, so an expansion like `x / Powerfactor`
 * parsed as `x / (PARM_Freq) * Vdd * Vdd`. Fully parenthesized here;
 * confirm no call site relied on the old (precedence-broken) expansion. */
#define Powerfactor ((PARM_Freq)*Vdd*Vdd)
#define EnergyFactor (Vdd*Vdd)

#define SensePowerfactor3 ((PARM_Freq)*(Vbitsense)*(Vbitsense)) //
#define SensePowerfactor2 ((PARM_Freq)*(Vbitpre-Vbitsense)*(Vbitpre-Vbitsense)) //
#define SensePowerfactor ((PARM_Freq)*Vdd*(Vdd/2)) //
#define SenseEnergyFactor (Vdd*Vdd/2)
+
+/* transistor widths in um (as described in tech report, appendix 1) */
+#define Wdecdrivep (57.0 * LSCALE)
+#define Wdecdriven (40.0 * LSCALE)
+#define Wdec3to8n (14.4 * LSCALE)
+#define Wdec3to8p (14.4 * LSCALE)
+#define WdecNORn (5.4 * LSCALE)
+#define WdecNORp (30.5 * LSCALE)
+#define Wdecinvn (5.0 * LSCALE)
+#define Wdecinvp (10.0 * LSCALE)
+
+#define Wworddrivemax (100.0 * LSCALE)
+#define Wmemcella (2.4 * LSCALE) /* AMIT memory cell inverter PMOS transistor width */
+#define Wmemcellr (4.0 * LSCALE) /* AMIT memory cell read access transistor width */
+#define Wmemcellw (2.1 * LSCALE) /* AMIT memory cell write access transistor width */
+#define Wmemcellbscale 2 /* AMIT (mem cell inverter NMOS trans width = Wmemcella * Wmemcellbscale) means 2x bigger than Wmemcella */
+#define Wbitpreequ (10.0 * LSCALE) //
+
+#define Wbitmuxn (10.0 * LSCALE)
+#define WsenseQ1to4 (4.0 * LSCALE)
+#define Wcompinvp1 (10.0 * LSCALE)
+#define Wcompinvn1 (6.0 * LSCALE)
+#define Wcompinvp2 (20.0 * LSCALE)
+#define Wcompinvn2 (12.0 * LSCALE)
+#define Wcompinvp3 (40.0 * LSCALE)
+#define Wcompinvn3 (24.0 * LSCALE)
+#define Wevalinvp (20.0 * LSCALE)
+#define Wevalinvn (80.0 * LSCALE)
+
+#define Wcompn (20.0 * LSCALE)
+#define Wcompp (30.0 * LSCALE)
+#define Wcomppreequ (40.0 * LSCALE)
+#define Wmuxdrv12n (30.0 * LSCALE)
+#define Wmuxdrv12p (50.0 * LSCALE)
+#define WmuxdrvNANDn (20.0 * LSCALE)
+#define WmuxdrvNANDp (80.0 * LSCALE)
+#define WmuxdrvNORn (60.0 * LSCALE)
+#define WmuxdrvNORp (80.0 * LSCALE)
+#define Wmuxdrv3n (200.0 * LSCALE)
+#define Wmuxdrv3p (480.0 * LSCALE)
+#define Woutdrvseln (12.0 * LSCALE)
+#define Woutdrvselp (20.0 * LSCALE)
+#define Woutdrvnandn (24.0 * LSCALE)
+#define Woutdrvnandp (10.0 * LSCALE)
+#define Woutdrvnorn (6.0 * LSCALE)
+#define Woutdrvnorp (40.0 * LSCALE)
+#define Woutdrivern (48.0 * LSCALE)
+#define Woutdriverp (80.0 * LSCALE)
+#define Wbusdrvn (48.0 * LSCALE)
+#define Wbusdrvp (80.0 * LSCALE)
+
+#define Wcompcellpd2 (2.4 * LSCALE) //
+#define Wcompdrivern (400.0 * LSCALE)
+#define Wcompdriverp (800.0 * LSCALE)
+#define Wcomparen2 (40.0 * LSCALE)
+#define Wcomparen1 (20.0 * LSCALE)
+#define Wmatchpchg (10.0 * LSCALE)
+#define Wmatchinvn (10.0 * LSCALE)
+#define Wmatchinvp (20.0 * LSCALE)
+#define Wmatchnandn (20.0 * LSCALE)
+#define Wmatchnandp (10.0 * LSCALE)
+#define Wmatchnorn (20.0 * LSCALE)
+#define Wmatchnorp (10.0 * LSCALE)
+
+#define WSelORn (10.0 * LSCALE) //
+#define WSelORprequ (40.0 * LSCALE) //
+#define WSelPn (10.0 * LSCALE) //
+#define WSelPp (15.0 * LSCALE) //
+#define WSelEnn (5.0 * LSCALE) //
+#define WSelEnp (10.0 * LSCALE) //
+
+#define Wsenseextdrv1p (40.0*LSCALE) //
+#define Wsenseextdrv1n (24.0*LSCALE) //
+#define Wsenseextdrv2p (200.0*LSCALE) //
+#define Wsenseextdrv2n (120.0*LSCALE) //
+
+/* bit width of RAM cell in um */
+#define BitWidth (16.0 * LSCALE)
+
+/* bit height of RAM cell in um */
+#define BitHeight (16.0 * LSCALE)
+
+#define Cout (0.5e-12 * LSCALE)
+
+/* Sizing of cells and spacings */
+#define RatCellHeight (40.0 * LSCALE) //
+#define RatCellWidth (70.0 * LSCALE) //
+#define RatShiftRegWidth (120.0 * LSCALE) //
+#define RatNumShift 4 //
+#define BitlineSpacing (6.0 * LSCALE)
+#define WordlineSpacing (6.0 * LSCALE)
+
+#define RegCellHeight (16.0 * LSCALE) /* AMIT memory cell height */
+#define RegCellWidth (8.0 * LSCALE) /* AMIT memory cell width */
+
+#define CamCellHeight (40.0 * LSCALE)
+#define CamCellWidth (25.0 * LSCALE)
+#define MatchlineSpacing (6.0 * LSCALE)
+#define TaglineSpacing (6.0 * LSCALE)
+
+#define CrsbarCellHeight (6.0 * LSCALE)
+#define CrsbarCellWidth (6.0 * LSCALE)
+
+/* Link length in um */
+#define PARM_link_length 1000
+
+
+/*************************************************/
+
+/******* LEAKAGE current values for different tech points are in power_static.c *******/
+/******* for LINKS: metal cap is assumed to be CC2metal and repeater size is assumed to be n_size=Lambda*10, p_size=n_size*2 *******/
+
/* -------------------------Miscellaneous---------------------------- */

#define SIM_NO_MODEL 0

#define MAX_ENERGY 1
#define AVG_ENERGY 0

#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif
#ifndef MIN
#define MIN(a,b) (((a)>(b))?(b):(a))
#endif

/* depth remaining after descending one level; saturates at 0 */
#define NEXT_DEPTH(d) ((d) > 0 ? (d) - 1 : (d))

#define BIGNUM 1e30
#define BIGONE ((unsigned long int)1)
#define BIGNONE ((unsigned long int)-1)
/* all-ones mask of width w (w in bits); yields BIGNONE when w covers
 * the full word, avoiding the undefined full-width shift.
 * FIX: argument `w` is now parenthesized so compound expressions like
 * HAMM_MASK(a & 0xff) expand with the intended precedence. */
#define HAMM_MASK(w) ((unsigned)(w) < (sizeof(unsigned long int) << 3) ? (BIGONE << (w)) - 1 : BIGNONE)

/* Used to communicate with the horowitz model */
#define RISE 1
#define FALL 0
#define NCH 1
#define PCH 0
/* --------------------------------------------------- */
+
+
+
+/* ---------------------- not used ------------------------------- */
+
+#define krise (0.4e-9 * LSCALE) //
+#define tsensedata (5.8e-10 * LSCALE) //
+#define tsensetag (2.6e-10 * LSCALE) //
+#define tfalldata (7e-10 * LSCALE) //
+#define tfalltag (7e-10 * LSCALE) //
+#define Vbitpre (3.3 * SSCALE) //
+#define Vt (1.09 * VTSCALE)
+#define Vbitsense (0.10 * SSCALE) //
+
+
+#define PARM_AF (5.000000e-01) //
+#define PARM_MAXN (8) //
+#define PARM_MAXSUBARRAYS (8) //
+#define PARM_MAXSPD (8) //
+#define PARM_VTHSENSEEXTDRV (4.370000e-01) //
+#define PARM_VTHOUTDRNOR (4.310000e-01) //
+#define PARM_res_fpalu (4) //
+#define PARM_VTHCOMPINV (4.370000e-01) //
+#define PARM_MD_NUM_IREGS (32) //
+#define PARM_die_length (1.800000e-02) //
+#define PARM_BITOUT (64) //
+
+#define PARM_ruu_decode_width (4) //
+#define PARM_ruu_issue_width (4) // AMIT used only for result bus
+#define PARM_amp_Idsat (5.000000e-04) //used in amp_energy
+#define PARM_AF_TYPE (1) //
+#define PARM_VSINV (4.560000e-01) //used in driver size calculations
+
+#define PARM_GEN_POWER_FACTOR (1.310000e+00) //
+#define PARM_res_memport (2) //
+#define PARM_VTHNAND60x90 (5.610000e-01) //
+
+#define PARM_opcode_length (8) //
+#define PARM_MD_NUM_FREGS (32) //
+#define PARM_FUDGEFACTOR (1.000000e+00) //
+#define PARM_ruu_commit_width (4) //
+
+#define PARM_VTHOUTDRIVE (4.250000e-01) //
+
+#define PARM_VTHMUXDRV1 (4.370000e-01) //
+#define PARM_inst_length (32) //
+#define PARM_VTHMUXDRV2 (4.860000e-01) //
+
+#define PARM_ras_size (8) //
+#define PARM_VTHMUXDRV3 (4.370000e-01) //
+#define PARM_ADDRESS_BITS (64) //
+#define PARM_RUU_size (16) //
+
+#define PARM_VTHNOR12x4x1 (5.030000e-01) //
+#define PARM_VTHNOR12x4x2 (4.520000e-01) //
+#define PARM_VTHOUTDRINV (4.370000e-01) //
+#define PARM_VTHNOR12x4x3 (4.170000e-01) //
+#define PARM_VTHEVALINV (2.670000e-01) //
+#define PARM_crossover_scaling (1.200000e+00) //
+#define PARM_VTHNOR12x4x4 (3.900000e-01) //
+#define PARM_turnoff_factor (1.000000e-01) //
+#define PARM_res_ialu (4) //
+
+#define PARM_VTHOUTDRNAND (4.410000e-01) //
+#define PARM_VTHINV100x60 (4.380000e-01) //
+#define PARM_LSQ_size (8) //
+
+/* ALU POWER NUMBERS for .18um 733Mhz */
+/* normalize .18um cap to other gen's cap, then xPowerfactor */
+#define POWER_SCALE (GEN_POWER_SCALE * PARM_NORMALIZE_SCALE * Powerfactor) //
+#define I_ADD ((.37 - .091)*POWER_SCALE) //
+#define I_ADD32 (((.37 - .091)/2)*POWER_SCALE) //
+#define I_MULT16 ((.31-.095)*POWER_SCALE) //
+#define I_SHIFT ((.21-.089)*POWER_SCALE) //
+#define I_LOGIC ((.04-.015)*POWER_SCALE) //
+#define F_ADD ((1.307-.452)*POWER_SCALE) //
+#define F_MULT ((1.307-.452)*POWER_SCALE) //
+
+#define I_ADD_CLOCK (.091*POWER_SCALE) //
+#define I_MULT_CLOCK (.095*POWER_SCALE) //
+#define I_SHIFT_CLOCK (.089*POWER_SCALE) //
+#define I_LOGIC_CLOCK (.015*POWER_SCALE) //
+#define F_ADD_CLOCK (.452*POWER_SCALE) //
+#define F_MULT_CLOCK (.452*POWER_SCALE) //
+
+
+/* ----------------------------------------------------- */
+
+
+#endif
diff --git a/src/mem/ruby/network/orion/power_arbiter.cc b/src/mem/ruby/network/orion/power_arbiter.cc
new file mode 100644
index 000000000..ba68fbe1c
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_arbiter.cc
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+
+#include "power_arbiter.hh"
+#include "power_array.hh"
+#include "power_ll.hh"
+#include "parm_technology.hh"
+#include "SIM_port.hh"
+#include "power_static.hh"
+#include "power_utils.hh"
+
+
+
+
+
+/******************************* Power model for flip flop *****************************/
+
+/* ------- flip flop model ---------- */
+
+/* this model is based on the gate-level design given by Randy H. Katz "Contemporary Logic Design"
+ * Figure 6.24, node numbers (1-6) are assigned to all gate outputs, left to right, top to bottom
+ *
+ * We should have pure cap functions and leave the decision of whether or not to have coefficient
+ * 1/2 in init function.
+ */
+static double SIM_fpfp_node_cap(unsigned fan_in, unsigned fan_out)
+{
+ double Ctotal = 0;
+
+ /* FIXME: all need actual sizes */
+ /* part 1: drain cap of NOR gate */
+ Ctotal += fan_in * SIM_power_draincap(WdecNORn, NCH, 1) + SIM_power_draincap(WdecNORp, PCH, fan_in);
+
+ /* part 2: gate cap of NOR gates */
+ Ctotal += fan_out * SIM_power_gatecap(WdecNORn + WdecNORp, 0);
+
+ return Ctotal;
+}
+
+
+static double SIM_fpfp_clock_cap(void)
+{
+ /* gate cap of clock load */
+ return (2 * SIM_power_gatecap(WdecNORn + WdecNORp, 0));
+}
+
+
+int SIM_fpfp_clear_stat(power_ff *ff)
+{
+ ff->n_switch = ff->n_keep_1 = ff->n_keep_0 = ff->n_clock = 0;
+
+ return 0;
+}
+
+
/*
 * Initialize a flip-flop power model.
 *
 * ff    - model instance to fill in
 * model - one of power_ff_model (currently only NEG_DFF); written into
 *         ff->model as a side effect of the condition below
 * load  - external capacitive load on the flip-flop output, in farads
 *
 * Returns 0 on success, -1 if model is 0 or >= FF_MAX_MODEL.
 */
int SIM_fpfp_init(power_ff *ff, int model, double load)
{
    double c1, c2, c3, c4, c5, c6;

    /* NOTE: assignment inside the condition -- ff->model is set even
     * when the range check fails */
    if ((ff->model = model) && model < FF_MAX_MODEL) {
        switch (model) {
        case NEG_DFF:
            SIM_fpfp_clear_stat(ff);

            /* node 5 and node 6 are identical to node 1 in capacitance */
            c1 = c5 = c6 = SIM_fpfp_node_cap(2, 1);
            c2 = SIM_fpfp_node_cap(2, 3);
            c3 = SIM_fpfp_node_cap(3, 2);
            c4 = SIM_fpfp_node_cap(2, 3);

            /* full output transition: all six internal nodes plus the
             * external load, at 1/2 * C * V^2 per transition */
            ff->e_switch = (c4 + c1 + c2 + c3 + c5 + c6 + load) / 2 * EnergyFactor;
            /* no 1/2 for e_keep and e_clock because clock signal switches twice in one cycle */
            ff->e_keep_1 = c3 * EnergyFactor;
            ff->e_keep_0 = c2 * EnergyFactor;
            ff->e_clock = SIM_fpfp_clock_cap() * EnergyFactor;

            /* static power: NOR leakage table scaled by tech point;
             * NOTE(review): constants 4, 6 and 100 are empirical -- see
             * power_static.c for the leakage tables */
            ff->i_leakage = (WdecNORp * NOR2_TAB[0] + WdecNORn * (NOR2_TAB[1] + NOR2_TAB[2] + NOR2_TAB[3])) / 4 * 6 / PARM_TECH_POINT * 100;
            break;

        default: break;/* some error handler */
        }

        return 0;
    }
    else
        return -1;
}
+
+
+double SIM_fpfp_report(power_ff *ff)
+{
+ return (ff->e_switch * ff->n_switch + ff->e_clock * ff->n_clock +
+ ff->e_keep_0 * ff->n_keep_0 + ff->e_keep_1 * ff->n_keep_1);
+}
+
+/* ------- flip flop model ---------- */
+
+
+
+
+
+/* -------- arbiter power model ------------- */
+
+/* switch cap of request signal (round robin arbiter) */
+static double rr_arbiter_req_cap(double length)
+{
+ double Ctotal = 0;
+
+ /* part 1: gate cap of 2 NOR gates */
+ /* FIXME: need actual size */
+ Ctotal += 2 * SIM_power_gatecap(WdecNORn + WdecNORp, 0);
+
+ /* part 2: inverter */
+ /* FIXME: need actual size */
+ Ctotal += SIM_power_draincap(Wdecinvn, NCH, 1) + SIM_power_draincap(Wdecinvp, PCH, 1) +
+ SIM_power_gatecap(Wdecinvn + Wdecinvp, 0);
+
+ /* part 3: wire cap */
+ Ctotal += length * Cmetal;
+
+ return Ctotal;
+}
+
+
+/* switch cap of priority signal (round robin arbiter) */
+static double rr_arbiter_pri_cap()
+{
+ double Ctotal = 0;
+
+ /* part 1: gate cap of NOR gate */
+ /* FIXME: need actual size */
+ Ctotal += SIM_power_gatecap(WdecNORn + WdecNORp, 0);
+
+ return Ctotal;
+}
+
+
+/* switch cap of grant signal (round robin arbiter) */
+static double rr_arbiter_grant_cap()
+{
+ double Ctotal = 0;
+
+ /* part 1: drain cap of NOR gate */
+ /* FIXME: need actual size */
+ Ctotal += 2 * SIM_power_draincap(WdecNORn, NCH, 1) + SIM_power_draincap(WdecNORp, PCH, 2);
+
+ return Ctotal;
+}
+
+
+/* switch cap of carry signal (round robin arbiter) */
+static double rr_arbiter_carry_cap()
+{
+ double Ctotal = 0;
+
+ /* part 1: drain cap of NOR gate (this block) */
+ /* FIXME: need actual size */
+ Ctotal += 2 * SIM_power_draincap(WdecNORn, NCH, 1) + SIM_power_draincap(WdecNORp, PCH, 2);
+
+ /* part 2: gate cap of NOR gate (next block) */
+ /* FIXME: need actual size */
+ Ctotal += SIM_power_gatecap(WdecNORn + WdecNORp, 0);
+
+ return Ctotal;
+}
+
+
+/* switch cap of internal carry node (round robin arbiter) */
+static double rr_arbiter_carry_in_cap()
+{
+ double Ctotal = 0;
+
+ /* part 1: gate cap of 2 NOR gates */
+ /* FIXME: need actual size */
+ Ctotal += 2 * SIM_power_gatecap(WdecNORn + WdecNORp, 0);
+
+ /* part 2: drain cap of NOR gate */
+ /* FIXME: need actual size */
+ Ctotal += 2 * SIM_power_draincap(WdecNORn, NCH, 1) + SIM_power_draincap(WdecNORp, PCH, 2);
+
+ return Ctotal;
+}
+
+
+/* the "huge" NOR gate in matrix arbiter model is an approximation */
+/* switch cap of request signal (matrix arbiter) */
+static double matrix_arbiter_req_cap(unsigned req_width, double length)
+{
+ double Ctotal = 0;
+
+ /* FIXME: all need actual sizes */
+ /* part 1: gate cap of NOR gates */
+ Ctotal += (req_width - 1) * SIM_power_gatecap(WdecNORn + WdecNORp, 0);
+
+ /* part 2: inverter */
+ Ctotal += SIM_power_draincap(Wdecinvn, NCH, 1) + SIM_power_draincap(Wdecinvp, PCH, 1) +
+ SIM_power_gatecap(Wdecinvn + Wdecinvp, 0);
+
+ /* part 3: gate cap of the "huge" NOR gate */
+ Ctotal += SIM_power_gatecap(WdecNORn + WdecNORp, 0);
+
+ /* part 4: wire cap */
+ Ctotal += length * Cmetal;
+
+ return Ctotal;
+}
+
+
+/* switch cap of priority signal (matrix arbiter) */
+static double matrix_arbiter_pri_cap(unsigned req_width)
+{
+ double Ctotal = 0;
+
+ /* part 1: gate cap of NOR gates (2 groups) */
+ Ctotal += 2 * SIM_power_gatecap(WdecNORn + WdecNORp, 0);
+
+ /* no inverter because priority signal is kept by a flip flop */
+ return Ctotal;
+}
+
+
+/* switch cap of grant signal (matrix arbiter) */
+static double matrix_arbiter_grant_cap(unsigned req_width)
+{
+ /* drain cap of the "huge" NOR gate */
+ return (req_width * SIM_power_draincap(WdecNORn, NCH, 1) + SIM_power_draincap(WdecNORp, PCH, req_width));
+}
+
+
+/* switch cap of internal node (matrix arbiter) */
+static double matrix_arbiter_int_cap()
+{
+ double Ctotal = 0;
+
+ /* part 1: drain cap of NOR gate */
+ Ctotal += 2 * SIM_power_draincap(WdecNORn, NCH, 1) + SIM_power_draincap(WdecNORp, PCH, 2);
+
+ /* part 2: gate cap of the "huge" NOR gate */
+ Ctotal += SIM_power_gatecap(WdecNORn + WdecNORp, 0);
+
+ return Ctotal;
+}
+
+
+static int arbiter_clear_stat(power_arbiter *arb)
+{
+ arb->n_chg_req = arb->n_chg_grant = arb->n_chg_mint = 0;
+ arb->n_chg_carry = arb->n_chg_carry_in = 0;
+
+ SIM_array_clear_stat(&arb->queue);
+ SIM_fpfp_clear_stat(&arb->pri_ff);
+
+ return 0;
+}
+
+
+int power_arbiter_init(power_arbiter *arb, int arbiter_model, int ff_model, unsigned req_width, double length, power_array_info *info)
+{
+ if ((arb->model = arbiter_model) && arbiter_model < ARBITER_MAX_MODEL) {
+ arb->req_width = req_width;
+ arbiter_clear_stat(arb);
+ /* redundant field */
+ arb->mask = HAMM_MASK(req_width);
+
+ switch (arbiter_model) {
+ case RR_ARBITER:
+ arb->e_chg_req = rr_arbiter_req_cap(length) / 2 * EnergyFactor;
+ /* two grant signals switch together, so no 1/2 */
+ arb->e_chg_grant = rr_arbiter_grant_cap() * EnergyFactor;
+ arb->e_chg_carry = rr_arbiter_carry_cap() / 2 * EnergyFactor;
+ arb->e_chg_carry_in = rr_arbiter_carry_in_cap() / 2 * EnergyFactor;
+ arb->e_chg_mint = 0;
+
+ if (SIM_fpfp_init(&arb->pri_ff, ff_model, rr_arbiter_pri_cap()))
+ return -1;
+ break;
+
+ case MATRIX_ARBITER:
+ arb->e_chg_req = matrix_arbiter_req_cap(req_width, length) / 2 * EnergyFactor;
+ /* 2 grant signals switch together, so no 1/2 */
+ arb->e_chg_grant = matrix_arbiter_grant_cap(req_width) * EnergyFactor;
+ arb->e_chg_mint = matrix_arbiter_int_cap() / 2 * EnergyFactor;
+ arb->e_chg_carry = arb->e_chg_carry_in = 0;
+
+ if (SIM_fpfp_init(&arb->pri_ff, ff_model, matrix_arbiter_pri_cap(req_width)))
+ return -1;
+ break;
+
+ case QUEUE_ARBITER:
+ arb->e_chg_req = arb->e_chg_grant = arb->e_chg_mint = 0;
+ arb->e_chg_carry = arb->e_chg_carry_in = 0;
+
+ return power_array_init(info, &arb->queue);
+ break;
+
+ default: break;/* some error handler */
+ }
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
/*
 * Record one arbitration event for activity accounting.
 *
 * new_req/old_req     - request bit vectors for this and the previous
 *                       arbitration; only bits under arb->mask count
 * new_grant/old_grant - granted port for this and the previous
 *                       arbitration
 *
 * Counters updated here are weighted by per-event energies in
 * arbiter_report(). Always returns 0.
 */
int arbiter_record(power_arbiter *arb, unsigned long int new_req, unsigned long int old_req, unsigned new_grant, unsigned old_grant)
{
    switch (arb->model) {
    case MATRIX_ARBITER:
        /* one toggle per request bit that changed */
        arb->n_chg_req += SIM_power_Hamming(new_req, old_req, arb->mask);
        arb->n_chg_grant += new_grant != old_grant;
        /* FIXME: approximation */
        arb->n_chg_mint += (arb->req_width - 1) * arb->req_width / 2;
        /* priority registers */
        /* FIXME: use average instead */
        arb->pri_ff.n_switch += (arb->req_width - 1) / 2;
        break;

    case RR_ARBITER:
        arb->n_chg_req += SIM_power_Hamming(new_req, old_req, arb->mask);
        arb->n_chg_grant += new_grant != old_grant;
        /* FIXME: use average instead */
        arb->n_chg_carry += arb->req_width / 2;
        arb->n_chg_carry_in += arb->req_width / 2 - 1;
        /* priority registers */
        arb->pri_ff.n_switch += 2;
        break;

    case QUEUE_ARBITER:
        /* queue arbiter activity is recorded on its array model */
        break;

    default: break;/* some error handler */
    }

    return 0;
}
+
+
+double arbiter_report(power_arbiter *arb)
+{
+ switch (arb->model) {
+ case MATRIX_ARBITER:
+ return (arb->n_chg_req * arb->e_chg_req + arb->n_chg_grant * arb->e_chg_grant +
+ arb->n_chg_mint * arb->e_chg_mint +
+ arb->pri_ff.n_switch * arb->pri_ff.e_switch +
+ arb->pri_ff.n_keep_1 * arb->pri_ff.e_keep_1 +
+ arb->pri_ff.n_keep_0 * arb->pri_ff.e_keep_0 +
+ arb->pri_ff.n_clock * arb->pri_ff.e_clock);
+
+ case RR_ARBITER:
+ return (arb->n_chg_req * arb->e_chg_req + arb->n_chg_grant * arb->e_chg_grant +
+ arb->n_chg_carry * arb->e_chg_carry + arb->n_chg_carry_in * arb->e_chg_carry_in +
+ arb->pri_ff.n_switch * arb->pri_ff.e_switch +
+ arb->pri_ff.n_keep_1 * arb->pri_ff.e_keep_1 +
+ arb->pri_ff.n_keep_0 * arb->pri_ff.e_keep_0 +
+ arb->pri_ff.n_clock * arb->pri_ff.e_clock);
+
+ default: return -1;
+ }
+}
+
+/* ---------- arbiter power model ----------- */
+
+
diff --git a/src/mem/ruby/network/orion/power_arbiter.hh b/src/mem/ruby/network/orion/power_arbiter.hh
new file mode 100644
index 000000000..671608c2f
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_arbiter.hh
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Arbiter
+
+
+#ifndef _POWER_ARBITER_H
+#define _POWER_ARBITER_H
+
+#include "power_array.hh"
+
+/* supported arbiter circuit models */
+typedef enum {
+ RR_ARBITER =1,
+ MATRIX_ARBITER,
+ QUEUE_ARBITER,
+ ARBITER_MAX_MODEL
+} power_arbiter_model;
+
+/* supported flip-flop models */
+typedef enum {
+ NEG_DFF = 1, /* negative edge-triggered D flip-flop */
+ FF_MAX_MODEL
+} power_ff_model;
+
+/* flip-flop activity counters (n_*) and per-event energies (e_*) */
+typedef struct {
+ int model;
+ unsigned long int n_switch;
+ unsigned long int n_keep_1;
+ unsigned long int n_keep_0;
+ unsigned long int n_clock;
+ double e_switch;
+ double e_keep_1;
+ double e_keep_0;
+ double e_clock;
+ double i_leakage;
+} power_ff;
+
+/* arbiter activity counters (n_*) and per-event energies (e_*) */
+typedef struct{
+ int model;
+ unsigned req_width;
+ unsigned long int n_chg_req;
+ unsigned long int n_chg_grant;
+ unsigned long int n_chg_carry; //internal node of rr arbiter
+ unsigned long int n_chg_carry_in; //internal node of rr arbiter
+ unsigned long int n_chg_mint; //internal node of matrix arbiter
+ unsigned long int mask; // req_width-bit mask used for Hamming distances
+ double e_chg_req;
+ double e_chg_grant;
+ double e_chg_carry;
+ double e_chg_carry_in;
+ double e_chg_mint;
+ power_ff pri_ff; //priority ff
+ power_array queue; //request queue
+ double i_leakage;
+} power_arbiter;
+
+/* accumulate switching activity for one arbitration */
+extern int arbiter_record(power_arbiter *arb, unsigned long int new_req, unsigned long int old_req, unsigned new_grant, unsigned old_grant);
+
+/* total energy accumulated so far; -1 for unsupported models */
+extern double arbiter_report(power_arbiter *arb);
+
+/* initialize counters and energy constants; -1 on invalid model */
+extern int power_arbiter_init(power_arbiter *arb, int arbiter_model, int ff_model, unsigned req_width, double length, power_array_info *info);
+
+#endif
+
+
+
diff --git a/src/mem/ruby/network/orion/power_array.cc b/src/mem/ruby/network/orion/power_array.cc
new file mode 100644
index 000000000..225f45377
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_array.cc
@@ -0,0 +1,2158 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <math.h>
+
+#include "power_array.hh"
+#include "power_ll.hh"
+#include "parm_technology.hh"
+#include "SIM_port.hh"
+#include "power_static.hh"
+#include "power_utils.hh"
+
+/* local macros */
+
+/* geometry predicates on a power_array_info structure */
+#define IS_DIRECT_MAP( info ) ((info)->assoc == 1)
+#define IS_FULLY_ASSOC( info ) ((info)->n_set == 1 && (info)->assoc > 1)
+#define IS_WRITE_THROUGH( info ) (! (info)->write_policy)
+#define IS_WRITE_BACK( info ) ((info)->write_policy)
+
+/* sufficient (not necessary) condition */
+/* a non-zero model/width field means the sub-component is present */
+#define HAVE_TAG( info ) ((info)->tag_mem_model)
+#define HAVE_USE_BIT( info ) ((info)->use_bit_width)
+#define HAVE_COL_DEC( info ) ((info)->col_dec_model)
+#define HAVE_COL_MUX( info ) ((info)->mux_model)
+
+
+/* ----------------------------- CAM ---------------------------------- */
+/*============================== wordlines ==============================*/
+
+/* each time one wordline 1->0, another wordline 0->1, so no 1/2 */
+/*#
+ * switching cap of one CAM wordline
+ *
+ * Parameters:
+ *   cols     -- # of pass transistors hanging off the wordline
+ *   wire_cap -- wordline metal capacitance
+ *   tx_width -- width of the pass transistors
+ *
+ * Return value: switching cap (driver sized to Period/8 rise time)
+ */
+double SIM_cam_wordline_cap( unsigned cols, double wire_cap, double tx_width )
+{
+ double Ctotal, Cline, psize, nsize;
+
+ /* part 1: line cap, including gate cap of pass tx's and metal cap */
+ Ctotal = Cline = SIM_power_gatecappass( tx_width, 2 ) * cols + wire_cap;
+
+ /* part 2: input driver */
+ /* p-device sized for the line load, n-device scaled by inverter ratio */
+ psize = SIM_power_driver_size( Cline, Period / 8 );
+ nsize = psize * Wdecinvn / Wdecinvp;
+ /* WHS: 20 should go to PARM */
+ Ctotal += SIM_power_draincap( nsize, NCH, 1 ) + SIM_power_draincap( psize, PCH, 1 ) +
+ SIM_power_gatecap( nsize + psize, 20 );
+
+ return Ctotal;
+}
+
+/*============================== wordlines ==============================*/
+
+
+
+/*============================== tag comparator ==============================*/
+
+/* tag and tagbar switch simultaneously, so no 1/2 */
+/* switching cap of one CAM tag (compare) line:
+ *   rows          -- # of CAM rows driven by the tagline
+ *   taglinelength -- physical tagline length (for metal cap) */
+double SIM_cam_comp_tagline_cap( unsigned rows, double taglinelength )
+{
+ double Ctotal;
+
+ /* part 1: line cap, including drain cap of pass tx's and metal cap */
+ /* NOTE(review): the code sums *gate* cap of the compare tx per row,
+ * not drain cap as the comment above says -- confirm which is intended */
+ Ctotal = rows * SIM_power_gatecap( Wcomparen2, 2 ) + CC3M2metal * taglinelength;
+
+ /* part 2: input driver */
+ Ctotal += SIM_power_draincap( Wcompdrivern, NCH, 1 ) + SIM_power_draincap( Wcompdriverp, PCH, 1 ) +
+ SIM_power_gatecap( Wcompdrivern + Wcompdriverp, 1 );
+
+ return Ctotal;
+}
+
+
+/* upon mismatch, matchline 1->0, then 0->1 on next precharging, so no 1/2 */
+/* switching cap of one CAM matchline mismatch:
+ *   n_bits        -- # of compared tag bits on the matchline
+ *   n_pre         -- # of precharge transistors on the matchline
+ *   matchline_len -- physical matchline length (for metal cap) */
+double SIM_cam_comp_mismatch_cap( unsigned n_bits, unsigned n_pre, double matchline_len )
+{
+ double Ctotal;
+
+ /* part 1: drain cap of precharge tx */
+ Ctotal = n_pre * SIM_power_draincap( Wmatchpchg, PCH, 1 );
+
+ /* part 2: drain cap of comparator tx */
+ /* each bit contributes the two series compare transistors */
+ Ctotal += n_bits * ( SIM_power_draincap( Wcomparen1, NCH, 1 ) + SIM_power_draincap( Wcomparen1, NCH, 2 ));
+
+ /* part 3: metal cap of matchline */
+ Ctotal += CC3M3metal * matchline_len;
+
+ /* FIXME: I don't understand the Wattch code here */
+ /* part 4: nor gate of valid output */
+ Ctotal += SIM_power_gatecap( Wmatchnorn + Wmatchnorp, 10 );
+
+ return Ctotal;
+}
+
+
+/* WHS: subtle difference of valid output between cache and inst window:
+ *   fully-associative cache: nor all matchlines of the same port
+ *   instruction window: nor all matchlines of the same tag line */
+/* upon miss, valid output switches twice in one cycle, so no 1/2 */
+double SIM_cam_comp_miss_cap( unsigned assoc )
+{
+ double Cmiss;
+
+ /* drain cap of valid output: one NOR pull-down per way... */
+ Cmiss = assoc * SIM_power_draincap( Wmatchnorn, NCH, 1 );
+ /* ...plus the stacked pull-up */
+ Cmiss += SIM_power_draincap( Wmatchnorp, PCH, assoc );
+
+ return Cmiss;
+}
+
+/*============================== tag comparator ==============================*/
+
+
+
+/*============================== memory cell ==============================*/
+
+/* WHS: use Wmemcella and Wmemcellbscale to compute tx width of memory cell */
+/* switching cap of one CAM tag memory cell:
+ *   read_ports/write_ports -- # of ports of each kind
+ *   share_rw   -- non-zero if read and write share the same port
+ *   end        -- 1 single-ended or 2 double-ended read bitlines
+ *   only_write -- non-zero if the cell has write ports only */
+double SIM_cam_tag_mem_cap( unsigned read_ports, unsigned write_ports, int share_rw, unsigned end, int only_write )
+{
+ double Ctotal;
+
+ /* part 1: drain capacitance of pass transistors */
+ if ( only_write )
+ Ctotal = SIM_power_draincap( Wmemcellw, NCH, 1 ) * write_ports;
+ else {
+ Ctotal = SIM_power_draincap( Wmemcellr, NCH, 1 ) * read_ports * end / 2;
+ if ( ! share_rw )
+ Ctotal += SIM_power_draincap( Wmemcellw, NCH, 1 ) * write_ports;
+ }
+
+ /* has coefficient ( 1/2 * 2 ) */
+ /* part 2: drain capacitance of memory cell */
+ Ctotal += SIM_power_draincap( Wmemcella, NCH, 1 ) + SIM_power_draincap( Wmemcella * Wmemcellbscale, PCH, 1 );
+
+ /* has coefficient ( 1/2 * 2 ) */
+ /* part 3: gate capacitance of memory cell */
+ Ctotal += SIM_power_gatecap( Wmemcella, 1 ) + SIM_power_gatecap( Wmemcella * Wmemcellbscale, 1 );
+
+ /* has coefficient ( 1/2 * 2 ) */
+ /* part 4: gate capacitance of comparator */
+ Ctotal += SIM_power_gatecap( Wcomparen1, 2 ) * read_ports;
+
+ return Ctotal;
+}
+
+
+/* switching cap of one CAM data memory cell; data cells have no
+ * comparator, but drive an output driver on reads */
+double SIM_cam_data_mem_cap( unsigned read_ports, unsigned write_ports )
+{
+ double Ctotal;
+
+ /* has coefficient ( 1/2 * 2 ) */
+ /* part 1: drain capacitance of pass transistors */
+ Ctotal = SIM_power_draincap( Wmemcellw, NCH, 1 ) * write_ports;
+
+ /* has coefficient ( 1/2 * 2 ) */
+ /* part 2: drain capacitance of memory cell */
+ Ctotal += SIM_power_draincap( Wmemcella, NCH, 1 ) + SIM_power_draincap( Wmemcella * Wmemcellbscale, PCH, 1 );
+
+ /* has coefficient ( 1/2 * 2 ) */
+ /* part 3: gate capacitance of memory cell */
+ Ctotal += SIM_power_gatecap( Wmemcella, 1 ) + SIM_power_gatecap( Wmemcella * Wmemcellbscale, 1 );
+
+ /* part 4: gate capacitance of output driver */
+ /* only half the nand/nor inputs switch, hence the /2 */
+ Ctotal += ( SIM_power_gatecap( Woutdrvnandn, 1 ) + SIM_power_gatecap( Woutdrvnandp, 1 ) +
+ SIM_power_gatecap( Woutdrvnorn, 1 ) + SIM_power_gatecap( Woutdrvnorp, 1 )) / 2 * read_ports;
+
+ return Ctotal;
+}
+
+/*============================== memory cell ==============================*/
+
+
+
+
+
+
+
+/* ---------- buffer model ------------ */
+
+// ------- Decoder begin
+
+/*#
+ * compute switching cap when decoder changes output (select signal)
+ *
+ * Parameters:
+ * n_input -- fanin of 1 gate of last level decoder
+ *
+ * Return value: switching cap
+ *
+ * NOTES: 2 select signals switch, so no 1/2
+ */
+static double SIM_array_dec_select_cap( unsigned n_input )
+{
+ double Ctotal = 0;
+
+ /* FIXME: why? */
+ // if ( numstack > 5 ) numstack = 5;
+
+ /* part 1: drain cap of last level decoders */
+ /* n_input pull-downs in parallel, one stacked pull-up */
+ Ctotal = n_input * SIM_power_draincap( WdecNORn, NCH, 1 ) + SIM_power_draincap( WdecNORp, PCH, n_input );
+
+ /* part 2: output inverter */
+ /* WHS: 20 should go to PARM */
+ Ctotal += SIM_power_draincap( Wdecinvn, NCH, 1 ) + SIM_power_draincap( Wdecinvp, PCH, 1) +
+ SIM_power_gatecap( Wdecinvn + Wdecinvp, 20 );
+
+ return Ctotal;
+}
+
+
+/*#
+ * compute switching cap when 1 input bit of decoder changes
+ *
+ * Parameters:
+ * n_gates -- fanout of 1 addr signal
+ *
+ * Return value: switching cap
+ *
+ * NOTES: both addr and its complement change, so no 1/2
+ */
+static double SIM_array_dec_chgaddr_cap( unsigned n_gates )
+{
+ double Ctotal;
+
+ /* stage 1: input driver */
+ Ctotal = SIM_power_draincap( Wdecdrivep, PCH, 1 ) + SIM_power_draincap( Wdecdriven, NCH, 1 ) +
+ SIM_power_gatecap( Wdecdrivep, 1 ) + SIM_power_gatecap( Wdecdriven, 1 );
+ /* inverter to produce complement addr, this needs 1/2 */
+ /* WHS: assume Wdecinv(np) for this inverter */
+ Ctotal += ( SIM_power_draincap( Wdecinvp, PCH, 1 ) + SIM_power_draincap( Wdecinvn, NCH, 1 ) +
+ SIM_power_gatecap( Wdecinvp, 1 ) + SIM_power_gatecap( Wdecinvn, 1 )) / 2;
+
+ /* stage 2: gate cap of level-1 decoder */
+ /* each addr bit fans out to n_gates level-1 NAND gates */
+ /* WHS: 10 should go to PARM */
+ Ctotal += n_gates * SIM_power_gatecap( Wdec3to8n + Wdec3to8p, 10 );
+
+ return Ctotal;
+}
+
+
+/*#
+ * compute switching cap when 1st-level decoder changes output
+ *
+ * Parameters:
+ * n_in_1st -- fanin of 1 gate of 1st-level decoder
+ * n_in_2nd -- fanin of 1 gate of 2nd-level decoder
+ * n_gates -- # of gates of 2nd-level decoder, i.e.
+ * fanout of 1 gate of 1st-level decoder
+ *
+ * Return value: switching cap
+ *
+ * NOTES: 2 complementary signals switch, so no 1/2
+ */
+static double SIM_array_dec_chgl1_cap( unsigned n_in_1st, unsigned n_in_2nd, unsigned n_gates )
+{
+ double Ctotal;
+
+ /* part 1: drain cap of level-1 decoder */
+ /* n_in_1st pull-ups in parallel, one stacked pull-down */
+ Ctotal = n_in_1st * SIM_power_draincap( Wdec3to8p, PCH, 1 ) + SIM_power_draincap( Wdec3to8n, NCH, n_in_1st );
+
+ /* part 2: gate cap of level-2 decoder */
+ /* WHS: 40 and 20 should go to PARM */
+ Ctotal += n_gates * SIM_power_gatecap( WdecNORn + WdecNORp, n_in_2nd * 40 + 20 );
+
+ return Ctotal;
+}
+
+
+/* reset all decoder activity counters to zero */
+static int SIM_array_dec_clear_stat(power_decoder *dec)
+{
+ dec->n_chg_output = 0;
+ dec->n_chg_l1 = 0;
+ dec->n_chg_addr = 0;
+
+ return 0;
+}
+
+
+/*#
+ * initialize decoder
+ *
+ * Parameters:
+ * dec -- decoder structure
+ * model -- decoder model type
+ * n_bits -- decoder width
+ *
+ * Side effects:
+ * initialize dec structure if model type is valid
+ *
+ * Return value: -1 if model type is invalid
+ * 0 otherwise
+ */
+static int SIM_array_dec_init(power_decoder *dec, int model, unsigned n_bits )
+{
+ /* intentional assignment-and-test: store the model, reject 0/out-of-range */
+ if ((dec->model = model) && model < DEC_MAX_MODEL) {
+ dec->n_bits = n_bits;
+ /* redundant field */
+ dec->addr_mask = HAMM_MASK(n_bits);
+
+ SIM_array_dec_clear_stat(dec);
+ dec->e_chg_output = dec->e_chg_l1 = dec->e_chg_addr = 0;
+
+ /* compute geometry parameters */
+ if ( n_bits >= 4 ) { /* 2-level decoder */
+ /* WHS: inaccurate for some n_bits */
+ /* level-1 gates are 2- or 3-input NANDs */
+ dec->n_in_1st = ( n_bits == 4 ) ? 2:3;
+ dec->n_out_0th = BIGONE << ( dec->n_in_1st - 1 );
+ dec->n_in_2nd = (unsigned)ceil((double)n_bits / dec->n_in_1st );
+ dec->n_out_1st = BIGONE << ( n_bits - dec->n_in_1st );
+ }
+ else if ( n_bits >= 2 ) { /* 1-level decoder */
+ dec->n_in_1st = n_bits;
+ dec->n_out_0th = BIGONE << ( n_bits - 1 );
+ dec->n_in_2nd = dec->n_out_1st = 0;
+ }
+ else { /* no decoder basically */
+ dec->n_in_1st = dec->n_in_2nd = dec->n_out_0th = dec->n_out_1st = 0;
+ }
+
+ /* compute energy constants */
+ if ( n_bits >= 2 ) {
+ dec->e_chg_l1 = SIM_array_dec_chgl1_cap( dec->n_in_1st, dec->n_in_2nd, dec->n_out_1st ) * EnergyFactor;
+ if ( n_bits >= 4 )
+ dec->e_chg_output = SIM_array_dec_select_cap( dec->n_in_2nd ) * EnergyFactor;
+ }
+ /* address-input energy applies even to the degenerate 1-bit case */
+ dec->e_chg_addr = SIM_array_dec_chgaddr_cap( dec->n_out_0th ) * EnergyFactor;
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/*#
+ * record decoder power stats
+ *
+ * Parameters:
+ * dec -- decoder structure
+ * prev_addr -- previous input
+ * curr_addr -- current input
+ *
+ * Side effects:
+ * update counters in dec structure
+ *
+ * Return value: 0
+ */
+int SIM_array_dec_record(power_decoder *dec, unsigned long int prev_addr, unsigned long int curr_addr )
+{
+ unsigned n_chg_bits, n_chg_l1 = 0, n_chg_output = 0;
+ unsigned i;
+ unsigned long int mask;
+
+ /* compute Hamming distance */
+ n_chg_bits = SIM_power_Hamming( prev_addr, curr_addr, dec->addr_mask );
+ if ( n_chg_bits ) {
+ if ( dec->n_bits >= 4 ) { /* 2-level decoder */
+ /* WHS: inaccurate for some n_bits */
+ /* any address change selects a new output line */
+ n_chg_output ++;
+ /* count addr group changes */
+ /* slide an n_in_1st-wide window across the address; each group
+ * that changed toggles one level-1 gate */
+ mask = HAMM_MASK(dec->n_in_1st);
+ for ( i = 0; i < dec->n_in_2nd; i ++ ) {
+ if ( SIM_power_Hamming( prev_addr, curr_addr, mask ))
+ n_chg_l1 ++;
+ mask = mask << dec->n_in_1st;
+ }
+ }
+ else if ( dec->n_bits >= 2 ) { /* 1-level decoder */
+ n_chg_l1 ++;
+ }
+
+ dec->n_chg_addr += n_chg_bits;
+ dec->n_chg_l1 += n_chg_l1;
+ dec->n_chg_output += n_chg_output;
+ }
+
+ return 0;
+}
+
+
+/*#
+ * report decoder power stats
+ *
+ * Parameters:
+ * dec -- decoder structure
+ *
+ * Return value: total energy consumption of this decoder
+ *
+ * TODO: add more report functionality, currently only total energy is reported
+ */
+double SIM_array_dec_report(power_decoder *dec )
+{
+ /* sum the three switching sources: selected output, level-1 gates,
+ * and address input drivers */
+ /* (a dynamic decoder would roughly double this; not modeled) */
+ return dec->n_chg_output * dec->e_chg_output +
+ dec->n_chg_l1 * dec->e_chg_l1 +
+ dec->n_chg_addr * dec->e_chg_addr;
+}
+
+// ------- Decoder end
+
+
+
+// ------- Wordlines begin
+
+/*#
+ * compute wordline switching cap
+ *
+ * Parameters:
+ * cols -- # of pass transistors, i.e. # of bitlines
+ * wordlinelength -- length of wordline
+ * tx_width -- width of pass transistor
+ *
+ * Return value: switching cap
+ *
+ * NOTES: upon address change, one wordline 1->0, another 0->1, so no 1/2
+ */
+static double SIM_array_wordline_cap( unsigned cols, double wire_cap, double tx_width )
+{
+ double Ctotal, Cline, psize, nsize;
+
+ /* part 1: line cap, including gate cap of pass tx's and metal cap */
+ Ctotal = Cline = SIM_power_gatecappass( tx_width, BitWidth / 2 - tx_width ) * cols + wire_cap;
+
+ /* part 2: input driver */
+ /* p-device sized for Period/16 rise time, n-device by inverter ratio */
+ psize = SIM_power_driver_size( Cline, Period / 16 );
+ nsize = psize * Wdecinvn / Wdecinvp;
+ /* WHS: 20 should go to PARM */
+ Ctotal += SIM_power_draincap( nsize, NCH, 1 ) + SIM_power_draincap( psize, PCH, 1 ) +
+ SIM_power_gatecap( nsize + psize, 20 );
+
+ return Ctotal;
+}
+
+
+/* reset wordline read/write activity counters to zero */
+static int SIM_array_wordline_clear_stat(power_wordline *wordline)
+{
+ wordline->n_read = 0;
+ wordline->n_write = 0;
+
+ return 0;
+}
+
+
+/*#
+ * initialize wordline
+ *
+ * Parameters:
+ * wordline -- wordline structure
+ * model -- wordline model type
+ * share_rw -- 1 if shared R/W wordlines, 0 if separate R/W wordlines
+ * cols -- # of array columns, NOT # of bitlines
+ * wire_cap -- wordline wire capacitance
+ * end -- end of bitlines
+ *
+ * Return value: -1 if invalid model type
+ * 0 otherwise
+ *
+ * Side effects:
+ * initialize wordline structure if model type is valid
+ *
+ * TODO: add error handler
+ */
+static int SIM_array_wordline_init(power_wordline *wordline, int model, int share_rw, unsigned cols, double wire_cap, unsigned end )
+{
+ /* intentional assignment-and-test: store the model, reject 0/out-of-range */
+ if ((wordline->model = model) && model < WORDLINE_MAX_MODEL) {
+ SIM_array_wordline_clear_stat(wordline);
+
+ switch ( model ) {
+ case CAM_RW_WORDLINE:
+ wordline->e_read = SIM_cam_wordline_cap( cols * end, wire_cap, Wmemcellr ) * EnergyFactor;
+ /* assignment intended; extra parens avoid the "= in condition" warning */
+ if (( wordline->share_rw = share_rw ))
+ wordline->e_write = wordline->e_read;
+ else
+ /* write bitlines are always double-ended */
+ wordline->e_write = SIM_cam_wordline_cap( cols * 2, wire_cap, Wmemcellw ) * EnergyFactor;
+ break;
+
+ case CAM_WO_WORDLINE: /* only have write wordlines */
+ wordline->share_rw = 0;
+ wordline->e_read = 0;
+ wordline->e_write = SIM_cam_wordline_cap( cols * 2, wire_cap, Wmemcellw ) * EnergyFactor;
+ break;
+
+ case CACHE_WO_WORDLINE: /* only have write wordlines */
+ wordline->share_rw = 0;
+ wordline->e_read = 0;
+ wordline->e_write = SIM_array_wordline_cap( cols * 2, wire_cap, Wmemcellw ) * EnergyFactor;
+ break;
+
+ case CACHE_RW_WORDLINE:
+ wordline->e_read = SIM_array_wordline_cap( cols * end, wire_cap, Wmemcellr ) * EnergyFactor;
+ /* assignment intended; extra parens avoid the "= in condition" warning */
+ if (( wordline->share_rw = share_rw ))
+ wordline->e_write = wordline->e_read;
+ else
+ wordline->e_write = SIM_array_wordline_cap( cols * 2, wire_cap, Wmemcellw ) * EnergyFactor;
+
+ /* static power */
+ /* input driver */
+ wordline->i_leakage = (Woutdrivern * NMOS_TAB[0] + Woutdriverp * PMOS_TAB[0]) / PARM_TECH_POINT * 100;
+ break;
+
+ default: break;/* some error handler */
+ }
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/*#
+ * record wordline power stats
+ *
+ * Parameters:
+ * wordline -- wordline structure
+ * rw -- 1 if write operation, 0 if read operation
+ * n_switch -- switching times
+ *
+ * Return value: 0
+ *
+ * Side effects:
+ * update counters of wordline structure
+ */
+/* accumulate wordline switching: rw non-zero counts writes, zero counts reads */
+int SIM_array_wordline_record(power_wordline *wordline, int rw, unsigned long int n_switch )
+{
+ unsigned long int *counter;
+
+ /* pick the counter matching the operation kind */
+ counter = rw ? &wordline->n_write : &wordline->n_read;
+ *counter += n_switch;
+
+ return 0;
+}
+
+
+/*#
+ * report wordline power stats
+ *
+ * Parameters:
+ * wordline -- wordline structure
+ *
+ * Return value: total energy consumption of all wordlines of this array
+ *
+ * TODO: add more report functionality, currently only total energy is reported
+ */
+double SIM_array_wordline_report(power_wordline *wordline )
+{
+ double Etotal;
+
+ /* read and write contributions: event count times per-event energy */
+ Etotal = wordline->n_read * wordline->e_read;
+ Etotal += wordline->n_write * wordline->e_write;
+
+ return Etotal;
+}
+
+// ------- Wordlines end
+
+
+
+// ------- Bitlines begin
+
+/*#
+ * compute switching cap of reading 1 separate bitline column
+ *
+ * Parameters:
+ * rows -- # of array rows, i.e. # of wordlines
+ * wire_cap -- bitline wire capacitance
+ * end -- end of bitlines
+ * n_share_amp -- # of columns who share one sense amp
+ * n_bitline_pre -- # of precharge transistor drains for 1 bitline column
+ * n_colsel_pre -- # of precharge transistor drains for 1 column selector, if any
+ * pre_size -- width of precharge transistors
+ * outdrv_model -- output driver model type
+ *
+ * Return value: switching cap
+ *
+ * NOTES: one bitline 1->0, then 0->1 on next precharging, so no 1/2
+ */
+static double SIM_array_column_read_cap(unsigned rows, double wire_cap, unsigned end, unsigned n_share_amp, unsigned n_bitline_pre, unsigned n_colsel_pre, double pre_size, int outdrv_model)
+{
+ double Ctotal=0, Cprecharge=0, Cpass=0, Cwire=0, Ccol_sel=0, Csense=0;
+
+ /* part 1: drain cap of precharge tx's */
+ Cprecharge = n_bitline_pre * SIM_power_draincap( pre_size, PCH, 1 );
+// printf("Precharge = %g\n", Cprecharge);
+ Ctotal = Cprecharge;
+
+ /* part 2: drain cap of pass tx's */
+ /* one read pass transistor per row on the bitline */
+ Cpass = rows * SIM_power_draincap( Wmemcellr, NCH, 1 );
+// printf("Pass = %g\n", Cpass);
+ Ctotal += Cpass;
+
+ /* part 3: metal cap */
+ Cwire = wire_cap;
+// printf("Wire = %g\n", Cwire);
+ Ctotal += Cwire;
+
+ /* part 4: column selector or bitline inverter */
+ if ( end == 1 ) { /* bitline inverter */
+ /* FIXME: magic numbers */
+ Ccol_sel = SIM_power_gatecap( MSCALE * ( 29.9 + 7.8 ), 0 ) +
+ SIM_power_gatecap( MSCALE * ( 47.0 + 12.0), 0 );
+ }
+ else if ( n_share_amp > 1 ) { /* column selector */
+ /* drain cap of pass tx's */
+ Ccol_sel = ( n_share_amp + 1 ) * SIM_power_draincap( Wbitmuxn, NCH, 1 );
+ /* drain cap of column selector precharge tx's */
+ Ccol_sel += n_colsel_pre * SIM_power_draincap( pre_size, PCH, 1 );
+ /* FIXME: no way to count activity factor on gates of column selector */
+ }
+// printf("Col selector = %g\n", Ccol_sel);
+
+ Ctotal += Ccol_sel;
+
+ /* part 5: gate cap of sense amplifier or output driver */
+ if (end == 2) /* sense amplifier */
+ Csense = 2 * SIM_power_gatecap( WsenseQ1to4, 10 );
+ else if (outdrv_model) /* end == 1, output driver */
+ Csense = SIM_power_gatecap( Woutdrvnandn, 1 ) + SIM_power_gatecap( Woutdrvnandp, 1 ) +
+ SIM_power_gatecap( Woutdrvnorn, 1 ) + SIM_power_gatecap( Woutdrvnorp, 1 );
+// printf("Sense = %g\n", Csense);
+ Ctotal += Csense;
+
+ return Ctotal;
+}
+
+
+/*#
+ * compute switching cap of selecting 1 column selector
+ *
+ * Parameters:
+ *
+ * Return value: switching cap
+ *
+ * NOTES: select one, deselect another, so no 1/2
+ */
+static double SIM_array_column_select_cap( void )
+{
+ /* gate cap of one column-mux pass transistor */
+ double Csel = SIM_power_gatecap( Wbitmuxn, 1 );
+
+ return Csel;
+}
+
+
+/*#
+ * compute switching cap of writing 1 separate bitline column
+ *
+ * Parameters:
+ * rows -- # of array rows, i.e. # of wordlines
+ * wire_cap -- bitline wire capacitance
+ *
+ * Return value: switching cap
+ *
+ * NOTES: bit and bitbar switch simultaneously, so no 1/2
+ */
+static double SIM_array_column_write_cap( unsigned rows, double wire_cap )
+{
+ double Ctotal=0, Cwire=0, Cpass=0, Cdriver=0, psize, nsize;
+
+ Cwire = wire_cap;
+// printf("WRITE wire cap = %g\n", Cwire);
+ Ctotal = Cwire;
+
+ /* part 1: line cap, including drain cap of pass tx's and metal cap */
+ /* one write pass transistor per row on the bitline */
+ Cpass = rows * SIM_power_draincap( Wmemcellw, NCH, 1 );
+// printf("WRITE pass tx cap = %g\n", Cpass);
+ Ctotal += Cpass;
+
+
+ /* part 2: write driver */
+ /* driver sized to switch the whole accumulated line load in Period/8 */
+ psize = SIM_power_driver_size( Ctotal, Period / 8 );
+ nsize = psize * Wdecinvn / Wdecinvp;
+ Cdriver = SIM_power_draincap( psize, PCH, 1 ) + SIM_power_draincap( nsize, NCH, 1 ) +
+ SIM_power_gatecap( psize + nsize, 1 );
+// printf("WRITE driver cap = %g\n", Cdriver);
+ Ctotal += Cdriver;
+
+ return Ctotal;
+}
+
+
+/* one bitline switches twice in one cycle, so no 1/2 */
+/* switching cap of writing one shared (R/W) bitline column */
+static double SIM_array_share_column_write_cap( unsigned rows, double wire_cap, unsigned n_share_amp, unsigned n_bitline_pre, double pre_size )
+{
+ double Ctotal, psize, nsize;
+
+ /* part 1: drain cap of precharge tx's */
+ Ctotal = n_bitline_pre * SIM_power_draincap( pre_size, PCH, 1 );
+
+ /* part 2: drain cap of pass tx's */
+ Ctotal += rows * SIM_power_draincap( Wmemcellr, NCH, 1 );
+
+ /* part 3: metal cap */
+ Ctotal += wire_cap;
+
+ /* part 4: column selector or sense amplifier */
+ if ( n_share_amp > 1 ) Ctotal += SIM_power_draincap( Wbitmuxn, NCH, 1 );
+ else Ctotal += 2 * SIM_power_gatecap( WsenseQ1to4, 10 );
+
+ /* part 5: write driver */
+ psize = SIM_power_driver_size( Ctotal, Period / 8 );
+ nsize = psize * Wdecinvn / Wdecinvp;
+ /* WHS: omit gate cap of driver due to modeling difficulty */
+ Ctotal += SIM_power_draincap( psize, PCH, 1 ) + SIM_power_draincap( nsize, NCH, 1 );
+
+ return Ctotal;
+}
+
+
+/* one bitline switches twice in one cycle, so no 1/2 */
+/* switching cap of reading one shared (R/W) bitline column; builds on the
+ * shared write cap and adds the read-only column-select/sense-amp load */
+static double SIM_array_share_column_read_cap( unsigned rows, double wire_cap, unsigned n_share_amp, unsigned n_bitline_pre, unsigned n_colsel_pre, double pre_size )
+{
+ double Ctotal;
+
+ /* part 1: same portion as write */
+ Ctotal = SIM_array_share_column_write_cap( rows, wire_cap, n_share_amp, n_bitline_pre, pre_size );
+
+ /* part 2: column selector and sense amplifier */
+ if ( n_share_amp > 1 ) {
+ /* bottom part of drain cap of pass tx's */
+ Ctotal += n_share_amp * SIM_power_draincap( Wbitmuxn, NCH, 1 );
+ /* drain cap of column selector precharge tx's */
+ Ctotal += n_colsel_pre * SIM_power_draincap( pre_size, PCH, 1 );
+
+ /* part 3: gate cap of sense amplifier */
+ Ctotal += 2 * SIM_power_gatecap( WsenseQ1to4, 10 );
+ }
+
+ return Ctotal;
+}
+
+
+/* reset bitline activity counters to zero */
+static int SIM_array_bitline_clear_stat(power_bitline *bitline)
+{
+ bitline->n_col_write = 0;
+ bitline->n_col_read = 0;
+ bitline->n_col_sel = 0;
+
+ return 0;
+}
+
+
+static int SIM_array_bitline_init(power_bitline *bitline, int model, int share_rw, unsigned end, unsigned rows, double wire_cap, unsigned n_share_amp, unsigned n_bitline_pre, unsigned n_colsel_pre, double pre_size, int outdrv_model)
+{
+ /* intentional assignment-and-test: store the model, reject 0/out-of-range */
+ if ((bitline->model = model) && model < BITLINE_MAX_MODEL) {
+ bitline->end = end;
+ SIM_array_bitline_clear_stat(bitline);
+
+ switch ( model ) {
+ case RW_BITLINE:
+ if ( end == 2 )
+ bitline->e_col_sel = SIM_array_column_select_cap() * EnergyFactor;
+ else /* end == 1 implies register file */
+ bitline->e_col_sel = 0;
+// printf("BUFFER INTERNAL bitline sel energy = %g\n", bitline->e_col_sel);
+
+ /* assignment intended; extra parens avoid the "= in condition" warning */
+ if (( bitline->share_rw = share_rw )) {
+ /* shared bitlines are double-ended, so SenseEnergyFactor */
+ bitline->e_col_read = SIM_array_share_column_read_cap( rows, wire_cap, n_share_amp, n_bitline_pre, n_colsel_pre, pre_size ) * SenseEnergyFactor;
+ bitline->e_col_write = SIM_array_share_column_write_cap( rows, wire_cap, n_share_amp, n_bitline_pre, pre_size ) * EnergyFactor;
+ }
+ else {
+ bitline->e_col_read = SIM_array_column_read_cap(rows, wire_cap, end, n_share_amp, n_bitline_pre, n_colsel_pre, pre_size, outdrv_model) * (end == 2 ? SenseEnergyFactor : EnergyFactor);
+// printf("BUFFER INTERNAL bitline read energy = %g\n", bitline->e_col_read);
+ bitline->e_col_write = SIM_array_column_write_cap( rows, wire_cap ) * EnergyFactor;
+// printf("BUFFER INTERNAL bitline write energy = %g\n", bitline->e_col_write);
+
+ /* static power */
+ bitline->i_leakage = 2 * (Wdecinvn * NMOS_TAB[0] + Wdecinvp * PMOS_TAB[0]) / PARM_TECH_POINT * 100;
+// printf("BUFFER INTERNAL bitline leakage current = %g\n", bitline->i_leakage);
+ }
+
+ break;
+
+ case WO_BITLINE: /* only have write bitlines */
+ bitline->share_rw = 0;
+ bitline->e_col_sel = bitline->e_col_read = 0;
+ bitline->e_col_write = SIM_array_column_write_cap( rows, wire_cap ) * EnergyFactor;
+ break;
+
+ default: break;/* some error handler */
+ }
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/* predicate: does this bitline model have both read and write bitlines? */
+static int is_rw_bitline( int model )
+{
+ if ( model == RW_BITLINE )
+ return 1;
+
+ return 0;
+}
+
+
+/* WHS: no way to count activity factor on column selector gates */
+/* accumulate bitline switching for one access:
+ *   rw        -- non-zero for write, zero for read
+ *   cols      -- # of bitline columns accessed
+ *   old_value -- previously stored word (separate-R/W writes only)
+ *   new_value -- word being written/read */
+int SIM_array_bitline_record(power_bitline *bitline, int rw, unsigned cols, unsigned long int old_value, unsigned long int new_value )
+{
+ /* FIXME: should use variable rather than computing each time */
+ unsigned long int mask = HAMM_MASK(cols);
+
+ if ( rw ) { /* write */
+ if ( bitline->share_rw ) /* share R/W bitlines */
+ /* precharged bitlines: every column toggles regardless of data */
+ bitline->n_col_write += cols;
+ else /* separate R/W bitlines */
+ /* only bits that actually change toggle their bitlines */
+ bitline->n_col_write += SIM_power_Hamming( old_value, new_value, mask );
+ }
+ else { /* read */
+ if ( bitline->end == 2 ) /* double-ended bitline */
+ bitline->n_col_read += cols;
+ else /* single-ended bitline */
+ /* WHS: read ~new_value due to the bitline inverter */
+ bitline->n_col_read += SIM_power_Hamming( mask, ~new_value, mask );
+ }
+
+ return 0;
+}
+
+
+double SIM_array_bitline_report(power_bitline *bitline )
+{
+ double Etotal;
+
+ /* write, read and column-select contributions */
+ Etotal = bitline->n_col_write * bitline->e_col_write;
+ Etotal += bitline->n_col_read * bitline->e_col_read;
+ Etotal += bitline->n_col_sel * bitline->e_col_sel;
+
+ return Etotal;
+}
+
+// ------- Bitlines end
+
+
+
+// ------- Sense amplifier begin
+
+/* estimate senseamp power dissipation in cache structures (Zyuban's method) */
+/* per-access sense-amp energy: Vdd/8 * clock period * saturation current */
+static double SIM_array_amp_energy( void )
+{
+ return ( (double)Vdd / 8.0 * (double )(Period) * (double )(PARM_amp_Idsat));
+}
+
+
+/* reset the sense-amp access counter to zero */
+static int SIM_array_amp_clear_stat(power_amp *amp)
+{
+ amp->n_access = 0;
+ return 0;
+}
+
+
+/* initialize sense-amp model; returns -1 on an invalid model type */
+static int SIM_array_amp_init(power_amp *amp, int model )
+{
+ /* store the model, then reject 0 or out-of-range values */
+ amp->model = model;
+ if ( !amp->model || model >= AMP_MAX_MODEL )
+ return -1;
+
+ SIM_array_amp_clear_stat(amp);
+ amp->e_access = SIM_array_amp_energy();
+
+ return 0;
+}
+
+
+/* one sense amp fires per active column */
+int SIM_array_amp_record(power_amp *amp, unsigned cols )
+{
+ amp->n_access = amp->n_access + cols;
+
+ return 0;
+}
+
+
+/* total sense-amp energy: access count times per-access energy */
+double SIM_array_amp_report(power_amp *amp )
+{
+ double Etotal = amp->n_access * amp->e_access;
+
+ return Etotal;
+}
+
+// ------- Sense amplifier end
+
+
+// ------- Tag comparator begin
+
+/* eval switches twice per cycle, so no 1/2 */
+/* WHS: assume eval = 1 when no cache operation */
+static double SIM_array_comp_base_cap( void )
+{
+ /* eval tx's: 4 inverters */
+ /* base cap charged on every compare: drain + gate cap of the
+ * eval inverter and the 3-stage comparator inverter chain */
+ return ( SIM_power_draincap( Wevalinvp, PCH, 1 ) + SIM_power_draincap( Wevalinvn, NCH, 1 ) +
+ SIM_power_gatecap( Wevalinvp, 1 ) + SIM_power_gatecap( Wevalinvn, 1 ) +
+ SIM_power_draincap( Wcompinvp1, PCH, 1 ) + SIM_power_draincap( Wcompinvn1, NCH, 1 ) +
+ SIM_power_gatecap( Wcompinvp1, 1 ) + SIM_power_gatecap( Wcompinvn1, 1 ) +
+ SIM_power_draincap( Wcompinvp2, PCH, 1 ) + SIM_power_draincap( Wcompinvn2, NCH, 1 ) +
+ SIM_power_gatecap( Wcompinvp2, 1 ) + SIM_power_gatecap( Wcompinvn2, 1 ) +
+ SIM_power_draincap( Wcompinvp3, PCH, 1 ) + SIM_power_draincap( Wcompinvn3, NCH, 1 ) +
+ SIM_power_gatecap( Wcompinvp3, 1 ) + SIM_power_gatecap( Wcompinvn3, 1 ));
+}
+
+
+/* no 1/2 for the same reason with SIM_array_comp_base_cap */
+static double SIM_array_comp_match_cap( unsigned n_bits )
+{
+ double Cbit;
+
+ /* per matching bit: drain caps of the two series compare transistors */
+ Cbit = SIM_power_draincap( Wcompn, NCH, 1 ) + SIM_power_draincap( Wcompn, NCH, 2 );
+
+ return n_bits * Cbit;
+}
+
+
+/* upon mismatch, select signal 1->0, then 0->1 on next precharging, so no 1/2 */
+static double SIM_array_comp_mismatch_cap( unsigned n_pre )
+{
+ double Ctotal;
+
+ /* part 1: drain cap of precharge tx */
+ Ctotal = n_pre * SIM_power_draincap( Wcomppreequ, PCH, 1 );
+
+ /* part 2: nor gate of valid output */
+ Ctotal += SIM_power_gatecap( WdecNORn, 1 ) + SIM_power_gatecap( WdecNORp, 3 );
+
+ return Ctotal;
+}
+
+
+/* upon miss, valid output switches twice in one cycle, so no 1/2 */
+/* Capacitance switched on a miss: drain cap of the valid-output gate.
+ * NOTE(review): only the NMOS drain term is scaled by 'assoc'; the PMOS
+ * term instead passes 'assoc' as the stack depth -- presumably modelling
+ * a NOR with 'assoc' parallel pulldowns and a series pullup. Verify
+ * against the upstream Orion model before changing. */
+static double SIM_array_comp_miss_cap( unsigned assoc )
+{
+ /* drain cap of valid output */
+ return ( assoc * SIM_power_draincap( WdecNORn, NCH, 1 ) + SIM_power_draincap( WdecNORp, PCH, assoc ));
+}
+
+
+/* no 1/2 for the same reason as base_cap */
+/* Capacitance switched by a single matching tag bit: two pairs of
+ * compare-transistor drain caps. */
+static double SIM_array_comp_bit_match_cap( void )
+{
+    double cap_pair = SIM_power_draincap( Wcompn, NCH, 1 ) + SIM_power_draincap( Wcompn, NCH, 2 );
+
+    return 2 * cap_pair;
+}
+
+
+/* no 1/2 for the same reason as base_cap */
+/* Capacitance switched by a single mismatching tag bit.
+ * NOTE(review): unlike bit_match_cap, the factor 3 here applies only to
+ * the first drain-cap term, not the pair -- presumably intentional
+ * (different transistor activity on mismatch); confirm against upstream
+ * Orion before "fixing" the parenthesization. */
+static double SIM_array_comp_bit_mismatch_cap( void )
+{
+ return ( 3 * SIM_power_draincap( Wcompn, NCH, 1 ) + SIM_power_draincap( Wcompn, NCH, 2 ));
+}
+
+
+/* each addr bit drives 2 nmos pass transistors, so no 1/2 */
+/* Capacitance switched per changed address bit: the gate of one
+ * compare transistor. */
+static double SIM_array_comp_chgaddr_cap( void )
+{
+    double gate_cap = SIM_power_gatecap( Wcompn, 1 );
+
+    return gate_cap;
+}
+
+
+/* Reset every comparator activity counter to zero. Always returns 0. */
+static int SIM_array_comp_clear_stat(power_comp *comp)
+{
+    comp->n_access = 0;
+    comp->n_miss = 0;
+    comp->n_chg_addr = 0;
+    comp->n_match = 0;
+    comp->n_mismatch = 0;
+    comp->n_bit_match = 0;
+    comp->n_bit_mismatch = 0;
+    return 0;
+}
+
+
+/* Initialize a tag-comparator power model.
+ * model: comparator model id (0 means "none" and fails); n_bits: tag width;
+ * assoc: associativity; n_pre: precharge drains per line;
+ * matchline_len / tagline_len: CAM wire lengths (unused for CACHE_COMPONENT).
+ * Returns 0 on success, -1 if model is 0 or >= COMP_MAX_MODEL.
+ * The condition below intentionally assigns comp->model, then tests the
+ * assigned value for non-zero (the idiom used by every *_init here). */
+static int SIM_array_comp_init(power_comp *comp, int model, unsigned n_bits, unsigned assoc, unsigned n_pre, double matchline_len, double tagline_len )
+{
+ if ((comp->model = model) && model < COMP_MAX_MODEL) {
+ comp->n_bits = n_bits;
+ comp->assoc = assoc;
+ /* redundant field */
+ comp->comp_mask = HAMM_MASK(n_bits);
+
+ SIM_array_comp_clear_stat(comp);
+
+ switch ( model ) {
+ case CACHE_COMPONENT:
+ comp->e_access = SIM_array_comp_base_cap() * EnergyFactor;
+ comp->e_match = SIM_array_comp_match_cap( n_bits ) * EnergyFactor;
+ comp->e_mismatch = SIM_array_comp_mismatch_cap( n_pre ) * EnergyFactor;
+ comp->e_miss = SIM_array_comp_miss_cap( assoc ) * EnergyFactor;
+ comp->e_bit_match = SIM_array_comp_bit_match_cap() * EnergyFactor;
+ comp->e_bit_mismatch = SIM_array_comp_bit_mismatch_cap() * EnergyFactor;
+ comp->e_chg_addr = SIM_array_comp_chgaddr_cap() * EnergyFactor;
+ break;
+
+ case CAM_COMP:
+ /* NOTE(review): e_chg_addr is zeroed here then immediately
+ * overwritten below -- the first assignment is a dead store */
+ comp->e_access = comp->e_match = comp->e_chg_addr = 0;
+ comp->e_bit_match = comp->e_bit_mismatch = 0;
+ /* energy consumption of tagline */
+ comp->e_chg_addr = SIM_cam_comp_tagline_cap( assoc, tagline_len ) * EnergyFactor;
+ comp->e_mismatch = SIM_cam_comp_mismatch_cap( n_bits, n_pre, matchline_len ) * EnergyFactor;
+ comp->e_miss = SIM_cam_comp_miss_cap( assoc ) * EnergyFactor;
+ break;
+
+ default: break;/* some error handler */
+ }
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/* Record one global tag comparison: count the miss (if any) and charge
+ * the Hamming distance between the previous and current tag-address
+ * values to the address-change counter. For a cache comparator the
+ * change is seen by every way, so it is scaled by the associativity.
+ * Always returns 0. */
+int SIM_array_comp_global_record(power_comp *comp, unsigned long int prev_value, unsigned long int curr_value, int miss )
+{
+    if ( miss )
+        comp->n_miss ++;
+
+    if ( comp->model == CACHE_COMPONENT ) {
+        comp->n_access ++;
+        comp->n_chg_addr += SIM_power_Hamming( prev_value, curr_value, comp->comp_mask ) * comp->assoc;
+    }
+    else if ( comp->model == CAM_COMP ) {
+        comp->n_chg_addr += SIM_power_Hamming( prev_value, curr_value, comp->comp_mask );
+    }
+    /* other models: nothing to record (matches the old default case) */
+
+    return 0;
+}
+
+
+/* Record one local tag comparison (one way of one set).
+ * prev_tag: tag driven before this compare; curr_tag: tag read from the
+ * array; input: tag portion of the request address.
+ * recover != 0 means prev_tag is restored on the next cycle (e.g. driven
+ * by a sense amplifier), so each bit flip is counted twice.
+ * Returns 1 if this way misses (tag mismatch), 0 on a hit.
+ * Fix: the original wrote `if ( mismatch = (curr_tag != input) )`, an
+ * assignment inside the condition (-Wparentheses fodder and easy to
+ * misread as `==`). The comparison is now a separate statement; behavior
+ * is unchanged. */
+int SIM_array_comp_local_record(power_comp *comp, unsigned long int prev_tag, unsigned long int curr_tag, unsigned long int input, int recover )
+{
+    unsigned H_dist;
+    int mismatch;
+
+    mismatch = ( curr_tag != input );
+    if ( mismatch ) comp->n_mismatch ++;
+
+    /* for cam, input changes are reflected in memory cells */
+    if ( comp->model == CACHE_COMPONENT ) {
+        if ( recover )
+            comp->n_chg_addr += 2 * SIM_power_Hamming( prev_tag, curr_tag, comp->comp_mask );
+        else
+            comp->n_chg_addr += SIM_power_Hamming( prev_tag, curr_tag, comp->comp_mask );
+
+        if ( mismatch ) {
+            /* mismatching bits dissipate differently from matching ones */
+            H_dist = SIM_power_Hamming( curr_tag, input, comp->comp_mask );
+            comp->n_bit_mismatch += H_dist;
+            comp->n_bit_match += comp->n_bits - H_dist;
+        }
+        else comp->n_match ++;
+    }
+
+    return mismatch;
+}
+
+
+/* Total comparator energy: each activity counter weighted by its
+ * per-event energy (same accumulation order as before). */
+double SIM_array_comp_report(power_comp *comp )
+{
+    double total;
+
+    total  = comp->n_access * comp->e_access;
+    total += comp->n_match * comp->e_match;
+    total += comp->n_mismatch * comp->e_mismatch;
+    total += comp->n_miss * comp->e_miss;
+    total += comp->n_bit_match * comp->e_bit_match;
+    total += comp->n_chg_addr * comp->e_chg_addr;
+    total += comp->n_bit_mismatch * comp->e_bit_mismatch;
+
+    return total;
+}
+
+// ------- Tag comparator end
+
+
+
+// ------- Multiplexor begin
+
+/* upon mismatch, 1 output of nor gates 1->0, then 0->1 on next cycle, so no 1/2 */
+/* Capacitance switched by the 3-stage mux driver on a mismatch:
+ * input inverter -> 'n_nor_gates' NOR gates -> output inverter.
+ * Only one NOR drain is counted since only one output toggles. */
+static double SIM_array_mux_mismatch_cap( unsigned n_nor_gates )
+{
+ double Cmul;
+
+ /* stage 1: inverter */
+ Cmul = SIM_power_draincap( Wmuxdrv12n, NCH, 1 ) + SIM_power_draincap( Wmuxdrv12p, PCH, 1 ) +
+ SIM_power_gatecap( Wmuxdrv12n, 1 ) + SIM_power_gatecap( Wmuxdrv12p, 1 );
+
+ /* stage 2: nor gates */
+ /* gate cap of nor gates */
+ Cmul += n_nor_gates * ( SIM_power_gatecap( WmuxdrvNORn, 1 ) + SIM_power_gatecap( WmuxdrvNORp, 1 ));
+ /* drain cap of nor gates, only count 1 */
+ Cmul += SIM_power_draincap( WmuxdrvNORp, PCH, 2 ) + 2 * SIM_power_draincap( WmuxdrvNORn, NCH, 1 );
+
+ /* stage 3: output inverter */
+ Cmul += SIM_power_gatecap( Wmuxdrv3n, 1 ) + SIM_power_gatecap( Wmuxdrv3p, 1 ) +
+ SIM_power_draincap( Wmuxdrv3n, NCH, 1 ) + SIM_power_draincap( Wmuxdrv3p, PCH, 1 );
+
+ return Cmul;
+}
+
+
+/* 2 nor gates switch gate signals, so no 1/2 */
+/* WHS: assume address changes won't propagate until matched or mismatched */
+/* Capacitance switched per changed address bit: the gate caps of one
+ * NOR-gate input pair in the mux driver. */
+static double SIM_array_mux_chgaddr_cap( void )
+{
+    double nor_inputs = SIM_power_gatecap( WmuxdrvNORn, 1 ) + SIM_power_gatecap( WmuxdrvNORp, 1 );
+
+    return nor_inputs;
+}
+
+
+/* Reset the multiplexor activity counters to zero. Always returns 0. */
+static int SIM_array_mux_clear_stat(power_mux *mux)
+{
+    mux->n_mismatch = 0;
+    mux->n_chg_addr = 0;
+    return 0;
+}
+
+
+/* Initialize a multiplexor power model: record the associativity, clear
+ * stats, and compute per-event energies. Returns 0 on success, -1 when
+ * 'model' is invalid (0 or >= MUX_MAX_MODEL); mux->model is recorded
+ * either way. */
+static int SIM_array_mux_init(power_mux *mux, int model, unsigned n_gates, unsigned assoc )
+{
+    mux->model = model;
+
+    if (model == 0 || model >= MUX_MAX_MODEL) {
+        return -1;
+    }
+
+    mux->assoc = assoc;
+    SIM_array_mux_clear_stat(mux);
+
+    mux->e_mismatch = SIM_array_mux_mismatch_cap( n_gates ) * EnergyFactor;
+    mux->e_chg_addr = SIM_array_mux_chgaddr_cap() * EnergyFactor;
+
+    return 0;
+}
+
+
+/* Record one mux operation. A column-address change toggles the select
+ * inputs of every way; on a hit exactly one way matches, so one fewer
+ * mismatch than the associativity is charged. Always returns 0. */
+int SIM_array_mux_record(power_mux *mux, unsigned long int prev_addr, unsigned long int curr_addr, int miss )
+{
+    if ( curr_addr != prev_addr )
+        mux->n_chg_addr += mux->assoc;
+
+    mux->n_mismatch += miss ? mux->assoc : mux->assoc - 1;
+
+    return 0;
+}
+
+
+/* Total multiplexor energy: mismatches plus address changes, each
+ * weighted by its per-event energy. */
+double SIM_array_mux_report(power_mux *mux )
+{
+    double total = mux->n_mismatch * mux->e_mismatch + mux->n_chg_addr * mux->e_chg_addr;
+
+    return total;
+}
+
+// ------- Multiplexor end
+
+
+// ------- Output driver begin
+
+/* output driver should be disabled somehow when no access occurs, so no 1/2 */
+/* Capacitance switched when the output driver is selected: the select
+ * inverter plus one gate (of two) per data bit on the nand/nor pair --
+ * the other gate and the drains depend on the data value and are
+ * accounted separately in outdata_cap. */
+static double SIM_array_outdrv_select_cap( unsigned data_width )
+{
+ double Ctotal;
+
+ /* stage 1: inverter */
+ Ctotal = SIM_power_gatecap( Woutdrvseln, 1 ) + SIM_power_gatecap( Woutdrvselp, 1 ) +
+ SIM_power_draincap( Woutdrvseln, NCH, 1 ) + SIM_power_draincap( Woutdrvselp, PCH, 1 );
+
+ /* stage 2: gate cap of nand gate and nor gate */
+ /* only consider 1 gate cap because another and drain cap switch depends on data value */
+ Ctotal += data_width *( SIM_power_gatecap( Woutdrvnandn, 1 ) + SIM_power_gatecap( Woutdrvnandp, 1 ) +
+ SIM_power_gatecap( Woutdrvnorn, 1 ) + SIM_power_gatecap( Woutdrvnorp, 1 ));
+
+ return Ctotal;
+}
+
+
+/* WHS: assume data changes won't propagate until enabled */
+/* Capacitance switched per changed data bit while the driver is not
+ * enabled: half the nand/nor input-gate cap (changes don't propagate
+ * until the driver is selected). */
+static double SIM_array_outdrv_chgdata_cap( void )
+{
+    double gates;
+
+    gates  = SIM_power_gatecap( Woutdrvnandn, 1 ) + SIM_power_gatecap( Woutdrvnandp, 1 );
+    gates += SIM_power_gatecap( Woutdrvnorn, 1 );
+    gates += SIM_power_gatecap( Woutdrvnorp, 1 );
+
+    return gates / 2;
+}
+
+
+/* no 1/2 for the same reason as outdrv_select_cap */
+/* Capacitance switched when driving one output bit of the given 'value'
+ * (nonzero = driving a 1). No 1/2 factor for the same reason as
+ * select_cap: the driver is disabled when idle. */
+static double SIM_array_outdrv_outdata_cap( unsigned value )
+{
+    double cap;
+
+    if ( value ) {
+        /* stage 1: drain cap of the nand gate */
+        cap = SIM_power_draincap( Woutdrvnandn, NCH, 2 ) + 2 * SIM_power_draincap( Woutdrvnandp, PCH, 1 );
+        /* stage 2: gate cap of the pmos half of the output inverter */
+        cap += SIM_power_gatecap( Woutdriverp, 1 );
+    }
+    else {
+        /* stage 1: drain cap of the nor gate */
+        cap = 2 * SIM_power_draincap( Woutdrvnorn, NCH, 1 ) + SIM_power_draincap( Woutdrvnorp, PCH, 2 );
+        /* stage 2: gate cap of the nmos half of the output inverter */
+        cap += SIM_power_gatecap( Woutdrivern, 1 );
+    }
+
+    /* drain cap of the output inverter itself belongs to the bus cap */
+    return cap;
+}
+
+
+/* Reset every output-driver activity counter to zero. Always returns 0. */
+static int SIM_array_outdrv_clear_stat(power_out *outdrv)
+{
+    outdrv->n_select = 0;
+    outdrv->n_chg_data = 0;
+    outdrv->n_out_0 = 0;
+    outdrv->n_out_1 = 0;
+    return 0;
+}
+
+
+/* Initialize an output-driver power model for items of 'item_width' bits.
+ * Returns 0 on success, -1 if model is 0 or >= OUTDRV_MAX_MODEL.
+ * The condition intentionally assigns outdrv->model then tests it for
+ * non-zero, matching the other *_init functions in this file. */
+static int SIM_array_outdrv_init(power_out *outdrv, int model, unsigned item_width )
+{
+ if ((outdrv->model = model) && model < OUTDRV_MAX_MODEL) {
+ outdrv->item_width = item_width;
+ /* redundant field */
+ outdrv->out_mask = HAMM_MASK(item_width);
+
+ SIM_array_outdrv_clear_stat(outdrv);
+
+ outdrv->e_select = SIM_array_outdrv_select_cap( item_width ) * EnergyFactor;
+ outdrv->e_out_1 = SIM_array_outdrv_outdata_cap( 1 ) * EnergyFactor;
+ outdrv->e_out_0 = SIM_array_outdrv_outdata_cap( 0 ) * EnergyFactor;
+
+ /* data-change energy only applies where data lines are separate
+ * from the storage structure */
+ switch ( model ) {
+ case CACHE_OUTDRV:
+ outdrv->e_chg_data = SIM_array_outdrv_chgdata_cap() * EnergyFactor;
+ break;
+
+ case CAM_OUTDRV:
+ /* input changes are reflected in memory cells */
+ case REG_OUTDRV:
+ /* input changes are reflected in bitlines */
+ outdrv->e_chg_data = 0;
+ break;
+
+ default: break;/* some error handler */
+ }
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/* Record one driven output word: one select event, plus the counts of
+ * 1-bits and 0-bits in 'data' (within out_mask). Always returns 0. */
+int SIM_array_outdrv_global_record(power_out *outdrv, unsigned long int data )
+{
+    unsigned ones;
+
+    outdrv->n_select ++;
+
+    /* Hamming distance from 0 == population count of the masked word */
+    ones = SIM_power_Hamming( data, 0, outdrv->out_mask );
+    outdrv->n_out_1 += ones;
+    outdrv->n_out_0 += outdrv->item_width - ones;
+
+    return 0;
+}
+
+
+/* Record local data-line activity: bit flips between prev_data and
+ * curr_data, doubled when 'recover' is set (prev_data is restored next
+ * cycle, e.g. driven by a sense amplifier). Always returns 0.
+ * NOTE: must not be called for a fully-associative cache. */
+int SIM_array_outdrv_local_record(power_out *outdrv, unsigned long int prev_data, unsigned long int curr_data, int recover )
+{
+    int factor = recover ? 2 : 1;
+
+    outdrv->n_chg_data += factor * SIM_power_Hamming( prev_data, curr_data, outdrv->out_mask );
+
+    return 0;
+}
+
+
+/* Total output-driver energy: each activity counter weighted by its
+ * per-event energy (same accumulation order as before). */
+double SIM_array_outdrv_report(power_out *outdrv )
+{
+    double total;
+
+    total  = outdrv->n_select * outdrv->e_select;
+    total += outdrv->n_chg_data * outdrv->e_chg_data;
+    total += outdrv->n_out_1 * outdrv->e_out_1;
+    total += outdrv->n_out_0 * outdrv->e_out_0;
+
+    return total;
+}
+
+// ------- Output driver end
+
+
+
+// ------- Memory cell begin
+
+/* WHS: use Wmemcella and Wmemcellbscale to compute tx width of memory cell */
+/* Per-bit-flip capacitance of a normal (non-CAM) memory cell:
+ * pass-transistor drains (read ports scaled by single/differential 'end',
+ * plus write ports when r/w bitlines are separate) and the cross-coupled
+ * inverter pair's drain and gate caps. */
+static double SIM_array_mem_cap( unsigned read_ports, unsigned write_ports, int share_rw, unsigned end )
+{
+ double Ctotal;
+
+ /* part 1: drain capacitance of pass transistors */
+ Ctotal = SIM_power_draincap( Wmemcellr, NCH, 1 ) * read_ports * end / 2;
+ if ( ! share_rw )
+ Ctotal += SIM_power_draincap( Wmemcellw, NCH, 1 ) * write_ports;
+
+ /* has coefficient ( 1/2 * 2 ) */
+ /* part 2: drain capacitance of memory cell */
+ Ctotal += SIM_power_draincap( Wmemcella, NCH, 1 ) + SIM_power_draincap( Wmemcella * Wmemcellbscale, PCH, 1 );
+
+ /* has coefficient ( 1/2 * 2 ) */
+ /* part 3: gate capacitance of memory cell */
+ Ctotal += SIM_power_gatecap( Wmemcella, 1 ) + SIM_power_gatecap( Wmemcella * Wmemcellbscale, 1 );
+
+ return Ctotal;
+}
+
+
+/* Reset the memory-cell switch counter to zero. Always returns 0. */
+static int SIM_array_mem_clear_stat(power_mem *mem)
+{
+    mem->n_switch = 0;
+    return 0;
+}
+
+
+/* Initialize a memory-cell power model. Selects the per-flip switching
+ * energy by cell type (CAM variants vs. normal SRAM); leakage current is
+ * only modeled for the normal-cell default case.
+ * Returns 0 on success, -1 if model is 0 or >= MEM_MAX_MODEL.
+ * The condition intentionally assigns mem->model then tests it for
+ * non-zero, matching the other *_init functions in this file. */
+static int SIM_array_mem_init(power_mem *mem, int model, unsigned read_ports, unsigned write_ports, int share_rw, unsigned end )
+{
+ double i_leakage;
+
+ if ((mem->model = model) && model < MEM_MAX_MODEL) {
+ mem->end = end;
+ SIM_array_mem_clear_stat(mem);
+
+ switch ( model ) {
+ case CAM_TAG_RW_MEM:
+ mem->e_switch = SIM_cam_tag_mem_cap( read_ports, write_ports, share_rw, end, SIM_ARRAY_RW ) * EnergyFactor;
+ break;
+
+ /* FIXME: it's only an approximation using CAM_TAG_WO_MEM to emulate CAM_ATTACH_MEM */
+ case CAM_ATTACH_MEM:
+ case CAM_TAG_WO_MEM:
+ mem->e_switch = SIM_cam_tag_mem_cap( read_ports, write_ports, share_rw, end, SIM_ARRAY_WO ) * EnergyFactor;
+ break;
+
+ case CAM_DATA_MEM:
+ mem->e_switch = SIM_cam_data_mem_cap( read_ports, write_ports ) * EnergyFactor;
+ break;
+
+ default: /* NORMAL_MEM */
+ mem->e_switch = SIM_array_mem_cap( read_ports, write_ports, share_rw, end ) * EnergyFactor;
+
+ /* static power: sum leakage of the cell's transistors, then
+ * scale by technology point (NMOS_TAB/PMOS_TAB index 0) */
+ i_leakage = 0;
+ /* memory cell */
+ i_leakage += (Wmemcella * NMOS_TAB[0] + Wmemcella * Wmemcellbscale * PMOS_TAB[0]) * 2;
+ /* read port pass tx */
+ i_leakage += Wmemcellr * NMOS_TAB[0] * end * read_ports;
+ /* write port pass tx */
+ if (! share_rw)
+ i_leakage += Wmemcellw * NMOS_TAB[0] * 2 * write_ports;
+
+ mem->i_leakage = i_leakage / PARM_TECH_POINT * 100;
+ }
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/* Charge one cell update: the number of flipped bits (within the low
+ * 'width' bits) between the old and new values. Always returns 0. */
+int SIM_array_mem_record(power_mem *mem, unsigned long int prev_value, unsigned long int curr_value, unsigned width )
+{
+    mem->n_switch += SIM_power_Hamming( prev_value, curr_value, HAMM_MASK(width));
+    return 0;
+}
+
+
+/* Total memory-cell energy: bit flips times energy per flip. */
+double SIM_array_mem_report(power_mem *mem )
+{
+    return mem->n_switch * mem->e_switch;
+}
+
+// ------- Memory cell end
+
+
+
+// ------- Precharge begin
+
+/* consider charge then discharge, so no 1/2 */
+/* Gate capacitance of one precharge transistor of the given geometry. */
+static double SIM_array_pre_cap( double width, double length )
+{
+    double gate_cap = SIM_power_gatecap( width, length );
+
+    return gate_cap;
+}
+
+
+/* return # of precharging gates per column */
+/* Number of precharging gates per column for the given precharge model;
+ * 0 for an unrecognized model. */
+static unsigned n_pre_gate( int model )
+{
+    if ( model == SINGLE_BITLINE ) return 2;
+    if ( model == EQU_BITLINE )    return 3;
+    if ( model == SINGLE_OTHER )   return 1;
+
+    return 0; /* unknown model */
+}
+
+
+/* return # of precharging drains per line */
+/* Number of precharging drains per line for the given precharge model;
+ * 0 for an unrecognized model. */
+static unsigned n_pre_drain( int model )
+{
+    if ( model == SINGLE_BITLINE ) return 1;
+    if ( model == EQU_BITLINE )    return 2;
+    if ( model == SINGLE_OTHER )   return 1;
+
+    return 0; /* unknown model */
+}
+
+
+/* Reset the precharge event counter to zero. Always returns 0. */
+static int SIM_array_pre_clear_stat(power_arr_pre *pre)
+{
+    pre->n_charge = 0;
+    return 0;
+}
+
+
+/* Initialize a precharge-circuit power model for transistors of width
+ * 'pre_size'. Returns 0 on success, -1 if model is 0 or >= PRE_MAX_MODEL.
+ * NOTE(review): n_pre_gate(model) is evaluated before the model is
+ * validated; for an invalid model it returns 0 and the result is unused,
+ * so this is harmless. The condition intentionally assigns pre->model
+ * then tests it for non-zero, as in the other *_init functions. */
+static int SIM_array_pre_init(power_arr_pre *pre, int model, double pre_size )
+{
+ unsigned n_gate;
+
+ n_gate = n_pre_gate(model);
+
+ if ((pre->model = model) && model < PRE_MAX_MODEL) {
+ SIM_array_pre_clear_stat(pre);
+
+ /* WHS: 10 should go to PARM */
+ pre->e_charge = SIM_array_pre_cap( pre_size, 10 ) * n_gate * EnergyFactor;
+
+ /* static power */
+ pre->i_leakage = n_gate * pre_size * PMOS_TAB[0] / PARM_TECH_POINT * 100;
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/* Charge 'n_charge' precharge events to the running count. Always
+ * returns 0. */
+int SIM_array_pre_record(power_arr_pre *pre, unsigned long int n_charge )
+{
+    pre->n_charge += n_charge;
+    return 0;
+}
+
+
+/* Total precharge energy: event count times energy per event. */
+double SIM_array_pre_report(power_arr_pre *pre )
+{
+    return pre->n_charge * pre->e_charge;
+}
+
+// ------- Precharge end
+
+/* ---------- buffer model end ------------ */
+
+
+
+
+
+
+
+
+/***** from SIM_array_internal_m.c *********/
+
+
+/* Reset the dynamic state of one array port. All address/line state is
+ * simply zeroed; over a long enough run the resulting error should be
+ * negligible. Clearing of port->data_line (for fully-associative or
+ * non-shared-rw arrays) is currently disabled. Always returns 0. */
+int SIM_array_port_state_init(power_array_info *info, SIM_array_port_state_t *port )
+{
+    //if ( IS_FULLY_ASSOC( info ) || !(info->share_rw))
+    //bzero( port->data_line, port->data_line_size );
+
+    port->tag_line = 0;
+    port->row_addr = 0;
+    port->col_addr = 0;
+    port->tag_addr = 0;
+
+    return 0;
+}
+
+
+/* Reset the dynamic state of one array set: clear the entry pointers and,
+ * for fully-associative arrays, the per-operation write flags. Remaining
+ * fields have no meaningful default. Always returns 0. */
+int SIM_array_set_state_init( power_array_info *info, SIM_array_set_state_t *set )
+{
+    set->entry = NULL;
+    set->entry_set = NULL;
+
+    if ( IS_FULLY_ASSOC( info )) {
+        set->write_flag = 0;
+        set->write_back_flag = 0;
+    }
+
+    return 0;
+}
+
+
+/* record row decoder and wordline activity */
+/* only used by non-fully-associative array, but we check it anyway */
+/* Record row-decoder and wordline activity for one access.
+ * row_addr: decoded row address; rw: SIM_ARRAY_READ/WRITE direction.
+ * Updates port->row_addr as a side effect when a row decoder exists.
+ * Returns 0 on success, -1 for a fully-associative array (which has no
+ * row decoder; callers should not reach here, but we check anyway). */
+int SIM_power_array_dec( power_array_info *info, power_array *arr, SIM_array_port_state_t *port, unsigned long int row_addr, int rw )
+{
+ if ( ! IS_FULLY_ASSOC( info )) {
+ /* record row decoder stats */
+ if (info->row_dec_model) {
+ SIM_array_dec_record( &arr->row_dec, port->row_addr, row_addr );
+
+ /* update state */
+ port->row_addr = row_addr;
+ }
+
+ /* record wordline stats */
+ SIM_array_wordline_record( &arr->data_wordline, rw, info->data_ndwl );
+ if ( HAVE_TAG( info ))
+ SIM_array_wordline_record( &arr->tag_wordline, rw, info->tag_ndwl );
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/* record read data activity (including bitline and sense amplifier) */
+/* only used by non-fully-associative array, but we check it anyway */
+/* data only used by RF array */
+/* Record the bitline (and, for differential arrays, sense-amplifier)
+ * activity of one data read. 'data' is only meaningful for single-ended
+ * (data_end == 1, e.g. RF) arrays, where the read value drives the
+ * bitlines directly. Returns 0 on success, -1 for a fully-associative
+ * array with differential bitlines (unsupported path). */
+int SIM_power_array_data_read( power_array_info *info, power_array *arr, unsigned long int data )
+{
+ if (info->data_end == 1) {
+ SIM_array_bitline_record( &arr->data_bitline, SIM_ARRAY_READ, info->eff_data_cols, 0, data );
+
+ return 0;
+ }
+ else if ( ! IS_FULLY_ASSOC( info )) {
+ /* differential read: bitlines swing plus one amp per column */
+ SIM_array_bitline_record( &arr->data_bitline, SIM_ARRAY_READ, info->eff_data_cols, 0, 0 );
+ SIM_array_amp_record( &arr->data_amp, info->eff_data_cols );
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/* record write data bitline and memory cell activity */
+/* assume no alignment restriction on write, so (char *) */
+/* set only used by fully-associative array */
+/* data_line only used by fully-associative or RF array */
+/* Record bitline and memory-cell activity of one data write of 'n_item'
+ * bytes. 'set' is only used by fully-associative arrays (to drive the
+ * shared wordline once per operation); 'data_line' mirrors the current
+ * line contents for fully-associative / RF arrays and is updated in
+ * place; old_data/new_data are the before/after byte values.
+ * Always returns 0. */
+int SIM_power_array_data_write( power_array_info *info, power_array *arr, SIM_array_set_state_t *set, unsigned n_item, char *data_line, char *old_data, char *new_data )
+{
+ unsigned i;
+
+ /* record bitline stats */
+ if ( IS_FULLY_ASSOC( info )) {
+ /* wordline should be driven only once */
+ if ( ! set->write_flag ) {
+ SIM_array_wordline_record( &arr->data_wordline, SIM_ARRAY_WRITE, 1 );
+ set->write_flag = 1;
+ }
+
+ /* for fully-associative array, data bank has no read
+ * bitlines, so bitlines not written have no activity */
+ for ( i = 0; i < n_item; i ++ ) {
+ /* 8 = bits per written byte */
+ SIM_array_bitline_record( &arr->data_bitline, SIM_ARRAY_WRITE, 8, data_line[i], new_data[i] );
+ /* update state */
+ data_line[i] = new_data[i];
+ }
+ }
+ else if (info->share_rw) {
+ /* there is some subtlety here: write width may not be as wide as block size,
+ * bitlines not written are actually read, but column selector should be off,
+ * so read energy per bitline is the same as write energy per bitline */
+ SIM_array_bitline_record( &arr->data_bitline, SIM_ARRAY_WRITE, info->eff_data_cols, 0, 0 );
+
+ /* write in all sub-arrays if direct-mapped, which implies 1 cycle write latency,
+ * in those sub-arrays wordlines are not driven, so only n items columns switch */
+ if ( IS_DIRECT_MAP( info ) && info->data_ndbl > 1 )
+ SIM_array_bitline_record( &arr->data_bitline, SIM_ARRAY_WRITE, n_item * 8 * ( info->data_ndbl - 1 ), 0, 0 );
+ }
+ else { /* separate R/W bitlines */
+ /* same arguments as in the previous case apply here, except that when we say
+ * read_energy = write_energy, we omit the energy of write driver gate cap */
+ for ( i = 0; i < n_item; i ++ ) {
+ SIM_array_bitline_record( &arr->data_bitline, SIM_ARRAY_WRITE, 8, data_line[i], new_data[i] );
+ /* update state */
+ data_line[i] = new_data[i];
+ }
+ }
+
+ /* record memory cell stats: per-byte Hamming distance old -> new */
+ for ( i = 0; i < n_item; i ++ )
+ SIM_array_mem_record( &arr->data_mem, old_data[i], new_data[i], 8 );
+
+ return 0;
+}
+
+
+/* Record tag-read activity: tag bitlines plus one sense amp per tag
+ * column. For a fully-associative array the tag is only read when
+ * writing back, so the shared tag wordline is driven once and the
+ * write-back flag is latched in 'set'. Not used by RF arrays.
+ * Always returns 0. */
+int SIM_power_array_tag_read( power_array_info *info, power_array *arr, SIM_array_set_state_t *set )
+{
+    if ( IS_FULLY_ASSOC( info )) {
+        SIM_array_wordline_record( &arr->tag_wordline, SIM_ARRAY_READ, 1 );
+        set->write_back_flag = 1;
+    }
+
+    SIM_array_bitline_record( &arr->tag_bitline, SIM_ARRAY_READ, info->eff_tag_cols, 0, 0 );
+    SIM_array_amp_record( &arr->tag_amp, info->eff_tag_cols );
+
+    return 0;
+}
+
+
+/* record write tag bitline and memory cell activity */
+/* WHS: assume update of use bit, valid bit, dirty bit and tag will be coalesced */
+/* only used by non-RF array */
+/* port only used by fully-associative array */
+//int SIM_power_array_tag_update( power_array_info *info, power_array *arr, SIM_array_port_state_t *port, SIM_array_set_state_t *set )
+//{
+ //unsigned i;
+ //unsigned long int curr_tag;
+ //power_mem *tag_attach_mem;
+
+ /* get current tag */
+ //if ( set->entry )
+ //curr_tag = (*info->get_entry_tag)( set->entry );
+
+ // if ( IS_FULLY_ASSOC( info ))
+ // tag_attach_mem = &arr->tag_attach_mem;
+ //else
+ //tag_attach_mem = &arr->tag_mem;
+
+ /* record tag bitline stats */
+ //if ( IS_FULLY_ASSOC( info )) {
+ // if ( set->entry && curr_tag != set->tag_bak ) {
+ // /* shared wordline should be driven only once */
+ // if ( ! set->write_back_flag )
+ // SIM_array_wordline_record( &arr->tag_wordline, SIM_ARRAY_WRITE, 1 );
+
+ /* WHS: value of tag_line doesn't matter if not write_through */
+ //SIM_array_bitline_record( &arr->tag_bitline, SIM_ARRAY_WRITE, info->eff_tag_cols, port->tag_line, curr_tag );
+ /* update state */
+ //if ( IS_WRITE_THROUGH( info ))
+ // port->tag_line = curr_tag;
+ //}
+ //}
+ //else {
+ /* tag update cannot occur at the 1st cycle, so no other sub-arrays */
+ // SIM_array_bitline_record( &arr->tag_bitline, SIM_ARRAY_WRITE, info->eff_tag_cols, 0, 0 );
+ //}
+
+ /* record tag memory cell stats */
+ //if ( HAVE_USE_BIT( info ))
+ // for ( i = 0; i < info->assoc; i ++ )
+ // SIM_array_mem_record( tag_attach_mem, set->use_bak[i], (*info->get_set_use_bit)( set->entry_set, i ), info->use_bit_width );
+
+ //if ( set->entry ) {
+ // SIM_array_mem_record( tag_attach_mem, set->valid_bak, (*info->get_entry_valid_bit)( set->entry ), info->valid_bit_width );
+ //SIM_array_mem_record( &arr->tag_mem, set->tag_bak, curr_tag, info->tag_addr_width );
+
+ //if ( IS_WRITE_BACK( info ))
+ // SIM_array_mem_record( tag_attach_mem, set->dirty_bak, (*info->get_entry_dirty_bit)( set->entry ), 1 );
+ //}
+
+ //return 0;
+//}
+
+
+/* record tag compare activity (including tag comparator, column decoder and multiplexor) */
+/* NOTE: this function may be called twice during ONE array operation, remember to update
+ * states at the end so that call to *_record won't add erroneous extra energy */
+/* only used by non-RF array */
+//int SIM_power_array_tag_compare( power_array_info *info, power_array *arr, SIM_array_port_state_t *port, unsigned long int tag_input, unsigned long int col_addr, SIM_array_set_state_t *set )
+//{
+ //int miss = 0;
+ //unsigned i;
+
+ /* record tag comparator stats */
+ //for ( i = 0; i < info->assoc; i ++ ) {
+ /* WHS: sense amplifiers output 0 when idle */
+ //if ( SIM_array_comp_local_record( &arr->comp, 0, (*info->get_set_tag)( set->entry_set, i ), tag_input, SIM_ARRAY_RECOVER ))
+ // miss = 1;
+ //}
+
+ //SIM_array_comp_global_record( &arr->comp, port->tag_addr, tag_input, miss );
+
+ /* record column decoder stats */
+ //if ( HAVE_COL_DEC( info ))
+ //SIM_array_dec_record( &arr->col_dec, port->col_addr, col_addr );
+
+ /* record multiplexor stats */
+ //if ( HAVE_COL_MUX( info ))
+ //SIM_array_mux_record( &arr->mux, port->col_addr, col_addr, miss );
+
+ /* update state */
+ //port->tag_addr = tag_input;
+ //if ( HAVE_COL_DEC( info ))
+ //port->col_addr = col_addr;
+
+ //return 0;
+//}
+
+
+/* record output driver activity */
+/* assume alignment restriction on read, so specify data_size */
+/* WHS: it's really a mess to use data_size to specify data type */
+/* data_all only used by non-RF and non-fully-associative array */
+/* WHS: don't support 128-bit or wider integer */
+//int SIM_power_array_output( power_array_info *info, power_array *arr, unsigned data_size, unsigned length, void *data_out, void *data_all )
+//{
+ // unsigned i, j;
+
+ /* record output driver stats */
+ //for ( i = 0; i < length; i ++ ) {
+ // switch ( data_size ) {
+ // case 1: SIM_array_outdrv_global_record( &arr->outdrv, ((unsigned8_t *)data_out)[i] );
+ // break;
+ //case 2: SIM_array_outdrv_global_record( &arr->outdrv, ((unsigned16_t *)data_out)[i] );
+ // break;
+ //case 4: SIM_array_outdrv_global_record( &arr->outdrv, ((unsigned32_t *)data_out)[i] );
+ // break;
+ //case 8: SIM_array_outdrv_global_record( &arr->outdrv, ((unsigned64_t *)data_out)[i] );
+ // break;
+ //default: /* some error handler */
+ //}
+ //}
+
+ //if ( ! IS_FULLY_ASSOC( info )) {
+ //for ( i = 0; i < info->assoc; i ++ )
+ //for ( j = 0; j < info->n_item; j ++ )
+ /* sense amplifiers output 0 when idle */
+ //switch ( data_size ) {
+ //case 1: SIM_array_outdrv_local_record( &arr->outdrv, 0, ((unsigned8_t **)data_all)[i][j], SIM_ARRAY_RECOVER );
+ //break;
+ //case 2: SIM_array_outdrv_local_record( &arr->outdrv, 0, ((unsigned16_t **)data_all)[i][j], SIM_ARRAY_RECOVER );
+ //break;
+ //case 4: SIM_array_outdrv_local_record( &arr->outdrv, 0, ((unsigned32_t **)data_all)[i][j], SIM_ARRAY_RECOVER );
+ //break;
+ //case 8: SIM_array_outdrv_local_record( &arr->outdrv, 0, ((unsigned64_t **)data_all)[i][j], SIM_ARRAY_RECOVER );
+ //break;
+ //default: /* some error handler */
+ //}
+ //}
+
+ //return 0;
+//}
+
+
+/********* end from SIM_array_internal_m.c **********/
+
+
+// ------- Array init
+
+
+int power_array_init(power_array_info *info, power_array *arr )
+{
+ unsigned rows, cols, ports, dec_width, n_bitline_pre, n_colsel_pre;
+ double wordline_len, bitline_len, tagline_len, matchline_len;
+ double wordline_cmetal, bitline_cmetal;
+ double Cline, pre_size, comp_pre_size;
+
+ arr->i_leakage = 0;
+
+ /* sanity check */
+ if ( info->read_ports == 0 ) info->share_rw = 0;
+ if ( info->share_rw ) { //AMIT are read and write ports shared?
+ info->data_end = 2;
+ info->tag_end = 2;
+ }
+
+ if ( info->share_rw ) ports = info->read_ports;
+ else ports = info->read_ports + info->write_ports;
+
+ /* data array unit length wire cap */
+ if (ports > 1) {
+ /* 3x minimal spacing */
+ wordline_cmetal = CC3M3metal;
+ bitline_cmetal = CC3M2metal;
+ }
+ else if (info->data_end == 2) {
+ /* wordline infinite spacing, bitline 3x minimal spacing */
+ wordline_cmetal = CM3metal;
+ bitline_cmetal = CC3M2metal;
+ }
+ else {
+ /* both infinite spacing */
+ wordline_cmetal = CM3metal;
+ bitline_cmetal = CM2metal;
+ }
+
+ info->data_arr_width = 0;
+ info->tag_arr_width = 0;
+ info->data_arr_height = 0;
+ info->tag_arr_height = 0;
+
+ /* BEGIN: data array power initialization */
+ if (dec_width = SIM_power_logtwo(info->n_set)) { //AMIT: not fully associative, n->sets!=1
+ /* row decoder power initialization */
+ SIM_array_dec_init( &arr->row_dec, info->row_dec_model, dec_width );
+
+ /* row decoder precharging power initialization */
+ //if ( is_dynamic_dec( info->row_dec_model ))
+ /* FIXME: need real pre_size */
+ //SIM_array_pre_init( &arr->row_dec_pre, info->row_dec_pre_model, 0 );
+
+ rows = info->n_set / info->data_ndbl / info->data_nspd; //AMIT: n_set is the number of sets(fully associative n_sets=1)
+ cols = info->blk_bits * info->assoc * info->data_nspd / info->data_ndwl; //AMIT: blk_bits is the line size
+
+ bitline_len = rows * ( RegCellHeight + ports * WordlineSpacing );
+ if ( info->data_end == 2 )
+ wordline_len = cols * ( RegCellWidth + 2 * ports * BitlineSpacing );
+ else /* info->data_end == 1 */
+ wordline_len = cols * ( RegCellWidth + ( 2 * ports - info->read_ports ) * BitlineSpacing );
+ info->data_arr_width = wordline_len;
+ info->data_arr_height = bitline_len;
+
+ /* compute precharging size */
+ /* FIXME: should consider n_pre and pre_size simultaneously */
+ Cline = rows * SIM_power_draincap( Wmemcellr, NCH, 1 ) + bitline_cmetal * bitline_len;
+ pre_size = SIM_power_driver_size( Cline, Period / 8 );
+ /* WHS: ?? compensate for not having an nmos pre-charging */
+ pre_size += pre_size * Wdecinvn / Wdecinvp;
+
+ /* bitline power initialization */
+ n_bitline_pre = n_pre_drain( info->data_bitline_pre_model );
+ n_colsel_pre = ( info->data_n_share_amp > 1 ) ? n_pre_drain( info->data_colsel_pre_model ) : 0;
+ SIM_array_bitline_init(&arr->data_bitline, info->data_bitline_model, info->share_rw, info->data_end, rows, bitline_len * bitline_cmetal, info->data_n_share_amp, n_bitline_pre, n_colsel_pre, pre_size, info->outdrv_model);
+ /* static power */
+ arr->i_leakage += arr->data_bitline.i_leakage * cols * info->write_ports;
+
+ /* bitline precharging power initialization */
+ SIM_array_pre_init( &arr->data_bitline_pre, info->data_bitline_pre_model, pre_size );
+ /* static power */
+ arr->i_leakage += arr->data_bitline_pre.i_leakage * cols * info->read_ports;
+ /* bitline column selector precharging power initialization */
+ if ( info->data_n_share_amp > 1 )
+ SIM_array_pre_init( &arr->data_colsel_pre, info->data_colsel_pre_model, pre_size );
+
+ /* sense amplifier power initialization */
+ SIM_array_amp_init( &arr->data_amp, info->data_amp_model );
+ }
+ else {
+ /* info->n_set == 1 means this array is fully-associative */
+ rows = info->assoc;
+ cols = info->blk_bits;
+
+ /* WHS: no read wordlines or bitlines */
+ bitline_len = rows * ( RegCellHeight + info->write_ports * WordlineSpacing );
+ wordline_len = cols * ( RegCellWidth + 2 * info->write_ports * BitlineSpacing );
+ info->data_arr_width = wordline_len;
+ info->data_arr_height = bitline_len;
+
+ /* bitline power initialization */
+ SIM_array_bitline_init(&arr->data_bitline, info->data_bitline_model, 0, info->data_end, rows, bitline_len * bitline_cmetal, 1, 0, 0, 0, info->outdrv_model);
+ }
+
+ /* wordline power initialization */
+ SIM_array_wordline_init( &arr->data_wordline, info->data_wordline_model, info->share_rw, cols, wordline_len * wordline_cmetal, info->data_end );
+ /* static power */
+ arr->i_leakage += arr->data_wordline.i_leakage * rows * ports;
+
+ if (dec_width = SIM_power_logtwo(info->n_item)) {
+ /* multiplexor power initialization */
+ SIM_array_mux_init( &arr->mux, info->mux_model, info->n_item, info->assoc );
+
+ /* column decoder power initialization */
+ SIM_array_dec_init( &arr->col_dec, info->col_dec_model, dec_width );
+
+ /* column decoder precharging power initialization */
+ //if ( is_dynamic_dec( info->col_dec_model ))
+ /* FIXME: need real pre_size */
+ //SIM_array_pre_init( &arr->col_dec_pre, info->col_dec_pre_model, 0 );
+ }
+
+ /* memory cell power initialization */
+ SIM_array_mem_init( &arr->data_mem, info->data_mem_model, info->read_ports, info->write_ports, info->share_rw, info->data_end );
+ /* static power */
+ arr->i_leakage += arr->data_mem.i_leakage * rows * cols;
+
+ /* output driver power initialization */
+ SIM_array_outdrv_init( &arr->outdrv, info->outdrv_model, info->data_width );
+ /* END: data array power initialization */
+
+
+ /* BEGIN: tag array power initialization */
+ /* assume a tag array must have memory cells */
+ if ( info->tag_mem_model ) {
+ if ( info->n_set > 1 ) {
+ /* tag array unit length wire cap */
+ if (ports > 1) {
+ /* 3x minimal spacing */
+ wordline_cmetal = CC3M3metal;
+ bitline_cmetal = CC3M2metal;
+ }
+ else if (info->data_end == 2) {
+ /* wordline infinite spacing, bitline 3x minimal spacing */
+ wordline_cmetal = CM3metal;
+ bitline_cmetal = CC3M2metal;
+ }
+ else {
+ /* both infinite spacing */
+ wordline_cmetal = CM3metal;
+ bitline_cmetal = CM2metal;
+ }
+
+ rows = info->n_set / info->tag_ndbl / info->tag_nspd;
+ cols = info->tag_line_width * info->assoc * info->tag_nspd / info->tag_ndwl;
+
+ bitline_len = rows * ( RegCellHeight + ports * WordlineSpacing );
+ if ( info->tag_end == 2 )
+ wordline_len = cols * ( RegCellWidth + 2 * ports * BitlineSpacing );
+ else /* info->tag_end == 1 */
+ wordline_len = cols * ( RegCellWidth + ( 2 * ports - info->read_ports ) * BitlineSpacing );
+ info->tag_arr_width = wordline_len;
+ info->tag_arr_height = bitline_len;
+
+ /* compute precharging size */
+ /* FIXME: should consider n_pre and pre_size simultaneously */
+ Cline = rows * SIM_power_draincap( Wmemcellr, NCH, 1 ) + bitline_cmetal * bitline_len;
+ pre_size = SIM_power_driver_size( Cline, Period / 8 );
+ /* WHS: ?? compensate for not having an nmos pre-charging */
+ pre_size += pre_size * Wdecinvn / Wdecinvp;
+
+ /* bitline power initialization */
+ n_bitline_pre = n_pre_drain( info->tag_bitline_pre_model );
+ n_colsel_pre = ( info->tag_n_share_amp > 1 ) ? n_pre_drain( info->tag_colsel_pre_model ) : 0;
+ SIM_array_bitline_init(&arr->tag_bitline, info->tag_bitline_model, info->share_rw, info->tag_end, rows, bitline_len * bitline_cmetal, info->tag_n_share_amp, n_bitline_pre, n_colsel_pre, pre_size, SIM_NO_MODEL);
+
+ /* bitline precharging power initialization */
+ SIM_array_pre_init( &arr->tag_bitline_pre, info->tag_bitline_pre_model, pre_size );
+ /* bitline column selector precharging power initialization */
+ if ( info->tag_n_share_amp > 1 )
+ SIM_array_pre_init( &arr->tag_colsel_pre, info->tag_colsel_pre_model, pre_size );
+
+ /* sense amplifier power initialization */
+ SIM_array_amp_init( &arr->tag_amp, info->tag_amp_model );
+
+ /* prepare for comparator initialization */
+ tagline_len = matchline_len = 0;
+ comp_pre_size = Wcomppreequ;
+ }
+ else { /* info->n_set == 1 */
+ /* cam cells are big enough, so infinite spacing */
+ wordline_cmetal = CM3metal;
+ bitline_cmetal = CM2metal;
+
+ rows = info->assoc;
+ /* FIXME: operations of valid bit, use bit and dirty bit are not modeled */
+ cols = info->tag_addr_width;
+
+ bitline_len = rows * ( CamCellHeight + ports * WordlineSpacing + info->read_ports * MatchlineSpacing );
+ if ( info->tag_end == 2 )
+ wordline_len = cols * ( CamCellWidth + 2 * ports * BitlineSpacing + 2 * info->read_ports * TaglineSpacing );
+ else /* info->tag_end == 1 */
+ wordline_len = cols * ( CamCellWidth + ( 2 * ports - info->read_ports ) * BitlineSpacing + 2 * info->read_ports * TaglineSpacing );
+ info->tag_arr_width = wordline_len;
+ info->tag_arr_height = bitline_len;
+
+ if ( is_rw_bitline ( info->tag_bitline_model )) {
+ /* compute precharging size */
+ /* FIXME: should consider n_pre and pre_size simultaneously */
+ Cline = rows * SIM_power_draincap( Wmemcellr, NCH, 1 ) + bitline_cmetal * bitline_len;
+ pre_size = SIM_power_driver_size( Cline, Period / 8 );
+ /* WHS: ?? compensate for not having an nmos pre-charging */
+ pre_size += pre_size * Wdecinvn / Wdecinvp;
+
+ /* bitline power initialization */
+ n_bitline_pre = n_pre_drain( info->tag_bitline_pre_model );
+ SIM_array_bitline_init(&arr->tag_bitline, info->tag_bitline_model, info->share_rw, info->tag_end, rows, bitline_len * bitline_cmetal, 1, n_bitline_pre, 0, pre_size, SIM_NO_MODEL);
+
+ /* bitline precharging power initialization */
+ SIM_array_pre_init( &arr->tag_bitline_pre, info->tag_bitline_pre_model, pre_size );
+
+ /* sense amplifier power initialization */
+ SIM_array_amp_init( &arr->tag_amp, info->tag_amp_model );
+ }
+ else {
+ /* bitline power initialization */
+ SIM_array_bitline_init(&arr->tag_bitline, info->tag_bitline_model, 0, info->tag_end, rows, bitline_len * bitline_cmetal, 1, 0, 0, 0, SIM_NO_MODEL);
+ }
+
+ /* memory cell power initialization */
+ SIM_array_mem_init( &arr->tag_attach_mem, info->tag_attach_mem_model, info->read_ports, info->write_ports, info->share_rw, info->tag_end );
+
+ /* prepare for comparator initialization */
+ tagline_len = bitline_len;
+ matchline_len = wordline_len;
+ comp_pre_size = Wmatchpchg;
+ }
+
+ /* wordline power initialization */
+ SIM_array_wordline_init( &arr->tag_wordline, info->tag_wordline_model, info->share_rw, cols, wordline_len * wordline_cmetal, info->tag_end );
+
+ /* comparator power initialization */
+ SIM_array_comp_init( &arr->comp, info->comp_model, info->tag_addr_width, info->assoc, n_pre_drain( info->comp_pre_model ), matchline_len, tagline_len );
+
+ /* comparator precharging power initialization */
+ SIM_array_pre_init( &arr->comp_pre, info->comp_pre_model, comp_pre_size );
+
+ /* memory cell power initialization */
+ SIM_array_mem_init( &arr->tag_mem, info->tag_mem_model, info->read_ports, info->write_ports, info->share_rw, info->tag_end );
+ }
+ /* END: tag array power initialization */
+
+ return 0;
+}
+
+double array_report(power_array_info *info, power_array *arr) /* sum dynamic energy over all modeled components; model == 0 (SIM_ARRAY_NO_MODEL) parts are skipped */
+{
+ double epart, etotal = 0; /* epart: one component's energy; etotal: running total */
+
+ if (info->row_dec_model) {
+ epart = SIM_array_dec_report(&arr->row_dec);
+ //fprintf(stderr, "row decoder: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->col_dec_model) {
+ epart = SIM_array_dec_report(&arr->col_dec);
+ //fprintf(stderr, "col decoder: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->data_wordline_model) {
+ epart = SIM_array_wordline_report(&arr->data_wordline);
+ //fprintf(stderr, "data wordline: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->tag_wordline_model) {
+ epart = SIM_array_wordline_report(&arr->tag_wordline);
+ //fprintf(stderr, "tag wordline: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->data_bitline_model) {
+ epart = SIM_array_bitline_report(&arr->data_bitline);
+ //fprintf(stderr, "data bitline: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->data_bitline_pre_model) {
+ epart = SIM_array_pre_report(&arr->data_bitline_pre);
+ //fprintf(stderr, "data bitline precharge: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->tag_bitline_model) {
+ epart = SIM_array_bitline_report(&arr->tag_bitline);
+ //fprintf(stderr, "tag bitline: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->data_mem_model) {
+ epart = SIM_array_mem_report(&arr->data_mem);
+ //fprintf(stderr, "data memory: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->tag_mem_model) {
+ epart = SIM_array_mem_report(&arr->tag_mem);
+ //fprintf(stderr, "tag memory: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->data_amp_model) {
+ epart = SIM_array_amp_report(&arr->data_amp);
+ //fprintf(stderr, "data amp: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->tag_amp_model) {
+ epart = SIM_array_amp_report(&arr->tag_amp);
+ //fprintf(stderr, "tag amp: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->comp_model) {
+ epart = SIM_array_comp_report(&arr->comp);
+ //fprintf(stderr, "comparator: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->mux_model) {
+ epart = SIM_array_mux_report(&arr->mux);
+ //fprintf(stderr, "multiplexor: %g\n", epart);
+ etotal += epart;
+ }
+ if (info->outdrv_model) {
+ epart = SIM_array_outdrv_report(&arr->outdrv);
+ //fprintf(stderr, "output driver: %g\n", epart);
+ etotal += epart;
+ }
+ /* ignore other precharging for now */
+
+ //fprintf(stderr, "total energy: %g\n", etotal);
+
+ return etotal; /* note: of the precharge units only data_bitline_pre is counted above */
+}
+
+/* ==================== buffer (wrapper/record functions) ==================== */
+
+/* record read data activity */
+int SIM_buf_power_data_read(power_array_info *info, power_array *arr, unsigned long int data) /* record the activity of one buffer read: precharge, decode, then bitline/amp read of 'data' */
+{
+ /* precharge */
+ SIM_array_pre_record(&arr->data_bitline_pre, info->blk_bits); /* all blk_bits bitlines are precharged before the read */
+ /* drive the wordline */
+ SIM_power_array_dec(info, arr, NULL, 0, SIM_ARRAY_READ); /* NULL port / row 0 -- presumably records decoder+wordline switching without a real address; confirm against SIM_power_array_dec */
+ /* read data */
+ SIM_power_array_data_read(info, arr, data);
+
+ return 0; /* always succeeds */
+}
+
+
+/* record write data bitline and memory cell activity */
+int SIM_buf_power_data_write(power_array_info *info, power_array *arr, char *data_line, char *old_data, char *new_data) /* record one buffer write: decode plus bitline/cell transitions from old_data to new_data */
+{
+#define N_ITEM (PARM_flit_width / 8 + (PARM_flit_width % 8 ? 1:0)) /* bytes per flit, rounded up; NOTE(review): never #undef'd, so this macro leaks into the rest of the file */
+ /* drive the wordline */
+ SIM_power_array_dec(info, arr, NULL, 0, SIM_ARRAY_WRITE); /* NULL port / row 0 -- decoder activity only, as in the read wrapper */
+ /* write data */
+ SIM_power_array_data_write(info, arr, NULL, N_ITEM, data_line, old_data, new_data);
+
+ return 0; /* always succeeds */
+}
+
+/* WHS: missing data output wrapper function */
+
+/* ==================== buffer (wrapper/record functions) end ==================== */
+
+
+
+
+
+int SIM_array_clear_stat(power_array *arr) /* reset activity statistics of every sub-component -- presumably counters only, leaving energy/leakage parameters intact; confirm in the *_clear_stat implementations */
+{
+ SIM_array_dec_clear_stat(&arr->row_dec);
+ SIM_array_dec_clear_stat(&arr->col_dec);
+ SIM_array_wordline_clear_stat(&arr->data_wordline);
+ SIM_array_wordline_clear_stat(&arr->tag_wordline);
+ SIM_array_bitline_clear_stat(&arr->data_bitline);
+ SIM_array_bitline_clear_stat(&arr->tag_bitline);
+ SIM_array_mem_clear_stat(&arr->data_mem);
+ SIM_array_mem_clear_stat(&arr->tag_mem);
+ SIM_array_mem_clear_stat(&arr->tag_attach_mem);
+ SIM_array_amp_clear_stat(&arr->data_amp);
+ SIM_array_amp_clear_stat(&arr->tag_amp);
+ SIM_array_comp_clear_stat(&arr->comp);
+ SIM_array_mux_clear_stat(&arr->mux);
+ SIM_array_outdrv_clear_stat(&arr->outdrv);
+ SIM_array_pre_clear_stat(&arr->row_dec_pre);
+ SIM_array_pre_clear_stat(&arr->col_dec_pre);
+ SIM_array_pre_clear_stat(&arr->data_bitline_pre);
+ SIM_array_pre_clear_stat(&arr->tag_bitline_pre);
+ SIM_array_pre_clear_stat(&arr->data_colsel_pre);
+ SIM_array_pre_clear_stat(&arr->tag_colsel_pre);
+ SIM_array_pre_clear_stat(&arr->comp_pre);
+
+ return 0; /* always succeeds */
+}
+
+
+
+
+
diff --git a/src/mem/ruby/network/orion/power_array.hh b/src/mem/ruby/network/orion/power_array.hh
new file mode 100644
index 000000000..dbd2733c9
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_array.hh
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _POWER_ARRAY_H
+#define _POWER_ARRAY_H
+
+
+#define SIM_ARRAY_NO_MODEL 0 /* "component not modeled" -- all model enums below start at 1 so 0 stays free for this */
+#define SIM_ARRAY_READ 0 /* access-type codes passed to SIM_power_array_dec */
+#define SIM_ARRAY_WRITE 1
+
+#define SIM_ARRAY_RECOVER 1
+
+/* read/write */
+#define SIM_ARRAY_RW 0
+/* only write */
+#define SIM_ARRAY_WO 1
+
+typedef enum {
+ GENERIC_DEC =1, /* = 1: see SIM_ARRAY_NO_MODEL above */
+ DEC_MAX_MODEL
+} power_dec_model; /* address decoder model */
+
+typedef enum {
+ GENERIC_MUX =1,
+ MUX_MAX_MODEL
+} power_mux_model; /* output multiplexor model */
+
+typedef enum {
+ GENERIC_AMP =1,
+ AMP_MAX_MODEL
+} power_amp_model; /* sense amplifier model */
+
+typedef enum {
+ CACHE_RW_WORDLINE =1,
+ CACHE_WO_WORDLINE,
+ CAM_RW_WORDLINE,
+ CAM_WO_WORDLINE,
+ WORDLINE_MAX_MODEL
+} power_wordline_model; /* wordline driver model (cache vs CAM, read/write vs write-only) */
+
+typedef enum {
+ RW_BITLINE =1,
+ WO_BITLINE,
+ BITLINE_MAX_MODEL
+} power_bitline_model; /* bitline model */
+
+typedef enum {
+ SINGLE_BITLINE =1,
+ EQU_BITLINE,
+ SINGLE_OTHER,
+ PRE_MAX_MODEL
+} power_pre_model; /* precharge circuit model */
+
+typedef enum {
+ NORMAL_MEM =1,
+ CAM_TAG_RW_MEM,
+ CAM_TAG_WO_MEM,
+ CAM_DATA_MEM,
+ CAM_ATTACH_MEM,
+ MEM_MAX_MODEL
+} power_mem_model; /* memory cell model */
+
+typedef enum {
+ CACHE_COMPONENT =1, /* NOTE(review): name breaks the *_COMP pattern of the other comparator models */
+ CAM_COMP,
+ COMP_MAX_MODEL
+} power_comp_model; /* tag comparator model */
+
+typedef enum {
+ CACHE_OUTDRV =1,
+ CAM_OUTDRV,
+ REG_OUTDRV,
+ OUTDRV_MAX_MODEL
+} power_outdrv_model; /* output driver model */
+
+
+
+typedef struct { /* per-component records pair activity counters (n_*) with per-event energies (e_*) */
+ int model;
+ unsigned n_bits;
+ unsigned long int n_chg_output; /* activity counters */
+ unsigned long int n_chg_addr;
+ unsigned long int n_chg_l1;
+ double e_chg_output; /* per-event energy costs */
+ double e_chg_addr;
+ double e_chg_l1;
+ unsigned n_in_1st;
+ unsigned n_in_2nd;
+ unsigned n_out_0th;
+ unsigned n_out_1st;
+ unsigned long int addr_mask; /* presumably restricts which address bits are tracked -- confirm in decoder code */
+} power_decoder; /* row/column address decoder */
+
+typedef struct {
+ int model;
+ int share_rw;
+ unsigned long int n_read;
+ unsigned long int n_write;
+ double e_read;
+ double e_write;
+ double i_leakage;
+} power_wordline; /* wordline driver: read/write activity plus leakage */
+
+typedef struct {
+ int model;
+ int share_rw;
+ unsigned end; /* 1 or 2 bitline ends (single- vs differential-ended), per *_end fields of power_array_info */
+ unsigned long int n_col_write;
+ unsigned long int n_col_read;
+ unsigned long int n_col_sel;
+ double e_col_write;
+ double e_col_read;
+ double e_col_sel;
+ double i_leakage;
+} power_bitline; /* bitline: per-column read/write/select activity */
+
+typedef struct {
+ int model;
+ unsigned long int n_access;
+ double e_access;
+} power_amp; /* sense amplifier */
+
+typedef struct {
+ int model;
+ unsigned n_bits;
+ unsigned assoc;
+ unsigned long int n_access;
+ unsigned long int n_match;
+ unsigned long int n_mismatch;
+ unsigned long int n_miss;
+ unsigned long int n_bit_match;
+ unsigned long int n_bit_mismatch;
+ unsigned long int n_chg_addr;
+ double e_access;
+ double e_match;
+ double e_mismatch;
+ double e_miss;
+ double e_bit_match;
+ double e_bit_mismatch;
+ double e_chg_addr;
+ unsigned long int comp_mask;
+} power_comp; /* tag comparator: whole-tag and per-bit match/mismatch statistics */
+
+typedef struct {
+ int model;
+ unsigned end;
+ unsigned long int n_switch;
+ double e_switch;
+ double i_leakage;
+} power_mem; /* memory cell array */
+
+typedef struct {
+ int model;
+ unsigned assoc;
+ unsigned long int n_mismatch;
+ unsigned long int n_chg_addr;
+ double e_mismatch;
+ double e_chg_addr;
+} power_mux; /* output multiplexor */
+
+typedef struct {
+ int model;
+ unsigned item_width;
+ unsigned long int n_select;
+ unsigned long int n_chg_data;
+ unsigned long int n_out_1;
+ unsigned long int n_out_0;
+ double e_select;
+ double e_chg_data;
+ double e_out_1;
+ double e_out_0;
+ unsigned long int out_mask;
+} power_out; /* output driver */
+
+typedef struct {
+ int model;
+ unsigned long int n_charge;
+ double e_charge;
+ double i_leakage;
+} power_arr_pre; /* precharge unit */
+
+
+/*@
+ * data type: array port state
+ *
+ * - row_addr -- input to row decoder
+ * col_addr -- input to column decoder, if any
+ * tag_addr -- input to tag comparator
+ * $+ tag_line -- value of tag bitline
+ * # data_line_size -- size of data_line in char
+ * # data_line -- value of data bitline
+ *
+ * legend:
+ * -: only used by non-fully-associative array
+ * +: only used by fully-associative array
+ * #: only used by fully-associative array or RF array
+ * $: only used by write-through array
+ *
+ * NOTE:
+ * (1) *_addr may not necessarily be an address
+ * (2) data_line_size is the allocated size of data_line in simulator,
+ * which must be no less than the physical size of data line
+ * (3) each instance of module should define an instance-specific data
+ * type with non-zero-length data_line and cast it to this type
+ */
+typedef struct {
+ unsigned long int row_addr;
+ unsigned long int col_addr;
+ unsigned long int tag_addr;
+ unsigned long int tag_line;
+ unsigned data_line_size;
+ char data_line[0]; /* zero-length trailing array: GNU extension, not standard C++ -- standard C99 form would be data_line[] */
+} SIM_array_port_state_t;
+
+/*@
+ * data type: array set state
+ *
+ * entry -- pointer to some entry structure if an entry is selected for
+ * r/w, NULL otherwise
+ * entry_set -- pointer to corresponding set structure
+ * + write_flag -- 1 if entry is already written once, 0 otherwise
+ * + write_back_flag -- 1 if entry is already written back, 0 otherwise
+ * valid_bak -- valid bit of selected entry before operation
+ * dirty_bak -- dirty bit of selected entry, if any, before operation
+ * tag_bak -- tag of selected entry before operation
+ * use_bak -- use bits of all entries before operation
+ *
+ * legend:
+ * +: only used by fully-associative array
+ *
+ * NOTE:
+ * (1) entry is interpreted by modules, if some module has no "entry structure",
+ * then make sure this field is non-zero if some entry is selected
+ * (2) tag_addr may not necessarily be an address
+ * (3) each instance of module should define an instance-specific data
+ * type with non-zero-length use_bit and cast it to this type
+ */
+typedef struct {
+ void *entry;
+ void *entry_set;
+ int write_flag;
+ int write_back_flag;
+ unsigned valid_bak;
+ unsigned dirty_bak;
+ unsigned long int tag_bak;
+ unsigned use_bak[0]; /* zero-length trailing array: GNU extension, see NOTE (3) above */
+} SIM_array_set_state_t;
+
+
+
+
+
+// Array
+
+typedef struct { /* aggregate power state of one array: data side, tag side, and all precharge units */
+ power_decoder row_dec;
+ power_decoder col_dec;
+ power_wordline data_wordline;
+ power_wordline tag_wordline;
+ power_bitline data_bitline;
+ power_bitline tag_bitline;
+ power_mem data_mem;
+ power_mem tag_mem;
+ power_mem tag_attach_mem; /* extra cells (e.g. valid/use/dirty bits) attached to a CAM tag array */
+ power_amp data_amp;
+ power_amp tag_amp;
+ power_comp comp;
+ power_mux mux;
+ power_out outdrv;
+ power_arr_pre row_dec_pre;
+ power_arr_pre col_dec_pre;
+ power_arr_pre data_bitline_pre;
+ power_arr_pre tag_bitline_pre;
+ power_arr_pre data_colsel_pre;
+ power_arr_pre tag_colsel_pre;
+ power_arr_pre comp_pre;
+ double i_leakage; /* total array leakage current, accumulated during power_array_init */
+} power_array;
+
+typedef struct { /* static configuration of an array; consumed by power_array_init, which also fills the trailing geometry fields */
+ //common for data and tag array
+ int share_rw; /* nonzero if read and write share ports */
+ unsigned read_ports;
+ unsigned write_ports;
+ unsigned n_set; /* n_set == 1 means fully-associative (CAM) organization */
+ unsigned blk_bits;
+ unsigned assoc;
+ int row_dec_model;
+ //for data array
+ unsigned data_width;
+ int col_dec_model;
+ int mux_model;
+ int outdrv_model;
+ //for tag array
+ unsigned tag_addr_width;
+ unsigned tag_line_width;
+ int comp_model;
+ //data common
+ unsigned data_ndwl; /* ndwl/ndbl/nspd: CACTI-style array partitioning parameters */
+ unsigned data_ndbl;
+ unsigned data_nspd;
+ unsigned data_n_share_amp;
+ unsigned data_end; /* 1 or 2 bitline ends (single- vs differential-ended sensing) */
+ int data_wordline_model;
+ int data_bitline_model;
+ int data_amp_model;
+ int data_mem_model;
+ //tag common
+ unsigned tag_ndwl;
+ unsigned tag_ndbl;
+ unsigned tag_nspd;
+ unsigned tag_n_share_amp;
+ unsigned tag_end;
+ unsigned tag_wordline_model; /* NOTE(review): tag model fields are 'unsigned' while the data equivalents are 'int' -- inconsistent but harmless */
+ unsigned tag_bitline_model;
+ unsigned tag_amp_model;
+ unsigned tag_mem_model;
+ unsigned tag_attach_mem_model;
+ //precharging parameters
+ int row_dec_pre_model;
+ int col_dec_pre_model;
+ int data_bitline_pre_model;
+ int tag_bitline_pre_model;
+ int data_colsel_pre_model;
+ int tag_colsel_pre_model;
+ int comp_pre_model;
+ //derived
+ unsigned n_item;
+ unsigned eff_data_cols;
+ unsigned eff_tag_cols;
+ //flags used by prototype array model
+ unsigned use_bit_width;
+ unsigned valid_bit_width;
+ int write_policy;
+ //fields filled up during initialization
+ double data_arr_width; /* physical geometry computed by power_array_init (wordline/bitline lengths) */
+ double tag_arr_width;
+ double data_arr_height;
+ double tag_arr_height;
+} power_array_info;
+
+
+extern int power_array_init(power_array_info *info, power_array *arr ); /* pre-compute per-component energy/leakage parameters; returns 0 */
+
+extern double array_report(power_array_info *info, power_array *arr); /* total dynamic energy recorded so far */
+
+extern int SIM_buf_power_data_read(power_array_info *info, power_array *arr, unsigned long int data);
+
+extern int SIM_buf_power_data_write(power_array_info *info, power_array *arr, char *data_line, char *old_data, char *new_data);
+
+extern int SIM_array_clear_stat(power_array *arr); /* reset activity counters */
+
+/* low-level per-component record functions; NOTE(review): naming mixes SIM_power_*, SIM_* and power_* prefixes inconsistently */
+extern int SIM_power_array_dec( power_array_info *info, power_array *arr, SIM_array_port_state_t *port, unsigned long int row_addr, int rw );
+extern int SIM_power_array_data_read( power_array_info *info, power_array *arr, unsigned long int data );
+extern int SIM_power_array_data_write( power_array_info *info, power_array *arr, SIM_array_set_state_t *set, unsigned n_item, char *data_line, char *old_data, char *new_data );
+extern int power_array_tag_read( power_array_info *info, power_array *arr, SIM_array_set_state_t *set );
+extern int power_array_tag_update( power_array_info *info, power_array *arr, SIM_array_port_state_t *port, SIM_array_set_state_t *set );
+extern int power_array_tag_compare( power_array_info *info, power_array *arr, SIM_array_port_state_t *port, unsigned long int tag_input, unsigned long int col_addr, SIM_array_set_state_t *set );
+extern int SIM_power_array_output( power_array_info *info, power_array *arr, unsigned data_size, unsigned length, void *data_out, void *data_all );
+
+extern int SIM_array_port_state_init( power_array_info *info, SIM_array_port_state_t *port );
+extern int SIM_array_set_state_init( power_array_info *info, SIM_array_set_state_t *set );
+#endif
+
+
+
+
diff --git a/src/mem/ruby/network/orion/power_bus.cc b/src/mem/ruby/network/orion/power_bus.cc
new file mode 100644
index 000000000..032c3c519
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_bus.cc
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+
+#include "power_bus.hh"
+#include "power_ll.hh"
+#include "parm_technology.hh"
+#include "SIM_port.hh"
+#include "power_static.hh"
+#include "power_utils.hh"
+
+/* ------- bus(link) model ---------- */
+
+static int SIM_bus_bitwidth(int encoding, unsigned data_width, unsigned grp_width) /* physical wire count for a given encoding; BUSINV adds one invert wire per group (rounded up) */
+{
+ if (encoding && encoding < BUS_MAX_ENC)
+ switch (encoding) {
+ case IDENT_ENC:
+ case TRANS_ENC: return data_width;
+ case BUSINV_ENC: return data_width + data_width / grp_width + (data_width % grp_width ? 1:0);
+ default: return 0;/* some error handler */
+ }
+ else
+ return -1; /* NOTE(review): invalid encoding yields -1 here but 0 in the switch default -- inconsistent error signaling */
+}
+
+
+/*
+ * this function is provided to upper layers to compute the exact binary bus representation
+ * only correct when grp_width divides data_width
+ */
+unsigned long int SIM_bus_state(power_bus *bus, unsigned long int old_data, unsigned long int old_state, unsigned long int new_data) /* map new_data onto the encoded wire state given the previous data/state */
+{
+ unsigned long int mask_bus, mask_data;
+ unsigned long int new_state = 0;
+ unsigned done_width = 0; /* data bits encoded so far */
+
+ switch (bus->encoding) {
+ case IDENT_ENC: return new_data;
+ case TRANS_ENC: return new_data ^ old_data; /* transition encoding: wires carry the XOR with previous data */
+
+ case BUSINV_ENC:
+ /* FIXME: this function should be re-written for boundary checking */
+ mask_bus = (mask_data << 1) + 1;
+ mask_data = (BIGONE << bus->grp_width) - 1;
+
+ while (bus->data_width > done_width) {
+ if (SIM_power_Hamming(old_state & mask_bus, new_data & mask_data, mask_bus) > bus->grp_width / 2) /* invert group if more than half the wires would flip */
+ new_state += (~(new_data & mask_data) & mask_bus) << done_width + done_width / bus->grp_width; /* precedence: shifts by (done_width + invert bits inserted so far), since + binds tighter than << */
+ else
+ new_state += (new_data & mask_data) << done_width + done_width / bus->grp_width;
+
+ done_width += bus->grp_width;
+ old_state >>= bus->grp_width + 1; /* +1 skips the group's invert wire */
+ new_data >>= bus->grp_width;
+ }
+
+ return new_state;
+
+ default: return 0;/* some error handler */
+ }
+}
+
+
+static double SIM_resultbus_cap(void) /* estimate of CPU result-bus wire capacitance from register-file height and ALU count */
+{
+ double Cline, reg_height;
+
+ /* compute size of result bus tags */
+ reg_height = PARM_RUU_size * (RegCellHeight + WordlineSpacing * 3 * PARM_ruu_issue_width);
+
+ /* assume num alu's = ialu */
+ /* FIXME: generate a more detailed result bus network model */
+ /* WHS: 3200 should go to PARM */
+ /* WHS: use minimal pitch for buses */
+ Cline = CCmetal * (reg_height + 0.5 * PARM_res_ialu * 3200 * LSCALE);
+
+ /* or use result bus length measured from 21264 die photo */
+ // Cline = CCmetal * 3.3 * 1000;
+
+ return Cline; /* capacitance, not energy; caller converts via EnergyFactor */
+}
+
+
+static double SIM_generic_bus_cap(unsigned n_snd, unsigned n_rcv, double length, double time) /* total switched capacitance of a bus with n_snd senders and n_rcv receivers */
+{
+ double Ctotal = 0;
+ double n_size, p_size; /* NMOS/PMOS transistor sizes of repeaters or drivers */
+
+ /* part 1: wire cap */
+ /* WHS: use minimal pitch for buses */
+ Ctotal += CC2metal * length;
+
+ if ((n_snd == 1) && (n_rcv == 1)) {
+ /* directed bus if only one sender and one receiver */
+
+ /* part 2: repeater cap */
+ /* FIXME: ratio taken from Raw, does not scale now */
+ n_size = Lamda * 10;
+ p_size = n_size * 2;
+
+ Ctotal += SIM_power_gatecap(n_size + p_size, 0) + SIM_power_draincap(n_size, NCH, 1) + SIM_power_draincap(p_size, PCH, 1);
+
+ n_size *= 2.5; /* second, larger repeater stage */
+ p_size *= 2.5;
+
+ Ctotal += SIM_power_gatecap(n_size + p_size, 0) + SIM_power_draincap(n_size, NCH, 1) + SIM_power_draincap(p_size, PCH, 1);
+ }
+ else {
+ /* otherwise, broadcasting bus */
+
+ /* part 2: input cap */
+ /* WHS: no idea how input interface is, use an inverter for now */
+ Ctotal += n_rcv * SIM_power_gatecap(Wdecinvn + Wdecinvp, 0);
+
+ /* part 3: output driver cap */
+ if (time) { /* nonzero rise/fall time: size driver to meet it */
+ p_size = SIM_power_driver_size(Ctotal, time);
+ n_size = p_size / 2;
+ }
+ else { /* default driver sizes */
+ p_size = Wbusdrvp;
+ n_size = Wbusdrvn;
+ }
+
+ Ctotal += n_snd * (SIM_power_draincap(Wdecinvn, NCH, 1) + SIM_power_draincap(Wdecinvp, PCH, 1)); /* NOTE(review): the n_size/p_size computed above are never used -- driver drain cap uses inverter widths instead; looks like a bug, confirm against upstream ORION */
+ }
+
+ return Ctotal;
+}
+
+
+/*
+ * n_snd -> # of senders
+ * n_rcv -> # of receivers
+ * time -> rise and fall time, 0 means using default transistor sizes
+ * grp_width only matters for BUSINV_ENC
+ */
+int power_bus_init(power_bus *bus, int model, int encoding, unsigned width, unsigned grp_width, unsigned n_snd, unsigned n_rcv, double length, double time) /* returns 0 on success, -1 on invalid model/encoding */
+{
+ if ((bus->model = model) && model < BUS_MAX_MODEL) {
+ bus->data_width = width;
+ bus->grp_width = grp_width;
+ bus->n_switch = 0; /* no transitions recorded yet */
+
+ switch (model) {
+ case RESULT_BUS:
+ /* assume result bus uses identity encoding */
+ bus->encoding = IDENT_ENC;
+ bus->e_switch = SIM_resultbus_cap() / 2 * EnergyFactor; /* C/2 * V^2 style per-transition energy */
+ break;
+
+ case GENERIC_BUS:
+ if ((bus->encoding = encoding) && encoding < BUS_MAX_ENC) {
+ bus->e_switch = SIM_generic_bus_cap(n_snd, n_rcv, length, time) / 2 * EnergyFactor;
+ /* sanity check */
+ if (!grp_width || grp_width > width)
+ bus->grp_width = width;
+ }
+ else return -1;
+ /* NOTE(review): no break here -- falls into default, which is a no-op today but fragile if cases are added */
+ default: break;/* some error handler */
+ }
+
+ bus->bit_width = SIM_bus_bitwidth(bus->encoding, width, bus->grp_width); /* physical wires incl. BUSINV invert bits */
+ bus->bus_mask = HAMM_MASK(bus->bit_width);
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+int bus_record(power_bus *bus, unsigned long int old_state, unsigned long int new_state) /* accumulate bit flips (Hamming distance) between consecutive wire states */
+{
+ bus->n_switch += SIM_power_Hamming(new_state, old_state, bus->bus_mask);
+ return 0; /* always succeeds */
+}
+
+
+double bus_report(power_bus *bus) /* total switching energy = transitions recorded * energy per transition */
+{
+ return (bus->n_switch * bus->e_switch);
+}
+
+/* ------- bus(link) model ---------- */
+
+
diff --git a/src/mem/ruby/network/orion/power_bus.hh b/src/mem/ruby/network/orion/power_bus.hh
new file mode 100644
index 000000000..e1c3ef565
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_bus.hh
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _POWER_BUS_H
+#define _POWER_BUS_H
+
+typedef enum {
+ RESULT_BUS = 1, /* CPU result bus; always identity-encoded (see power_bus_init) */
+ GENERIC_BUS,
+ BUS_MAX_MODEL
+} power_bus_model;
+
+typedef enum {
+ IDENT_ENC = 1, /* identity encoding */
+ TRANS_ENC, /* transition encoding */
+ BUSINV_ENC, /* bus inversion encoding */
+ BUS_MAX_ENC
+} power_bus_enc;
+
+
+typedef struct {
+ int model; /* power_bus_model; 0 means unmodeled */
+ int encoding; /* power_bus_enc */
+ unsigned data_width;
+ unsigned grp_width; /* bits per inversion group (BUSINV only) */
+ unsigned long int n_switch; /* accumulated wire transitions (see bus_record) */
+ double e_switch; /* energy per wire transition */
+ /* redundant field */
+ unsigned bit_width; /* physical wires incl. BUSINV invert bits */
+ unsigned long int bus_mask; /* HAMM_MASK(bit_width) */
+} power_bus;
+
+extern int power_bus_init(power_bus *bus, int model, int encoding, unsigned width, unsigned grp_width, unsigned n_snd, unsigned n_rcv, double length, double time);
+
+extern int bus_record(power_bus *bus, unsigned long int old_state, unsigned long int new_state); /* accumulate transitions between states */
+
+extern double bus_report(power_bus *bus); /* total switching energy */
+
+#endif
diff --git a/src/mem/ruby/network/orion/power_crossbar.cc b/src/mem/ruby/network/orion/power_crossbar.cc
new file mode 100644
index 000000000..d3e2232ae
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_crossbar.cc
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <math.h>
+
+#include "power_ll.hh"
+#include "power_crossbar.hh"
+#include "parm_technology.hh"
+#include "SIM_port.hh"
+#include "power_static.hh"
+#include "power_utils.hh"
+
+/*-------------------- CROSSBAR power model -------------------*/
+
+/* Capacitance seen at one crossbar input port: the input wire, the
+ * n_out connector loads hanging off it, and the input driver itself.
+ * If Nsize is non-NULL it supplies the pass-transistor size previously
+ * chosen by crossbar_out_cap.  The sum is halved on return — presumably
+ * folding the 1/2 of E = 1/2*C*V^2 into the capacitance so callers can
+ * multiply by EnergyFactor directly; TODO confirm against EnergyFactor. */
+static double crossbar_in_cap(double wire_cap, unsigned n_out, int connect_type, int trans_type, double *Nsize)
+{
+ double Ctotal = 0, Ctrans = 0, psize, nsize, Cdriver = 0;
+
+ /* part 1: wire cap */
+ Ctotal += wire_cap;
+ //printf("CROSSBAR_INTERNAL: input wire cap = %g\n", wire_cap);
+
+ /* part 2: drain cap of transmission gate or gate cap of tri-state gate */
+ if (connect_type == TRANS_GATE) {
+ /* FIXME: resizing strategy */
+ nsize = Nsize ? *Nsize : Wmemcellr;
+ psize = nsize * Wdecinvp / Wdecinvn;
+ Ctrans = SIM_power_draincap(nsize, NCH, 1);
+ if (trans_type == NP_GATE)
+ Ctrans += SIM_power_draincap(psize, PCH, 1);
+ }
+ else if (connect_type == TRISTATE_GATE) {
+ Ctrans = SIM_power_gatecap(Woutdrvnandn + Woutdrvnandp, 0) +
+ SIM_power_gatecap(Woutdrvnorn + Woutdrvnorp, 0);
+ }
+ else {/* some error handler */}
+
+ //printf("CROSSBAR_INTERNAL: input connector cap = %g\n", (n_out * Ctrans));
+ /* every output column loads this input with one connector */
+ Ctotal += n_out * Ctrans;
+
+ /* part 3: input driver */
+ /* FIXME: how to specify timing? */
+ psize = SIM_power_driver_size(Ctotal, Period / 3);
+ nsize = psize * Wdecinvn / Wdecinvp;
+ Cdriver = SIM_power_draincap(nsize, NCH, 1) + SIM_power_draincap(psize, PCH, 1) +
+ SIM_power_gatecap(nsize + psize, 0);
+
+ //printf("CROSSBAR_INTERNAL: input driver cap = %g\n", Cdriver);
+
+ Ctotal += Cdriver;
+
+ return Ctotal / 2;
+}
+
+
+/* Capacitance seen at one crossbar output port: output wire of the given
+ * length, the n_in connector loads driving it, and the output driver.
+ * When Nsize is non-NULL (TRANS_GATE case) the chosen pass-transistor
+ * size is written back through it for crossbar_in_cap to reuse.
+ * Result is halved on return (same convention as crossbar_in_cap). */
+static double crossbar_out_cap(double length, unsigned n_in, int connect_type, int trans_type, double *Nsize)
+{
+ double Ctotal = 0, Cwire = 0, Ctrans = 0, Cdriver = 0, psize, nsize;
+
+ /* part 1: wire cap */
+ Cwire += CC3metal * length;
+ //printf("CROSSBAR_INTERNAL: output wire cap = %g\n", Cwire);
+
+ Ctotal += Cwire;
+
+ /* part 2: drain cap of transmission gate or tri-state gate */
+ if (connect_type == TRANS_GATE) {
+ /* FIXME: resizing strategy */
+ if (Nsize) {
+ /* FIXME: how to specify timing? */
+ psize = SIM_power_driver_size(Ctotal, Period / 3);
+ *Nsize = nsize = psize * Wdecinvn / Wdecinvp;
+ }
+ else {
+ nsize = Wmemcellr;
+ psize = nsize * Wdecinvp / Wdecinvn;
+ }
+ Ctrans = SIM_power_draincap(nsize, NCH, 1);
+ if (trans_type == NP_GATE)
+ Ctrans += SIM_power_draincap(psize, PCH, 1);
+ }
+ else if (connect_type == TRISTATE_GATE) {
+ Ctrans = SIM_power_draincap(Woutdrivern, NCH, 1) + SIM_power_draincap(Woutdriverp, PCH, 1);
+ }
+ else {/* some error handler */}
+
+ //printf("CROSSBAR_INTERNAL: output connector cap = %g\n", (n_in * Ctrans));
+ /* every input row loads this output with one connector */
+ Ctotal += n_in * Ctrans;
+
+ /* part 3: output driver */
+ Cdriver += SIM_power_draincap(Woutdrivern, NCH, 1) + SIM_power_draincap(Woutdriverp, PCH, 1) +
+ SIM_power_gatecap(Woutdrivern + Woutdriverp, 0);
+
+ //printf("CROSSBAR_INTERNAL: output driver cap = %g\n", Cdriver);
+
+ Ctotal += Cdriver;
+
+ return Ctotal / 2;
+}
+
+
+/* cut-through crossbar only supports 4x4 now */
+/* Combined input/output node capacitance for the cut-through crossbar:
+ * wire + tri-state gate/drain loads + input and output drivers.
+ * NOTE(review): no caller is visible in this file — power_crossbar_init
+ * never handles CUT_THRU_CROSSBAR; verify this model is reachable. */
+static double crossbar_io_cap(double length)
+{
+ double Ctotal = 0, psize, nsize;
+
+ /* part 1: wire cap */
+ Ctotal += CC3metal * length;
+
+ /* part 2: gate cap of tri-state gate */
+ Ctotal += 2 * (SIM_power_gatecap(Woutdrvnandn + Woutdrvnandp, 0) +
+ SIM_power_gatecap(Woutdrvnorn + Woutdrvnorp, 0));
+
+ /* part 3: drain cap of tri-state gate */
+ Ctotal += 2 * (SIM_power_draincap(Woutdrivern, NCH, 1) + SIM_power_draincap(Woutdriverp, PCH, 1));
+
+ /* part 4: input driver */
+ /* FIXME: how to specify timing? */
+ psize = SIM_power_driver_size(Ctotal, Period * 0.8);
+ nsize = psize * Wdecinvn / Wdecinvp;
+ Ctotal += SIM_power_draincap(nsize, NCH, 1) + SIM_power_draincap(psize, PCH, 1) +
+ SIM_power_gatecap(nsize + psize, 0);
+
+ /* part 5: output driver */
+ Ctotal += SIM_power_draincap(Woutdrivern, NCH, 1) + SIM_power_draincap(Woutdriverp, PCH, 1) +
+ SIM_power_gatecap(Woutdrivern + Woutdriverp, 0);
+
+ /* HACK HACK HACK */
+ /* this HACK is to count a 1:4 mux and a 4:1 mux, so we have a 5x5 crossbar */
+ return Ctotal / 2 * 1.32;
+}
+
+
+/* Capacitance of one internal node of the multiplexer-tree crossbar:
+ * the drain loads of `degree` fan-in connectors plus (TRANS_GATE) one
+ * downstream connector, or (TRISTATE_GATE) the next stage's gate cap.
+ * Result is halved on return (same convention as crossbar_in_cap). */
+static double crossbar_int_cap(unsigned degree, int connect_type, int trans_type)
+{
+ double Ctotal = 0, Ctrans;
+
+ if (connect_type == TRANS_GATE) {
+ /* part 1: drain cap of transmission gate */
+ /* FIXME: Wmemcellr and resize */
+ Ctrans = SIM_power_draincap(Wmemcellr, NCH, 1);
+ if (trans_type == NP_GATE)
+ Ctrans += SIM_power_draincap(Wmemcellr * Wdecinvp / Wdecinvn, PCH, 1);
+ /* degree fan-in drains plus the one downstream connector */
+ Ctotal += (degree + 1) * Ctrans;
+ }
+ else if (connect_type == TRISTATE_GATE) {
+ /* part 1: drain cap of tri-state gate */
+ Ctotal += degree * (SIM_power_draincap(Woutdrivern, NCH, 1) + SIM_power_draincap(Woutdriverp, PCH, 1));
+
+ /* part 2: gate cap of tri-state gate */
+ Ctotal += SIM_power_gatecap(Woutdrvnandn + Woutdrvnandp, 0) +
+ SIM_power_gatecap(Woutdrvnorn + Woutdrvnorp, 0);
+ }
+ else {/* some error handler */}
+
+ return Ctotal / 2;
+}
+
+
+/* FIXME: segment control signals are not handled yet */
+/* Capacitance of one control (select) signal: control wire of the given
+ * length, the gate load of data_width connector transistors it switches,
+ * its inverter, and optional links to the previous/next arbitration level
+ * (prev_ctr/next_ctr, used by the multi-level multree crossbar).
+ * Unlike the data-path helpers this is NOT halved — per the callers,
+ * control lines reset after each transfer, so both edges are paid. */
+static double crossbar_ctr_cap(double length, unsigned data_width, int prev_ctr, int next_ctr, unsigned degree, int connect_type, int trans_type)
+{
+ double Ctotal = 0, Cgate;
+
+ /* part 1: wire cap */
+ Ctotal = Cmetal * length;
+
+ /* part 2: gate cap of transmission gate or tri-state gate */
+ if (connect_type == TRANS_GATE) {
+ /* FIXME: Wmemcellr and resize */
+ Cgate = SIM_power_gatecap(Wmemcellr, 0);
+ if (trans_type == NP_GATE)
+ Cgate += SIM_power_gatecap(Wmemcellr * Wdecinvp / Wdecinvn, 0);
+ }
+ else if (connect_type == TRISTATE_GATE) {
+ Cgate = SIM_power_gatecap(Woutdrvnandn + Woutdrvnandp, 0) +
+ SIM_power_gatecap(Woutdrvnorn + Woutdrvnorp, 0);
+ }
+ else {/* some error handler */}
+
+ Ctotal += data_width * Cgate;
+
+ /* part 3: inverter */
+ /* skipped only for an N-type transmission gate at the first level,
+ * which needs no complementary control */
+ if (!(connect_type == TRANS_GATE && trans_type == N_GATE && !prev_ctr))
+ /* FIXME: need accurate size, use minimal size for now */
+ Ctotal += SIM_power_draincap(Wdecinvn, NCH, 1) + SIM_power_draincap(Wdecinvp, PCH, 1) +
+ SIM_power_gatecap(Wdecinvn + Wdecinvp, 0);
+
+ /* part 4: drain cap of previous level control signal */
+ /* NOTE(review): `degree *` binds only to the NMOS draincap term, not the
+ * PMOS one — verify this asymmetry is intended and not a missing paren. */
+ if (prev_ctr)
+ /* FIXME: need actual size, use decoder data for now */
+ Ctotal += degree * SIM_power_draincap(WdecNORn, NCH, 1) + SIM_power_draincap(WdecNORp, PCH, degree);
+
+ /* part 5: gate cap of next level control signal */
+ if (next_ctr)
+ /* FIXME: need actual size, use decoder data for now */
+ Ctotal += SIM_power_gatecap(WdecNORn + WdecNORp, degree * 40 + 20);
+
+ return Ctotal;
+}
+
+
+/* Populate *crsbar for the chosen model: store the configuration, zero the
+ * activity counters, and precompute per-event energies (e_chg_*) and leakage
+ * current (i_leakage) from the capacitance helpers above.
+ * Returns 0 on success, -1 if model is 0 or >= CROSSBAR_MAX_MODEL.
+ * NOTE(review): CUT_THRU_CROSSBAR passes the validity check but falls into
+ * the empty default case, leaving e_chg_*/depth/i_leakage uninitialized —
+ * confirm that model is never used, or wire up crossbar_io_cap. */
+int power_crossbar_init(power_crossbar *crsbar, int model, unsigned n_in, unsigned n_out, unsigned data_width, unsigned degree, int connect_type, int trans_type, double in_len, double out_len, double *req_len)
+{
+ double in_length, out_length, ctr_length, Nsize, in_wire_cap, i_leakage;
+
+ /* deliberate assignment-in-condition: record the model, then require it
+  * to be non-zero and within range */
+ if ((crsbar->model = model) && model < CROSSBAR_MAX_MODEL) {
+ crsbar->n_in = n_in;
+ crsbar->n_out = n_out;
+ crsbar->data_width = data_width;
+ crsbar->degree = degree;
+ crsbar->connect_type = connect_type;
+ crsbar->trans_type = trans_type;
+ /* redundant field */
+ crsbar->mask = HAMM_MASK(data_width);
+
+ crsbar->n_chg_in = crsbar->n_chg_int = crsbar->n_chg_out = crsbar->n_chg_ctr = 0;
+
+ switch (model) {
+ case MATRIX_CROSSBAR:
+
+ /* FIXME: need accurate spacing */
+ /* wire lengths from cell geometry, but never shorter than the
+  * caller-supplied floor lengths in_len/out_len */
+ in_length = n_out * data_width * CrsbarCellWidth;
+ out_length = n_in * data_width * CrsbarCellHeight;
+ if (in_length < in_len) in_length = in_len;
+ if (out_length < out_len) out_length = out_len;
+ ctr_length = in_length / 2;
+ if (req_len) *req_len = in_length;
+
+ in_wire_cap = in_length * CC3metal;
+
+ /* out_cap first: it chooses the pass-transistor size (Nsize)
+  * that in_cap then reuses */
+ crsbar->e_chg_out = crossbar_out_cap(out_length, n_in, connect_type, trans_type, &Nsize) * EnergyFactor;
+ crsbar->e_chg_in = crossbar_in_cap(in_wire_cap, n_out, connect_type, trans_type, &Nsize) * EnergyFactor;
+ /* FIXME: wire length estimation, really reset? */
+ /* control signal should reset after transmission is done, so no 1/2 */
+ crsbar->e_chg_ctr = crossbar_ctr_cap(ctr_length, data_width, 0, 0, 0, connect_type, trans_type) * EnergyFactor;
+ crsbar->e_chg_int = 0;
+
+ /* static power */
+ i_leakage = 0;
+ /* tri-state buffers */
+ i_leakage += ((Woutdrvnandp * (NAND2_TAB[0] + NAND2_TAB[1] + NAND2_TAB[2]) + Woutdrvnandn * NAND2_TAB[3]) / 4 +
+ (Woutdrvnorp * NOR2_TAB[0] + Woutdrvnorn * (NOR2_TAB[1] + NOR2_TAB[2] + NOR2_TAB[3])) / 4 +
+ Woutdrivern * NMOS_TAB[0] + Woutdriverp * PMOS_TAB[0]) * n_in * n_out * data_width;
+ /* input driver */
+ i_leakage += (Wdecinvn * NMOS_TAB[0] + Wdecinvp * PMOS_TAB[0]) * n_in * data_width;
+ /* output driver */
+ i_leakage += (Woutdrivern * NMOS_TAB[0] + Woutdriverp * PMOS_TAB[0]) * n_out * data_width;
+ /* control signal inverter */
+ i_leakage += (Wdecinvn * NMOS_TAB[0] + Wdecinvp * PMOS_TAB[0]) * n_in * n_out;
+ crsbar->i_leakage = i_leakage / PARM_TECH_POINT * 100;
+ break;
+
+ case MULTREE_CROSSBAR:
+ /* input wire horizontal segment length */
+ in_length = n_in * data_width * CrsbarCellWidth * (n_out / 2);
+ in_wire_cap = in_length * CCmetal;
+ /* input wire vertical segment length */
+ in_length = n_in * data_width * (5 * Lamda) * (n_out / 2);
+ in_wire_cap += in_length * CC3metal;
+
+ ctr_length = n_in * data_width * CrsbarCellWidth * (n_out / 2) / 2;
+
+ crsbar->e_chg_out = crossbar_out_cap(0, degree, connect_type, trans_type, NULL) * EnergyFactor;
+ crsbar->e_chg_in = crossbar_in_cap(in_wire_cap, n_out, connect_type, trans_type, NULL) * EnergyFactor;
+ crsbar->e_chg_int = crossbar_int_cap(degree, connect_type, trans_type) * EnergyFactor;
+
+ /* redundant field */
+ /* tree depth: number of degree-way mux levels to merge n_in inputs */
+ crsbar->depth = (unsigned)ceil(log(n_in) / log(degree));
+
+ /* control signal should reset after transmission is done, so no 1/2 */
+ if (crsbar->depth == 1)
+ /* only one level of control signal */
+ crsbar->e_chg_ctr = crossbar_ctr_cap(ctr_length, data_width, 0, 0, degree, connect_type, trans_type) * EnergyFactor;
+ else {
+ /* first level and last level control signals */
+ crsbar->e_chg_ctr = crossbar_ctr_cap(ctr_length, data_width, 0, 1, degree, connect_type, trans_type) * EnergyFactor +
+ crossbar_ctr_cap(0, data_width, 1, 0, degree, connect_type, trans_type) * EnergyFactor;
+ /* intermediate control signals */
+ if (crsbar->depth > 2)
+ crsbar->e_chg_ctr += (crsbar->depth - 2) * crossbar_ctr_cap(0, data_width, 1, 1, degree, connect_type, trans_type) * EnergyFactor;
+ }
+
+ /* static power */
+ i_leakage = 0;
+ /* input driver */
+ i_leakage += (Wdecinvn * NMOS_TAB[0] + Wdecinvp * PMOS_TAB[0]) * n_in * data_width;
+ /* output driver */
+ i_leakage += (Woutdrivern * NMOS_TAB[0] + Woutdriverp * PMOS_TAB[0]) * n_out * data_width;
+ /* mux */
+ i_leakage += (WdecNORp * NOR2_TAB[0] + WdecNORn * (NOR2_TAB[1] + NOR2_TAB[2] + NOR2_TAB[3])) / 4 * (2 * n_in - 1) * n_out * data_width;
+ /* control signal inverter */
+ i_leakage += (Wdecinvn * NMOS_TAB[0] + Wdecinvp * PMOS_TAB[0]) * n_in * n_out;
+ crsbar->i_leakage = i_leakage / PARM_TECH_POINT * 100;
+ break;
+
+ default: break;/* some error handler */
+ }
+
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+/* FIXME: MULTREE_CROSSBAR record missing */
+/* Record one traversal through the crossbar.  io selects which side:
+ * non-zero = input port (count bit flips between old_data and new_data),
+ * zero = output port (count bit flips, plus one control toggle whenever
+ * the selected input port changed).  Always returns 0. */
+int crossbar_record(power_crossbar *xb, int io, unsigned long int new_data, unsigned long int old_data, unsigned new_port, unsigned old_port)
+{
+ switch (xb->model) {
+ case MATRIX_CROSSBAR:
+ if (io) /* input port */
+ xb->n_chg_in += SIM_power_Hamming(new_data, old_data, xb->mask);
+ else { /* output port */
+ xb->n_chg_out += SIM_power_Hamming(new_data, old_data, xb->mask);
+ /* control line toggles only when the routing decision changes */
+ xb->n_chg_ctr += new_port != old_port;
+ }
+ break;
+
+ case MULTREE_CROSSBAR:
+ break;
+
+ default: break;/* some error handler */
+ }
+
+ return 0;
+}
+
+
+/* Total dynamic energy consumed so far: each activity counter accumulated
+ * by crossbar_record times the per-event energy from power_crossbar_init. */
+double crossbar_report(power_crossbar *crsbar)
+{
+ return (crsbar->n_chg_in * crsbar->e_chg_in + crsbar->n_chg_out * crsbar->e_chg_out +
+ crsbar->n_chg_int * crsbar->e_chg_int + crsbar->n_chg_ctr * crsbar->e_chg_ctr);
+}
+
+/* ---------- crossbar model ---------- */
+
+
+
diff --git a/src/mem/ruby/network/orion/power_crossbar.hh b/src/mem/ruby/network/orion/power_crossbar.hh
new file mode 100644
index 000000000..1dc0220e7
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_crossbar.hh
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Crossbar power model: types and entry points (implemented in
+// power_crossbar.cc).
+
+#ifndef _POWER_CROSSBAR_H
+#define _POWER_CROSSBAR_H
+
+/* How a crosspoint connects input to output. */
+typedef enum {
+ TRANS_GATE,
+ TRISTATE_GATE
+} power_connect_model;
+
+/* transmission gate type */
+typedef enum {
+ N_GATE,
+ NP_GATE
+} power_trans;
+
+/* Crossbar topology (1-based so 0 can mean "none"). */
+typedef enum {
+ MATRIX_CROSSBAR =1,
+ MULTREE_CROSSBAR,
+ CUT_THRU_CROSSBAR,
+ CROSSBAR_MAX_MODEL
+} power_crossbar_model;
+
+
+/* Crossbar state: configuration, activity counters (n_chg_*) bumped by
+ * crossbar_record, and per-event energies (e_chg_*) precomputed by
+ * power_crossbar_init. */
+typedef struct {
+ int model; /* a power_crossbar_model value */
+ unsigned n_in; /* number of input ports */
+ unsigned n_out; /* number of output ports */
+ unsigned data_width; /* flit width in bits */
+ unsigned degree; //used only for multree xbar
+ unsigned connect_type; /* a power_connect_model value */
+ unsigned trans_type; /* a power_trans value */
+ unsigned long int n_chg_in; /* input-port bit-flip count */
+ unsigned long int n_chg_int; /* internal-node activity (multree) */
+ unsigned long int n_chg_out; /* output-port bit-flip count */
+ unsigned long int n_chg_ctr; /* control-signal toggle count */
+ unsigned long int mask; /* HAMM_MASK(data_width) for Hamming counts */
+ double e_chg_in; /* energy per input-port transition */
+ double e_chg_int; /* energy per internal-node transition */
+ double e_chg_out; /* energy per output-port transition */
+ double e_chg_ctr; /* energy per control toggle */
+ unsigned depth; //used only for multree xbar
+ double i_leakage; /* leakage current estimate */
+} power_crossbar;
+
+
+/* Record one traversal (see power_crossbar.cc for io semantics). */
+extern int crossbar_record(power_crossbar *xb, int io, unsigned long int new_data, unsigned long int old_data, unsigned new_port, unsigned old_port);
+
+/* Initialize *crsbar; returns 0 on success, -1 on invalid model. */
+extern int power_crossbar_init(power_crossbar *crsbar, int model, unsigned n_in, unsigned n_out, unsigned data_width, unsigned degree, int connect_type, int trans_type, double in_len, double out_len, double *req_len);
+
+/* Total dynamic energy accumulated so far. */
+extern double crossbar_report(power_crossbar *crsbar);
+
+#endif
diff --git a/src/mem/ruby/network/orion/power_ll.cc b/src/mem/ruby/network/orion/power_ll.cc
new file mode 100644
index 000000000..3628989d0
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_ll.cc
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*------------------------------------------------------------
+ * Copyright 1994 Digital Equipment Corporation and Steve Wilton
+ * All Rights Reserved
+ *
+ * Permission to use, copy, and modify this software and its documentation is
+ * hereby granted only under the following terms and conditions. Both the
+ * above copyright notice and this permission notice must appear in all copies
+ * of the software, derivative works or modified versions, and any portions
+ * thereof, and both notices must appear in supporting documentation.
+ *
+ * Users of this software agree to the terms and conditions set forth herein,
+ * and hereby grant back to Digital a non-exclusive, unrestricted, royalty-
+ * free right and license under any changes, enhancements or extensions
+ * made to the core functions of the software, including but not limited to
+ * those affording compatibility with other hardware or software
+ * environments, but excluding applications which incorporate this software.
+ * Users further agree to use their best efforts to return to Digital any
+ * such changes, enhancements or extensions that they make and inform Digital
+ * of noteworthy uses of this software. Correspondence should be provided
+ * to Digital at:
+ *
+ * Director of Licensing
+ * Western Research Laboratory
+ * Digital Equipment Corporation
+ * 100 Hamilton Avenue
+ * Palo Alto, California 94301
+ *
+ * This software may be distributed (but not offered for sale or transferred
+ * for compensation) to third parties, provided such third parties agree to
+ * abide by the terms and conditions of this notice.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND DIGITAL EQUIPMENT CORP. DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DIGITAL EQUIPMENT
+ * CORPORATION BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *------------------------------------------------------------*/
+
+#include <math.h>
+#include <assert.h>
+
+#include "parm_technology.hh"
+#include "SIM_port.hh"
+#include "power_static.hh"
+#include "power_ll.hh"
+
+/*----------------------------------------------------------------------*/
+
+/* Gate capacitance of a transistor of the given width (um) plus the poly
+ * wire feeding it (wirelength in lambda).  When the technology file
+ * defines Pdelta_w, an alternate overlap-aware formula is used and the
+ * wirelength term is ignored. */
+double SIM_power_gatecap(double width,double wirelength) /* returns gate capacitance in Farads */
+//double width; /* gate width in um (length is Leff) */
+//double wirelength; /* poly wire length going to gate in lambda */
+{
+
+ double overlapCap;
+ double gateCap;
+ double l = 0.1525; /* drawn channel length in um before LSCALE */
+
+#if defined(Pdelta_w)
+ overlapCap = (width - 2*Pdelta_w) * PCov;
+ gateCap = ((width - 2*Pdelta_w) * (l * LSCALE - 2*Pdelta_l) *
+ PCg) + 2.0 * overlapCap;
+
+ return gateCap;
+#endif
+ return(width*Leff*PARM_Cgate+wirelength*Cpolywire*Leff * SCALE_T);
+ /* return(width*Leff*PARM_Cgate); */
+ /* return(width*CgateLeff+wirelength*Cpolywire*Leff);*/
+}
+
+
+/* Gate capacitance of a pass transistor; currently identical to
+ * SIM_power_gatecap (a distinct PARM_Cgatepass formula is commented out). */
+double SIM_power_gatecappass(double width,double wirelength) /* returns gate capacitance in Farads */
+//double width; /* gate width in um (length is Leff) */
+//double wirelength; /* poly wire length going to gate in lambda */
+{
+ return(SIM_power_gatecap(width,wirelength));
+ /* return(width*Leff*PARM_Cgatepass+wirelength*Cpolywire*Leff); */
+}
+
+
+/*----------------------------------------------------------------------*/
+
+/* Routine for calculating drain capacitances. The draincap routine
+ * folds transistors larger than 10um */
+/* Returns the drain capacitance (F) of `stack` series transistors of the
+ * given width.  nchannel selects N (non-zero) vs P (zero) diffusion
+ * constants.  With Pdelta_w defined, an overlap/junction-area model is
+ * used instead of the folded CACTI-style formula below it. */
+double SIM_power_draincap(double width,int nchannel,int stack) /* returns drain cap in Farads */
+//double width; /* um */
+//int nchannel; /* whether n or p-channel (boolean) */
+//int stack; /* number of transistors in series that are on */
+{
+ double Cdiffside,Cdiffarea,Coverlap,cap;
+
+ double overlapCap;
+ double swAreaUnderGate;
+ double area_peri;
+ double diffArea;
+ double diffPeri;
+ double l = 0.4 * LSCALE; /* diffusion length in um */
+
+
+ diffArea = l * width;
+ diffPeri = 2 * l + 2 * width;
+
+#if defined(Pdelta_w)
+ if(nchannel == 0) {
+ overlapCap = (width - 2 * Pdelta_w) * PCov;
+ swAreaUnderGate = (width - 2 * Pdelta_w) * PCjswA;
+ area_peri = ((diffArea * PCja)
+ + (diffPeri * PCjsw));
+
+ return(stack*(area_peri + overlapCap + swAreaUnderGate));
+ }
+ else {
+ overlapCap = (width - 2 * Ndelta_w) * NCov;
+ swAreaUnderGate = (width - 2 * Ndelta_w) * NCjswA;
+ area_peri = ((diffArea * NCja * LSCALE)
+ + (diffPeri * NCjsw * LSCALE));
+
+ return(stack*(area_peri + overlapCap + swAreaUnderGate));
+ }
+#endif
+
+ Cdiffside = (nchannel) ? PARM_Cndiffside : PARM_Cpdiffside;
+ Cdiffarea = (nchannel) ? PARM_Cndiffarea : PARM_Cpdiffarea;
+ Coverlap = (nchannel) ? (PARM_Cndiffovlp+PARM_Cnoxideovlp) :
+ (PARM_Cpdiffovlp+PARM_Cpoxideovlp);
+ /* calculate directly-connected (non-stacked) capacitance */
+ /* then add in capacitance due to stacking */
+ if (width >= 10) {
+ /* wide transistor: assume folded layout (smaller perimeter term) */
+ cap = 3.0*Leff*width/2.0*Cdiffarea + 6.0*Leff*Cdiffside +
+ width*Coverlap;
+ cap += (double)(stack-1)*(Leff*width*Cdiffarea +
+ 4.0*Leff*Cdiffside + 2.0*width*Coverlap);
+ } else {
+ cap = 3.0*Leff*width*Cdiffarea + (6.0*Leff+width)*Cdiffside +
+ width*Coverlap;
+ cap += (double)(stack-1)*(Leff*width*Cdiffarea +
+ 2.0*Leff*Cdiffside + 2.0*width*Coverlap);
+ }
+ return(cap * SCALE_T);
+}
+
+
+/*----------------------------------------------------------------------*/
+
+/* The following routines estimate the effective resistance of an
+ on transistor as described in the tech report. The first routine
+ gives the "switching" resistance, and the second gives the
+ "full-on" resistance */
+double SIM_power_transresswitch(double width,int nchannel,int stack) /* returns resistance in ohms */
+//double width; /* um */
+//int nchannel; /* whether n or p-channel (boolean) */
+//int stack; /* number of transistors in series */
+{
+ double restrans;
+ restrans = (nchannel) ? (Rnchannelstatic):
+ (Rpchannelstatic);
+ /* calculate resistance of stack - assume all but switching trans
+ have 0.8X the resistance since they are on throughout switching */
+ return((1.0+((stack-1.0)*0.8))*restrans/width);
+}
+
+
+/*----------------------------------------------------------------------*/
+
+/* "Full-on" effective resistance (ohms) of a stack of transistors: the
+ * per-transistor on-resistance scaled by 1/width, times the stack depth
+ * (no 0.8x discount, unlike SIM_power_transresswitch). */
+double SIM_power_transreson(double width,int nchannel,int stack) /* returns resistance in ohms */
+//double width; /* um */
+//int nchannel; /* whether n or p-channel (boolean) */
+//int stack; /* number of transistors in series */
+{
+ double restrans;
+ restrans = (nchannel) ? Rnchannelon : Rpchannelon;
+
+ /* calculate resistance of stack. Unlike transres, we don't
+ multiply the stacked transistors by 0.8 */
+ return(stack*restrans/width);
+}
+
+
+/*----------------------------------------------------------------------*/
+
+/* This routine operates in reverse: given a resistance, it finds
+ * the transistor width that would have this R. It is used in the
+ * data wordline to estimate the wordline driver size. */
+double SIM_power_restowidth(double res,int nchannel) /* returns width in um */
+//double res; /* resistance in ohms */
+//int nchannel; /* whether N-channel or P-channel */
+{
+ double restrans;
+
+ restrans = (nchannel) ? Rnchannelon : Rpchannelon;
+
+ /* inverse of SIM_power_transreson for a single (stack=1) transistor */
+ return(restrans/res);
+}
+
+
+/*----------------------------------------------------------------------*/
+
+/* Horowitz delay approximation: propagation delay of a gate with time
+ * constant tf driven by an input with the given ramp time, switching
+ * between normalized threshold voltages vs1 and vs2.  `rise` selects
+ * the rising-input (b = 0.5) vs falling-input (b = 0.4) fit. */
+double SIM_power_horowitz(double inputramptime,double tf,double vs1,double vs2,int rise)
+//double inputramptime, /* input rise time */
+ // tf, /* time constant of gate */
+ // vs1,vs2; /* threshold voltages */
+//int rise; /* whether INPUT rise or fall (boolean) */
+{
+ double a,b,td;
+
+ a = inputramptime/tf; /* normalized input slope */
+ if (rise==RISE) {
+ b = 0.5;
+ td = tf*sqrt(fabs( log(vs1)*log(vs1)+2*a*b*(1.0-vs1))) +
+ tf*(log(vs1)-log(vs2));
+ } else {
+ b = 0.4;
+ td = tf*sqrt(fabs( log(1.0-vs1)*log(1.0-vs1)+2*a*b*(vs1))) +
+ tf*(log(1.0-vs1)-log(1.0-vs2));
+ }
+
+ return(td);
+}
+
+
+
+
+/* Size (um) of the PMOS of a driver that charges driving_cap within
+ * desiredrisetime: derive the required resistance from the RC charge
+ * equation at threshold PARM_VSINV, convert to width, and clamp to
+ * [4*LSCALE, Wworddrivemax].
+ * NOTE(review): nsize is computed but never used — callers derive the
+ * NMOS width from the returned psize themselves; candidate for removal. */
+double SIM_power_driver_size(double driving_cap, double desiredrisetime)
+{
+ double nsize, psize;
+ double Rpdrive;
+
+ Rpdrive = desiredrisetime/(driving_cap*log(PARM_VSINV)*-1.0);
+ psize = SIM_power_restowidth(Rpdrive,PCH);
+ nsize = SIM_power_restowidth(Rpdrive,NCH);
+ if (psize > Wworddrivemax) {
+ psize = Wworddrivemax;
+ }
+ if (psize < 4.0 * LSCALE)
+ psize = 4.0 * LSCALE;
+
+ return (psize);
+}
+
+
diff --git a/src/mem/ruby/network/orion/power_ll.hh b/src/mem/ruby/network/orion/power_ll.hh
new file mode 100644
index 000000000..4cae89dcc
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_ll.hh
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef POWER_LL_H_
+#define POWER_LL_H_
+
+/* Low-level transistor capacitance/resistance/delay models shared by the
+ * ORION power estimators (implemented in power_ll.cc). */
+
+/* PMOS width (um) of a driver for the given load and rise time. */
+extern double SIM_power_driver_size(double driving_cap, double desiredrisetime);
+/* NOTE(review): declared here but not defined in power_ll.cc — confirm
+ * which translation unit provides it. */
+extern int SIM_power_init(void);
+/* Gate capacitance (F) of a transistor plus its poly feed wire. */
+extern double SIM_power_gatecap(double width, double wirelength) ;
+/* Gate capacitance (F) of a pass transistor (currently == gatecap). */
+extern double SIM_power_gatecappass(double width, double wirelength);
+/* Drain capacitance (F) of `stack` series transistors. */
+extern double SIM_power_draincap(double width, int nchannel, int stack);
+/* Effective switching resistance (ohms) of a transistor stack. */
+extern double SIM_power_transresswitch(double width, int nchannel, int stack);
+/* Full-on resistance (ohms) of a transistor stack. */
+extern double SIM_power_transreson(double width,int nchannel,int stack);
+/* Inverse of transreson: width (um) giving resistance `res`. */
+extern double SIM_power_restowidth(double res,int nchannel);
+/* Horowitz gate-delay approximation. */
+extern double SIM_power_horowitz(double inputramptime,double tf,double vs1,double vs2,int rise);
+
+
+
+
+
+
+
+
+
+
+
+
+#endif
diff --git a/src/mem/ruby/network/orion/power_router_init.cc b/src/mem/ruby/network/orion/power_router_init.cc
new file mode 100644
index 000000000..be58fbdbf
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_router_init.cc
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+
+#include "power_router_init.hh"
+#include "power_array.hh"
+#include "power_arbiter.hh"
+#include "power_crossbar.hh"
+#include "power_ll.hh"
+#include "parm_technology.hh"
+#include "SIM_port.hh"
+#include "power_static.hh"
+#include "power_utils.hh"
+
+/* -------------------------------------------------------------------------------------------- */
+// Set buffer parameters
+int buf_set_para(power_array_info *info, int is_fifo, unsigned n_read_port, unsigned n_write_port, unsigned n_entry, unsigned line_width, int outdrv)
+{
+ //general parameters
+ info->share_rw = 0;
+ info->read_ports = n_read_port;
+ info->write_ports = n_write_port;
+ info->n_set = n_entry;
+ info->blk_bits = line_width;
+ info->assoc = 1;
+ info->data_width = line_width;
+ info->data_end = PARM_data_end;
+
+ //no array subpartition
+ info->data_ndwl = 1;
+ info->data_ndbl = 1;
+ info->data_nspd = 1;
+
+ info->data_n_share_amp =1;
+
+ //MODEL parameters
+ if(is_fifo) {
+ info->row_dec_model = SIM_NO_MODEL;
+ info->row_dec_pre_model = SIM_NO_MODEL;
+ }
+ else {
+ info->row_dec_model = PARM_row_dec_model;
+ info->row_dec_pre_model = PARM_row_dec_pre_model;
+ }
+
+ info->data_wordline_model = PARM_wordline_model;
+ info->data_bitline_model = PARM_bitline_model;
+ info->data_bitline_pre_model = PARM_bitline_pre_model;
+ info->data_mem_model = PARM_mem_model;
+
+ if(PARM_data_end == 2)
+ info->data_amp_model = PARM_amp_model;
+ else
+ info->data_amp_model = SIM_NO_MODEL;
+ if(outdrv)
+ info->outdrv_model = PARM_outdrv_model;
+ else
+ info->outdrv_model = SIM_NO_MODEL;
+
+ info->data_colsel_pre_model = SIM_NO_MODEL;
+ info->col_dec_model = SIM_NO_MODEL;
+ info->col_dec_pre_model = SIM_NO_MODEL;
+ info->mux_model = SIM_NO_MODEL;
+
+ //no tag array
+
+ info->tag_wordline_model = SIM_NO_MODEL;
+ info->tag_bitline_model = SIM_NO_MODEL;
+ info->tag_bitline_pre_model = SIM_NO_MODEL;
+ info->tag_mem_model = SIM_NO_MODEL;
+ info->tag_attach_mem_model = SIM_NO_MODEL;
+ info->tag_amp_model = SIM_NO_MODEL;
+ info->tag_colsel_pre_model = SIM_NO_MODEL;
+ info->comp_model = SIM_NO_MODEL;
+ info->comp_pre_model = SIM_NO_MODEL;
+
+ info->write_policy = 0; //no dirty bit
+
+ //derived
+ if(info->data_width != 0){
+ info->n_item = info->blk_bits / info->data_width;
+ }
+ else{
+ info->eff_data_cols = info->blk_bits * info->assoc * info->data_nspd;
+ }
+
+ return 0;
+}
+/* -------------------------------------------------------------------------------------------- */
+
+
+/* --------------- Router init --------------------------------------*/
+
+
/*
 * Derive the remaining fields of *info from its caller-supplied basics
 * (n_in, n_out, flit_width, n_v_class -- the PARM_* defaults are commented
 * out below) and initialize each component power model of *router
 * (crossbar, input buffers, VC arbiters, switch arbiters), accumulating
 * each component's leakage current into router->i_leakage.
 * Always returns 0.
 */
int power_router_init(power_router *router, power_router_info *info)
{
 int outdrv;
 double req_len = 0;  // crossbar request-line length; set by
                      // power_crossbar_init() and reused for out-arbiters

 //general
// info->n_in = PARM_in_port;
 info->n_total_in = info->n_in;
// info->n_out = PARM_out_port;
 info->n_total_out = info->n_out;
// info->flit_width = PARM_flit_width;

 //vc
// info->n_v_channel = MAX(PARM_v_channel, 1);
// info->n_v_class = MAX(info->n_v_channel, PARM_v_class);

 // Buffer/switch sharing is only meaningful with more than one virtual
 // class; with a single class nothing can be shared.
 if(info->n_v_class > 1) {
   info->in_share_buf = PARM_in_share_buf;
   info->out_share_buf = PARM_out_share_buf;
   info->in_share_switch = PARM_in_share_switch;
   info->out_share_switch = PARM_out_share_switch;
 }
 else {
   info->in_share_buf = 0;
   info->out_share_buf = 0;
   info->in_share_switch = 0;
   info->out_share_switch = 0;
 }

 //xbar
 info->crossbar_model = PARM_crossbar_model;
 info->degree = PARM_crsbar_degree;
 info->connect_type = PARM_connect_type;
 info->trans_type = PARM_trans_type;
 info->crossbar_in_len = PARM_crossbar_in_len;
 info->crossbar_out_len = PARM_crossbar_out_len;

 //input buffer
 info->in_buf = PARM_in_buf;
 // An output driver is modeled when private (non-shared) buffers feed a
 // shared switch port.
 outdrv = !info->in_share_buf && info->in_share_switch;
 buf_set_para(&info->in_buf_info, 0, PARM_in_buf_rport, 1, PARM_in_buf_set, info->flit_width, outdrv);

 //vc arbiter
 // A QUEUE_ARBITER keeps its state in a small FIFO (modeled as an array);
 // any other arbiter model keeps per-requester flip-flops instead.
 if(info->n_v_class > 1) {
   info->vc_in_arb_model = PARM_vc_in_arb_model;
   info->vc_out_arb_model = PARM_vc_out_arb_model;
   if(info->vc_in_arb_model == QUEUE_ARBITER) {
     buf_set_para(&info->vc_in_arb_queue_info, 1, 1, 1, info->n_v_class, SIM_power_logtwo(info->n_v_class), 0);
     info->vc_in_arb_ff_model = SIM_NO_MODEL;
   }
   else
     info->vc_in_arb_ff_model = PARM_vc_in_arb_ff_model;

   if(info->vc_out_arb_model == QUEUE_ARBITER) {
     buf_set_para(&info->vc_out_arb_queue_info, 1, 1, 1, info->n_total_in - 1, SIM_power_logtwo(info->n_total_in - 1), 0);
     info->vc_out_arb_ff_model = SIM_NO_MODEL;
   }
   else
     info->vc_out_arb_ff_model = PARM_vc_out_arb_ff_model;
 }
 else {
   // Single virtual class: no VC arbitration at all.
   info->vc_in_arb_model = SIM_NO_MODEL;
   info->vc_in_arb_ff_model = SIM_NO_MODEL;
   info->vc_out_arb_model = SIM_NO_MODEL;
   info->vc_out_arb_ff_model = SIM_NO_MODEL;
 }

 //switch arbiter
 // NOTE(review): switch arbitration is only modeled for n_in > 2 --
 // presumably arbitration is considered trivial for 1-2 inputs; confirm.
 if (info->n_in > 2) {
   info->sw_in_arb_model = PARM_sw_in_arb_model;
   info->sw_out_arb_model = PARM_sw_out_arb_model;
   if(info->sw_in_arb_model == QUEUE_ARBITER) {
     buf_set_para(&info->sw_in_arb_queue_info, 1, 1, 1, info->n_v_class, SIM_power_logtwo(info->n_v_class), 0);
     info->sw_in_arb_ff_model = SIM_NO_MODEL;
   }
   else
     info->sw_in_arb_ff_model = PARM_sw_in_arb_ff_model;

   if(info->sw_out_arb_model == QUEUE_ARBITER) {
     buf_set_para(&info->sw_out_arb_queue_info, 1, 1, 1, info->n_total_in - 1, SIM_power_logtwo(info->n_total_in - 1), 0);
     info->sw_out_arb_ff_model = SIM_NO_MODEL;
   }
   else
     info->sw_out_arb_ff_model = PARM_sw_out_arb_ff_model;
 }
 else {
   info->sw_in_arb_model = SIM_NO_MODEL;
   info->sw_in_arb_ff_model = SIM_NO_MODEL;
   info->sw_out_arb_model = SIM_NO_MODEL;
   info->sw_out_arb_ff_model = SIM_NO_MODEL;
 }

 // How many crossbar (switch) ports each input port drives:
 //  - shared buffer: one per buffer read port
 //  - private buffers feeding a shared switch port: one
 //  - otherwise: one per virtual class
 if(info->in_buf) {
   if(info->in_share_buf)
     info->in_n_switch = info->in_buf_info.read_ports;
   else if(info->in_share_switch)
     info->in_n_switch = 1;
   else
     info->in_n_switch = info->n_v_class;
 }
 else
   info->in_n_switch = 1;

 info->n_switch_in = info->n_in * info->in_n_switch;

 info->n_switch_out = info->n_out;

 //-------- call initialize functions -----------

 router->i_leakage = 0;

 //initialize crossbar
 power_crossbar_init(&router->crossbar, info->crossbar_model, info->n_switch_in, info->n_switch_out, info->flit_width, info->degree, info->connect_type, info->trans_type, info->crossbar_in_len, info->crossbar_out_len, &req_len);
 router->i_leakage += router->crossbar.i_leakage;
// printf("xbar_leak %g", router->crossbar.i_leakage);

 //initialize input buffer
 if(info->in_buf) {
   power_array_init(&info->in_buf_info, &router->in_buf);
   // One buffer per input port, hence the multiply by n_in.
   router->i_leakage += router->in_buf.i_leakage * info->n_in;
// printf("buffer_leak %g", router->in_buf.i_leakage);
 }
// printf("initialize in buffer over\n");

 //initialize vc arbiter
 // NOTE(review): the input VC arbiter is sized with PARM_VC_per_MC while
 // the input switch arbiter below uses info->n_v_class -- confirm this
 // asymmetry is intentional.
 if(info->vc_in_arb_model)
   power_arbiter_init(&router->vc_in_arb, info->vc_in_arb_model, info->vc_in_arb_ff_model, PARM_VC_per_MC, 0, &info->vc_in_arb_queue_info);

 if(info->vc_out_arb_model)
   power_arbiter_init(&router->vc_out_arb, info->vc_out_arb_model, info->vc_out_arb_ff_model, info->n_total_in - 1, req_len, &info->vc_out_arb_queue_info);

 //initialize switch arbiter
 if(info->sw_in_arb_model)
   power_arbiter_init(&router->sw_in_arb, info->sw_in_arb_model, info->sw_in_arb_ff_model, info->n_v_class, 0, &info->sw_in_arb_queue_info);

 if(info->sw_out_arb_model)
   power_arbiter_init(&router->sw_out_arb, info->sw_out_arb_model, info->sw_out_arb_ff_model, info->n_total_in - 1, req_len, &info->sw_out_arb_queue_info);

 return 0;
}
diff --git a/src/mem/ruby/network/orion/power_router_init.hh b/src/mem/ruby/network/orion/power_router_init.hh
new file mode 100644
index 000000000..2d95cea0b
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_router_init.hh
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifndef _POWER_ROUTER_INIT_H
#define _POWER_ROUTER_INIT_H

#include "power_array.hh"
#include "power_arbiter.hh"
#include "power_crossbar.hh"

/* ------------ Models ------------------------ */
/*typedef enum {
  GENERIC_SEL = 1,
  SEL_MAX_MODEL
} power_sel_model;
*/

/* ------------ Misc --------------------------- */

/*typedef struct {
  int model;
  unsigned width;
  unsigned long long n_anyreq;
  unsigned long long n_chgreq;
  unsigned long long n_grant;
  unsigned long long n_enc[MAX_SEL_LEVEL];
  double e_anyreq;
  double e_chgreq;
  double e_grant;
  double e_enc[MAX_SEL_LEVEL];
} power_sel;
*/

/* --------------- Loading --------------- */
// Loading assumption used by the power models.
typedef enum {
  ACTUAL_LOADING =1,
  HALF_LOADING,
  MAX_LOADING
}loading;

/* ----------------- Router --------------- */

// Aggregated power state of one router: one model object per component plus
// the summed leakage current.  Filled in by power_router_init().
typedef struct {
  power_crossbar crossbar;   // switch fabric
  power_array in_buf;        // input buffer array
  power_arbiter vc_in_arb;   // virtual-channel arbiter, input side
  power_arbiter vc_out_arb;  // virtual-channel arbiter, output side
  power_arbiter sw_in_arb;   // switch arbiter, input side
  power_arbiter sw_out_arb;  // switch arbiter, output side
  double i_leakage;          // total leakage current (sum of components)
} power_router;

// Router configuration.  The caller sets the "general" and "vc" basics;
// power_router_init() derives everything else from the PARM_* settings.
typedef struct {
  //general
  unsigned n_in;             // physical input ports
  unsigned n_out;            // physical output ports
  unsigned flit_width;       // flit width in bits
  //vc
  unsigned n_v_channel;
  unsigned n_v_class;        // virtual classes; sharing only modeled if > 1
  int in_share_buf;          // input VCs share one buffer?
  int out_share_buf;
  int in_share_switch;       // input VCs share one switch port?
  int out_share_switch;
  //xbar
  int crossbar_model;
  int degree;
  int connect_type;
  int trans_type;
  double crossbar_in_len;
  double crossbar_out_len;

  int in_buf;                // nonzero if input buffering is modeled

  //buffer
  power_array_info in_buf_info;
  unsigned pipe_depth;
  //arbiter
  int vc_in_arb_model;
  int vc_out_arb_model;
  int vc_in_arb_ff_model;    // flip-flop model used when not QUEUE_ARBITER
  int vc_out_arb_ff_model;
  int sw_in_arb_model;
  int sw_out_arb_model;
  int sw_in_arb_ff_model;
  int sw_out_arb_ff_model;

  power_array_info vc_in_arb_queue_info;   // queue geometry for QUEUE_ARBITER
  power_array_info vc_out_arb_queue_info;
  power_array_info sw_in_arb_queue_info;
  power_array_info sw_out_arb_queue_info;
  //derived -- computed by power_router_init(); do not set by hand
  unsigned n_total_in;
  unsigned n_total_out;
  unsigned in_n_switch;      // switch ports driven by each input port
  unsigned n_switch_in;      // total crossbar inputs
  unsigned n_switch_out;     // total crossbar outputs
} power_router_info;

extern int power_router_init(power_router *router, power_router_info *info);
#endif
diff --git a/src/mem/ruby/network/orion/power_static.cc b/src/mem/ruby/network/orion/power_static.cc
new file mode 100644
index 000000000..c0ae394a6
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_static.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "power_static.hh"
+
// Technology-dependent leakage lookup tables, selected at compile time by
// PARM_TECH_POINT (from parm_technology.hh; 18/10/7 presumably denote the
// 0.18um/0.10um/0.07um nodes -- confirm).  NMOS_TAB/PMOS_TAB hold one value
// per device; NAND2_TAB/NOR2_TAB hold four values, presumably one per 2-bit
// input combination -- confirm against their use in the power model code.
// NOTE(review): there is no #else branch, so any other PARM_TECH_POINT
// leaves these arrays undefined and the externs in power_static.hh will
// fail to resolve at link time; consider adding "#else #error".
#if (PARM_TECH_POINT == 18)
double NMOS_TAB[1] = {20.5e-9};
double PMOS_TAB[1] = {9.2e-9};
double NAND2_TAB[4] = {6.4e-10, 20.4e-9, 12.6e-9, 18.4e-9};
double NOR2_TAB[4] ={40.9e-9, 8.32e-9, 9.2e-9, 2.3e-10};
#elif (PARM_TECH_POINT == 10)
double NMOS_TAB[1] = {22.7e-9};
double PMOS_TAB[1] = {18.0e-9};
double NAND2_TAB[4] = {1.2e-9, 22.6e-9, 11.4e-9, 35.9e-9};
double NOR2_TAB[4] ={45.1e-9, 11.5e-9, 17.9e-9, 1.8e-9};
#elif (PARM_TECH_POINT == 7)
double NMOS_TAB[1] = {118.1e-9};
double PMOS_TAB[1] = {135.2e-9};
double NAND2_TAB[4] = {19.7e-9, 115.3e-9, 83.0e-9, 267.6e-9};
double NOR2_TAB[4] ={232.4e-9, 79.6e-9, 127.9e-9, 12.3e-9};
#endif
diff --git a/src/mem/ruby/network/orion/power_static.hh b/src/mem/ruby/network/orion/power_static.hh
new file mode 100644
index 000000000..6bc58ca01
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_static.hh
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifndef _POWER_STATIC_H
#define _POWER_STATIC_H

#include "parm_technology.hh"

// Technology-dependent leakage tables, defined in power_static.cc for
// PARM_TECH_POINT values 18, 10 and 7 only (undefined for anything else).
extern double NMOS_TAB[1];
extern double PMOS_TAB[1];
extern double NAND2_TAB[4];
extern double NOR2_TAB[4];

#endif
diff --git a/src/mem/ruby/network/orion/power_utils.cc b/src/mem/ruby/network/orion/power_utils.cc
new file mode 100644
index 000000000..1f592fff0
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_utils.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include "parm_technology.hh"
+#include "power_utils.hh"
+#include <assert.h>
+#include <math.h>
+
+/* ----------- from SIM_power_util.c ------------ */
+
/* Hamming distance table: h_tab[b] is the number of set bits (popcount) of
   byte b.  Statically initialized for all 256 byte values; SIM_power_init()
   recomputes the same values at runtime. */
static char h_tab[256] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
+
+
/* Hamming distance between old_val and new_val over the bits selected by
 * mask, i.e. the population count of (old_val ^ new_val) & mask.
 * Rewritten with Kernighan's clear-lowest-set-bit loop; the result is
 * identical to the original parallel bit-count version. */
static unsigned SIM_power_Hamming_slow( unsigned long int old_val, unsigned long int new_val, unsigned long int mask )
{
  unsigned long int diff = (old_val ^ new_val) & mask;
  unsigned distance = 0;

  while (diff) {
    diff &= diff - 1;   /* clears the lowest set bit */
    distance++;
  }

  return distance;
}
+
+
+int SIM_power_init(void)
+{
+ unsigned i;
+
+ /* initialize Hamming distance table */
+ for (i = 0; i < 256; i++)
+ h_tab[i] = SIM_power_Hamming_slow(i, 0, 0xFF);
+
+ return 0;
+}
+
+
+/* assume unsigned long int is unsigned64_t */
+unsigned SIM_power_Hamming(unsigned long int old_val, unsigned long int new_val, unsigned long int mask)
+{
+ union {
+ unsigned long int x;
+ char id[8];
+ } u;
+ unsigned rval;
+
+ u.x = (old_val ^ new_val) & mask;
+
+ rval = h_tab[u.id[0]];
+ rval += h_tab[u.id[1]];
+ rval += h_tab[u.id[2]];
+ rval += h_tab[u.id[3]];
+ rval += h_tab[u.id[4]];
+ rval += h_tab[u.id[5]];
+ rval += h_tab[u.id[6]];
+ rval += h_tab[u.id[7]];
+
+ return rval;
+}
+
+
+unsigned SIM_power_Hamming_group(unsigned long int d1_new, unsigned long int d1_old, unsigned long int d2_new, unsigned long int d2_old, unsigned width, unsigned n_grp)
+{
+ unsigned rval = 0;
+ unsigned long int g1_new, g1_old, g2_new, g2_old, mask;
+
+ mask = HAMM_MASK(width);
+
+ while (n_grp--) {
+ g1_new = d1_new & mask;
+ g1_old = d1_old & mask;
+ g2_new = d2_new & mask;
+ g2_old = d2_old & mask;
+
+ if (g1_new != g1_old || g2_new != g2_old)
+ rval ++;
+
+ d1_new >>= width;
+ d1_old >>= width;
+ d2_new >>= width;
+ d2_old >>= width;
+ }
+
+ return rval;
+}
+
+/* ---------------------------------------------- */
+
+/* ----------------------------------------------- */
+
+
+
+
/* Base-2 logarithm of x (must be > 0), computed with the change-of-base
 * identity log2(x) = log10(x) / log10(2) exactly as the original did. */
double logtwo(double x)
{
  assert(x > 0);
  const double numer = log10(x);
  const double denom = log10(2.0);
  return numer / denom;
}
+
/* Integer ceiling of log2(x): the number of address bits needed to index x
 * distinct items.  Returns 0 for x == 0 or x == 1.
 *
 * Fixes over the original:
 *  - x == 0 previously evaluated (BIGONE << (rval - 1)) with rval == 0,
 *    i.e. a shift by (unsigned)-1 -- undefined behavior; 0 now returns 0
 *    explicitly.
 *  - the shift amount in the power-of-two test is parenthesized.
 *  - uses a plain (unsigned long int)1 instead of the BIGONE macro. */
unsigned SIM_power_logtwo(unsigned long int x)
{
  unsigned width = 0;

  if (x == 0)
    return 0;

  /* width = index of the highest set bit, plus one */
  while ((x >> width) && width < sizeof(unsigned long int) << 3)
    width++;
  /* an exact power of two needs one bit fewer */
  if (x == ((unsigned long int)1 << (width - 1)))
    width--;

  return width;
}
+
diff --git a/src/mem/ruby/network/orion/power_utils.hh b/src/mem/ruby/network/orion/power_utils.hh
new file mode 100644
index 000000000..59123c1f7
--- /dev/null
+++ b/src/mem/ruby/network/orion/power_utils.hh
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _POWER_UTILS_H
+#define _POWER_UTILS_H
+extern unsigned SIM_power_Hamming(unsigned long int old_val, unsigned long int new_val, unsigned long int mask);
+extern double logtwo(double x);
+extern unsigned SIM_power_logtwo(unsigned long int x);
+
+#endif
diff --git a/src/mem/ruby/network/simple/Network_Files/GarnetFileMaker.py b/src/mem/ruby/network/simple/Network_Files/GarnetFileMaker.py
new file mode 100644
index 000000000..b47bb0161
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/GarnetFileMaker.py
@@ -0,0 +1,45 @@
#!/usr/bin/env python
#
# GarnetFileMaker.py <rows> <cols>
#
# Writes a rows x cols mesh topology for the Ruby/Garnet network to stdout:
# one L1 and one L2 external node per router, links between vertically
# adjacent routers (link_weight:1) and horizontally adjacent routers
# (link_weight:2), all with link_latency:1.  Router ids run down each
# column: bank = col * rows + row.

import sys, os, string, re, math


def main():
    # Parse the mesh dimensions.  Bug fix: the original printed the usage
    # message but then fell through with rows = cols = 0 and emitted an
    # empty topology; exit with a nonzero status instead.
    if len(sys.argv) == 3:
        rows = int(sys.argv[1])
        cols = int(sys.argv[2])
    else:
        sys.stderr.write("usage : GarnetFileMaker.py <rows> <cols> \n\n")
        sys.exit(1)

    banks = rows * cols

    # One L1 and one L2 cache controller attached to every router.
    for bank in range(banks):
        sys.stdout.write("ext_node:L1Cache:%d int_node:%d link_latency:1 \n" % (bank, bank))
        sys.stdout.write("ext_node:L2Cache:%d int_node:%d link_latency:1 \n" % (bank, bank))

    sys.stdout.write("\n")

    # Links between vertically adjacent routers within each column.
    for col in range(cols):
        bank = col * rows
        for row in range(1, rows):
            sys.stdout.write("int_node:%d int_node:%d link_latency:1 link_weight:1\n" % (bank, bank + 1))
            bank += 1
        sys.stdout.write("\n")

    sys.stdout.write("\n")

    # Links between horizontally adjacent routers within each row.
    for row in range(rows):
        bank = row
        for col in range(1, cols):
            sys.stdout.write("int_node:%d int_node:%d link_latency:1 link_weight:2\n" % (bank, rows + bank))
            bank += rows
        sys.stdout.write("\n")


if __name__ == "__main__":
    main()
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-16.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-16.txt
new file mode 100644
index 000000000..1304a5e0a
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-16.txt
@@ -0,0 +1,78 @@
+
+processors:16
+procs_per_chip:16
+L2banks:16
+memories:16
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:4 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:5 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:6 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:7 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:8 int_node:8 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:9 int_node:9 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:10 int_node:10 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:11 int_node:11 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:12 int_node:12 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:13 int_node:13 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:14 int_node:14 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:15 int_node:15 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:2 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:3 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:4 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:5 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:6 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:7 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:8 int_node:8 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:9 int_node:9 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:10 int_node:10 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:11 int_node:11 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:12 int_node:12 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:13 int_node:13 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:14 int_node:14 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:15 int_node:15 link_latency:1 bw_multiplier:72
+ext_node:Directory:0 int_node:0 link_latency:20 bw_multiplier:80
+ext_node:Directory:1 int_node:1 link_latency:20 bw_multiplier:80
+ext_node:Directory:2 int_node:2 link_latency:20 bw_multiplier:80
+ext_node:Directory:3 int_node:3 link_latency:20 bw_multiplier:80
+ext_node:Directory:4 int_node:4 link_latency:20 bw_multiplier:80
+ext_node:Directory:5 int_node:5 link_latency:20 bw_multiplier:80
+ext_node:Directory:6 int_node:6 link_latency:20 bw_multiplier:80
+ext_node:Directory:7 int_node:7 link_latency:20 bw_multiplier:80
+ext_node:Directory:8 int_node:8 link_latency:20 bw_multiplier:80
+ext_node:Directory:9 int_node:9 link_latency:20 bw_multiplier:80
+ext_node:Directory:10 int_node:10 link_latency:20 bw_multiplier:80
+ext_node:Directory:11 int_node:11 link_latency:20 bw_multiplier:80
+ext_node:Directory:12 int_node:12 link_latency:20 bw_multiplier:80
+ext_node:Directory:13 int_node:13 link_latency:20 bw_multiplier:80
+ext_node:Directory:14 int_node:14 link_latency:20 bw_multiplier:80
+ext_node:Directory:15 int_node:15 link_latency:20 bw_multiplier:80
+int_node:0 int_node:1 link_latency:1 bw_multiplier:72
+int_node:1 int_node:2 link_latency:1 bw_multiplier:72
+int_node:2 int_node:3 link_latency:1 bw_multiplier:72
+int_node:0 int_node:4 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:1 int_node:5 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:2 int_node:6 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:3 int_node:7 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:4 int_node:5 link_latency:1 bw_multiplier:72
+int_node:5 int_node:6 link_latency:1 bw_multiplier:72
+int_node:6 int_node:7 link_latency:1 bw_multiplier:72
+int_node:4 int_node:8 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:5 int_node:9 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:6 int_node:10 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:7 int_node:11 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:8 int_node:9 link_latency:1 bw_multiplier:72
+int_node:9 int_node:10 link_latency:1 bw_multiplier:72
+int_node:10 int_node:11 link_latency:1 bw_multiplier:72
+int_node:8 int_node:12 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:9 int_node:13 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:10 int_node:14 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:11 int_node:15 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:12 int_node:13 link_latency:1 bw_multiplier:72
+int_node:13 int_node:14 link_latency:1 bw_multiplier:72
+int_node:14 int_node:15 link_latency:1 bw_multiplier:72
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-4.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-4.txt
new file mode 100644
index 000000000..329156a33
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-4.txt
@@ -0,0 +1,56 @@
+
+processors:16
+procs_per_chip:16
+L2banks:16
+memories:4
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:1 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:2 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:3 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:4 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:5 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:6 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:7 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:8 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:9 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:10 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:11 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:12 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:13 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:14 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:15 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:1 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:2 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:3 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:4 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:5 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:6 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:7 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:8 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:9 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:10 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:11 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:12 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:13 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:14 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:15 int_node:7 link_latency:1 bw_multiplier:72
+int_node:11 int_node:12 link_latency:1 bw_multiplier:72
+int_node:8 int_node:13 link_latency:1 bw_multiplier:72
+int_node:0 int_node:8 link_latency:1 bw_multiplier:72
+int_node:4 int_node:8 link_latency:1 bw_multiplier:72
+int_node:1 int_node:9 link_latency:1 bw_multiplier:72
+int_node:5 int_node:9 link_latency:1 bw_multiplier:72
+int_node:2 int_node:10 link_latency:1 bw_multiplier:72
+int_node:6 int_node:10 link_latency:1 bw_multiplier:72
+int_node:3 int_node:11 link_latency:1 bw_multiplier:72
+int_node:7 int_node:11 link_latency:1 bw_multiplier:72
+int_node:8 int_node:9 link_latency:1 bw_multiplier:72
+int_node:9 int_node:10 link_latency:1 bw_multiplier:72
+int_node:10 int_node:11 link_latency:1 bw_multiplier:72
+ext_node:Directory:0 int_node:12 link_latency:20 bw_multiplier:80
+ext_node:Directory:1 int_node:12 link_latency:20 bw_multiplier:80
+ext_node:Directory:2 int_node:13 link_latency:20 bw_multiplier:80
+ext_node:Directory:3 int_node:13 link_latency:20 bw_multiplier:80
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-8.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-8.txt
new file mode 100644
index 000000000..7b714cdc1
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-8.txt
@@ -0,0 +1,61 @@
+
+processors:16
+procs_per_chip:16
+L2banks:16
+memories:8
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:1 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:2 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:3 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:4 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:5 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:6 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:7 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:8 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:9 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:10 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:11 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:12 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:13 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:14 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:15 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:1 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:2 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:3 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:4 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:5 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:6 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:7 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:8 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:9 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:10 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:11 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:12 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:13 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:14 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:15 int_node:7 link_latency:1 bw_multiplier:72
+int_node:12 int_node:13 link_latency:20 bw_multiplier:10
+int_node:9 int_node:12 link_latency:1 bw_multiplier:72
+int_node:10 int_node:12 link_latency:1 bw_multiplier:72
+int_node:0 int_node:8 link_latency:1 bw_multiplier:72
+int_node:4 int_node:8 link_latency:1 bw_multiplier:72
+int_node:1 int_node:9 link_latency:1 bw_multiplier:72
+int_node:5 int_node:9 link_latency:1 bw_multiplier:72
+int_node:2 int_node:10 link_latency:1 bw_multiplier:72
+int_node:6 int_node:10 link_latency:1 bw_multiplier:72
+int_node:3 int_node:11 link_latency:1 bw_multiplier:72
+int_node:7 int_node:11 link_latency:1 bw_multiplier:72
+int_node:8 int_node:9 link_latency:1 bw_multiplier:72
+int_node:9 int_node:10 link_latency:1 bw_multiplier:72
+int_node:10 int_node:11 link_latency:1 bw_multiplier:72
+ext_node:Directory:0 int_node:13 link_latency:20 bw_multiplier:80
+ext_node:Directory:1 int_node:13 link_latency:20 bw_multiplier:80
+ext_node:Directory:2 int_node:13 link_latency:20 bw_multiplier:80
+ext_node:Directory:3 int_node:13 link_latency:20 bw_multiplier:80
+ext_node:Directory:4 int_node:13 link_latency:20 bw_multiplier:80
+ext_node:Directory:5 int_node:13 link_latency:20 bw_multiplier:80
+ext_node:Directory:6 int_node:13 link_latency:20 bw_multiplier:80
+ext_node:Directory:7 int_node:13 link_latency:20 bw_multiplier:80
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-1_L2Banks-16_Memories-16.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-1_L2Banks-16_Memories-16.txt
new file mode 100644
index 000000000..b1a262d42
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-1_L2Banks-16_Memories-16.txt
@@ -0,0 +1,190 @@
+
+processors:16
+procs_per_chip:1
+L2banks:16
+memories:16
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:0 link_latency:40 bw_multiplier:10
+int_node:0 int_node:1 link_latency:40 bw_multiplier:16
+int_node:0 int_node:2 link_latency:40 bw_multiplier:16
+int_node:0 int_node:3 link_latency:40 bw_multiplier:16
+int_node:0 int_node:4 link_latency:40 bw_multiplier:16
+int_node:0 int_node:5 link_latency:40 bw_multiplier:16
+int_node:0 int_node:6 link_latency:40 bw_multiplier:16
+int_node:0 int_node:7 link_latency:40 bw_multiplier:16
+int_node:0 int_node:8 link_latency:40 bw_multiplier:16
+int_node:0 int_node:9 link_latency:40 bw_multiplier:16
+int_node:0 int_node:10 link_latency:40 bw_multiplier:16
+int_node:0 int_node:11 link_latency:40 bw_multiplier:16
+int_node:0 int_node:12 link_latency:40 bw_multiplier:16
+int_node:0 int_node:13 link_latency:40 bw_multiplier:16
+int_node:0 int_node:14 link_latency:40 bw_multiplier:16
+int_node:0 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:Directory:1 int_node:1 link_latency:40 bw_multiplier:10
+int_node:1 int_node:2 link_latency:40 bw_multiplier:16
+int_node:1 int_node:3 link_latency:40 bw_multiplier:16
+int_node:1 int_node:4 link_latency:40 bw_multiplier:16
+int_node:1 int_node:5 link_latency:40 bw_multiplier:16
+int_node:1 int_node:6 link_latency:40 bw_multiplier:16
+int_node:1 int_node:7 link_latency:40 bw_multiplier:16
+int_node:1 int_node:8 link_latency:40 bw_multiplier:16
+int_node:1 int_node:9 link_latency:40 bw_multiplier:16
+int_node:1 int_node:10 link_latency:40 bw_multiplier:16
+int_node:1 int_node:11 link_latency:40 bw_multiplier:16
+int_node:1 int_node:12 link_latency:40 bw_multiplier:16
+int_node:1 int_node:13 link_latency:40 bw_multiplier:16
+int_node:1 int_node:14 link_latency:40 bw_multiplier:16
+int_node:1 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:Directory:2 int_node:2 link_latency:40 bw_multiplier:10
+int_node:2 int_node:3 link_latency:40 bw_multiplier:16
+int_node:2 int_node:4 link_latency:40 bw_multiplier:16
+int_node:2 int_node:5 link_latency:40 bw_multiplier:16
+int_node:2 int_node:6 link_latency:40 bw_multiplier:16
+int_node:2 int_node:7 link_latency:40 bw_multiplier:16
+int_node:2 int_node:8 link_latency:40 bw_multiplier:16
+int_node:2 int_node:9 link_latency:40 bw_multiplier:16
+int_node:2 int_node:10 link_latency:40 bw_multiplier:16
+int_node:2 int_node:11 link_latency:40 bw_multiplier:16
+int_node:2 int_node:12 link_latency:40 bw_multiplier:16
+int_node:2 int_node:13 link_latency:40 bw_multiplier:16
+int_node:2 int_node:14 link_latency:40 bw_multiplier:16
+int_node:2 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:Directory:3 int_node:3 link_latency:40 bw_multiplier:10
+int_node:3 int_node:4 link_latency:40 bw_multiplier:16
+int_node:3 int_node:5 link_latency:40 bw_multiplier:16
+int_node:3 int_node:6 link_latency:40 bw_multiplier:16
+int_node:3 int_node:7 link_latency:40 bw_multiplier:16
+int_node:3 int_node:8 link_latency:40 bw_multiplier:16
+int_node:3 int_node:9 link_latency:40 bw_multiplier:16
+int_node:3 int_node:10 link_latency:40 bw_multiplier:16
+int_node:3 int_node:11 link_latency:40 bw_multiplier:16
+int_node:3 int_node:12 link_latency:40 bw_multiplier:16
+int_node:3 int_node:13 link_latency:40 bw_multiplier:16
+int_node:3 int_node:14 link_latency:40 bw_multiplier:16
+int_node:3 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:4 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:4 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:Directory:4 int_node:4 link_latency:40 bw_multiplier:10
+int_node:4 int_node:5 link_latency:40 bw_multiplier:16
+int_node:4 int_node:6 link_latency:40 bw_multiplier:16
+int_node:4 int_node:7 link_latency:40 bw_multiplier:16
+int_node:4 int_node:8 link_latency:40 bw_multiplier:16
+int_node:4 int_node:9 link_latency:40 bw_multiplier:16
+int_node:4 int_node:10 link_latency:40 bw_multiplier:16
+int_node:4 int_node:11 link_latency:40 bw_multiplier:16
+int_node:4 int_node:12 link_latency:40 bw_multiplier:16
+int_node:4 int_node:13 link_latency:40 bw_multiplier:16
+int_node:4 int_node:14 link_latency:40 bw_multiplier:16
+int_node:4 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:5 int_node:5 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:5 int_node:5 link_latency:1 bw_multiplier:64
+ext_node:Directory:5 int_node:5 link_latency:40 bw_multiplier:10
+int_node:5 int_node:6 link_latency:40 bw_multiplier:16
+int_node:5 int_node:7 link_latency:40 bw_multiplier:16
+int_node:5 int_node:8 link_latency:40 bw_multiplier:16
+int_node:5 int_node:9 link_latency:40 bw_multiplier:16
+int_node:5 int_node:10 link_latency:40 bw_multiplier:16
+int_node:5 int_node:11 link_latency:40 bw_multiplier:16
+int_node:5 int_node:12 link_latency:40 bw_multiplier:16
+int_node:5 int_node:13 link_latency:40 bw_multiplier:16
+int_node:5 int_node:14 link_latency:40 bw_multiplier:16
+int_node:5 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:6 int_node:6 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:6 int_node:6 link_latency:1 bw_multiplier:64
+ext_node:Directory:6 int_node:6 link_latency:40 bw_multiplier:10
+int_node:6 int_node:7 link_latency:40 bw_multiplier:16
+int_node:6 int_node:8 link_latency:40 bw_multiplier:16
+int_node:6 int_node:9 link_latency:40 bw_multiplier:16
+int_node:6 int_node:10 link_latency:40 bw_multiplier:16
+int_node:6 int_node:11 link_latency:40 bw_multiplier:16
+int_node:6 int_node:12 link_latency:40 bw_multiplier:16
+int_node:6 int_node:13 link_latency:40 bw_multiplier:16
+int_node:6 int_node:14 link_latency:40 bw_multiplier:16
+int_node:6 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:7 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:7 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:Directory:7 int_node:7 link_latency:40 bw_multiplier:10
+int_node:7 int_node:8 link_latency:40 bw_multiplier:16
+int_node:7 int_node:9 link_latency:40 bw_multiplier:16
+int_node:7 int_node:10 link_latency:40 bw_multiplier:16
+int_node:7 int_node:11 link_latency:40 bw_multiplier:16
+int_node:7 int_node:12 link_latency:40 bw_multiplier:16
+int_node:7 int_node:13 link_latency:40 bw_multiplier:16
+int_node:7 int_node:14 link_latency:40 bw_multiplier:16
+int_node:7 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:8 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:8 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:Directory:8 int_node:8 link_latency:40 bw_multiplier:10
+int_node:8 int_node:9 link_latency:40 bw_multiplier:16
+int_node:8 int_node:10 link_latency:40 bw_multiplier:16
+int_node:8 int_node:11 link_latency:40 bw_multiplier:16
+int_node:8 int_node:12 link_latency:40 bw_multiplier:16
+int_node:8 int_node:13 link_latency:40 bw_multiplier:16
+int_node:8 int_node:14 link_latency:40 bw_multiplier:16
+int_node:8 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:9 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:9 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:Directory:9 int_node:9 link_latency:40 bw_multiplier:10
+int_node:9 int_node:10 link_latency:40 bw_multiplier:16
+int_node:9 int_node:11 link_latency:40 bw_multiplier:16
+int_node:9 int_node:12 link_latency:40 bw_multiplier:16
+int_node:9 int_node:13 link_latency:40 bw_multiplier:16
+int_node:9 int_node:14 link_latency:40 bw_multiplier:16
+int_node:9 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:10 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:10 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:Directory:10 int_node:10 link_latency:40 bw_multiplier:10
+int_node:10 int_node:11 link_latency:40 bw_multiplier:16
+int_node:10 int_node:12 link_latency:40 bw_multiplier:16
+int_node:10 int_node:13 link_latency:40 bw_multiplier:16
+int_node:10 int_node:14 link_latency:40 bw_multiplier:16
+int_node:10 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:11 int_node:11 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:11 int_node:11 link_latency:1 bw_multiplier:64
+ext_node:Directory:11 int_node:11 link_latency:40 bw_multiplier:10
+int_node:11 int_node:12 link_latency:40 bw_multiplier:16
+int_node:11 int_node:13 link_latency:40 bw_multiplier:16
+int_node:11 int_node:14 link_latency:40 bw_multiplier:16
+int_node:11 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:12 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:12 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:Directory:12 int_node:12 link_latency:40 bw_multiplier:10
+int_node:12 int_node:13 link_latency:40 bw_multiplier:16
+int_node:12 int_node:14 link_latency:40 bw_multiplier:16
+int_node:12 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:13 int_node:13 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:13 int_node:13 link_latency:1 bw_multiplier:64
+ext_node:Directory:13 int_node:13 link_latency:40 bw_multiplier:10
+int_node:13 int_node:14 link_latency:40 bw_multiplier:16
+int_node:13 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:14 int_node:14 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:14 int_node:14 link_latency:1 bw_multiplier:64
+ext_node:Directory:14 int_node:14 link_latency:40 bw_multiplier:10
+int_node:14 int_node:15 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:15 int_node:15 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:15 int_node:15 link_latency:1 bw_multiplier:64
+ext_node:Directory:15 int_node:15 link_latency:40 bw_multiplier:10
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-16.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-16.txt
new file mode 100644
index 000000000..3e0030c43
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-16.txt
@@ -0,0 +1,90 @@
+
+processors:16
+procs_per_chip:4
+L2banks:16
+memories:16
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:1 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:2 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:3 int_node:6 link_latency:20 bw_multiplier:10
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:1 int_node:4 link_latency:1 bw_multiplier:16
+int_node:4 int_node:3 link_latency:1 bw_multiplier:16
+int_node:3 int_node:2 link_latency:1 bw_multiplier:16
+int_node:5 int_node:4 link_latency:1 bw_multiplier:16
+int_node:5 int_node:6 link_latency:20 bw_multiplier:10
+
+int_node:5 int_node:12 link_latency:20 bw_multiplier:10
+int_node:5 int_node:19 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:4 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:5 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:6 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:7 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:4 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:5 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:6 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:7 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:Directory:4 int_node:13 link_latency:20 bw_multiplier:10
+ext_node:Directory:5 int_node:13 link_latency:20 bw_multiplier:10
+ext_node:Directory:6 int_node:13 link_latency:20 bw_multiplier:10
+ext_node:Directory:7 int_node:13 link_latency:20 bw_multiplier:10
+int_node:7 int_node:8 link_latency:1 bw_multiplier:16
+int_node:8 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:9 link_latency:1 bw_multiplier:16
+int_node:12 int_node:11 link_latency:1 bw_multiplier:16
+int_node:12 int_node:13 link_latency:20 bw_multiplier:10
+
+int_node:12 int_node:26 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:8 int_node:14 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:9 int_node:15 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:10 int_node:16 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:11 int_node:17 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:8 int_node:14 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:9 int_node:15 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:10 int_node:16 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:11 int_node:17 link_latency:1 bw_multiplier:64
+ext_node:Directory:8 int_node:20 link_latency:20 bw_multiplier:10
+ext_node:Directory:9 int_node:20 link_latency:20 bw_multiplier:10
+ext_node:Directory:10 int_node:20 link_latency:20 bw_multiplier:10
+ext_node:Directory:11 int_node:20 link_latency:20 bw_multiplier:10
+int_node:14 int_node:15 link_latency:1 bw_multiplier:16
+int_node:15 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:16 link_latency:1 bw_multiplier:16
+int_node:19 int_node:18 link_latency:1 bw_multiplier:16
+int_node:19 int_node:20 link_latency:20 bw_multiplier:10
+
+int_node:19 int_node:26 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:12 int_node:21 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:13 int_node:22 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:14 int_node:23 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:15 int_node:24 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:12 int_node:21 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:13 int_node:22 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:14 int_node:23 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:15 int_node:24 link_latency:1 bw_multiplier:64
+ext_node:Directory:12 int_node:27 link_latency:20 bw_multiplier:10
+ext_node:Directory:13 int_node:27 link_latency:20 bw_multiplier:10
+ext_node:Directory:14 int_node:27 link_latency:20 bw_multiplier:10
+ext_node:Directory:15 int_node:27 link_latency:20 bw_multiplier:10
+int_node:21 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:24 link_latency:1 bw_multiplier:16
+int_node:24 int_node:23 link_latency:1 bw_multiplier:16
+int_node:26 int_node:25 link_latency:1 bw_multiplier:16
+int_node:26 int_node:27 link_latency:20 bw_multiplier:10
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-4.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-4.txt
new file mode 100644
index 000000000..b7ef403ff
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-4.txt
@@ -0,0 +1,78 @@
+
+processors:16
+procs_per_chip:4
+L2banks:16
+memories:4
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:6 link_latency:20 bw_multiplier:10
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:1 int_node:4 link_latency:1 bw_multiplier:16
+int_node:4 int_node:3 link_latency:1 bw_multiplier:16
+int_node:3 int_node:2 link_latency:1 bw_multiplier:16
+int_node:5 int_node:4 link_latency:1 bw_multiplier:16
+int_node:5 int_node:6 link_latency:20 bw_multiplier:10
+
+int_node:5 int_node:12 link_latency:20 bw_multiplier:10
+int_node:5 int_node:19 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:4 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:5 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:6 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:7 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:4 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:5 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:6 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:7 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:Directory:1 int_node:13 link_latency:20 bw_multiplier:10
+int_node:7 int_node:8 link_latency:1 bw_multiplier:16
+int_node:8 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:9 link_latency:1 bw_multiplier:16
+int_node:12 int_node:11 link_latency:1 bw_multiplier:16
+int_node:12 int_node:13 link_latency:20 bw_multiplier:10
+
+int_node:12 int_node:26 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:8 int_node:14 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:9 int_node:15 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:10 int_node:16 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:11 int_node:17 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:8 int_node:14 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:9 int_node:15 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:10 int_node:16 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:11 int_node:17 link_latency:1 bw_multiplier:64
+ext_node:Directory:2 int_node:20 link_latency:20 bw_multiplier:10
+int_node:14 int_node:15 link_latency:1 bw_multiplier:16
+int_node:15 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:16 link_latency:1 bw_multiplier:16
+int_node:19 int_node:18 link_latency:1 bw_multiplier:16
+int_node:19 int_node:20 link_latency:20 bw_multiplier:10
+
+int_node:19 int_node:26 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:12 int_node:21 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:13 int_node:22 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:14 int_node:23 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:15 int_node:24 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:12 int_node:21 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:13 int_node:22 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:14 int_node:23 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:15 int_node:24 link_latency:1 bw_multiplier:64
+ext_node:Directory:3 int_node:27 link_latency:20 bw_multiplier:10
+int_node:21 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:24 link_latency:1 bw_multiplier:16
+int_node:24 int_node:23 link_latency:1 bw_multiplier:16
+int_node:26 int_node:25 link_latency:1 bw_multiplier:16
+int_node:26 int_node:27 link_latency:20 bw_multiplier:10
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-32_Memories-4.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-32_Memories-4.txt
new file mode 100644
index 000000000..4fbcfb467
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-32_Memories-4.txt
@@ -0,0 +1,123 @@
+
+processors:16
+procs_per_chip:4
+L2banks:32
+memories:4
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:2 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:3 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0:bank:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0:bank:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1:bank:0 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1:bank:1 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2:bank:0 int_node:6 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2:bank:1 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3:bank:0 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3:bank:1 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:5 link_latency:40 bw_multiplier:10
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:0 int_node:2 link_latency:1 bw_multiplier:16
+int_node:1 int_node:3 link_latency:1 bw_multiplier:16
+int_node:2 int_node:4 link_latency:1 bw_multiplier:16
+int_node:2 int_node:3 link_latency:1 bw_multiplier:16
+int_node:3 int_node:5 link_latency:1 bw_multiplier:16
+int_node:4 int_node:6 link_latency:1 bw_multiplier:16
+int_node:4 int_node:5 link_latency:1 bw_multiplier:16
+int_node:5 int_node:7 link_latency:1 bw_multiplier:16
+int_node:6 int_node:8 link_latency:1 bw_multiplier:16
+int_node:6 int_node:7 link_latency:1 bw_multiplier:16
+int_node:7 int_node:9 link_latency:1 bw_multiplier:16
+int_node:8 int_node:9 link_latency:1 bw_multiplier:16
+
+int_node:5 int_node:15 link_latency:40 bw_multiplier:10
+int_node:5 int_node:25 link_latency:40 bw_multiplier:10
+
+ext_node:L1Cache:4 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:5 int_node:11 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:6 int_node:18 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:7 int_node:19 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:4:bank:0 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:4:bank:1 int_node:11 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:5:bank:0 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:5:bank:1 int_node:13 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:6:bank:0 int_node:16 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:6:bank:1 int_node:17 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:7:bank:0 int_node:18 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:7:bank:1 int_node:19 link_latency:1 bw_multiplier:64
+ext_node:Directory:1 int_node:15 link_latency:40 bw_multiplier:10
+int_node:10 int_node:11 link_latency:1 bw_multiplier:16
+int_node:10 int_node:12 link_latency:1 bw_multiplier:16
+int_node:11 int_node:13 link_latency:1 bw_multiplier:16
+int_node:12 int_node:14 link_latency:1 bw_multiplier:16
+int_node:12 int_node:13 link_latency:1 bw_multiplier:16
+int_node:13 int_node:15 link_latency:1 bw_multiplier:16
+int_node:14 int_node:16 link_latency:1 bw_multiplier:16
+int_node:14 int_node:15 link_latency:1 bw_multiplier:16
+int_node:15 int_node:17 link_latency:1 bw_multiplier:16
+int_node:16 int_node:18 link_latency:1 bw_multiplier:16
+int_node:16 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:19 link_latency:1 bw_multiplier:16
+int_node:18 int_node:19 link_latency:1 bw_multiplier:16
+
+int_node:15 int_node:35 link_latency:40 bw_multiplier:10
+
+ext_node:L1Cache:8 int_node:20 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:9 int_node:21 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:10 int_node:28 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:11 int_node:29 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:8:bank:0 int_node:20 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:8:bank:1 int_node:21 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:9:bank:0 int_node:22 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:9:bank:1 int_node:23 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:10:bank:0 int_node:26 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:10:bank:1 int_node:27 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:11:bank:0 int_node:28 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:11:bank:1 int_node:29 link_latency:1 bw_multiplier:64
+ext_node:Directory:2 int_node:25 link_latency:40 bw_multiplier:10
+int_node:20 int_node:21 link_latency:1 bw_multiplier:16
+int_node:20 int_node:22 link_latency:1 bw_multiplier:16
+int_node:21 int_node:23 link_latency:1 bw_multiplier:16
+int_node:22 int_node:24 link_latency:1 bw_multiplier:16
+int_node:22 int_node:23 link_latency:1 bw_multiplier:16
+int_node:23 int_node:25 link_latency:1 bw_multiplier:16
+int_node:24 int_node:26 link_latency:1 bw_multiplier:16
+int_node:24 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:27 link_latency:1 bw_multiplier:16
+int_node:26 int_node:28 link_latency:1 bw_multiplier:16
+int_node:26 int_node:27 link_latency:1 bw_multiplier:16
+int_node:27 int_node:29 link_latency:1 bw_multiplier:16
+int_node:28 int_node:29 link_latency:1 bw_multiplier:16
+
+int_node:25 int_node:35 link_latency:40 bw_multiplier:10
+
+ext_node:L1Cache:12 int_node:30 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:13 int_node:31 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:14 int_node:38 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:15 int_node:39 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:12:bank:0 int_node:30 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:12:bank:1 int_node:31 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:13:bank:0 int_node:32 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:13:bank:1 int_node:33 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:14:bank:0 int_node:36 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:14:bank:1 int_node:37 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:15:bank:0 int_node:38 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:15:bank:1 int_node:39 link_latency:1 bw_multiplier:64
+ext_node:Directory:3 int_node:35 link_latency:40 bw_multiplier:10
+int_node:30 int_node:31 link_latency:1 bw_multiplier:16
+int_node:30 int_node:32 link_latency:1 bw_multiplier:16
+int_node:31 int_node:33 link_latency:1 bw_multiplier:16
+int_node:32 int_node:34 link_latency:1 bw_multiplier:16
+int_node:32 int_node:33 link_latency:1 bw_multiplier:16
+int_node:33 int_node:35 link_latency:1 bw_multiplier:16
+int_node:34 int_node:36 link_latency:1 bw_multiplier:16
+int_node:34 int_node:35 link_latency:1 bw_multiplier:16
+int_node:35 int_node:37 link_latency:1 bw_multiplier:16
+int_node:36 int_node:38 link_latency:1 bw_multiplier:16
+int_node:36 int_node:37 link_latency:1 bw_multiplier:16
+int_node:37 int_node:39 link_latency:1 bw_multiplier:16
+int_node:38 int_node:39 link_latency:1 bw_multiplier:16
+
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-4_Memories-16.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-4_Memories-16.txt
new file mode 100644
index 000000000..fc1cef27a
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-4_Memories-16.txt
@@ -0,0 +1,78 @@
+
+processors:16
+procs_per_chip:4
+L2banks:4
+memories:16
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:5 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:1 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:2 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:3 int_node:6 link_latency:20 bw_multiplier:10
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:1 int_node:4 link_latency:1 bw_multiplier:16
+int_node:4 int_node:3 link_latency:1 bw_multiplier:16
+int_node:3 int_node:2 link_latency:1 bw_multiplier:16
+int_node:5 int_node:4 link_latency:1 bw_multiplier:16
+int_node:5 int_node:6 link_latency:20 bw_multiplier:10
+
+int_node:5 int_node:12 link_latency:20 bw_multiplier:10
+int_node:5 int_node:19 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:4 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:5 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:6 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:7 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:Directory:4 int_node:13 link_latency:20 bw_multiplier:10
+ext_node:Directory:5 int_node:13 link_latency:20 bw_multiplier:10
+ext_node:Directory:6 int_node:13 link_latency:20 bw_multiplier:10
+ext_node:Directory:7 int_node:13 link_latency:20 bw_multiplier:10
+int_node:7 int_node:8 link_latency:1 bw_multiplier:16
+int_node:8 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:9 link_latency:1 bw_multiplier:16
+int_node:12 int_node:11 link_latency:1 bw_multiplier:16
+int_node:12 int_node:13 link_latency:20 bw_multiplier:10
+
+int_node:12 int_node:26 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:8 int_node:14 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:9 int_node:15 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:10 int_node:16 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:11 int_node:17 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:19 link_latency:1 bw_multiplier:64
+ext_node:Directory:8 int_node:20 link_latency:20 bw_multiplier:10
+ext_node:Directory:9 int_node:20 link_latency:20 bw_multiplier:10
+ext_node:Directory:10 int_node:20 link_latency:20 bw_multiplier:10
+ext_node:Directory:11 int_node:20 link_latency:20 bw_multiplier:10
+int_node:14 int_node:15 link_latency:1 bw_multiplier:16
+int_node:15 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:16 link_latency:1 bw_multiplier:16
+int_node:19 int_node:18 link_latency:1 bw_multiplier:16
+int_node:19 int_node:20 link_latency:20 bw_multiplier:10
+
+int_node:19 int_node:26 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:12 int_node:21 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:13 int_node:22 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:14 int_node:23 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:15 int_node:24 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:26 link_latency:1 bw_multiplier:64
+ext_node:Directory:12 int_node:27 link_latency:20 bw_multiplier:10
+ext_node:Directory:13 int_node:27 link_latency:20 bw_multiplier:10
+ext_node:Directory:14 int_node:27 link_latency:20 bw_multiplier:10
+ext_node:Directory:15 int_node:27 link_latency:20 bw_multiplier:10
+int_node:21 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:24 link_latency:1 bw_multiplier:16
+int_node:24 int_node:23 link_latency:1 bw_multiplier:16
+int_node:26 int_node:25 link_latency:1 bw_multiplier:16
+int_node:26 int_node:27 link_latency:20 bw_multiplier:10
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-4_Memories-4.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-4_Memories-4.txt
new file mode 100644
index 000000000..3b71e62a5
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-16_ProcsPerChip-4_L2Banks-4_Memories-4.txt
@@ -0,0 +1,66 @@
+
+processors:16
+procs_per_chip:4
+L2banks:4
+memories:4
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:5 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:6 link_latency:20 bw_multiplier:10
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:1 int_node:4 link_latency:1 bw_multiplier:16
+int_node:4 int_node:3 link_latency:1 bw_multiplier:16
+int_node:3 int_node:2 link_latency:1 bw_multiplier:16
+int_node:5 int_node:4 link_latency:1 bw_multiplier:16
+int_node:5 int_node:6 link_latency:20 bw_multiplier:10
+
+int_node:5 int_node:12 link_latency:20 bw_multiplier:10
+int_node:5 int_node:19 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:4 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:5 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:6 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:7 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:Directory:1 int_node:13 link_latency:20 bw_multiplier:10
+int_node:7 int_node:8 link_latency:1 bw_multiplier:16
+int_node:8 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:9 link_latency:1 bw_multiplier:16
+int_node:12 int_node:11 link_latency:1 bw_multiplier:16
+int_node:12 int_node:13 link_latency:20 bw_multiplier:10
+
+int_node:12 int_node:26 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:8 int_node:14 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:9 int_node:15 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:10 int_node:16 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:11 int_node:17 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:19 link_latency:1 bw_multiplier:64
+ext_node:Directory:2 int_node:20 link_latency:20 bw_multiplier:10
+int_node:14 int_node:15 link_latency:1 bw_multiplier:16
+int_node:15 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:16 link_latency:1 bw_multiplier:16
+int_node:19 int_node:18 link_latency:1 bw_multiplier:16
+int_node:19 int_node:20 link_latency:20 bw_multiplier:10
+
+int_node:19 int_node:26 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:12 int_node:21 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:13 int_node:22 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:14 int_node:23 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:15 int_node:24 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:26 link_latency:1 bw_multiplier:64
+ext_node:Directory:3 int_node:27 link_latency:20 bw_multiplier:10
+int_node:21 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:24 link_latency:1 bw_multiplier:16
+int_node:24 int_node:23 link_latency:1 bw_multiplier:16
+int_node:26 int_node:25 link_latency:1 bw_multiplier:16
+int_node:26 int_node:27 link_latency:20 bw_multiplier:10
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-1_Memories-1.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-1_Memories-1.txt
new file mode 100644
index 000000000..a4462c3df
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-1_Memories-1.txt
@@ -0,0 +1,10 @@
+
+processors:1
+procs_per_chip:1
+L2banks:1
+memories:1
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:0 link_latency:40 bw_multiplier:10
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-256_Memories-1.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-256_Memories-1.txt
new file mode 100644
index 000000000..b1c9de652
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-256_Memories-1.txt
@@ -0,0 +1,780 @@
+
+processors:1
+procs_per_chip:1
+L2banks:256
+memories:1
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:248 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:248 link_latency:80 bw_multiplier:10
+
+ext_node:L2Cache:0:bank:0 int_node:0 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:1 int_node:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:2 int_node:2 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:3 int_node:3 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:4 int_node:4 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:5 int_node:5 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:6 int_node:6 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:7 int_node:7 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:8 int_node:8 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:9 int_node:9 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:10 int_node:10 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:11 int_node:11 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:12 int_node:12 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:13 int_node:13 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:14 int_node:14 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:15 int_node:15 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:16 int_node:16 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:17 int_node:17 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:18 int_node:18 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:19 int_node:19 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:20 int_node:20 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:21 int_node:21 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:22 int_node:22 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:23 int_node:23 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:24 int_node:24 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:25 int_node:25 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:26 int_node:26 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:27 int_node:27 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:28 int_node:28 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:29 int_node:29 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:30 int_node:30 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:31 int_node:31 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:32 int_node:32 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:33 int_node:33 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:34 int_node:34 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:35 int_node:35 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:36 int_node:36 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:37 int_node:37 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:38 int_node:38 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:39 int_node:39 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:40 int_node:40 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:41 int_node:41 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:42 int_node:42 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:43 int_node:43 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:44 int_node:44 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:45 int_node:45 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:46 int_node:46 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:47 int_node:47 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:48 int_node:48 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:49 int_node:49 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:50 int_node:50 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:51 int_node:51 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:52 int_node:52 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:53 int_node:53 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:54 int_node:54 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:55 int_node:55 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:56 int_node:56 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:57 int_node:57 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:58 int_node:58 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:59 int_node:59 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:60 int_node:60 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:61 int_node:61 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:62 int_node:62 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:63 int_node:63 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:64 int_node:64 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:65 int_node:65 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:66 int_node:66 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:67 int_node:67 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:68 int_node:68 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:69 int_node:69 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:70 int_node:70 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:71 int_node:71 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:72 int_node:72 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:73 int_node:73 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:74 int_node:74 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:75 int_node:75 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:76 int_node:76 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:77 int_node:77 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:78 int_node:78 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:79 int_node:79 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:80 int_node:80 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:81 int_node:81 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:82 int_node:82 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:83 int_node:83 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:84 int_node:84 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:85 int_node:85 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:86 int_node:86 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:87 int_node:87 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:88 int_node:88 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:89 int_node:89 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:90 int_node:90 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:91 int_node:91 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:92 int_node:92 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:93 int_node:93 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:94 int_node:94 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:95 int_node:95 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:96 int_node:96 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:97 int_node:97 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:98 int_node:98 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:99 int_node:99 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:100 int_node:100 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:101 int_node:101 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:102 int_node:102 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:103 int_node:103 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:104 int_node:104 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:105 int_node:105 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:106 int_node:106 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:107 int_node:107 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:108 int_node:108 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:109 int_node:109 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:110 int_node:110 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:111 int_node:111 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:112 int_node:112 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:113 int_node:113 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:114 int_node:114 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:115 int_node:115 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:116 int_node:116 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:117 int_node:117 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:118 int_node:118 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:119 int_node:119 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:120 int_node:120 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:121 int_node:121 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:122 int_node:122 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:123 int_node:123 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:124 int_node:124 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:125 int_node:125 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:126 int_node:126 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:127 int_node:127 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:128 int_node:128 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:129 int_node:129 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:130 int_node:130 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:131 int_node:131 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:132 int_node:132 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:133 int_node:133 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:134 int_node:134 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:135 int_node:135 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:136 int_node:136 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:137 int_node:137 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:138 int_node:138 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:139 int_node:139 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:140 int_node:140 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:141 int_node:141 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:142 int_node:142 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:143 int_node:143 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:144 int_node:144 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:145 int_node:145 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:146 int_node:146 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:147 int_node:147 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:148 int_node:148 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:149 int_node:149 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:150 int_node:150 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:151 int_node:151 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:152 int_node:152 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:153 int_node:153 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:154 int_node:154 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:155 int_node:155 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:156 int_node:156 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:157 int_node:157 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:158 int_node:158 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:159 int_node:159 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:160 int_node:160 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:161 int_node:161 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:162 int_node:162 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:163 int_node:163 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:164 int_node:164 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:165 int_node:165 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:166 int_node:166 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:167 int_node:167 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:168 int_node:168 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:169 int_node:169 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:170 int_node:170 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:171 int_node:171 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:172 int_node:172 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:173 int_node:173 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:174 int_node:174 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:175 int_node:175 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:176 int_node:176 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:177 int_node:177 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:178 int_node:178 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:179 int_node:179 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:180 int_node:180 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:181 int_node:181 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:182 int_node:182 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:183 int_node:183 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:184 int_node:184 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:185 int_node:185 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:186 int_node:186 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:187 int_node:187 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:188 int_node:188 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:189 int_node:189 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:190 int_node:190 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:191 int_node:191 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:192 int_node:192 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:193 int_node:193 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:194 int_node:194 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:195 int_node:195 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:196 int_node:196 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:197 int_node:197 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:198 int_node:198 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:199 int_node:199 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:200 int_node:200 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:201 int_node:201 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:202 int_node:202 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:203 int_node:203 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:204 int_node:204 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:205 int_node:205 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:206 int_node:206 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:207 int_node:207 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:208 int_node:208 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:209 int_node:209 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:210 int_node:210 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:211 int_node:211 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:212 int_node:212 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:213 int_node:213 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:214 int_node:214 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:215 int_node:215 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:216 int_node:216 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:217 int_node:217 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:218 int_node:218 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:219 int_node:219 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:220 int_node:220 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:221 int_node:221 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:222 int_node:222 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:223 int_node:223 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:224 int_node:224 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:225 int_node:225 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:226 int_node:226 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:227 int_node:227 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:228 int_node:228 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:229 int_node:229 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:230 int_node:230 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:231 int_node:231 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:232 int_node:232 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:233 int_node:233 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:234 int_node:234 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:235 int_node:235 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:236 int_node:236 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:237 int_node:237 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:238 int_node:238 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:239 int_node:239 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:240 int_node:240 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:241 int_node:241 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:242 int_node:242 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:243 int_node:243 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:244 int_node:244 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:245 int_node:245 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:246 int_node:246 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:247 int_node:247 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:248 int_node:248 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:249 int_node:249 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:250 int_node:250 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:251 int_node:251 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:252 int_node:252 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:253 int_node:253 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:254 int_node:254 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:255 int_node:255 link_latency:1 bw_multiplier:16
+
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:1 int_node:2 link_latency:1 bw_multiplier:16
+int_node:2 int_node:3 link_latency:1 bw_multiplier:16
+int_node:3 int_node:4 link_latency:1 bw_multiplier:16
+int_node:4 int_node:5 link_latency:1 bw_multiplier:16
+int_node:5 int_node:6 link_latency:1 bw_multiplier:16
+int_node:6 int_node:7 link_latency:1 bw_multiplier:16
+int_node:7 int_node:8 link_latency:1 bw_multiplier:16
+int_node:8 int_node:9 link_latency:1 bw_multiplier:16
+int_node:9 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:12 link_latency:1 bw_multiplier:16
+int_node:12 int_node:13 link_latency:1 bw_multiplier:16
+int_node:13 int_node:14 link_latency:1 bw_multiplier:16
+int_node:14 int_node:15 link_latency:1 bw_multiplier:16
+
+int_node:16 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:19 link_latency:1 bw_multiplier:16
+int_node:19 int_node:20 link_latency:1 bw_multiplier:16
+int_node:20 int_node:21 link_latency:1 bw_multiplier:16
+int_node:21 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:23 link_latency:1 bw_multiplier:16
+int_node:23 int_node:24 link_latency:1 bw_multiplier:16
+int_node:24 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:26 link_latency:1 bw_multiplier:16
+int_node:26 int_node:27 link_latency:1 bw_multiplier:16
+int_node:27 int_node:28 link_latency:1 bw_multiplier:16
+int_node:28 int_node:29 link_latency:1 bw_multiplier:16
+int_node:29 int_node:30 link_latency:1 bw_multiplier:16
+int_node:30 int_node:31 link_latency:1 bw_multiplier:16
+
+int_node:32 int_node:33 link_latency:1 bw_multiplier:16
+int_node:33 int_node:34 link_latency:1 bw_multiplier:16
+int_node:34 int_node:35 link_latency:1 bw_multiplier:16
+int_node:35 int_node:36 link_latency:1 bw_multiplier:16
+int_node:36 int_node:37 link_latency:1 bw_multiplier:16
+int_node:37 int_node:38 link_latency:1 bw_multiplier:16
+int_node:38 int_node:39 link_latency:1 bw_multiplier:16
+int_node:39 int_node:40 link_latency:1 bw_multiplier:16
+int_node:40 int_node:41 link_latency:1 bw_multiplier:16
+int_node:41 int_node:42 link_latency:1 bw_multiplier:16
+int_node:42 int_node:43 link_latency:1 bw_multiplier:16
+int_node:43 int_node:44 link_latency:1 bw_multiplier:16
+int_node:44 int_node:45 link_latency:1 bw_multiplier:16
+int_node:45 int_node:46 link_latency:1 bw_multiplier:16
+int_node:46 int_node:47 link_latency:1 bw_multiplier:16
+
+int_node:48 int_node:49 link_latency:1 bw_multiplier:16
+int_node:49 int_node:50 link_latency:1 bw_multiplier:16
+int_node:50 int_node:51 link_latency:1 bw_multiplier:16
+int_node:51 int_node:52 link_latency:1 bw_multiplier:16
+int_node:52 int_node:53 link_latency:1 bw_multiplier:16
+int_node:53 int_node:54 link_latency:1 bw_multiplier:16
+int_node:54 int_node:55 link_latency:1 bw_multiplier:16
+int_node:55 int_node:56 link_latency:1 bw_multiplier:16
+int_node:56 int_node:57 link_latency:1 bw_multiplier:16
+int_node:57 int_node:58 link_latency:1 bw_multiplier:16
+int_node:58 int_node:59 link_latency:1 bw_multiplier:16
+int_node:59 int_node:60 link_latency:1 bw_multiplier:16
+int_node:60 int_node:61 link_latency:1 bw_multiplier:16
+int_node:61 int_node:62 link_latency:1 bw_multiplier:16
+int_node:62 int_node:63 link_latency:1 bw_multiplier:16
+
+int_node:64 int_node:65 link_latency:1 bw_multiplier:16
+int_node:65 int_node:66 link_latency:1 bw_multiplier:16
+int_node:66 int_node:67 link_latency:1 bw_multiplier:16
+int_node:67 int_node:68 link_latency:1 bw_multiplier:16
+int_node:68 int_node:69 link_latency:1 bw_multiplier:16
+int_node:69 int_node:70 link_latency:1 bw_multiplier:16
+int_node:70 int_node:71 link_latency:1 bw_multiplier:16
+int_node:71 int_node:72 link_latency:1 bw_multiplier:16
+int_node:72 int_node:73 link_latency:1 bw_multiplier:16
+int_node:73 int_node:74 link_latency:1 bw_multiplier:16
+int_node:74 int_node:75 link_latency:1 bw_multiplier:16
+int_node:75 int_node:76 link_latency:1 bw_multiplier:16
+int_node:76 int_node:77 link_latency:1 bw_multiplier:16
+int_node:77 int_node:78 link_latency:1 bw_multiplier:16
+int_node:78 int_node:79 link_latency:1 bw_multiplier:16
+
+int_node:80 int_node:81 link_latency:1 bw_multiplier:16
+int_node:81 int_node:82 link_latency:1 bw_multiplier:16
+int_node:82 int_node:83 link_latency:1 bw_multiplier:16
+int_node:83 int_node:84 link_latency:1 bw_multiplier:16
+int_node:84 int_node:85 link_latency:1 bw_multiplier:16
+int_node:85 int_node:86 link_latency:1 bw_multiplier:16
+int_node:86 int_node:87 link_latency:1 bw_multiplier:16
+int_node:87 int_node:88 link_latency:1 bw_multiplier:16
+int_node:88 int_node:89 link_latency:1 bw_multiplier:16
+int_node:89 int_node:90 link_latency:1 bw_multiplier:16
+int_node:90 int_node:91 link_latency:1 bw_multiplier:16
+int_node:91 int_node:92 link_latency:1 bw_multiplier:16
+int_node:92 int_node:93 link_latency:1 bw_multiplier:16
+int_node:93 int_node:94 link_latency:1 bw_multiplier:16
+int_node:94 int_node:95 link_latency:1 bw_multiplier:16
+
+int_node:96 int_node:97 link_latency:1 bw_multiplier:16
+int_node:97 int_node:98 link_latency:1 bw_multiplier:16
+int_node:98 int_node:99 link_latency:1 bw_multiplier:16
+int_node:99 int_node:100 link_latency:1 bw_multiplier:16
+int_node:100 int_node:101 link_latency:1 bw_multiplier:16
+int_node:101 int_node:102 link_latency:1 bw_multiplier:16
+int_node:102 int_node:103 link_latency:1 bw_multiplier:16
+int_node:103 int_node:104 link_latency:1 bw_multiplier:16
+int_node:104 int_node:105 link_latency:1 bw_multiplier:16
+int_node:105 int_node:106 link_latency:1 bw_multiplier:16
+int_node:106 int_node:107 link_latency:1 bw_multiplier:16
+int_node:107 int_node:108 link_latency:1 bw_multiplier:16
+int_node:108 int_node:109 link_latency:1 bw_multiplier:16
+int_node:109 int_node:110 link_latency:1 bw_multiplier:16
+int_node:110 int_node:111 link_latency:1 bw_multiplier:16
+
+int_node:112 int_node:113 link_latency:1 bw_multiplier:16
+int_node:113 int_node:114 link_latency:1 bw_multiplier:16
+int_node:114 int_node:115 link_latency:1 bw_multiplier:16
+int_node:115 int_node:116 link_latency:1 bw_multiplier:16
+int_node:116 int_node:117 link_latency:1 bw_multiplier:16
+int_node:117 int_node:118 link_latency:1 bw_multiplier:16
+int_node:118 int_node:119 link_latency:1 bw_multiplier:16
+int_node:119 int_node:120 link_latency:1 bw_multiplier:16
+int_node:120 int_node:121 link_latency:1 bw_multiplier:16
+int_node:121 int_node:122 link_latency:1 bw_multiplier:16
+int_node:122 int_node:123 link_latency:1 bw_multiplier:16
+int_node:123 int_node:124 link_latency:1 bw_multiplier:16
+int_node:124 int_node:125 link_latency:1 bw_multiplier:16
+int_node:125 int_node:126 link_latency:1 bw_multiplier:16
+int_node:126 int_node:127 link_latency:1 bw_multiplier:16
+
+int_node:128 int_node:129 link_latency:1 bw_multiplier:16
+int_node:129 int_node:130 link_latency:1 bw_multiplier:16
+int_node:130 int_node:131 link_latency:1 bw_multiplier:16
+int_node:131 int_node:132 link_latency:1 bw_multiplier:16
+int_node:132 int_node:133 link_latency:1 bw_multiplier:16
+int_node:133 int_node:134 link_latency:1 bw_multiplier:16
+int_node:134 int_node:135 link_latency:1 bw_multiplier:16
+int_node:135 int_node:136 link_latency:1 bw_multiplier:16
+int_node:136 int_node:137 link_latency:1 bw_multiplier:16
+int_node:137 int_node:138 link_latency:1 bw_multiplier:16
+int_node:138 int_node:139 link_latency:1 bw_multiplier:16
+int_node:139 int_node:140 link_latency:1 bw_multiplier:16
+int_node:140 int_node:141 link_latency:1 bw_multiplier:16
+int_node:141 int_node:142 link_latency:1 bw_multiplier:16
+int_node:142 int_node:143 link_latency:1 bw_multiplier:16
+
+int_node:144 int_node:145 link_latency:1 bw_multiplier:16
+int_node:145 int_node:146 link_latency:1 bw_multiplier:16
+int_node:146 int_node:147 link_latency:1 bw_multiplier:16
+int_node:147 int_node:148 link_latency:1 bw_multiplier:16
+int_node:148 int_node:149 link_latency:1 bw_multiplier:16
+int_node:149 int_node:150 link_latency:1 bw_multiplier:16
+int_node:150 int_node:151 link_latency:1 bw_multiplier:16
+int_node:151 int_node:152 link_latency:1 bw_multiplier:16
+int_node:152 int_node:153 link_latency:1 bw_multiplier:16
+int_node:153 int_node:154 link_latency:1 bw_multiplier:16
+int_node:154 int_node:155 link_latency:1 bw_multiplier:16
+int_node:155 int_node:156 link_latency:1 bw_multiplier:16
+int_node:156 int_node:157 link_latency:1 bw_multiplier:16
+int_node:157 int_node:158 link_latency:1 bw_multiplier:16
+int_node:158 int_node:159 link_latency:1 bw_multiplier:16
+
+int_node:160 int_node:161 link_latency:1 bw_multiplier:16
+int_node:161 int_node:162 link_latency:1 bw_multiplier:16
+int_node:162 int_node:163 link_latency:1 bw_multiplier:16
+int_node:163 int_node:164 link_latency:1 bw_multiplier:16
+int_node:164 int_node:165 link_latency:1 bw_multiplier:16
+int_node:165 int_node:166 link_latency:1 bw_multiplier:16
+int_node:166 int_node:167 link_latency:1 bw_multiplier:16
+int_node:167 int_node:168 link_latency:1 bw_multiplier:16
+int_node:168 int_node:169 link_latency:1 bw_multiplier:16
+int_node:169 int_node:170 link_latency:1 bw_multiplier:16
+int_node:170 int_node:171 link_latency:1 bw_multiplier:16
+int_node:171 int_node:172 link_latency:1 bw_multiplier:16
+int_node:172 int_node:173 link_latency:1 bw_multiplier:16
+int_node:173 int_node:174 link_latency:1 bw_multiplier:16
+int_node:174 int_node:175 link_latency:1 bw_multiplier:16
+
+int_node:176 int_node:177 link_latency:1 bw_multiplier:16
+int_node:177 int_node:178 link_latency:1 bw_multiplier:16
+int_node:178 int_node:179 link_latency:1 bw_multiplier:16
+int_node:179 int_node:180 link_latency:1 bw_multiplier:16
+int_node:180 int_node:181 link_latency:1 bw_multiplier:16
+int_node:181 int_node:182 link_latency:1 bw_multiplier:16
+int_node:182 int_node:183 link_latency:1 bw_multiplier:16
+int_node:183 int_node:184 link_latency:1 bw_multiplier:16
+int_node:184 int_node:185 link_latency:1 bw_multiplier:16
+int_node:185 int_node:186 link_latency:1 bw_multiplier:16
+int_node:186 int_node:187 link_latency:1 bw_multiplier:16
+int_node:187 int_node:188 link_latency:1 bw_multiplier:16
+int_node:188 int_node:189 link_latency:1 bw_multiplier:16
+int_node:189 int_node:190 link_latency:1 bw_multiplier:16
+int_node:190 int_node:191 link_latency:1 bw_multiplier:16
+
+int_node:192 int_node:193 link_latency:1 bw_multiplier:16
+int_node:193 int_node:194 link_latency:1 bw_multiplier:16
+int_node:194 int_node:195 link_latency:1 bw_multiplier:16
+int_node:195 int_node:196 link_latency:1 bw_multiplier:16
+int_node:196 int_node:197 link_latency:1 bw_multiplier:16
+int_node:197 int_node:198 link_latency:1 bw_multiplier:16
+int_node:198 int_node:199 link_latency:1 bw_multiplier:16
+int_node:199 int_node:200 link_latency:1 bw_multiplier:16
+int_node:200 int_node:201 link_latency:1 bw_multiplier:16
+int_node:201 int_node:202 link_latency:1 bw_multiplier:16
+int_node:202 int_node:203 link_latency:1 bw_multiplier:16
+int_node:203 int_node:204 link_latency:1 bw_multiplier:16
+int_node:204 int_node:205 link_latency:1 bw_multiplier:16
+int_node:205 int_node:206 link_latency:1 bw_multiplier:16
+int_node:206 int_node:207 link_latency:1 bw_multiplier:16
+
+int_node:208 int_node:209 link_latency:1 bw_multiplier:16
+int_node:209 int_node:210 link_latency:1 bw_multiplier:16
+int_node:210 int_node:211 link_latency:1 bw_multiplier:16
+int_node:211 int_node:212 link_latency:1 bw_multiplier:16
+int_node:212 int_node:213 link_latency:1 bw_multiplier:16
+int_node:213 int_node:214 link_latency:1 bw_multiplier:16
+int_node:214 int_node:215 link_latency:1 bw_multiplier:16
+int_node:215 int_node:216 link_latency:1 bw_multiplier:16
+int_node:216 int_node:217 link_latency:1 bw_multiplier:16
+int_node:217 int_node:218 link_latency:1 bw_multiplier:16
+int_node:218 int_node:219 link_latency:1 bw_multiplier:16
+int_node:219 int_node:220 link_latency:1 bw_multiplier:16
+int_node:220 int_node:221 link_latency:1 bw_multiplier:16
+int_node:221 int_node:222 link_latency:1 bw_multiplier:16
+int_node:222 int_node:223 link_latency:1 bw_multiplier:16
+
+int_node:224 int_node:225 link_latency:1 bw_multiplier:16
+int_node:225 int_node:226 link_latency:1 bw_multiplier:16
+int_node:226 int_node:227 link_latency:1 bw_multiplier:16
+int_node:227 int_node:228 link_latency:1 bw_multiplier:16
+int_node:228 int_node:229 link_latency:1 bw_multiplier:16
+int_node:229 int_node:230 link_latency:1 bw_multiplier:16
+int_node:230 int_node:231 link_latency:1 bw_multiplier:16
+int_node:231 int_node:232 link_latency:1 bw_multiplier:16
+int_node:232 int_node:233 link_latency:1 bw_multiplier:16
+int_node:233 int_node:234 link_latency:1 bw_multiplier:16
+int_node:234 int_node:235 link_latency:1 bw_multiplier:16
+int_node:235 int_node:236 link_latency:1 bw_multiplier:16
+int_node:236 int_node:237 link_latency:1 bw_multiplier:16
+int_node:237 int_node:238 link_latency:1 bw_multiplier:16
+int_node:238 int_node:239 link_latency:1 bw_multiplier:16
+
+int_node:240 int_node:241 link_latency:1 bw_multiplier:16
+int_node:241 int_node:242 link_latency:1 bw_multiplier:16
+int_node:242 int_node:243 link_latency:1 bw_multiplier:16
+int_node:243 int_node:244 link_latency:1 bw_multiplier:16
+int_node:244 int_node:245 link_latency:1 bw_multiplier:16
+int_node:245 int_node:246 link_latency:1 bw_multiplier:16
+int_node:246 int_node:247 link_latency:1 bw_multiplier:16
+int_node:247 int_node:248 link_latency:1 bw_multiplier:16
+int_node:248 int_node:249 link_latency:1 bw_multiplier:16
+int_node:249 int_node:250 link_latency:1 bw_multiplier:16
+int_node:250 int_node:251 link_latency:1 bw_multiplier:16
+int_node:251 int_node:252 link_latency:1 bw_multiplier:16
+int_node:252 int_node:253 link_latency:1 bw_multiplier:16
+int_node:253 int_node:254 link_latency:1 bw_multiplier:16
+int_node:254 int_node:255 link_latency:1 bw_multiplier:16
+
+
+int_node:0 int_node:16 link_latency:1 bw_multiplier:16
+int_node:16 int_node:32 link_latency:1 bw_multiplier:16
+int_node:32 int_node:48 link_latency:1 bw_multiplier:16
+int_node:48 int_node:64 link_latency:1 bw_multiplier:16
+int_node:64 int_node:80 link_latency:1 bw_multiplier:16
+int_node:80 int_node:96 link_latency:1 bw_multiplier:16
+int_node:96 int_node:112 link_latency:1 bw_multiplier:16
+int_node:112 int_node:128 link_latency:1 bw_multiplier:16
+int_node:128 int_node:144 link_latency:1 bw_multiplier:16
+int_node:144 int_node:160 link_latency:1 bw_multiplier:16
+int_node:160 int_node:176 link_latency:1 bw_multiplier:16
+int_node:176 int_node:192 link_latency:1 bw_multiplier:16
+int_node:192 int_node:208 link_latency:1 bw_multiplier:16
+int_node:208 int_node:224 link_latency:1 bw_multiplier:16
+int_node:224 int_node:240 link_latency:1 bw_multiplier:16
+
+int_node:1 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:33 link_latency:1 bw_multiplier:16
+int_node:33 int_node:49 link_latency:1 bw_multiplier:16
+int_node:49 int_node:65 link_latency:1 bw_multiplier:16
+int_node:65 int_node:81 link_latency:1 bw_multiplier:16
+int_node:81 int_node:97 link_latency:1 bw_multiplier:16
+int_node:97 int_node:113 link_latency:1 bw_multiplier:16
+int_node:113 int_node:129 link_latency:1 bw_multiplier:16
+int_node:129 int_node:145 link_latency:1 bw_multiplier:16
+int_node:145 int_node:161 link_latency:1 bw_multiplier:16
+int_node:161 int_node:177 link_latency:1 bw_multiplier:16
+int_node:177 int_node:193 link_latency:1 bw_multiplier:16
+int_node:193 int_node:209 link_latency:1 bw_multiplier:16
+int_node:209 int_node:225 link_latency:1 bw_multiplier:16
+int_node:225 int_node:241 link_latency:1 bw_multiplier:16
+
+int_node:2 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:34 link_latency:1 bw_multiplier:16
+int_node:34 int_node:50 link_latency:1 bw_multiplier:16
+int_node:50 int_node:66 link_latency:1 bw_multiplier:16
+int_node:66 int_node:82 link_latency:1 bw_multiplier:16
+int_node:82 int_node:98 link_latency:1 bw_multiplier:16
+int_node:98 int_node:114 link_latency:1 bw_multiplier:16
+int_node:114 int_node:130 link_latency:1 bw_multiplier:16
+int_node:130 int_node:146 link_latency:1 bw_multiplier:16
+int_node:146 int_node:162 link_latency:1 bw_multiplier:16
+int_node:162 int_node:178 link_latency:1 bw_multiplier:16
+int_node:178 int_node:194 link_latency:1 bw_multiplier:16
+int_node:194 int_node:210 link_latency:1 bw_multiplier:16
+int_node:210 int_node:226 link_latency:1 bw_multiplier:16
+int_node:226 int_node:242 link_latency:1 bw_multiplier:16
+
+int_node:3 int_node:19 link_latency:1 bw_multiplier:16
+int_node:19 int_node:35 link_latency:1 bw_multiplier:16
+int_node:35 int_node:51 link_latency:1 bw_multiplier:16
+int_node:51 int_node:67 link_latency:1 bw_multiplier:16
+int_node:67 int_node:83 link_latency:1 bw_multiplier:16
+int_node:83 int_node:99 link_latency:1 bw_multiplier:16
+int_node:99 int_node:115 link_latency:1 bw_multiplier:16
+int_node:115 int_node:131 link_latency:1 bw_multiplier:16
+int_node:131 int_node:147 link_latency:1 bw_multiplier:16
+int_node:147 int_node:163 link_latency:1 bw_multiplier:16
+int_node:163 int_node:179 link_latency:1 bw_multiplier:16
+int_node:179 int_node:195 link_latency:1 bw_multiplier:16
+int_node:195 int_node:211 link_latency:1 bw_multiplier:16
+int_node:211 int_node:227 link_latency:1 bw_multiplier:16
+int_node:227 int_node:243 link_latency:1 bw_multiplier:16
+
+int_node:4 int_node:20 link_latency:1 bw_multiplier:16
+int_node:20 int_node:36 link_latency:1 bw_multiplier:16
+int_node:36 int_node:52 link_latency:1 bw_multiplier:16
+int_node:52 int_node:68 link_latency:1 bw_multiplier:16
+int_node:68 int_node:84 link_latency:1 bw_multiplier:16
+int_node:84 int_node:100 link_latency:1 bw_multiplier:16
+int_node:100 int_node:116 link_latency:1 bw_multiplier:16
+int_node:116 int_node:132 link_latency:1 bw_multiplier:16
+int_node:132 int_node:148 link_latency:1 bw_multiplier:16
+int_node:148 int_node:164 link_latency:1 bw_multiplier:16
+int_node:164 int_node:180 link_latency:1 bw_multiplier:16
+int_node:180 int_node:196 link_latency:1 bw_multiplier:16
+int_node:196 int_node:212 link_latency:1 bw_multiplier:16
+int_node:212 int_node:228 link_latency:1 bw_multiplier:16
+int_node:228 int_node:244 link_latency:1 bw_multiplier:16
+
+int_node:5 int_node:21 link_latency:1 bw_multiplier:16
+int_node:21 int_node:37 link_latency:1 bw_multiplier:16
+int_node:37 int_node:53 link_latency:1 bw_multiplier:16
+int_node:53 int_node:69 link_latency:1 bw_multiplier:16
+int_node:69 int_node:85 link_latency:1 bw_multiplier:16
+int_node:85 int_node:101 link_latency:1 bw_multiplier:16
+int_node:101 int_node:117 link_latency:1 bw_multiplier:16
+int_node:117 int_node:133 link_latency:1 bw_multiplier:16
+int_node:133 int_node:149 link_latency:1 bw_multiplier:16
+int_node:149 int_node:165 link_latency:1 bw_multiplier:16
+int_node:165 int_node:181 link_latency:1 bw_multiplier:16
+int_node:181 int_node:197 link_latency:1 bw_multiplier:16
+int_node:197 int_node:213 link_latency:1 bw_multiplier:16
+int_node:213 int_node:229 link_latency:1 bw_multiplier:16
+int_node:229 int_node:245 link_latency:1 bw_multiplier:16
+
+int_node:6 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:38 link_latency:1 bw_multiplier:16
+int_node:38 int_node:54 link_latency:1 bw_multiplier:16
+int_node:54 int_node:70 link_latency:1 bw_multiplier:16
+int_node:70 int_node:86 link_latency:1 bw_multiplier:16
+int_node:86 int_node:102 link_latency:1 bw_multiplier:16
+int_node:102 int_node:118 link_latency:1 bw_multiplier:16
+int_node:118 int_node:134 link_latency:1 bw_multiplier:16
+int_node:134 int_node:150 link_latency:1 bw_multiplier:16
+int_node:150 int_node:166 link_latency:1 bw_multiplier:16
+int_node:166 int_node:182 link_latency:1 bw_multiplier:16
+int_node:182 int_node:198 link_latency:1 bw_multiplier:16
+int_node:198 int_node:214 link_latency:1 bw_multiplier:16
+int_node:214 int_node:230 link_latency:1 bw_multiplier:16
+int_node:230 int_node:246 link_latency:1 bw_multiplier:16
+
+int_node:7 int_node:23 link_latency:1 bw_multiplier:16
+int_node:23 int_node:39 link_latency:1 bw_multiplier:16
+int_node:39 int_node:55 link_latency:1 bw_multiplier:16
+int_node:55 int_node:71 link_latency:1 bw_multiplier:16
+int_node:71 int_node:87 link_latency:1 bw_multiplier:16
+int_node:87 int_node:103 link_latency:1 bw_multiplier:16
+int_node:103 int_node:119 link_latency:1 bw_multiplier:16
+int_node:119 int_node:135 link_latency:1 bw_multiplier:16
+int_node:135 int_node:151 link_latency:1 bw_multiplier:16
+int_node:151 int_node:167 link_latency:1 bw_multiplier:16
+int_node:167 int_node:183 link_latency:1 bw_multiplier:16
+int_node:183 int_node:199 link_latency:1 bw_multiplier:16
+int_node:199 int_node:215 link_latency:1 bw_multiplier:16
+int_node:215 int_node:231 link_latency:1 bw_multiplier:16
+int_node:231 int_node:247 link_latency:1 bw_multiplier:16
+
+int_node:8 int_node:24 link_latency:1 bw_multiplier:16
+int_node:24 int_node:40 link_latency:1 bw_multiplier:16
+int_node:40 int_node:56 link_latency:1 bw_multiplier:16
+int_node:56 int_node:72 link_latency:1 bw_multiplier:16
+int_node:72 int_node:88 link_latency:1 bw_multiplier:16
+int_node:88 int_node:104 link_latency:1 bw_multiplier:16
+int_node:104 int_node:120 link_latency:1 bw_multiplier:16
+int_node:120 int_node:136 link_latency:1 bw_multiplier:16
+int_node:136 int_node:152 link_latency:1 bw_multiplier:16
+int_node:152 int_node:168 link_latency:1 bw_multiplier:16
+int_node:168 int_node:184 link_latency:1 bw_multiplier:16
+int_node:184 int_node:200 link_latency:1 bw_multiplier:16
+int_node:200 int_node:216 link_latency:1 bw_multiplier:16
+int_node:216 int_node:232 link_latency:1 bw_multiplier:16
+int_node:232 int_node:248 link_latency:1 bw_multiplier:16
+
+int_node:9 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:41 link_latency:1 bw_multiplier:16
+int_node:41 int_node:57 link_latency:1 bw_multiplier:16
+int_node:57 int_node:73 link_latency:1 bw_multiplier:16
+int_node:73 int_node:89 link_latency:1 bw_multiplier:16
+int_node:89 int_node:105 link_latency:1 bw_multiplier:16
+int_node:105 int_node:121 link_latency:1 bw_multiplier:16
+int_node:121 int_node:137 link_latency:1 bw_multiplier:16
+int_node:137 int_node:153 link_latency:1 bw_multiplier:16
+int_node:153 int_node:169 link_latency:1 bw_multiplier:16
+int_node:169 int_node:185 link_latency:1 bw_multiplier:16
+int_node:185 int_node:201 link_latency:1 bw_multiplier:16
+int_node:201 int_node:217 link_latency:1 bw_multiplier:16
+int_node:217 int_node:233 link_latency:1 bw_multiplier:16
+int_node:233 int_node:249 link_latency:1 bw_multiplier:16
+
+int_node:10 int_node:26 link_latency:1 bw_multiplier:16
+int_node:26 int_node:42 link_latency:1 bw_multiplier:16
+int_node:42 int_node:58 link_latency:1 bw_multiplier:16
+int_node:58 int_node:74 link_latency:1 bw_multiplier:16
+int_node:74 int_node:90 link_latency:1 bw_multiplier:16
+int_node:90 int_node:106 link_latency:1 bw_multiplier:16
+int_node:106 int_node:122 link_latency:1 bw_multiplier:16
+int_node:122 int_node:138 link_latency:1 bw_multiplier:16
+int_node:138 int_node:154 link_latency:1 bw_multiplier:16
+int_node:154 int_node:170 link_latency:1 bw_multiplier:16
+int_node:170 int_node:186 link_latency:1 bw_multiplier:16
+int_node:186 int_node:202 link_latency:1 bw_multiplier:16
+int_node:202 int_node:218 link_latency:1 bw_multiplier:16
+int_node:218 int_node:234 link_latency:1 bw_multiplier:16
+int_node:234 int_node:250 link_latency:1 bw_multiplier:16
+
+int_node:11 int_node:27 link_latency:1 bw_multiplier:16
+int_node:27 int_node:43 link_latency:1 bw_multiplier:16
+int_node:43 int_node:59 link_latency:1 bw_multiplier:16
+int_node:59 int_node:75 link_latency:1 bw_multiplier:16
+int_node:75 int_node:91 link_latency:1 bw_multiplier:16
+int_node:91 int_node:107 link_latency:1 bw_multiplier:16
+int_node:107 int_node:123 link_latency:1 bw_multiplier:16
+int_node:123 int_node:139 link_latency:1 bw_multiplier:16
+int_node:139 int_node:155 link_latency:1 bw_multiplier:16
+int_node:155 int_node:171 link_latency:1 bw_multiplier:16
+int_node:171 int_node:187 link_latency:1 bw_multiplier:16
+int_node:187 int_node:203 link_latency:1 bw_multiplier:16
+int_node:203 int_node:219 link_latency:1 bw_multiplier:16
+int_node:219 int_node:235 link_latency:1 bw_multiplier:16
+int_node:235 int_node:251 link_latency:1 bw_multiplier:16
+
+int_node:12 int_node:28 link_latency:1 bw_multiplier:16
+int_node:28 int_node:44 link_latency:1 bw_multiplier:16
+int_node:44 int_node:60 link_latency:1 bw_multiplier:16
+int_node:60 int_node:76 link_latency:1 bw_multiplier:16
+int_node:76 int_node:92 link_latency:1 bw_multiplier:16
+int_node:92 int_node:108 link_latency:1 bw_multiplier:16
+int_node:108 int_node:124 link_latency:1 bw_multiplier:16
+int_node:124 int_node:140 link_latency:1 bw_multiplier:16
+int_node:140 int_node:156 link_latency:1 bw_multiplier:16
+int_node:156 int_node:172 link_latency:1 bw_multiplier:16
+int_node:172 int_node:188 link_latency:1 bw_multiplier:16
+int_node:188 int_node:204 link_latency:1 bw_multiplier:16
+int_node:204 int_node:220 link_latency:1 bw_multiplier:16
+int_node:220 int_node:236 link_latency:1 bw_multiplier:16
+int_node:236 int_node:252 link_latency:1 bw_multiplier:16
+
+int_node:13 int_node:29 link_latency:1 bw_multiplier:16
+int_node:29 int_node:45 link_latency:1 bw_multiplier:16
+int_node:45 int_node:61 link_latency:1 bw_multiplier:16
+int_node:61 int_node:77 link_latency:1 bw_multiplier:16
+int_node:77 int_node:93 link_latency:1 bw_multiplier:16
+int_node:93 int_node:109 link_latency:1 bw_multiplier:16
+int_node:109 int_node:125 link_latency:1 bw_multiplier:16
+int_node:125 int_node:141 link_latency:1 bw_multiplier:16
+int_node:141 int_node:157 link_latency:1 bw_multiplier:16
+int_node:157 int_node:173 link_latency:1 bw_multiplier:16
+int_node:173 int_node:189 link_latency:1 bw_multiplier:16
+int_node:189 int_node:205 link_latency:1 bw_multiplier:16
+int_node:205 int_node:221 link_latency:1 bw_multiplier:16
+int_node:221 int_node:237 link_latency:1 bw_multiplier:16
+int_node:237 int_node:253 link_latency:1 bw_multiplier:16
+
+int_node:14 int_node:30 link_latency:1 bw_multiplier:16
+int_node:30 int_node:46 link_latency:1 bw_multiplier:16
+int_node:46 int_node:62 link_latency:1 bw_multiplier:16
+int_node:62 int_node:78 link_latency:1 bw_multiplier:16
+int_node:78 int_node:94 link_latency:1 bw_multiplier:16
+int_node:94 int_node:110 link_latency:1 bw_multiplier:16
+int_node:110 int_node:126 link_latency:1 bw_multiplier:16
+int_node:126 int_node:142 link_latency:1 bw_multiplier:16
+int_node:142 int_node:158 link_latency:1 bw_multiplier:16
+int_node:158 int_node:174 link_latency:1 bw_multiplier:16
+int_node:174 int_node:190 link_latency:1 bw_multiplier:16
+int_node:190 int_node:206 link_latency:1 bw_multiplier:16
+int_node:206 int_node:222 link_latency:1 bw_multiplier:16
+int_node:222 int_node:238 link_latency:1 bw_multiplier:16
+int_node:238 int_node:254 link_latency:1 bw_multiplier:16
+
+int_node:15 int_node:31 link_latency:1 bw_multiplier:16
+int_node:31 int_node:47 link_latency:1 bw_multiplier:16
+int_node:47 int_node:63 link_latency:1 bw_multiplier:16
+int_node:63 int_node:79 link_latency:1 bw_multiplier:16
+int_node:79 int_node:95 link_latency:1 bw_multiplier:16
+int_node:95 int_node:111 link_latency:1 bw_multiplier:16
+int_node:111 int_node:127 link_latency:1 bw_multiplier:16
+int_node:127 int_node:143 link_latency:1 bw_multiplier:16
+int_node:143 int_node:159 link_latency:1 bw_multiplier:16
+int_node:159 int_node:175 link_latency:1 bw_multiplier:16
+int_node:175 int_node:191 link_latency:1 bw_multiplier:16
+int_node:191 int_node:207 link_latency:1 bw_multiplier:16
+int_node:207 int_node:223 link_latency:1 bw_multiplier:16
+int_node:223 int_node:239 link_latency:1 bw_multiplier:16
+int_node:239 int_node:255 link_latency:1 bw_multiplier:16
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-32_Memories-1.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-32_Memories-1.txt
new file mode 100644
index 000000000..5aba03c94
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-32_Memories-1.txt
@@ -0,0 +1,107 @@
+
+processors:1
+procs_per_chip:1
+L2banks:32
+memories:1
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:28 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:28 link_latency:80 bw_multiplier:10
+
+ext_node:L2Cache:0:bank:0 int_node:0 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:1 int_node:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:2 int_node:2 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:3 int_node:3 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:4 int_node:4 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:5 int_node:5 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:6 int_node:6 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:7 int_node:7 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:8 int_node:8 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:9 int_node:9 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:10 int_node:10 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:11 int_node:11 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:12 int_node:12 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:13 int_node:13 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:14 int_node:14 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:15 int_node:15 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:16 int_node:16 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:17 int_node:17 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:18 int_node:18 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:19 int_node:19 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:20 int_node:20 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:21 int_node:21 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:22 int_node:22 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:23 int_node:23 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:24 int_node:24 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:25 int_node:25 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:26 int_node:26 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:27 int_node:27 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:28 int_node:28 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:29 int_node:29 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:30 int_node:30 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:31 int_node:31 link_latency:1 bw_multiplier:16
+
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:1 int_node:2 link_latency:1 bw_multiplier:16
+int_node:2 int_node:3 link_latency:1 bw_multiplier:16
+int_node:3 int_node:4 link_latency:1 bw_multiplier:16
+int_node:4 int_node:5 link_latency:1 bw_multiplier:16
+int_node:5 int_node:6 link_latency:1 bw_multiplier:16
+int_node:6 int_node:7 link_latency:1 bw_multiplier:16
+
+int_node:8 int_node:9 link_latency:1 bw_multiplier:16
+int_node:9 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:12 link_latency:1 bw_multiplier:16
+int_node:12 int_node:13 link_latency:1 bw_multiplier:16
+int_node:13 int_node:14 link_latency:1 bw_multiplier:16
+int_node:14 int_node:15 link_latency:1 bw_multiplier:16
+
+int_node:16 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:19 link_latency:1 bw_multiplier:16
+int_node:19 int_node:20 link_latency:1 bw_multiplier:16
+int_node:20 int_node:21 link_latency:1 bw_multiplier:16
+int_node:21 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:23 link_latency:1 bw_multiplier:16
+
+int_node:24 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:26 link_latency:1 bw_multiplier:16
+int_node:26 int_node:27 link_latency:1 bw_multiplier:16
+int_node:27 int_node:28 link_latency:1 bw_multiplier:16
+int_node:28 int_node:29 link_latency:1 bw_multiplier:16
+int_node:29 int_node:30 link_latency:1 bw_multiplier:16
+int_node:30 int_node:31 link_latency:1 bw_multiplier:16
+
+int_node:0 int_node:8 link_latency:1 bw_multiplier:16
+int_node:8 int_node:16 link_latency:1 bw_multiplier:16
+int_node:16 int_node:24 link_latency:1 bw_multiplier:16
+
+int_node:1 int_node:9 link_latency:1 bw_multiplier:16
+int_node:9 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:25 link_latency:1 bw_multiplier:16
+
+int_node:2 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:26 link_latency:1 bw_multiplier:16
+
+int_node:3 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:19 link_latency:1 bw_multiplier:16
+int_node:19 int_node:27 link_latency:1 bw_multiplier:16
+
+int_node:4 int_node:12 link_latency:1 bw_multiplier:16
+int_node:12 int_node:20 link_latency:1 bw_multiplier:16
+int_node:20 int_node:28 link_latency:1 bw_multiplier:16
+
+int_node:5 int_node:13 link_latency:1 bw_multiplier:16
+int_node:13 int_node:21 link_latency:1 bw_multiplier:16
+int_node:21 int_node:29 link_latency:1 bw_multiplier:16
+
+int_node:6 int_node:14 link_latency:1 bw_multiplier:16
+int_node:14 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:30 link_latency:1 bw_multiplier:16
+
+int_node:7 int_node:15 link_latency:1 bw_multiplier:16
+int_node:15 int_node:23 link_latency:1 bw_multiplier:16
+int_node:23 int_node:31 link_latency:1 bw_multiplier:16
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-64_Memories-1.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-64_Memories-1.txt
new file mode 100644
index 000000000..faf61c76d
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-1_ProcsPerChip-1_L2Banks-64_Memories-1.txt
@@ -0,0 +1,204 @@
+
+processors:1
+procs_per_chip:1
+L2banks:64
+memories:1
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:60 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:60 link_latency:80 bw_multiplier:10
+
+ext_node:L2Cache:0:bank:0 int_node:0 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:1 int_node:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:2 int_node:2 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:3 int_node:3 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:4 int_node:4 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:5 int_node:5 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:6 int_node:6 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:7 int_node:7 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:8 int_node:8 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:9 int_node:9 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:10 int_node:10 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:11 int_node:11 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:12 int_node:12 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:13 int_node:13 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:14 int_node:14 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:15 int_node:15 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:16 int_node:16 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:17 int_node:17 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:18 int_node:18 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:19 int_node:19 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:20 int_node:20 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:21 int_node:21 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:22 int_node:22 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:23 int_node:23 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:24 int_node:24 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:25 int_node:25 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:26 int_node:26 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:27 int_node:27 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:28 int_node:28 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:29 int_node:29 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:30 int_node:30 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:31 int_node:31 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:32 int_node:32 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:33 int_node:33 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:34 int_node:34 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:35 int_node:35 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:36 int_node:36 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:37 int_node:37 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:38 int_node:38 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:39 int_node:39 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:40 int_node:40 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:41 int_node:41 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:42 int_node:42 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:43 int_node:43 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:44 int_node:44 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:45 int_node:45 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:46 int_node:46 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:47 int_node:47 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:48 int_node:48 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:49 int_node:49 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:50 int_node:50 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:51 int_node:51 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:52 int_node:52 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:53 int_node:53 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:54 int_node:54 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:55 int_node:55 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:56 int_node:56 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:57 int_node:57 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:58 int_node:58 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:59 int_node:59 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:60 int_node:60 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:61 int_node:61 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:62 int_node:62 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:0:bank:63 int_node:63 link_latency:1 bw_multiplier:16
+
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:1 int_node:2 link_latency:1 bw_multiplier:16
+int_node:2 int_node:3 link_latency:1 bw_multiplier:16
+int_node:3 int_node:4 link_latency:1 bw_multiplier:16
+int_node:4 int_node:5 link_latency:1 bw_multiplier:16
+int_node:5 int_node:6 link_latency:1 bw_multiplier:16
+int_node:6 int_node:7 link_latency:1 bw_multiplier:16
+
+int_node:8 int_node:9 link_latency:1 bw_multiplier:16
+int_node:9 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:12 link_latency:1 bw_multiplier:16
+int_node:12 int_node:13 link_latency:1 bw_multiplier:16
+int_node:13 int_node:14 link_latency:1 bw_multiplier:16
+int_node:14 int_node:15 link_latency:1 bw_multiplier:16
+
+int_node:16 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:19 link_latency:1 bw_multiplier:16
+int_node:19 int_node:20 link_latency:1 bw_multiplier:16
+int_node:20 int_node:21 link_latency:1 bw_multiplier:16
+int_node:21 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:23 link_latency:1 bw_multiplier:16
+
+int_node:24 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:26 link_latency:1 bw_multiplier:16
+int_node:26 int_node:27 link_latency:1 bw_multiplier:16
+int_node:27 int_node:28 link_latency:1 bw_multiplier:16
+int_node:28 int_node:29 link_latency:1 bw_multiplier:16
+int_node:29 int_node:30 link_latency:1 bw_multiplier:16
+int_node:30 int_node:31 link_latency:1 bw_multiplier:16
+
+int_node:32 int_node:33 link_latency:1 bw_multiplier:16
+int_node:33 int_node:34 link_latency:1 bw_multiplier:16
+int_node:34 int_node:35 link_latency:1 bw_multiplier:16
+int_node:35 int_node:36 link_latency:1 bw_multiplier:16
+int_node:36 int_node:37 link_latency:1 bw_multiplier:16
+int_node:37 int_node:38 link_latency:1 bw_multiplier:16
+int_node:38 int_node:39 link_latency:1 bw_multiplier:16
+
+int_node:40 int_node:41 link_latency:1 bw_multiplier:16
+int_node:41 int_node:42 link_latency:1 bw_multiplier:16
+int_node:42 int_node:43 link_latency:1 bw_multiplier:16
+int_node:43 int_node:44 link_latency:1 bw_multiplier:16
+int_node:44 int_node:45 link_latency:1 bw_multiplier:16
+int_node:45 int_node:46 link_latency:1 bw_multiplier:16
+int_node:46 int_node:47 link_latency:1 bw_multiplier:16
+
+int_node:48 int_node:49 link_latency:1 bw_multiplier:16
+int_node:49 int_node:50 link_latency:1 bw_multiplier:16
+int_node:50 int_node:51 link_latency:1 bw_multiplier:16
+int_node:51 int_node:52 link_latency:1 bw_multiplier:16
+int_node:52 int_node:53 link_latency:1 bw_multiplier:16
+int_node:53 int_node:54 link_latency:1 bw_multiplier:16
+int_node:54 int_node:55 link_latency:1 bw_multiplier:16
+
+int_node:56 int_node:57 link_latency:1 bw_multiplier:16
+int_node:57 int_node:58 link_latency:1 bw_multiplier:16
+int_node:58 int_node:59 link_latency:1 bw_multiplier:16
+int_node:59 int_node:60 link_latency:1 bw_multiplier:16
+int_node:60 int_node:61 link_latency:1 bw_multiplier:16
+int_node:61 int_node:62 link_latency:1 bw_multiplier:16
+int_node:62 int_node:63 link_latency:1 bw_multiplier:16
+
+
+int_node:0 int_node:8 link_latency:1 bw_multiplier:16
+int_node:8 int_node:16 link_latency:1 bw_multiplier:16
+int_node:16 int_node:24 link_latency:1 bw_multiplier:16
+int_node:24 int_node:32 link_latency:1 bw_multiplier:16
+int_node:32 int_node:40 link_latency:1 bw_multiplier:16
+int_node:40 int_node:48 link_latency:1 bw_multiplier:16
+int_node:48 int_node:56 link_latency:1 bw_multiplier:16
+
+int_node:1 int_node:9 link_latency:1 bw_multiplier:16
+int_node:9 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:25 link_latency:1 bw_multiplier:16
+int_node:25 int_node:33 link_latency:1 bw_multiplier:16
+int_node:33 int_node:41 link_latency:1 bw_multiplier:16
+int_node:41 int_node:49 link_latency:1 bw_multiplier:16
+int_node:49 int_node:57 link_latency:1 bw_multiplier:16
+
+int_node:2 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:26 link_latency:1 bw_multiplier:16
+int_node:26 int_node:34 link_latency:1 bw_multiplier:16
+int_node:34 int_node:42 link_latency:1 bw_multiplier:16
+int_node:42 int_node:50 link_latency:1 bw_multiplier:16
+int_node:50 int_node:58 link_latency:1 bw_multiplier:16
+
+int_node:3 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:19 link_latency:1 bw_multiplier:16
+int_node:19 int_node:27 link_latency:1 bw_multiplier:16
+int_node:27 int_node:35 link_latency:1 bw_multiplier:16
+int_node:35 int_node:43 link_latency:1 bw_multiplier:16
+int_node:43 int_node:51 link_latency:1 bw_multiplier:16
+int_node:51 int_node:59 link_latency:1 bw_multiplier:16
+
+int_node:4 int_node:12 link_latency:1 bw_multiplier:16
+int_node:12 int_node:20 link_latency:1 bw_multiplier:16
+int_node:20 int_node:28 link_latency:1 bw_multiplier:16
+int_node:28 int_node:36 link_latency:1 bw_multiplier:16
+int_node:36 int_node:44 link_latency:1 bw_multiplier:16
+int_node:44 int_node:52 link_latency:1 bw_multiplier:16
+int_node:52 int_node:60 link_latency:1 bw_multiplier:16
+
+int_node:5 int_node:13 link_latency:1 bw_multiplier:16
+int_node:13 int_node:21 link_latency:1 bw_multiplier:16
+int_node:21 int_node:29 link_latency:1 bw_multiplier:16
+int_node:29 int_node:37 link_latency:1 bw_multiplier:16
+int_node:37 int_node:45 link_latency:1 bw_multiplier:16
+int_node:45 int_node:53 link_latency:1 bw_multiplier:16
+int_node:53 int_node:61 link_latency:1 bw_multiplier:16
+
+int_node:6 int_node:14 link_latency:1 bw_multiplier:16
+int_node:14 int_node:22 link_latency:1 bw_multiplier:16
+int_node:22 int_node:30 link_latency:1 bw_multiplier:16
+int_node:30 int_node:38 link_latency:1 bw_multiplier:16
+int_node:38 int_node:46 link_latency:1 bw_multiplier:16
+int_node:46 int_node:54 link_latency:1 bw_multiplier:16
+int_node:54 int_node:62 link_latency:1 bw_multiplier:16
+
+int_node:7 int_node:15 link_latency:1 bw_multiplier:16
+int_node:15 int_node:23 link_latency:1 bw_multiplier:16
+int_node:23 int_node:31 link_latency:1 bw_multiplier:16
+int_node:31 int_node:39 link_latency:1 bw_multiplier:16
+int_node:39 int_node:47 link_latency:1 bw_multiplier:16
+int_node:47 int_node:55 link_latency:1 bw_multiplier:16
+int_node:55 int_node:63 link_latency:1 bw_multiplier:16
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-2_ProcsPerChip-1_L2Banks-2_Memories-2.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-2_ProcsPerChip-1_L2Banks-2_Memories-2.txt
new file mode 100644
index 000000000..5f3825b1d
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-2_ProcsPerChip-1_L2Banks-2_Memories-2.txt
@@ -0,0 +1,15 @@
+
+processors:2
+procs_per_chip:1
+L2banks:2
+memories:2
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:0 link_latency:40 bw_multiplier:10
+int_node:0 int_node:1 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:Directory:1 int_node:1 link_latency:40 bw_multiplier:10
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-2_ProcsPerChip-2_L2Banks-2_Memories-2.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-2_ProcsPerChip-2_L2Banks-2_Memories-2.txt
new file mode 100644
index 000000000..f776eab73
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-2_ProcsPerChip-2_L2Banks-2_Memories-2.txt
@@ -0,0 +1,15 @@
+
+processors:2
+procs_per_chip:2
+L2banks:2
+memories:2
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:2 link_latency:40 bw_multiplier:10
+int_node:0 int_node:2 link_latency:40 bw_multiplier:16
+int_node:1 int_node:2 link_latency:40 bw_multiplier:16
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-32_ProcsPerChip-32_L2Banks-32_Memories-16.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-32_ProcsPerChip-32_L2Banks-32_Memories-16.txt
new file mode 100644
index 000000000..6c29eaa43
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-32_ProcsPerChip-32_L2Banks-32_Memories-16.txt
@@ -0,0 +1,148 @@
+
+processors:32
+procs_per_chip:32
+L2banks:32
+memories:16
+
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:2 int_node:8 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:2 int_node:8 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:3 int_node:9 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:3 int_node:9 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:4 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:4 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:5 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:5 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:6 int_node:10 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:6 int_node:10 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:7 int_node:11 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:7 int_node:11 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:8 int_node:16 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:8 int_node:16 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:9 int_node:17 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:9 int_node:17 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:10 int_node:24 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:10 int_node:24 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:11 int_node:25 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:11 int_node:25 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:12 int_node:18 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:12 int_node:18 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:13 int_node:19 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:13 int_node:19 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:14 int_node:26 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:14 int_node:26 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:15 int_node:27 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:15 int_node:27 link_latency:1 bw_multiplier:72
+
+ext_node:L1Cache:16 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:16 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:17 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:17 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:18 int_node:12 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:18 int_node:12 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:19 int_node:13 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:19 int_node:13 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:20 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:20 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:21 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:21 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:22 int_node:14 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:22 int_node:14 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:23 int_node:15 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:23 int_node:15 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:24 int_node:20 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:24 int_node:20 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:25 int_node:21 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:25 int_node:21 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:26 int_node:28 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:26 int_node:28 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:27 int_node:29 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:27 int_node:29 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:28 int_node:22 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:28 int_node:22 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:29 int_node:23 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:29 int_node:23 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:30 int_node:30 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:30 int_node:30 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:31 int_node:31 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:31 int_node:31 link_latency:1 bw_multiplier:72
+
+
+int_node:0 int_node:1 link_latency:2 bw_multiplier:16
+int_node:1 int_node:2 link_latency:2 bw_multiplier:16
+int_node:2 int_node:3 link_latency:2 bw_multiplier:16
+int_node:8 int_node:9 link_latency:2 bw_multiplier:16
+int_node:9 int_node:10 link_latency:2 bw_multiplier:16
+int_node:10 int_node:11 link_latency:2 bw_multiplier:16
+int_node:16 int_node:17 link_latency:2 bw_multiplier:16
+int_node:17 int_node:18 link_latency:2 bw_multiplier:16
+int_node:18 int_node:19 link_latency:2 bw_multiplier:16
+int_node:24 int_node:25 link_latency:2 bw_multiplier:16
+int_node:25 int_node:26 link_latency:2 bw_multiplier:16
+int_node:26 int_node:27 link_latency:2 bw_multiplier:16
+int_node:0 int_node:8 link_latency:2 bw_multiplier:16
+int_node:8 int_node:16 link_latency:2 bw_multiplier:16
+int_node:16 int_node:24 link_latency:2 bw_multiplier:16
+int_node:1 int_node:9 link_latency:2 bw_multiplier:16
+int_node:9 int_node:17 link_latency:2 bw_multiplier:16
+int_node:17 int_node:25 link_latency:2 bw_multiplier:16
+int_node:2 int_node:10 link_latency:2 bw_multiplier:16
+int_node:10 int_node:18 link_latency:2 bw_multiplier:16
+int_node:18 int_node:26 link_latency:2 bw_multiplier:16
+int_node:3 int_node:11 link_latency:2 bw_multiplier:16
+int_node:11 int_node:19 link_latency:2 bw_multiplier:16
+int_node:19 int_node:27 link_latency:2 bw_multiplier:16
+
+int_node:4 int_node:5 link_latency:2 bw_multiplier:16
+int_node:5 int_node:6 link_latency:2 bw_multiplier:16
+int_node:6 int_node:7 link_latency:2 bw_multiplier:16
+int_node:12 int_node:13 link_latency:2 bw_multiplier:16
+int_node:13 int_node:14 link_latency:2 bw_multiplier:16
+int_node:14 int_node:15 link_latency:2 bw_multiplier:16
+int_node:20 int_node:21 link_latency:2 bw_multiplier:16
+int_node:21 int_node:22 link_latency:2 bw_multiplier:16
+int_node:22 int_node:23 link_latency:2 bw_multiplier:16
+int_node:28 int_node:29 link_latency:2 bw_multiplier:16
+int_node:29 int_node:30 link_latency:2 bw_multiplier:16
+int_node:30 int_node:31 link_latency:2 bw_multiplier:16
+int_node:4 int_node:12 link_latency:2 bw_multiplier:16
+int_node:12 int_node:20 link_latency:2 bw_multiplier:16
+int_node:20 int_node:28 link_latency:2 bw_multiplier:16
+int_node:5 int_node:13 link_latency:2 bw_multiplier:16
+int_node:13 int_node:21 link_latency:2 bw_multiplier:16
+int_node:21 int_node:29 link_latency:2 bw_multiplier:16
+int_node:6 int_node:14 link_latency:2 bw_multiplier:16
+int_node:14 int_node:22 link_latency:2 bw_multiplier:16
+int_node:22 int_node:30 link_latency:2 bw_multiplier:16
+int_node:7 int_node:15 link_latency:2 bw_multiplier:16
+int_node:15 int_node:23 link_latency:2 bw_multiplier:16
+int_node:23 int_node:31 link_latency:2 bw_multiplier:16
+
+int_node:3 int_node:4 link_latency:2 bw_multiplier:16
+int_node:11 int_node:12 link_latency:2 bw_multiplier:16
+int_node:19 int_node:20 link_latency:2 bw_multiplier:16
+int_node:27 int_node:28 link_latency:2 bw_multiplier:16
+
+
+ext_node:Directory:0 int_node:0 link_latency:20 bw_multiplier:80
+ext_node:Directory:4 int_node:1 link_latency:20 bw_multiplier:80
+ext_node:Directory:8 int_node:2 link_latency:20 bw_multiplier:80
+ext_node:Directory:12 int_node:3 link_latency:20 bw_multiplier:80
+ext_node:Directory:1 int_node:4 link_latency:20 bw_multiplier:80
+ext_node:Directory:5 int_node:5 link_latency:20 bw_multiplier:80
+ext_node:Directory:9 int_node:6 link_latency:20 bw_multiplier:80
+ext_node:Directory:13 int_node:7 link_latency:20 bw_multiplier:80
+
+ext_node:Directory:2 int_node:24 link_latency:20 bw_multiplier:80
+ext_node:Directory:6 int_node:25 link_latency:20 bw_multiplier:80
+ext_node:Directory:10 int_node:26 link_latency:20 bw_multiplier:80
+ext_node:Directory:14 int_node:27 link_latency:20 bw_multiplier:80
+ext_node:Directory:3 int_node:28 link_latency:20 bw_multiplier:80
+ext_node:Directory:7 int_node:29 link_latency:20 bw_multiplier:80
+ext_node:Directory:11 int_node:30 link_latency:20 bw_multiplier:80
+ext_node:Directory:15 int_node:31 link_latency:20 bw_multiplier:80
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-32_ProcsPerChip-32_L2Banks-32_Memories-4.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-32_ProcsPerChip-32_L2Banks-32_Memories-4.txt
new file mode 100644
index 000000000..f9eb088b7
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-32_ProcsPerChip-32_L2Banks-32_Memories-4.txt
@@ -0,0 +1,126 @@
+
+processors:32
+procs_per_chip:32
+L2banks:32
+memories:4
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:4 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:5 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:6 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:7 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:8 int_node:8 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:9 int_node:9 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:10 int_node:10 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:11 int_node:11 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:12 int_node:12 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:13 int_node:13 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:14 int_node:14 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:15 int_node:15 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:16 int_node:16 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:17 int_node:17 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:18 int_node:18 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:19 int_node:19 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:20 int_node:20 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:21 int_node:21 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:22 int_node:22 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:23 int_node:23 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:24 int_node:24 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:25 int_node:25 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:26 int_node:26 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:27 int_node:27 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:28 int_node:28 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:29 int_node:29 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:30 int_node:30 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:31 int_node:31 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:2 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:3 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:4 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:5 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:6 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:7 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:8 int_node:8 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:9 int_node:9 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:10 int_node:10 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:11 int_node:11 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:12 int_node:12 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:13 int_node:13 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:14 int_node:14 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:15 int_node:15 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:16 int_node:16 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:17 int_node:17 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:18 int_node:18 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:19 int_node:19 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:20 int_node:20 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:21 int_node:21 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:22 int_node:22 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:23 int_node:23 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:24 int_node:24 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:25 int_node:25 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:26 int_node:26 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:27 int_node:27 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:28 int_node:28 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:29 int_node:29 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:30 int_node:30 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:31 int_node:31 link_latency:1 bw_multiplier:72
+ext_node:Directory:0 int_node:0 link_latency:1 bw_multiplier:80
+ext_node:Directory:1 int_node:7 link_latency:1 bw_multiplier:80
+ext_node:Directory:2 int_node:24 link_latency:1 bw_multiplier:80
+ext_node:Directory:3 int_node:31 link_latency:1 bw_multiplier:80
+int_node:0 int_node:1 link_latency:1 bw_multiplier:72
+int_node:1 int_node:2 link_latency:1 bw_multiplier:72
+int_node:2 int_node:3 link_latency:1 bw_multiplier:72
+int_node:3 int_node:4 link_latency:1 bw_multiplier:72
+int_node:4 int_node:5 link_latency:1 bw_multiplier:72
+int_node:5 int_node:6 link_latency:1 bw_multiplier:72
+int_node:6 int_node:7 link_latency:1 bw_multiplier:72
+int_node:8 int_node:9 link_latency:1 bw_multiplier:72
+int_node:9 int_node:10 link_latency:1 bw_multiplier:72
+int_node:10 int_node:11 link_latency:1 bw_multiplier:72
+int_node:11 int_node:12 link_latency:1 bw_multiplier:72
+int_node:12 int_node:13 link_latency:1 bw_multiplier:72
+int_node:13 int_node:14 link_latency:1 bw_multiplier:72
+int_node:14 int_node:15 link_latency:1 bw_multiplier:72
+int_node:16 int_node:17 link_latency:1 bw_multiplier:72
+int_node:17 int_node:18 link_latency:1 bw_multiplier:72
+int_node:18 int_node:19 link_latency:1 bw_multiplier:72
+int_node:19 int_node:20 link_latency:1 bw_multiplier:72
+int_node:20 int_node:21 link_latency:1 bw_multiplier:72
+int_node:21 int_node:22 link_latency:1 bw_multiplier:72
+int_node:22 int_node:23 link_latency:1 bw_multiplier:72
+int_node:24 int_node:25 link_latency:1 bw_multiplier:72
+int_node:25 int_node:26 link_latency:1 bw_multiplier:72
+int_node:26 int_node:27 link_latency:1 bw_multiplier:72
+int_node:27 int_node:28 link_latency:1 bw_multiplier:72
+int_node:28 int_node:29 link_latency:1 bw_multiplier:72
+int_node:29 int_node:30 link_latency:1 bw_multiplier:72
+int_node:30 int_node:31 link_latency:1 bw_multiplier:72
+int_node:0 int_node:8 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:1 int_node:9 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:2 int_node:10 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:3 int_node:11 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:4 int_node:12 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:5 int_node:13 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:6 int_node:14 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:7 int_node:15 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:8 int_node:16 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:9 int_node:17 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:10 int_node:18 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:11 int_node:19 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:12 int_node:20 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:13 int_node:21 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:14 int_node:22 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:15 int_node:23 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:16 int_node:24 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:17 int_node:25 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:18 int_node:26 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:19 int_node:27 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:20 int_node:28 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:21 int_node:29 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:22 int_node:30 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:23 int_node:31 link_latency:1 bw_multiplier:72 link_weight:2
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-4_ProcsPerChip-1_L2Banks-4_Memories-4.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-4_ProcsPerChip-1_L2Banks-4_Memories-4.txt
new file mode 100644
index 000000000..ffbe7a7ff
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-4_ProcsPerChip-1_L2Banks-4_Memories-4.txt
@@ -0,0 +1,28 @@
+
+processors:4
+procs_per_chip:1
+L2banks:4
+memories:4
+bw_unit:10000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:0 link_latency:40 bw_multiplier:10
+int_node:0 int_node:1 link_latency:40 bw_multiplier:16
+int_node:0 int_node:2 link_latency:40 bw_multiplier:16
+int_node:0 int_node:3 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:Directory:1 int_node:1 link_latency:40 bw_multiplier:10
+int_node:1 int_node:2 link_latency:40 bw_multiplier:16
+int_node:1 int_node:3 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:Directory:2 int_node:2 link_latency:40 bw_multiplier:10
+int_node:2 int_node:3 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:Directory:3 int_node:3 link_latency:40 bw_multiplier:10
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-4_ProcsPerChip-4_L2Banks-4_Memories-4.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-4_ProcsPerChip-4_L2Banks-4_Memories-4.txt
new file mode 100644
index 000000000..2cce39750
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-4_ProcsPerChip-4_L2Banks-4_Memories-4.txt
@@ -0,0 +1,24 @@
+
+processors:4
+procs_per_chip:4
+L2banks:4
+memories:4
+bw_unit:10000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:0 link_latency:1 bw_multiplier:10
+ext_node:Directory:1 int_node:1 link_latency:1 bw_multiplier:10
+ext_node:Directory:2 int_node:2 link_latency:1 bw_multiplier:10
+ext_node:Directory:3 int_node:3 link_latency:1 bw_multiplier:10
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16 link_weight:1
+int_node:0 int_node:2 link_latency:1 bw_multiplier:16 link_weight:2
+int_node:2 int_node:3 link_latency:1 bw_multiplier:16 link_weight:1
+int_node:1 int_node:3 link_latency:1 bw_multiplier:16 link_weight:2
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-7_ProcsPerChip-7_L2Banks-7_Memories-7.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-7_ProcsPerChip-7_L2Banks-7_Memories-7.txt
new file mode 100644
index 000000000..e3d6b0fc3
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-7_ProcsPerChip-7_L2Banks-7_Memories-7.txt
@@ -0,0 +1,139 @@
+
+processors:7
+procs_per_chip:7
+L2banks:7
+memories:7
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:4 int_node:4 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:5 int_node:5 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:6 int_node:6 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:7 int_node:7 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:8 int_node:8 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:9 int_node:9 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:10 int_node:10 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:11 int_node:11 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:12 int_node:12 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:13 int_node:13 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:14 int_node:14 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:15 int_node:15 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:0 int_node:16 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:1 int_node:17 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:2 int_node:18 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:3 int_node:19 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:4 int_node:20 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:5 int_node:21 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:6 int_node:22 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:7 int_node:23 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:8 int_node:24 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:9 int_node:25 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:10 int_node:26 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:11 int_node:27 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:12 int_node:28 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:13 int_node:29 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:14 int_node:30 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:15 int_node:31 link_latency:1 bw_multiplier:72
+ext_node:Directory:0 int_node:32 link_latency:1 bw_multiplier:80
+ext_node:Directory:1 int_node:33 link_latency:1 bw_multiplier:80
+ext_node:Directory:2 int_node:34 link_latency:1 bw_multiplier:80
+ext_node:Directory:3 int_node:35 link_latency:1 bw_multiplier:80
+ext_node:Directory:4 int_node:36 link_latency:1 bw_multiplier:80
+ext_node:Directory:5 int_node:37 link_latency:1 bw_multiplier:80
+ext_node:Directory:6 int_node:38 link_latency:1 bw_multiplier:80
+ext_node:Directory:7 int_node:39 link_latency:1 bw_multiplier:80
+ext_node:Directory:8 int_node:40 link_latency:1 bw_multiplier:80
+ext_node:Directory:9 int_node:41 link_latency:1 bw_multiplier:80
+ext_node:Directory:10 int_node:42 link_latency:1 bw_multiplier:80
+ext_node:Directory:11 int_node:43 link_latency:1 bw_multiplier:80
+ext_node:Directory:12 int_node:44 link_latency:1 bw_multiplier:80
+ext_node:Directory:13 int_node:45 link_latency:1 bw_multiplier:80
+ext_node:Directory:14 int_node:46 link_latency:1 bw_multiplier:80
+ext_node:Directory:15 int_node:47 link_latency:1 bw_multiplier:80
+ext_node:Directory:16 int_node:48 link_latency:1 bw_multiplier:80
+int_node:0 int_node:1 link_latency:1 bw_multiplier:72
+int_node:1 int_node:2 link_latency:1 bw_multiplier:72
+int_node:2 int_node:3 link_latency:1 bw_multiplier:72
+int_node:3 int_node:4 link_latency:1 bw_multiplier:72
+int_node:4 int_node:5 link_latency:1 bw_multiplier:72
+int_node:5 int_node:6 link_latency:1 bw_multiplier:72
+int_node:7 int_node:8 link_latency:1 bw_multiplier:72
+int_node:8 int_node:9 link_latency:1 bw_multiplier:72
+int_node:9 int_node:10 link_latency:1 bw_multiplier:72
+int_node:10 int_node:11 link_latency:1 bw_multiplier:72
+int_node:11 int_node:12 link_latency:1 bw_multiplier:72
+int_node:12 int_node:13 link_latency:1 bw_multiplier:72
+int_node:14 int_node:15 link_latency:1 bw_multiplier:72
+int_node:15 int_node:16 link_latency:1 bw_multiplier:72
+int_node:16 int_node:17 link_latency:1 bw_multiplier:72
+int_node:17 int_node:18 link_latency:1 bw_multiplier:72
+int_node:18 int_node:19 link_latency:1 bw_multiplier:72
+int_node:19 int_node:20 link_latency:1 bw_multiplier:72
+int_node:21 int_node:22 link_latency:1 bw_multiplier:72
+int_node:22 int_node:23 link_latency:1 bw_multiplier:72
+int_node:23 int_node:24 link_latency:1 bw_multiplier:72
+int_node:24 int_node:25 link_latency:1 bw_multiplier:72
+int_node:25 int_node:26 link_latency:1 bw_multiplier:72
+int_node:26 int_node:27 link_latency:1 bw_multiplier:72
+int_node:28 int_node:29 link_latency:1 bw_multiplier:72
+int_node:29 int_node:30 link_latency:1 bw_multiplier:72
+int_node:30 int_node:31 link_latency:1 bw_multiplier:72
+int_node:31 int_node:32 link_latency:1 bw_multiplier:72
+int_node:32 int_node:33 link_latency:1 bw_multiplier:72
+int_node:33 int_node:34 link_latency:1 bw_multiplier:72
+int_node:35 int_node:36 link_latency:1 bw_multiplier:72
+int_node:36 int_node:37 link_latency:1 bw_multiplier:72
+int_node:37 int_node:38 link_latency:1 bw_multiplier:72
+int_node:38 int_node:39 link_latency:1 bw_multiplier:72
+int_node:39 int_node:40 link_latency:1 bw_multiplier:72
+int_node:40 int_node:41 link_latency:1 bw_multiplier:72
+int_node:42 int_node:43 link_latency:1 bw_multiplier:72
+int_node:43 int_node:44 link_latency:1 bw_multiplier:72
+int_node:44 int_node:45 link_latency:1 bw_multiplier:72
+int_node:45 int_node:46 link_latency:1 bw_multiplier:72
+int_node:46 int_node:47 link_latency:1 bw_multiplier:72
+int_node:47 int_node:48 link_latency:1 bw_multiplier:72
+int_node:0 int_node:7 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:1 int_node:8 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:2 int_node:9 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:3 int_node:10 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:4 int_node:11 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:5 int_node:12 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:6 int_node:13 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:7 int_node:14 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:8 int_node:15 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:9 int_node:16 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:10 int_node:17 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:11 int_node:18 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:12 int_node:19 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:13 int_node:20 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:14 int_node:21 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:15 int_node:22 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:16 int_node:23 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:17 int_node:24 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:18 int_node:25 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:19 int_node:26 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:20 int_node:27 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:21 int_node:28 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:22 int_node:29 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:23 int_node:30 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:24 int_node:31 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:25 int_node:32 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:26 int_node:33 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:27 int_node:34 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:28 int_node:35 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:29 int_node:36 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:30 int_node:37 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:31 int_node:38 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:32 int_node:39 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:33 int_node:40 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:34 int_node:41 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:35 int_node:42 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:36 int_node:43 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:37 int_node:44 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:38 int_node:45 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:39 int_node:46 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:40 int_node:47 link_latency:1 bw_multiplier:72 link_weight:2
+int_node:41 int_node:48 link_latency:1 bw_multiplier:72 link_weight:2
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-1_L2Banks-8_Memories-8.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-1_L2Banks-8_Memories-8.txt
new file mode 100644
index 000000000..3603077c0
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-1_L2Banks-8_Memories-8.txt
@@ -0,0 +1,66 @@
+
+processors:8
+procs_per_chip:1
+L2banks:8
+memories:8
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:0 link_latency:40 bw_multiplier:10
+int_node:0 int_node:1 link_latency:40 bw_multiplier:16
+int_node:0 int_node:2 link_latency:40 bw_multiplier:16
+int_node:0 int_node:3 link_latency:40 bw_multiplier:16
+int_node:0 int_node:4 link_latency:40 bw_multiplier:16
+int_node:0 int_node:5 link_latency:40 bw_multiplier:16
+int_node:0 int_node:6 link_latency:40 bw_multiplier:16
+int_node:0 int_node:7 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:Directory:1 int_node:1 link_latency:40 bw_multiplier:10
+int_node:1 int_node:2 link_latency:40 bw_multiplier:16
+int_node:1 int_node:3 link_latency:40 bw_multiplier:16
+int_node:1 int_node:4 link_latency:40 bw_multiplier:16
+int_node:1 int_node:5 link_latency:40 bw_multiplier:16
+int_node:1 int_node:6 link_latency:40 bw_multiplier:16
+int_node:1 int_node:7 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:Directory:2 int_node:2 link_latency:40 bw_multiplier:10
+int_node:2 int_node:3 link_latency:40 bw_multiplier:16
+int_node:2 int_node:4 link_latency:40 bw_multiplier:16
+int_node:2 int_node:5 link_latency:40 bw_multiplier:16
+int_node:2 int_node:6 link_latency:40 bw_multiplier:16
+int_node:2 int_node:7 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:Directory:3 int_node:3 link_latency:40 bw_multiplier:10
+int_node:3 int_node:4 link_latency:40 bw_multiplier:16
+int_node:3 int_node:5 link_latency:40 bw_multiplier:16
+int_node:3 int_node:6 link_latency:40 bw_multiplier:16
+int_node:3 int_node:7 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:4 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:4 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:Directory:4 int_node:4 link_latency:40 bw_multiplier:10
+int_node:4 int_node:5 link_latency:40 bw_multiplier:16
+int_node:4 int_node:6 link_latency:40 bw_multiplier:16
+int_node:4 int_node:7 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:5 int_node:5 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:5 int_node:5 link_latency:1 bw_multiplier:64
+ext_node:Directory:5 int_node:5 link_latency:40 bw_multiplier:10
+int_node:5 int_node:6 link_latency:40 bw_multiplier:16
+int_node:5 int_node:7 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:6 int_node:6 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:6 int_node:6 link_latency:1 bw_multiplier:64
+ext_node:Directory:6 int_node:6 link_latency:40 bw_multiplier:10
+int_node:6 int_node:7 link_latency:40 bw_multiplier:16
+
+ext_node:L1Cache:7 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:7 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:Directory:7 int_node:7 link_latency:40 bw_multiplier:10
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-4_L2Banks-8_Memories-8.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-4_L2Banks-8_Memories-8.txt
new file mode 100644
index 000000000..bdcb02297
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-4_L2Banks-8_Memories-8.txt
@@ -0,0 +1,46 @@
+
+processors:8
+procs_per_chip:4
+L2banks:8
+memories:8
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:1 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:2 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:3 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:1 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:2 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:3 int_node:6 link_latency:20 bw_multiplier:10
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:1 int_node:4 link_latency:1 bw_multiplier:16
+int_node:4 int_node:3 link_latency:1 bw_multiplier:16
+int_node:3 int_node:2 link_latency:1 bw_multiplier:16
+int_node:5 int_node:4 link_latency:1 bw_multiplier:16
+int_node:5 int_node:6 link_latency:20 bw_multiplier:10
+
+int_node:5 int_node:12 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:4 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:5 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:6 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:7 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:4 int_node:7 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:5 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:6 int_node:9 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:7 int_node:10 link_latency:1 bw_multiplier:64
+ext_node:Directory:4 int_node:13 link_latency:20 bw_multiplier:10
+ext_node:Directory:5 int_node:13 link_latency:20 bw_multiplier:10
+ext_node:Directory:6 int_node:13 link_latency:20 bw_multiplier:10
+ext_node:Directory:7 int_node:13 link_latency:20 bw_multiplier:10
+int_node:7 int_node:8 link_latency:1 bw_multiplier:16
+int_node:8 int_node:11 link_latency:1 bw_multiplier:16
+int_node:11 int_node:10 link_latency:1 bw_multiplier:16
+int_node:10 int_node:9 link_latency:1 bw_multiplier:16
+int_node:12 int_node:11 link_latency:1 bw_multiplier:16
+int_node:12 int_node:13 link_latency:20 bw_multiplier:10
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-8_L2Banks-256_Memories-8.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-8_L2Banks-256_Memories-8.txt
new file mode 100644
index 000000000..ecf52ab8f
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-8_L2Banks-256_Memories-8.txt
@@ -0,0 +1,412 @@
+
+processors:8
+procs_per_chip:8
+L2banks:256
+memories:8
+
+ext_node:L1Cache:0 int_node:1 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:1 int_node:5 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:2 int_node:15 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:3 int_node:47 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:4 int_node:62 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:5 int_node:58 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:6 int_node:48 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:7 int_node:16 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:0 int_node:0 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:1 int_node:0 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:2 int_node:1 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:3 int_node:1 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:4 int_node:2 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:5 int_node:2 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:128 int_node:3 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:144 int_node:3 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:16 int_node:4 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:17 int_node:4 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:18 int_node:5 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:19 int_node:5 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:20 int_node:6 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:21 int_node:6 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:32 int_node:7 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:33 int_node:7 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:6 int_node:0 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:7 int_node:0 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:8 int_node:1 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:9 int_node:1 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:10 int_node:2 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:11 int_node:2 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:129 int_node:3 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:145 int_node:3 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:22 int_node:4 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:23 int_node:4 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:24 int_node:5 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:25 int_node:5 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:26 int_node:6 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:27 int_node:6 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:34 int_node:7 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:35 int_node:7 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:112 int_node:8 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:113 int_node:8 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:12 int_node:9 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:13 int_node:9 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:130 int_node:10 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:131 int_node:10 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:132 int_node:11 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:146 int_node:11 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:147 int_node:12 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:148 int_node:12 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:28 int_node:13 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:29 int_node:13 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:36 int_node:14 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:37 int_node:14 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:38 int_node:15 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:39 int_node:15 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:114 int_node:8 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:115 int_node:8 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:14 int_node:9 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:15 int_node:9 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:133 int_node:10 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:134 int_node:10 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:135 int_node:11 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:149 int_node:11 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:150 int_node:12 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:151 int_node:12 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:30 int_node:13 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:31 int_node:13 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:40 int_node:14 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:41 int_node:14 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:42 int_node:15 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:43 int_node:15 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:116 int_node:16 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:117 int_node:16 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:118 int_node:17 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:119 int_node:17 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:136 int_node:18 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:137 int_node:18 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:138 int_node:19 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:152 int_node:19 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:153 int_node:20 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:154 int_node:20 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:160 int_node:21 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:161 int_node:21 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:162 int_node:22 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:163 int_node:22 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:44 int_node:23 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:45 int_node:23 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:120 int_node:16 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:121 int_node:16 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:122 int_node:17 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:123 int_node:17 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:139 int_node:18 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:140 int_node:18 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:141 int_node:19 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:155 int_node:19 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:156 int_node:20 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:157 int_node:20 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:164 int_node:21 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:165 int_node:21 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:166 int_node:22 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:167 int_node:22 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:46 int_node:23 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:47 int_node:23 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:124 int_node:24 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:125 int_node:24 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:240 int_node:25 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:241 int_node:25 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:242 int_node:26 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:243 int_node:26 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:142 int_node:27 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:158 int_node:27 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:168 int_node:28 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:169 int_node:28 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:170 int_node:29 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:171 int_node:29 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:172 int_node:30 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:173 int_node:30 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:174 int_node:31 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:175 int_node:31 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:126 int_node:24 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:127 int_node:24 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:244 int_node:25 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:245 int_node:25 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:246 int_node:26 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:247 int_node:26 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:143 int_node:27 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:159 int_node:27 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:176 int_node:28 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:177 int_node:28 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:178 int_node:29 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:179 int_node:29 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:180 int_node:30 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:181 int_node:30 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:182 int_node:31 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:183 int_node:31 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:248 int_node:32 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:249 int_node:32 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:250 int_node:33 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:251 int_node:33 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:252 int_node:34 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:253 int_node:34 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:254 int_node:35 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:255 int_node:35 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:208 int_node:36 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:192 int_node:36 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:184 int_node:37 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:185 int_node:37 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:186 int_node:38 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:187 int_node:38 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:48 int_node:39 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:49 int_node:39 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:224 int_node:32 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:225 int_node:32 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:226 int_node:33 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:227 int_node:33 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:228 int_node:34 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:229 int_node:34 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:230 int_node:35 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:231 int_node:35 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:209 int_node:36 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:193 int_node:36 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:188 int_node:37 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:189 int_node:37 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:190 int_node:38 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:191 int_node:38 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:50 int_node:39 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:51 int_node:39 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:96 int_node:40 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:97 int_node:40 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:232 int_node:41 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:233 int_node:41 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:234 int_node:42 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:235 int_node:42 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:210 int_node:43 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:211 int_node:43 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:212 int_node:44 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:194 int_node:44 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:195 int_node:45 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:196 int_node:45 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:52 int_node:46 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:53 int_node:46 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:54 int_node:47 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:55 int_node:47 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:98 int_node:40 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:99 int_node:40 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:236 int_node:41 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:237 int_node:41 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:238 int_node:42 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:239 int_node:42 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:213 int_node:43 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:214 int_node:43 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:215 int_node:44 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:197 int_node:44 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:198 int_node:45 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:199 int_node:45 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:56 int_node:46 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:57 int_node:46 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:58 int_node:47 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:59 int_node:47 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:100 int_node:48 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:101 int_node:48 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:102 int_node:49 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:103 int_node:49 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:80 int_node:50 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:81 int_node:50 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:216 int_node:51 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:217 int_node:51 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:218 int_node:52 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:200 int_node:52 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:201 int_node:53 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:202 int_node:53 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:64 int_node:54 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:65 int_node:54 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:60 int_node:55 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:61 int_node:55 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:104 int_node:48 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:105 int_node:48 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:106 int_node:49 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:107 int_node:49 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:82 int_node:50 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:83 int_node:50 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:219 int_node:51 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:220 int_node:51 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:221 int_node:52 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:203 int_node:52 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:204 int_node:53 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:205 int_node:53 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:66 int_node:54 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:67 int_node:54 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:62 int_node:55 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:63 int_node:55 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:108 int_node:56 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:109 int_node:56 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:84 int_node:57 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:85 int_node:57 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:86 int_node:58 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:87 int_node:58 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:88 int_node:59 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:89 int_node:59 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:222 int_node:60 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:206 int_node:60 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:68 int_node:61 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:69 int_node:61 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:70 int_node:62 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:71 int_node:62 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:72 int_node:63 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:73 int_node:63 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:110 int_node:56 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:111 int_node:56 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:90 int_node:57 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:91 int_node:57 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:92 int_node:58 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:93 int_node:58 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:94 int_node:59 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:95 int_node:59 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:223 int_node:60 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:207 int_node:60 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:74 int_node:61 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:75 int_node:61 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:76 int_node:62 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:77 int_node:62 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:78 int_node:63 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:79 int_node:63 link_weight:1 link_latency:1 bw_multiplier:16
+int_node:65 int_node:64 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:0 int_node:64 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:1 int_node:64 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:2 int_node:64 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:3 int_node:64 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:4 int_node:64 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:5 int_node:64 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:6 int_node:64 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:7 int_node:64 link_weight:1000 link_latency:20 bw_multiplier:32
+int_node:27 int_node:65 link_weight:1000 link_latency:1 bw_multiplier:64
+int_node:28 int_node:65 link_weight:1000 link_latency:1 bw_multiplier:64
+int_node:35 int_node:65 link_weight:1000 link_latency:1 bw_multiplier:64
+int_node:36 int_node:65 link_weight:1000 link_latency:1 bw_multiplier:64
+
+int_node:0 int_node:1 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:1 int_node:2 link_weight:2 link_latency:1 bw_multiplier:32
+int_node:2 int_node:3 link_weight:3 link_latency:1 bw_multiplier:32
+int_node:3 int_node:4 link_weight:4 link_latency:1 bw_multiplier:32
+int_node:4 int_node:5 link_weight:5 link_latency:1 bw_multiplier:32
+int_node:5 int_node:6 link_weight:6 link_latency:1 bw_multiplier:32
+int_node:6 int_node:7 link_weight:7 link_latency:1 bw_multiplier:32
+
+int_node:8 int_node:9 link_weight:8 link_latency:1 bw_multiplier:32
+int_node:9 int_node:10 link_weight:9 link_latency:1 bw_multiplier:32
+int_node:10 int_node:11 link_weight:10 link_latency:1 bw_multiplier:32
+int_node:11 int_node:12 link_weight:11 link_latency:1 bw_multiplier:32
+int_node:12 int_node:13 link_weight:12 link_latency:1 bw_multiplier:32
+int_node:13 int_node:14 link_weight:13 link_latency:1 bw_multiplier:32
+int_node:14 int_node:15 link_weight:14 link_latency:1 bw_multiplier:32
+
+int_node:16 int_node:17 link_weight:15 link_latency:1 bw_multiplier:32
+int_node:17 int_node:18 link_weight:16 link_latency:1 bw_multiplier:32
+int_node:18 int_node:19 link_weight:17 link_latency:1 bw_multiplier:32
+int_node:19 int_node:20 link_weight:18 link_latency:1 bw_multiplier:32
+int_node:20 int_node:21 link_weight:19 link_latency:1 bw_multiplier:32
+int_node:21 int_node:22 link_weight:20 link_latency:1 bw_multiplier:32
+int_node:22 int_node:23 link_weight:21 link_latency:1 bw_multiplier:32
+
+int_node:24 int_node:25 link_weight:22 link_latency:1 bw_multiplier:32
+int_node:25 int_node:26 link_weight:23 link_latency:1 bw_multiplier:32
+int_node:26 int_node:27 link_weight:24 link_latency:1 bw_multiplier:32
+int_node:27 int_node:28 link_weight:25 link_latency:1 bw_multiplier:32
+int_node:28 int_node:29 link_weight:26 link_latency:1 bw_multiplier:32
+int_node:29 int_node:30 link_weight:27 link_latency:1 bw_multiplier:32
+int_node:30 int_node:31 link_weight:28 link_latency:1 bw_multiplier:32
+
+int_node:32 int_node:33 link_weight:29 link_latency:1 bw_multiplier:32
+int_node:33 int_node:34 link_weight:30 link_latency:1 bw_multiplier:32
+int_node:34 int_node:35 link_weight:31 link_latency:1 bw_multiplier:32
+int_node:35 int_node:36 link_weight:32 link_latency:1 bw_multiplier:32
+int_node:36 int_node:37 link_weight:33 link_latency:1 bw_multiplier:32
+int_node:37 int_node:38 link_weight:34 link_latency:1 bw_multiplier:32
+int_node:38 int_node:39 link_weight:35 link_latency:1 bw_multiplier:32
+
+int_node:40 int_node:41 link_weight:36 link_latency:1 bw_multiplier:32
+int_node:41 int_node:42 link_weight:37 link_latency:1 bw_multiplier:32
+int_node:42 int_node:43 link_weight:38 link_latency:1 bw_multiplier:32
+int_node:43 int_node:44 link_weight:39 link_latency:1 bw_multiplier:32
+int_node:44 int_node:45 link_weight:40 link_latency:1 bw_multiplier:32
+int_node:45 int_node:46 link_weight:41 link_latency:1 bw_multiplier:32
+int_node:46 int_node:47 link_weight:42 link_latency:1 bw_multiplier:32
+
+int_node:48 int_node:49 link_weight:43 link_latency:1 bw_multiplier:32
+int_node:49 int_node:50 link_weight:44 link_latency:1 bw_multiplier:32
+int_node:50 int_node:51 link_weight:45 link_latency:1 bw_multiplier:32
+int_node:51 int_node:52 link_weight:46 link_latency:1 bw_multiplier:32
+int_node:52 int_node:53 link_weight:47 link_latency:1 bw_multiplier:32
+int_node:53 int_node:54 link_weight:48 link_latency:1 bw_multiplier:32
+int_node:54 int_node:55 link_weight:49 link_latency:1 bw_multiplier:32
+
+int_node:56 int_node:57 link_weight:50 link_latency:1 bw_multiplier:32
+int_node:57 int_node:58 link_weight:51 link_latency:1 bw_multiplier:32
+int_node:58 int_node:59 link_weight:52 link_latency:1 bw_multiplier:32
+int_node:59 int_node:60 link_weight:53 link_latency:1 bw_multiplier:32
+int_node:60 int_node:61 link_weight:54 link_latency:1 bw_multiplier:32
+int_node:61 int_node:62 link_weight:55 link_latency:1 bw_multiplier:32
+int_node:62 int_node:63 link_weight:56 link_latency:1 bw_multiplier:32
+
+
+int_node:0 int_node:8 link_weight:57 link_latency:1 bw_multiplier:32
+int_node:1 int_node:9 link_weight:58 link_latency:1 bw_multiplier:32
+int_node:2 int_node:10 link_weight:59 link_latency:1 bw_multiplier:32
+int_node:3 int_node:11 link_weight:60 link_latency:1 bw_multiplier:32
+int_node:4 int_node:12 link_weight:61 link_latency:1 bw_multiplier:32
+int_node:5 int_node:13 link_weight:62 link_latency:1 bw_multiplier:32
+int_node:6 int_node:14 link_weight:63 link_latency:1 bw_multiplier:32
+int_node:7 int_node:15 link_weight:64 link_latency:1 bw_multiplier:32
+
+int_node:8 int_node:16 link_weight:65 link_latency:1 bw_multiplier:32
+int_node:9 int_node:17 link_weight:66 link_latency:1 bw_multiplier:32
+int_node:10 int_node:18 link_weight:67 link_latency:1 bw_multiplier:32
+int_node:11 int_node:19 link_weight:68 link_latency:1 bw_multiplier:32
+int_node:12 int_node:20 link_weight:69 link_latency:1 bw_multiplier:32
+int_node:13 int_node:21 link_weight:70 link_latency:1 bw_multiplier:32
+int_node:14 int_node:22 link_weight:71 link_latency:1 bw_multiplier:32
+int_node:15 int_node:23 link_weight:72 link_latency:1 bw_multiplier:32
+
+int_node:16 int_node:24 link_weight:73 link_latency:1 bw_multiplier:32
+int_node:17 int_node:25 link_weight:74 link_latency:1 bw_multiplier:32
+int_node:18 int_node:26 link_weight:75 link_latency:1 bw_multiplier:32
+int_node:19 int_node:27 link_weight:76 link_latency:1 bw_multiplier:32
+int_node:20 int_node:28 link_weight:77 link_latency:1 bw_multiplier:32
+int_node:21 int_node:29 link_weight:78 link_latency:1 bw_multiplier:32
+int_node:22 int_node:30 link_weight:79 link_latency:1 bw_multiplier:32
+int_node:23 int_node:31 link_weight:80 link_latency:1 bw_multiplier:32
+
+int_node:24 int_node:32 link_weight:81 link_latency:1 bw_multiplier:32
+int_node:25 int_node:33 link_weight:82 link_latency:1 bw_multiplier:32
+int_node:26 int_node:34 link_weight:83 link_latency:1 bw_multiplier:32
+int_node:27 int_node:35 link_weight:84 link_latency:1 bw_multiplier:32
+int_node:28 int_node:36 link_weight:85 link_latency:1 bw_multiplier:32
+int_node:29 int_node:37 link_weight:86 link_latency:1 bw_multiplier:32
+int_node:30 int_node:38 link_weight:87 link_latency:1 bw_multiplier:32
+int_node:31 int_node:39 link_weight:88 link_latency:1 bw_multiplier:32
+
+int_node:32 int_node:40 link_weight:89 link_latency:1 bw_multiplier:32
+int_node:33 int_node:41 link_weight:90 link_latency:1 bw_multiplier:32
+int_node:34 int_node:42 link_weight:91 link_latency:1 bw_multiplier:32
+int_node:35 int_node:43 link_weight:92 link_latency:1 bw_multiplier:32
+int_node:36 int_node:44 link_weight:93 link_latency:1 bw_multiplier:32
+int_node:37 int_node:45 link_weight:94 link_latency:1 bw_multiplier:32
+int_node:38 int_node:46 link_weight:95 link_latency:1 bw_multiplier:32
+int_node:39 int_node:47 link_weight:96 link_latency:1 bw_multiplier:32
+
+int_node:40 int_node:48 link_weight:97 link_latency:1 bw_multiplier:32
+int_node:41 int_node:49 link_weight:98 link_latency:1 bw_multiplier:32
+int_node:42 int_node:50 link_weight:99 link_latency:1 bw_multiplier:32
+int_node:43 int_node:51 link_weight:100 link_latency:1 bw_multiplier:32
+int_node:44 int_node:52 link_weight:101 link_latency:1 bw_multiplier:32
+int_node:45 int_node:53 link_weight:102 link_latency:1 bw_multiplier:32
+int_node:46 int_node:54 link_weight:103 link_latency:1 bw_multiplier:32
+int_node:47 int_node:55 link_weight:104 link_latency:1 bw_multiplier:32
+
+int_node:48 int_node:56 link_weight:105 link_latency:1 bw_multiplier:32
+int_node:49 int_node:57 link_weight:106 link_latency:1 bw_multiplier:32
+int_node:50 int_node:58 link_weight:107 link_latency:1 bw_multiplier:32
+int_node:51 int_node:59 link_weight:108 link_latency:1 bw_multiplier:32
+int_node:52 int_node:60 link_weight:109 link_latency:1 bw_multiplier:32
+int_node:53 int_node:61 link_weight:110 link_latency:1 bw_multiplier:32
+int_node:54 int_node:62 link_weight:111 link_latency:1 bw_multiplier:32
+int_node:55 int_node:63 link_weight:112 link_latency:1 bw_multiplier:32
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-8_L2Banks-8_Memories-8.txt b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-8_L2Banks-8_Memories-8.txt
new file mode 100644
index 000000000..acfc124a4
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NUCA_Procs-8_ProcsPerChip-8_L2Banks-8_Memories-8.txt
@@ -0,0 +1,44 @@
+
+processors:8
+procs_per_chip:8
+L2banks:8
+memories:8
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:1 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:2 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:3 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:4 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:5 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:6 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:7 int_node:3 link_latency:1 bw_multiplier:72
+
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:1 int_node:0 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:2 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:3 int_node:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:4 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:5 int_node:2 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:6 int_node:3 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:7 int_node:3 link_latency:1 bw_multiplier:72
+
+int_node:0 int_node:4 link_latency:1 bw_multiplier:72
+int_node:1 int_node:4 link_latency:1 bw_multiplier:72
+int_node:2 int_node:5 link_latency:1 bw_multiplier:72
+int_node:3 int_node:5 link_latency:1 bw_multiplier:72
+
+int_node:4 int_node:5 link_latency:1 bw_multiplier:72
+int_node:4 int_node:6 link_latency:1 bw_multiplier:72
+int_node:5 int_node:6 link_latency:1 bw_multiplier:72
+
+int_node:6 int_node:7 link_latency:20 bw_multiplier:10
+
+ext_node:Directory:0 int_node:7 link_latency:20 bw_multiplier:80
+ext_node:Directory:1 int_node:7 link_latency:20 bw_multiplier:80
+ext_node:Directory:2 int_node:7 link_latency:20 bw_multiplier:80
+ext_node:Directory:3 int_node:7 link_latency:20 bw_multiplier:80
+ext_node:Directory:4 int_node:7 link_latency:20 bw_multiplier:80
+ext_node:Directory:5 int_node:7 link_latency:20 bw_multiplier:80
+ext_node:Directory:6 int_node:7 link_latency:20 bw_multiplier:80
+ext_node:Directory:7 int_node:7 link_latency:20 bw_multiplier:80
+
diff --git a/src/mem/ruby/network/simple/Network_Files/NetworkFileMaker.py b/src/mem/ruby/network/simple/Network_Files/NetworkFileMaker.py
new file mode 100644
index 000000000..7d07588a1
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/NetworkFileMaker.py
@@ -0,0 +1,44 @@
+#!/s/std/bin/python
+import sys, os, string, re, math  # generates a rows x cols mesh topology file for Ruby's simple network
+
+rows = 0
+cols =0
+
+if len(sys.argv) == 3:
+ rows = int(sys.argv[1])
+ cols = int(sys.argv[2])
+else:
+ sys.stderr.write("usage : NetworkFileMaker.py <rows> <cols> \n\n")  # NOTE(review): no sys.exit() here -- on bad usage the script falls through with rows=cols=0 and silently emits an empty topology
+
+banks = rows*cols
+bank = 0
+while bank < banks:
+ sys.stdout.write("ext_node:L2Cache:0:bank:%d int_node:%d link_latency:1 bw_multiplier:16\n" % (bank, bank))  # attach one L2 bank endpoint to each switch, bank k -> int_node k
+ bank += 1
+
+sys.stdout.write("\n")
+
+col = 0
+while col < cols:
+ row = 1
+ bank = col*rows  # column-major numbering: first node id of this column
+ while row < rows:
+ sys.stdout.write("int_node:%d int_node:%d link_latency:1 bw_multiplier:16\n" % (bank, bank+1))  # vertical links within a column (consecutive ids)
+ bank += 1
+ row += 1
+ sys.stdout.write("\n")
+ col += 1
+
+sys.stdout.write("\n")
+
+row = 0
+while row < rows:
+ col = 1
+ bank = row  # first node id of this row (column-major, so same-row ids differ by `rows`)
+ while col < cols:
+ sys.stdout.write("int_node:%d int_node:%d link_latency:1 bw_multiplier:16\n" % (bank, rows+bank))  # horizontal links between adjacent columns
+ bank += rows
+ col += 1
+ sys.stdout.write("\n")
+ row += 1
+
diff --git a/src/mem/ruby/network/simple/Network_Files/TLC_Procs-8_ProcsPerChip-8_L2Banks-256_Memories-8.txt b/src/mem/ruby/network/simple/Network_Files/TLC_Procs-8_ProcsPerChip-8_L2Banks-256_Memories-8.txt
new file mode 100644
index 000000000..d43386237
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/TLC_Procs-8_ProcsPerChip-8_L2Banks-256_Memories-8.txt
@@ -0,0 +1,367 @@
+
+processors:8
+procs_per_chip:8
+L2banks:256
+memories:8
+
+ext_node:L1Cache:0 int_node:0 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:1 int_node:2 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:2 int_node:2 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:3 int_node:4 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:4 int_node:0 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:5 int_node:2 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:6 int_node:2 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L1Cache:7 int_node:4 link_weight:1 link_latency:1 bw_multiplier:72
+ext_node:L2Cache:0 int_node:22 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:1 int_node:22 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:2 int_node:26 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:3 int_node:26 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:4 int_node:30 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:5 int_node:30 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:6 int_node:34 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:7 int_node:34 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:8 int_node:38 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:9 int_node:38 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:10 int_node:42 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:11 int_node:42 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:12 int_node:46 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:13 int_node:46 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:14 int_node:50 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:15 int_node:50 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:16 int_node:54 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:17 int_node:54 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:18 int_node:58 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:19 int_node:58 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:20 int_node:62 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:21 int_node:62 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:22 int_node:66 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:23 int_node:66 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:24 int_node:70 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:25 int_node:70 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:26 int_node:74 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:27 int_node:74 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:28 int_node:78 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:29 int_node:78 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:30 int_node:82 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:31 int_node:82 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:32 int_node:23 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:33 int_node:23 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:34 int_node:27 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:35 int_node:27 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:36 int_node:31 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:37 int_node:31 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:38 int_node:35 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:39 int_node:35 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:40 int_node:39 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:41 int_node:39 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:42 int_node:43 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:43 int_node:43 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:44 int_node:47 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:45 int_node:47 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:46 int_node:51 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:47 int_node:51 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:48 int_node:55 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:49 int_node:55 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:50 int_node:59 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:51 int_node:59 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:52 int_node:63 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:53 int_node:63 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:54 int_node:67 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:55 int_node:67 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:56 int_node:71 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:57 int_node:71 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:58 int_node:75 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:59 int_node:75 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:60 int_node:79 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:61 int_node:79 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:62 int_node:83 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:63 int_node:83 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:64 int_node:24 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:65 int_node:24 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:66 int_node:28 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:67 int_node:28 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:68 int_node:32 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:69 int_node:32 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:70 int_node:36 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:71 int_node:36 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:72 int_node:40 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:73 int_node:40 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:74 int_node:44 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:75 int_node:44 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:76 int_node:48 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:77 int_node:48 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:78 int_node:52 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:79 int_node:52 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:80 int_node:56 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:81 int_node:56 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:82 int_node:60 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:83 int_node:60 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:84 int_node:64 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:85 int_node:64 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:86 int_node:68 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:87 int_node:68 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:88 int_node:72 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:89 int_node:72 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:90 int_node:76 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:91 int_node:76 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:92 int_node:80 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:93 int_node:80 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:94 int_node:84 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:95 int_node:84 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:96 int_node:25 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:97 int_node:25 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:98 int_node:29 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:99 int_node:29 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:100 int_node:33 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:101 int_node:33 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:102 int_node:37 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:103 int_node:37 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:104 int_node:41 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:105 int_node:41 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:106 int_node:45 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:107 int_node:45 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:108 int_node:49 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:109 int_node:49 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:110 int_node:53 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:111 int_node:53 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:112 int_node:57 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:113 int_node:57 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:114 int_node:61 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:115 int_node:61 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:116 int_node:65 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:117 int_node:65 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:118 int_node:69 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:119 int_node:69 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:120 int_node:73 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:121 int_node:73 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:122 int_node:77 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:123 int_node:77 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:124 int_node:81 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:125 int_node:81 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:126 int_node:85 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:127 int_node:85 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:128 int_node:22 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:129 int_node:22 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:130 int_node:26 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:131 int_node:26 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:132 int_node:30 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:133 int_node:30 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:134 int_node:34 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:135 int_node:34 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:136 int_node:38 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:137 int_node:38 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:138 int_node:42 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:139 int_node:42 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:140 int_node:46 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:141 int_node:46 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:142 int_node:50 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:143 int_node:50 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:144 int_node:54 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:145 int_node:54 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:146 int_node:58 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:147 int_node:58 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:148 int_node:62 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:149 int_node:62 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:150 int_node:66 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:151 int_node:66 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:152 int_node:70 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:153 int_node:70 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:154 int_node:74 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:155 int_node:74 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:156 int_node:78 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:157 int_node:78 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:158 int_node:82 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:159 int_node:82 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:160 int_node:23 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:161 int_node:23 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:162 int_node:27 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:163 int_node:27 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:164 int_node:31 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:165 int_node:31 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:166 int_node:35 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:167 int_node:35 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:168 int_node:39 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:169 int_node:39 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:170 int_node:43 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:171 int_node:43 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:172 int_node:47 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:173 int_node:47 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:174 int_node:51 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:175 int_node:51 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:176 int_node:55 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:177 int_node:55 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:178 int_node:59 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:179 int_node:59 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:180 int_node:63 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:181 int_node:63 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:182 int_node:67 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:183 int_node:67 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:184 int_node:71 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:185 int_node:71 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:186 int_node:75 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:187 int_node:75 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:188 int_node:79 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:189 int_node:79 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:190 int_node:83 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:191 int_node:83 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:192 int_node:24 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:193 int_node:24 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:194 int_node:28 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:195 int_node:28 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:196 int_node:32 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:197 int_node:32 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:198 int_node:36 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:199 int_node:36 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:200 int_node:40 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:201 int_node:40 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:202 int_node:44 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:203 int_node:44 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:204 int_node:48 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:205 int_node:48 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:206 int_node:52 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:207 int_node:52 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:208 int_node:56 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:209 int_node:56 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:210 int_node:60 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:211 int_node:60 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:212 int_node:64 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:213 int_node:64 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:214 int_node:68 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:215 int_node:68 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:216 int_node:72 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:217 int_node:72 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:218 int_node:76 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:219 int_node:76 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:220 int_node:80 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:221 int_node:80 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:222 int_node:84 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:223 int_node:84 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:224 int_node:25 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:225 int_node:25 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:226 int_node:29 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:227 int_node:29 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:228 int_node:33 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:229 int_node:33 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:230 int_node:37 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:231 int_node:37 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:232 int_node:41 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:233 int_node:41 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:234 int_node:45 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:235 int_node:45 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:236 int_node:49 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:237 int_node:49 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:238 int_node:53 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:239 int_node:53 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:240 int_node:57 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:241 int_node:57 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:242 int_node:61 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:243 int_node:61 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:244 int_node:65 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:245 int_node:65 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:246 int_node:69 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:247 int_node:69 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:248 int_node:73 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:249 int_node:73 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:250 int_node:77 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:251 int_node:77 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:252 int_node:81 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:253 int_node:81 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:254 int_node:85 link_weight:1 link_latency:1 bw_multiplier:16
+ext_node:L2Cache:255 int_node:85 link_weight:1 link_latency:1 bw_multiplier:16
+
+int_node:22 int_node:5 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:23 int_node:5 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:24 int_node:5 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:25 int_node:5 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:26 int_node:6 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:27 int_node:6 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:28 int_node:6 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:29 int_node:6 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:30 int_node:7 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:31 int_node:7 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:32 int_node:7 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:33 int_node:7 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:34 int_node:8 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:35 int_node:8 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:36 int_node:8 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:37 int_node:8 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:38 int_node:9 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:39 int_node:9 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:40 int_node:9 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:41 int_node:9 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:42 int_node:10 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:43 int_node:10 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:44 int_node:10 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:45 int_node:10 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:46 int_node:11 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:47 int_node:11 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:48 int_node:11 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:49 int_node:11 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:50 int_node:12 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:51 int_node:12 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:52 int_node:12 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:53 int_node:12 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:54 int_node:13 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:55 int_node:13 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:56 int_node:13 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:57 int_node:13 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:58 int_node:14 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:59 int_node:14 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:60 int_node:14 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:61 int_node:14 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:62 int_node:15 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:63 int_node:15 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:64 int_node:15 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:65 int_node:15 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:66 int_node:16 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:67 int_node:16 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:68 int_node:16 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:69 int_node:16 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:70 int_node:17 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:71 int_node:17 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:72 int_node:17 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:73 int_node:17 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:74 int_node:18 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:75 int_node:18 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:76 int_node:18 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:77 int_node:18 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:78 int_node:19 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:79 int_node:19 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:80 int_node:19 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:81 int_node:19 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:82 int_node:20 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:83 int_node:20 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:84 int_node:20 link_weight:1 link_latency:1 bw_multiplier:32
+int_node:85 int_node:20 link_weight:1 link_latency:1 bw_multiplier:32
+
+int_node:2 int_node:21 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:0 int_node:21 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:1 int_node:21 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:2 int_node:21 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:3 int_node:21 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:4 int_node:21 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:5 int_node:21 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:6 int_node:21 link_weight:1000 link_latency:20 bw_multiplier:32
+ext_node:Directory:7 int_node:21 link_weight:1000 link_latency:20 bw_multiplier:32
+
+int_node:0 int_node:1 link_weight:1 link_latency:1 bw_multiplier:80
+int_node:1 int_node:2 link_weight:1 link_latency:1 bw_multiplier:80
+int_node:2 int_node:3 link_weight:1 link_latency:1 bw_multiplier:80
+int_node:3 int_node:4 link_weight:1 link_latency:1 bw_multiplier:80
+int_node:0 int_node:5 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:1 int_node:6 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:2 int_node:7 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:2 int_node:8 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:2 int_node:9 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:2 int_node:10 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:3 int_node:11 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:4 int_node:12 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:0 int_node:13 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:1 int_node:14 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:2 int_node:15 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:2 int_node:16 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:2 int_node:17 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:2 int_node:18 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:3 int_node:19 link_weight:1 link_latency:1 bw_multiplier:8
+int_node:4 int_node:20 link_weight:1 link_latency:1 bw_multiplier:8
+
diff --git a/src/mem/ruby/network/simple/Network_Files/TOKEN_CMP_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-16.txt b/src/mem/ruby/network/simple/Network_Files/TOKEN_CMP_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-16.txt
new file mode 100644
index 000000000..b6b1dbd98
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/TOKEN_CMP_Procs-16_ProcsPerChip-16_L2Banks-16_Memories-16.txt
@@ -0,0 +1,74 @@
+
+processors:16
+procs_per_chip:16
+L2banks:16
+memories:16
+bw_unit:10000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:2 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:3 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:4 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:5 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:6 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:7 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:8 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:9 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:10 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:11 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:12 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:13 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:14 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:15 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:4 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:5 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:6 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:7 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:8 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:9 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:10 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:11 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:12 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:13 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:14 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:15 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:1 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:2 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:3 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:4 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:5 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:6 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:7 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:8 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:9 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:10 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:11 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:12 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:13 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:14 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:15 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:0 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:1 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:2 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:3 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:4 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:5 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:6 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:7 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:8 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:9 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:10 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:11 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:12 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:13 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:14 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:15 int_node:2 link_latency:20 bw_multiplier:10
+int_node:0 int_node:1 link_latency:20 bw_multiplier:16
+int_node:1 int_node:2 link_latency:20 bw_multiplier:10
+
diff --git a/src/mem/ruby/network/simple/Network_Files/TOKEN_CMP_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-16.txt b/src/mem/ruby/network/simple/Network_Files/TOKEN_CMP_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-16.txt
new file mode 100644
index 000000000..28a9be8f5
--- /dev/null
+++ b/src/mem/ruby/network/simple/Network_Files/TOKEN_CMP_Procs-16_ProcsPerChip-4_L2Banks-16_Memories-16.txt
@@ -0,0 +1,101 @@
+
+processors:16
+procs_per_chip:4
+L2banks:16
+memories:16
+bw_unit:1000
+
+ext_node:L1Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:1 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:2 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:3 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:0 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:1 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:2 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:3 int_node:0 link_latency:1 bw_multiplier:64
+ext_node:Directory:0 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:1 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:2 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:Directory:3 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:0 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:1 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:2 int_node:2 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:3 int_node:2 link_latency:20 bw_multiplier:10
+int_node:0 int_node:1 link_latency:1 bw_multiplier:16
+int_node:1 int_node:2 link_latency:20 bw_multiplier:10
+int_node:0 int_node:16 link_latency:1 bw_multiplier:16
+int_node:16 int_node:3 link_latency:1 bw_multiplier:16
+
+int_node:3 int_node:7 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:4 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:5 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:6 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:7 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:4 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:5 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:6 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:7 int_node:4 link_latency:1 bw_multiplier:64
+ext_node:Directory:4 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:5 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:6 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:Directory:7 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:4 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:5 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:6 int_node:6 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:7 int_node:6 link_latency:20 bw_multiplier:10
+int_node:4 int_node:5 link_latency:1 bw_multiplier:16
+int_node:5 int_node:6 link_latency:20 bw_multiplier:16
+int_node:4 int_node:17 link_latency:1 bw_multiplier:16
+int_node:17 int_node:7 link_latency:1 bw_multiplier:16
+
+int_node:7 int_node:11 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:8 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:9 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:10 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:11 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:8 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:9 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:10 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:11 int_node:8 link_latency:1 bw_multiplier:64
+ext_node:Directory:8 int_node:10 link_latency:20 bw_multiplier:10
+ext_node:Directory:9 int_node:10 link_latency:20 bw_multiplier:10
+ext_node:Directory:10 int_node:10 link_latency:20 bw_multiplier:10
+ext_node:Directory:11 int_node:10 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:8 int_node:10 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:9 int_node:10 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:10 int_node:10 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:11 int_node:10 link_latency:20 bw_multiplier:10
+int_node:8 int_node:9 link_latency:1 bw_multiplier:16
+int_node:9 int_node:10 link_latency:20 bw_multiplier:10
+int_node:8 int_node:18 link_latency:1 bw_multiplier:16
+int_node:18 int_node:11 link_latency:1 bw_multiplier:16
+
+int_node:11 int_node:15 link_latency:20 bw_multiplier:10
+
+ext_node:L1Cache:12 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:13 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:14 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:L1Cache:15 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:12 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:13 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:14 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:L2Cache:15 int_node:12 link_latency:1 bw_multiplier:64
+ext_node:Directory:12 int_node:14 link_latency:20 bw_multiplier:10
+ext_node:Directory:13 int_node:14 link_latency:20 bw_multiplier:10
+ext_node:Directory:14 int_node:14 link_latency:20 bw_multiplier:10
+ext_node:Directory:15 int_node:14 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:12 int_node:14 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:13 int_node:14 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:14 int_node:14 link_latency:20 bw_multiplier:10
+ext_node:PersistentArbiter:15 int_node:14 link_latency:20 bw_multiplier:10
+int_node:12 int_node:13 link_latency:1 bw_multiplier:16
+int_node:13 int_node:14 link_latency:20 bw_multiplier:10
+int_node:12 int_node:19 link_latency:1 bw_multiplier:16
+int_node:19 int_node:15 link_latency:1 bw_multiplier:16
+
+int_node:15 int_node:3 link_latency:20 bw_multiplier:10
+int_node:15 int_node:7 link_latency:20 bw_multiplier:10
+int_node:11 int_node:3 link_latency:20 bw_multiplier:10
+
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
new file mode 100644
index 000000000..a88a29e83
--- /dev/null
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -0,0 +1,319 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PerfectSwitch.C
+ *
+ * Description: See PerfectSwitch.h
+ *
+ * $Id$
+ *
+ */
+
+
+#include "PerfectSwitch.hh"
+#include "NetworkMessage.hh"
+#include "Profiler.hh"
+#include "System.hh"
+#include "SimpleNetwork.hh"
+#include "util.hh"
+#include "MessageBuffer.hh"
+#include "Protocol.hh"
+
+const int PRIORITY_SWITCH_LIMIT = 128;
+
+// Operator for helper class
+// Strict weak ordering for LinkOrder, used by m_link_order.sortVector():
+// lower m_value sorts first.  wakeup() packs each link's total queue
+// length into the upper bits of m_value (with a random low byte as a
+// tie-breaker), so sorting visits the least-congested link first.
+bool operator<(const LinkOrder& l1, const LinkOrder& l2) {
+ return (l1.m_value < l2.m_value);
+}
+
+// Construct a switch with the given ID.  network_ptr is a non-owning
+// back-pointer to the enclosing network, used by wakeup() to query
+// per-vnet ordering; the destructor does not delete it.
+PerfectSwitch::PerfectSwitch(SwitchID sid, SimpleNetwork* network_ptr)
+{
+ m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS; // FIXME - pass me as a parameter?
+ m_switch_id = sid;
+ m_round_robin_start = 0; // index of the next input port to service first
+ m_network_ptr = network_ptr;
+ m_wakeups_wo_switch = 0; // wakeups since the last priority inversion (see wakeup())
+}
+
+// Register a new input port: one MessageBuffer per virtual network.
+// The switch installs itself as the consumer of every buffer so that
+// wakeup() is scheduled whenever a message arrives on any of them.
+void PerfectSwitch::addInPort(const Vector<MessageBuffer*>& in)
+{
+ assert(in.size() == m_virtual_networks);
+ NodeID port = m_in.size(); // index this port will occupy in m_in
+ m_in.insertAtBottom(in);
+ for (int j = 0; j < m_virtual_networks; j++) {
+ m_in[port][j]->setConsumer(this);
+ string desc = "[Queue from port " + NodeIDToString(m_switch_id) + " " + NodeIDToString(port) + " " + NodeIDToString(j) + " to PerfectSwitch]";
+ m_in[port][j]->setDescription(desc);
+ }
+}
+
+// Register a new output port (one buffer per vnet) together with the
+// routing-table entry describing which destinations it reaches.  The
+// same index identifies the link in m_out, m_routing_table and
+// m_link_order.
+void PerfectSwitch::addOutPort(const Vector<MessageBuffer*>& out, const NetDest& routing_table_entry)
+{
+ assert(out.size() == m_virtual_networks);
+
+ // Setup link order
+ LinkOrder l;
+ l.m_value = 0;
+ l.m_link = m_out.size();
+ m_link_order.insertAtBottom(l);
+
+ // Add to routing table
+ m_out.insertAtBottom(out);
+ m_routing_table.insertAtBottom(routing_table_entry);
+
+ // keep a copy of the out-port buffers for topology printing only
+ if (g_PRINT_TOPOLOGY) {
+ m_out_link_vec.insertAtBottom(out);
+ }
+}
+
+// Discard every routing-table entry.  m_out and m_link_order are left
+// intact.  NOTE(review): presumably called before a topology
+// reconfiguration repopulates the table via reconfigureOutPort() --
+// confirm against Topology::createLinks().
+void PerfectSwitch::clearRoutingTables()
+{
+ m_routing_table.clear();
+}
+
+// Empty every input and output MessageBuffer on every virtual network
+// (used by SimpleNetwork::reset() via Switch::clearBuffers()).
+void PerfectSwitch::clearBuffers()
+{
+ for(int i=0; i<m_in.size(); i++){
+ for(int vnet=0; vnet < m_virtual_networks; vnet++) {
+ m_in[i][vnet]->clear();
+ }
+ }
+
+ for(int i=0; i<m_out.size(); i++){
+ for(int vnet=0; vnet < m_virtual_networks; vnet++) {
+ m_out[i][vnet]->clear();
+ }
+ }
+}
+
+// Append a replacement routing-table entry for the next output link.
+// NOTE(review): this assumes clearRoutingTables() ran first and that
+// entries are re-added in the original link order, since the entry is
+// matched to a link purely by position -- confirm with the caller.
+void PerfectSwitch::reconfigureOutPort(const NetDest& routing_table_entry)
+{
+ m_routing_table.insertAtBottom(routing_table_entry);
+}
+
+// Nothing to free: the MessageBuffers referenced in m_in/m_out are
+// owned elsewhere (allocated and deleted by SimpleNetwork).
+PerfectSwitch::~PerfectSwitch()
+{
+}
+
+// Main scheduling routine, invoked by the event queue whenever one of
+// this switch's input buffers has a ready message.  For each virtual
+// network (scanned in priority order, periodically inverted to avoid
+// starvation) and each input port (serviced round-robin), every ready
+// message is routed to the output link(s) whose routing-table entry
+// intersects the message's internal destination set.  If any required
+// output queue is full, the switch reschedules itself one cycle later
+// and moves on to the next input port.
+void PerfectSwitch::wakeup()
+{
+
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, m_switch_id);
+
+ MsgPtr msg_ptr;
+
+ // Give the highest numbered link priority most of the time
+ m_wakeups_wo_switch++;
+ int highest_prio_vnet = m_virtual_networks-1;
+ int lowest_prio_vnet = 0;
+ int decrementer = 1;
+ NetworkMessage* net_msg_ptr = NULL;
+
+ // invert priorities to avoid starvation seen in the component network
+ if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
+ m_wakeups_wo_switch = 0;
+ highest_prio_vnet = 0;
+ lowest_prio_vnet = m_virtual_networks-1;
+ decrementer = -1;
+ }
+
+ // Walk the vnets from highest_prio_vnet towards lowest_prio_vnet;
+ // multiplying both sides by decrementer lets one loop run in either
+ // direction.
+ for (int vnet = highest_prio_vnet; (vnet*decrementer) >= (decrementer*lowest_prio_vnet); vnet -= decrementer) {
+
+ // For all components incoming queues
+ int incoming = m_round_robin_start; // This is for round-robin scheduling
+ m_round_robin_start++;
+ if (m_round_robin_start >= m_in.size()) {
+ m_round_robin_start = 0;
+ }
+
+ // for all input ports, use round robin scheduling
+ for (int counter = 0; counter < m_in.size(); counter++) {
+
+ // Round robin scheduling
+ incoming++;
+ if (incoming >= m_in.size()) {
+ incoming = 0;
+ }
+
+ // temporary vectors to store the routing results
+ Vector<LinkID> output_links;
+ Vector<NetDest> output_link_destinations;
+
+ // Is there a message waiting?
+ while (m_in[incoming][vnet]->isReady()) {
+
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, incoming);
+
+ // Peek at message
+ msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
+ net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, *net_msg_ptr);
+
+ output_links.clear();
+ output_link_destinations.clear();
+ NetDest msg_destinations = net_msg_ptr->getInternalDestination();
+
+ // Unfortunately, the token-protocol sends some
+ // zero-destination messages, so this assert isn't valid
+ // assert(msg_destinations.count() > 0);
+
+ assert(m_link_order.size() == m_routing_table.size());
+ assert(m_link_order.size() == m_out.size());
+
+ if (g_adaptive_routing) {
+ if (m_network_ptr->isVNetOrdered(vnet)) {
+ // Don't adaptively route: an ordered vnet must always use
+ // the same (identity) link order to preserve ordering.
+ for (int outlink=0; outlink<m_out.size(); outlink++) {
+ m_link_order[outlink].m_link = outlink;
+ m_link_order[outlink].m_value = 0;
+ }
+ } else {
+ // Find how clogged each link is: total queue occupancy in
+ // the high bits, a random low byte to break ties fairly.
+ for (int outlink=0; outlink<m_out.size(); outlink++) {
+ int out_queue_length = 0;
+ for (int v=0; v<m_virtual_networks; v++) {
+ out_queue_length += m_out[outlink][v]->getSize();
+ }
+ m_link_order[outlink].m_link = outlink;
+ m_link_order[outlink].m_value = 0;
+ m_link_order[outlink].m_value |= (out_queue_length << 8);
+ m_link_order[outlink].m_value |= (random() & 0xff);
+ }
+ m_link_order.sortVector(); // Look at the most empty link first
+ }
+ }
+
+ for (int i=0; i<m_routing_table.size(); i++) {
+ // pick the next link to look at
+ int link = m_link_order[i].m_link;
+
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, m_routing_table[link]);
+
+ if (msg_destinations.intersectionIsNotEmpty(m_routing_table[link])) {
+
+ // Remember what link we're using
+ output_links.insertAtBottom(link);
+
+ // Need to remember which destinations need this message
+ // in another vector. This Set is the intersection of the
+ // routing_table entry and the current destination set.
+ // The intersection must not be empty, since we are inside "if"
+ output_link_destinations.insertAtBottom(msg_destinations.AND(m_routing_table[link]));
+
+ // Next, we update the msg_destination not to include
+ // those nodes that were already handled by this link
+ msg_destinations.removeNetDest(m_routing_table[link]);
+ }
+ }
+
+ // every destination must have been claimed by some link
+ assert(msg_destinations.count() == 0);
+ //assert(output_links.size() > 0);
+
+ // Check for resources - for all outgoing queues
+ bool enough = true;
+ for (int i=0; i<output_links.size(); i++) {
+ int outgoing = output_links[i];
+ enough = enough && m_out[outgoing][vnet]->areNSlotsAvailable(1);
+ DEBUG_MSG(NETWORK_COMP, HighPrio, "checking if node is blocked");
+ DEBUG_EXPR(NETWORK_COMP, HighPrio, outgoing);
+ DEBUG_EXPR(NETWORK_COMP, HighPrio, vnet);
+ DEBUG_EXPR(NETWORK_COMP, HighPrio, enough);
+ }
+
+ // There were not enough resources: leave the message queued,
+ // retry next cycle, and service the next input port.
+ if(!enough) {
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+ DEBUG_MSG(NETWORK_COMP, HighPrio, "Can't deliver message to anyone since a node is blocked");
+ DEBUG_EXPR(NETWORK_COMP, HighPrio, *net_msg_ptr);
+ break; // go to next incoming port
+ }
+
+ MsgPtr unmodified_msg_ptr;
+
+ if (output_links.size() > 1) {
+ // If we are sending this message down more than one link
+ // (size>1), we need to make a copy of the message so each
+ // branch can have a different internal destination
+ // we need to create an unmodified MsgPtr because the MessageBuffer enqueue func
+ // will modify the message
+ unmodified_msg_ptr = *(msg_ptr.ref()); // This magic line creates a private copy of the message
+ }
+
+ // Enqueue it - for all outgoing queues
+ for (int i=0; i<output_links.size(); i++) {
+ int outgoing = output_links[i];
+
+ if (i > 0) {
+ msg_ptr = *(unmodified_msg_ptr.ref()); // create a private copy of the unmodified message
+ }
+
+ // Change the internal destination set of the message so it
+ // knows which destinations this link is responsible for.
+ net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
+ net_msg_ptr->getInternalDestination() = output_link_destinations[i];
+
+ // Enqueue msg
+ DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
+ DEBUG_MSG(NETWORK_COMP,HighPrio,"switch: " + int_to_string(m_switch_id)
+ + " enqueuing net msg from inport[" + int_to_string(incoming) + "]["
+ + int_to_string(vnet) +"] to outport [" + int_to_string(outgoing)
+ + "][" + int_to_string(vnet) +"]"
+ + " time: " + int_to_string(g_eventQueue_ptr->getTime()) + ".");
+ DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
+
+ m_out[outgoing][vnet]->enqueue(msg_ptr);
+ }
+
+ // Dequeue msg
+ m_in[incoming][vnet]->pop();
+ }
+ }
+ }
+}
+
+// Placeholder: only prints a marker line.  No statistics are tracked in
+// this class itself.
+void PerfectSwitch::printStats(ostream& out) const
+{
+ out << "PerfectSwitch printStats" << endl;
+}
+
+// No-op: this class keeps no statistics to clear.
+void PerfectSwitch::clearStats()
+{
+}
+
+// No-op: the switch has no configuration of its own to report.
+void PerfectSwitch::printConfig(ostream& out) const
+{
+}
+
+// Brief identity string, emitted by operator<< (declared in the header).
+void PerfectSwitch::print(ostream& out) const
+{
+ out << "[PerfectSwitch " << m_switch_id << "]";
+}
+
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.hh b/src/mem/ruby/network/simple/PerfectSwitch.hh
new file mode 100644
index 000000000..4d381ccc9
--- /dev/null
+++ b/src/mem/ruby/network/simple/PerfectSwitch.hh
@@ -0,0 +1,118 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: Perfect switch: it is "perfect" in that it adds no
+ * latency whatsoever. Every cycle it is woken up and performs all the
+ * necessary routing that must be done. Note that this switch also
+ * has a number of input ports/output ports and has a routing table
+ * as well.
+ *
+ */
+
+#ifndef PerfectSwitch_H
+#define PerfectSwitch_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "Consumer.hh"
+#include "NodeID.hh"
+
+class MessageBuffer;
+class NetDest;
+class SimpleNetwork;
+
+// Helper record pairing an output-link index (m_link) with a sort key
+// (m_value).  PerfectSwitch sorts a vector of these to pick the least
+// congested link first; see operator< in PerfectSwitch.cc.
+class LinkOrder {
+public:
+ int m_link;
+ int m_value;
+};
+
+// An ideal switch: every wakeup() it moves all deliverable messages
+// from its input queues to the proper output queues with no internal
+// latency or bandwidth limit (latency/bandwidth are modelled elsewhere,
+// e.g. by Throttle).  Inherits Consumer so MessageBuffers can schedule
+// its wakeup.
+class PerfectSwitch : public Consumer {
+public:
+ // Constructors
+
+ // constructor specifying the number of ports
+ PerfectSwitch(SwitchID sid, SimpleNetwork* network_ptr);
+ void addInPort(const Vector<MessageBuffer*>& in);
+ void addOutPort(const Vector<MessageBuffer*>& out, const NetDest& routing_table_entry);
+ void clearRoutingTables();
+ void clearBuffers();
+ void reconfigureOutPort(const NetDest& routing_table_entry);
+ int getInLinks() const { return m_in.size(); }
+ int getOutLinks() const { return m_out.size(); }
+
+ // Destructor
+ ~PerfectSwitch();
+
+ // Public Methods
+ void wakeup();
+
+ void printStats(ostream& out) const;
+ void clearStats();
+ void printConfig(ostream& out) const;
+
+ void print(ostream& out) const;
+private:
+
+ // Private copy constructor and assignment operator
+ PerfectSwitch(const PerfectSwitch& obj);
+ PerfectSwitch& operator=(const PerfectSwitch& obj);
+
+ // Data Members (m_ prefix)
+ SwitchID m_switch_id;
+
+ // vector of queues from the components
+ Vector<Vector<MessageBuffer*> > m_in;
+ Vector<Vector<MessageBuffer*> > m_out;
+ // copies of the out-port buffer vectors, kept only when
+ // g_PRINT_TOPOLOGY is set.  NOTE(review): PerfectSwitch.cc's
+ // addOutPort() appends to this member, so it must be declared here;
+ // it was missing from the original declaration.
+ Vector<Vector<MessageBuffer*> > m_out_link_vec;
+ Vector<NetDest> m_routing_table; // per-link reachable destination sets
+ Vector<LinkOrder> m_link_order;  // scratch ordering used by wakeup()
+ int m_virtual_networks;
+ int m_round_robin_start;  // next input port to service first
+ int m_wakeups_wo_switch;  // wakeups since last priority inversion
+ SimpleNetwork* m_network_ptr; // non-owning back-pointer
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const PerfectSwitch& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream a PerfectSwitch by delegating to its print() method, then
+// flushing the stream.
+extern inline
+ostream& operator<<(ostream& out, const PerfectSwitch& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //PerfectSwitch_H
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.cc b/src/mem/ruby/network/simple/SimpleNetwork.cc
new file mode 100644
index 000000000..549503e47
--- /dev/null
+++ b/src/mem/ruby/network/simple/SimpleNetwork.cc
@@ -0,0 +1,257 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * SimpleNetwork.C
+ *
+ * Description: See SimpleNetwork.h
+ *
+ * $Id$
+ *
+ */
+
+#include "SimpleNetwork.hh"
+#include "Profiler.hh"
+#include "System.hh"
+#include "Switch.hh"
+#include "NetDest.hh"
+#include "Topology.hh"
+#include "TopologyType.hh"
+#include "MachineType.hh"
+#include "MessageBuffer.hh"
+#include "Protocol.hh"
+#include "Map.hh"
+
+// ***BIG HACK*** - This is actually code that _should_ be in Network.C
+
+// Note: Moved to Princeton Network
+// calls new to abstract away from the network
+/*
+Network* Network::createNetwork(int nodes)
+{
+ return new SimpleNetwork(nodes);
+}
+*/
+
+// Build the whole network: per-node/per-vnet to-net and from-net
+// queues, one Switch per topology switch, then wire up the links.
+// NOTE(review): the 'nodes' argument is ignored -- m_nodes is derived
+// from MachineType_base_number(MachineType_NUM) instead; confirm this
+// is intentional.
+SimpleNetwork::SimpleNetwork(int nodes)
+{
+ m_nodes = MachineType_base_number(MachineType_NUM);
+
+ m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS;
+ m_endpoint_switches.setSize(m_nodes);
+
+ // no vnet is active or ordered until a component requests a queue
+ m_in_use.setSize(m_virtual_networks);
+ m_ordered.setSize(m_virtual_networks);
+ for (int i = 0; i < m_virtual_networks; i++) {
+ m_in_use[i] = false;
+ m_ordered[i] = false;
+ }
+
+ // Allocate to and from queues
+ m_toNetQueues.setSize(m_nodes);
+ m_fromNetQueues.setSize(m_nodes);
+ for (int node = 0; node < m_nodes; node++) {
+ m_toNetQueues[node].setSize(m_virtual_networks);
+ m_fromNetQueues[node].setSize(m_virtual_networks);
+ for (int j = 0; j < m_virtual_networks; j++) {
+ m_toNetQueues[node][j] = new MessageBuffer;
+ m_fromNetQueues[node][j] = new MessageBuffer;
+ }
+ }
+
+ // Setup the network switches
+ m_topology_ptr = new Topology(this, m_nodes);
+ int number_of_switches = m_topology_ptr->numSwitches();
+ for (int i=0; i<number_of_switches; i++) {
+ m_switch_ptr_vector.insertAtBottom(new Switch(i, this));
+ }
+ m_topology_ptr->createLinks(false); // false because this isn't a reconfiguration
+}
+
+// Drop every in-flight message: clear all endpoint queues, then the
+// internal buffers of every switch.
+void SimpleNetwork::reset()
+{
+ for (int node = 0; node < m_nodes; node++) {
+ for (int j = 0; j < m_virtual_networks; j++) {
+ m_toNetQueues[node][j]->clear();
+ m_fromNetQueues[node][j]->clear();
+ }
+ }
+
+ for(int i=0; i<m_switch_ptr_vector.size(); i++){
+ m_switch_ptr_vector[i]->clearBuffers();
+ }
+}
+
+// Free everything this network allocated: the endpoint queues, the
+// switches, the internal link buffers (tracked in m_buffers_to_free by
+// makeInternalLink), and the topology.  m_endpoint_switches holds
+// non-owning aliases into m_switch_ptr_vector, so it is not deleted.
+SimpleNetwork::~SimpleNetwork()
+{
+ for (int i = 0; i < m_nodes; i++) {
+ m_toNetQueues[i].deletePointers();
+ m_fromNetQueues[i].deletePointers();
+ }
+ m_switch_ptr_vector.deletePointers();
+ m_buffers_to_free.deletePointers();
+ delete m_topology_ptr;
+}
+
+// From a switch to an endpoint node
+// Attach switch src's output to endpoint node dest's from-net queues,
+// and record which switch feeds the endpoint (used by getThrottles).
+// link_weight is unused here.  On reconfiguration only the routing
+// entry is replaced; the physical connection already exists.
+void SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+{
+ assert(dest < m_nodes);
+ assert(src < m_switch_ptr_vector.size());
+ assert(m_switch_ptr_vector[src] != NULL);
+ if(!isReconfiguration){
+ m_switch_ptr_vector[src]->addOutPort(m_fromNetQueues[dest], routing_table_entry, link_latency, bw_multiplier);
+ m_endpoint_switches[dest] = m_switch_ptr_vector[src];
+ } else {
+ m_switch_ptr_vector[src]->reconfigureOutPort(routing_table_entry);
+ }
+}
+
+// From an endpoint node to a switch
+// Connect endpoint node src's to-net queues as an input port of switch
+// dest.  routing_table_entry, link_latency and bw_multiplier are
+// accepted for interface symmetry but unused on this side.
+void SimpleNetwork::makeInLink(NodeID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration)
+{
+ assert(src < m_nodes);
+ if(!isReconfiguration){
+ m_switch_ptr_vector[dest]->addInPort(m_toNetQueues[src]);
+ } else {
+ // do nothing
+ }
+}
+
+// From a switch to a switch
+// Connect two switches with a fresh set of per-vnet ordered
+// MessageBuffers (optionally finite-sized); the buffers are remembered
+// in m_buffers_to_free so the destructor can release them.  link_weight
+// is unused here.  On reconfiguration only the routing entry changes.
+void SimpleNetwork::makeInternalLink(SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+{
+ if(!isReconfiguration){
+ // Create a set of new MessageBuffers
+ Vector<MessageBuffer*> queues;
+ for (int i = 0; i < m_virtual_networks; i++) {
+ // allocate a buffer
+ MessageBuffer* buffer_ptr = new MessageBuffer;
+ buffer_ptr->setOrdering(true);
+ if(FINITE_BUFFERING) {
+ buffer_ptr->setSize(FINITE_BUFFER_SIZE);
+ }
+ queues.insertAtBottom(buffer_ptr);
+ // remember to deallocate it
+ m_buffers_to_free.insertAtBottom(buffer_ptr);
+ }
+
+ // Connect it to the two switches
+ m_switch_ptr_vector[dest]->addInPort(queues);
+ m_switch_ptr_vector[src]->addOutPort(queues, routing_table_entry, link_latency, bw_multiplier);
+ } else {
+ m_switch_ptr_vector[src]->reconfigureOutPort(routing_table_entry);
+ }
+}
+
+// Validate a queue request and record that the virtual network is in
+// use; an ordered request permanently marks the vnet as ordered.
+void SimpleNetwork::checkNetworkAllocation(NodeID id, bool ordered, int network_num)
+{
+ ASSERT(id < m_nodes);
+ ASSERT(network_num < m_virtual_networks);
+
+ if (ordered) {
+ m_ordered[network_num] = true;
+ }
+ m_in_use[network_num] = true;
+}
+
+// Return node id's injection queue for the given vnet, marking the vnet
+// as allocated (and ordered, if requested) as a side effect.
+MessageBuffer* SimpleNetwork::getToNetQueue(NodeID id, bool ordered, int network_num)
+{
+ checkNetworkAllocation(id, ordered, network_num);
+ return m_toNetQueues[id][network_num];
+}
+
+// Return node id's ejection queue for the given vnet, marking the vnet
+// as allocated (and ordered, if requested) as a side effect.
+MessageBuffer* SimpleNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num)
+{
+ checkNetworkAllocation(id, ordered, network_num);
+ return m_fromNetQueues[id][network_num];
+}
+
+// Return the Throttles of the switch that feeds endpoint node id (the
+// switch recorded by makeOutLink); fails if no out-link was ever made.
+const Vector<Throttle*>* SimpleNetwork::getThrottles(NodeID id) const
+{
+ assert(id >= 0);
+ assert(id < m_nodes);
+ assert(m_endpoint_switches[id] != NULL);
+ return m_endpoint_switches[id]->getThrottles();
+}
+
+// Print a stats header, then delegate to every switch.
+void SimpleNetwork::printStats(ostream& out) const
+{
+ out << endl;
+ out << "Network Stats" << endl;
+ out << "-------------" << endl;
+ out << endl;
+ for(int i=0; i<m_switch_ptr_vector.size(); i++) {
+ m_switch_ptr_vector[i]->printStats(out);
+ }
+}
+
+// Reset statistics on every switch; the network keeps none of its own.
+void SimpleNetwork::clearStats()
+{
+ for(int i=0; i<m_switch_ptr_vector.size(); i++) {
+ m_switch_ptr_vector[i]->clearStats();
+ }
+}
+
+// Dump the network configuration: per-vnet usage/ordering, each
+// switch's config, and (optionally) the full topology.
+void SimpleNetwork::printConfig(ostream& out) const
+{
+ out << endl;
+ out << "Network Configuration" << endl;
+ out << "---------------------" << endl;
+ out << "network: SIMPLE_NETWORK" << endl;
+ out << "topology: " << g_NETWORK_TOPOLOGY << endl;
+ out << endl;
+
+ for (int i = 0; i < m_virtual_networks; i++) {
+ out << "virtual_net_" << i << ": ";
+ if (m_in_use[i]) {
+ out << "active, ";
+ if (m_ordered[i]) {
+ out << "ordered" << endl;
+ } else {
+ out << "unordered" << endl;
+ }
+ } else {
+ out << "inactive" << endl;
+ }
+ }
+ out << endl;
+ for(int i=0; i<m_switch_ptr_vector.size(); i++) {
+ m_switch_ptr_vector[i]->printConfig(out);
+ }
+
+ if (g_PRINT_TOPOLOGY) {
+ m_topology_ptr->printConfig(out);
+ }
+}
+
+// Brief identity string, emitted by operator<< (see SimpleNetwork.hh).
+void SimpleNetwork::print(ostream& out) const
+{
+ out << "[SimpleNetwork]";
+}
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.hh b/src/mem/ruby/network/simple/SimpleNetwork.hh
new file mode 100644
index 000000000..a28904227
--- /dev/null
+++ b/src/mem/ruby/network/simple/SimpleNetwork.hh
@@ -0,0 +1,157 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * SimpleNetwork.h
+ *
+ * Description: The SimpleNetwork class implements the interconnection
+ * SimpleNetwork between components (processor/cache components and
+ * memory/directory components). The interconnection network as
+ * described here is not a physical network, but a programming concept
+ * used to implement all communication between components. Thus parts
+ * of this 'network' may model the on-chip connections between cache
+ * controllers and directory controllers as well as the links between
+ * chip and network switches.
+ *
+ * Two conceptual networks, an address and data network, are modeled.
+ * The data network is unordered, while the address network provides
+ * and conforms to a global ordering of all transactions.
+ *
+ * Currently the data network is point-to-point and the address
+ * network is a broadcast network. These two distinct conceptual
+ * network can be modeled as physically separate networks or
+ * multiplexed over a single physical network.
+ *
+ * The network encapsulates all notion of virtual global time and is
+ * responsible for ordering the network transactions received. This
+ * hides all of these ordering details from the processor/cache and
+ * directory/memory modules.
+ *
+ * FIXME: Various flavors of networks are provided as a compile-time
+ * configurable. We currently include this SimpleNetwork in the
+ * makefile's vpath, so that SimpleNetwork.C can provide an alternative
+ * version constructor for the abstract Network class. It is easy to
+ * modify this to make the network runtime configurable: just make the
+ * abstract Network class take an enumeration parameter, and based on
+ * that initialize the proper network. Or even better, just make the ruby
+ * system initializer choose the proper network to instantiate.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef SIMPLENETWORK_H
+#define SIMPLENETWORK_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "Network.hh"
+#include "NodeID.hh"
+
+class NetDest;
+class MessageBuffer;
+class Throttle;
+class Switch;
+class Topology;
+
+class SimpleNetwork : public Network {
+public:
+ // Constructors
+ SimpleNetwork(int nodes);
+
+ // Destructor
+ ~SimpleNetwork();
+
+ // Public Methods
+ void printStats(ostream& out) const;
+ void clearStats();
+ void printConfig(ostream& out) const;
+
+ // drop all in-flight messages (endpoint queues and switch buffers)
+ void reset();
+
+ // returns the queue requested for the given component
+ MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num);
+ MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num);
+ virtual const Vector<Throttle*>* getThrottles(NodeID id) const;
+
+ bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
+ bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
+
+ int getNumNodes() {return m_nodes; }
+
+ // Methods used by Topology to setup the network
+ // NOTE(review): the .cc definitions declare makeInLink(NodeID src,
+ // SwitchID dest, ...) and makeInternalLink(SwitchID src, SwitchID
+ // dest, ...); these prototypes only match if SwitchID and NodeID are
+ // typedefs of the same underlying type -- confirm.
+ void makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration);
+ void makeInLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration);
+ void makeInternalLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration);
+
+ void print(ostream& out) const;
+private:
+ void checkNetworkAllocation(NodeID id, bool ordered, int network_num);
+ void addLink(SwitchID src, SwitchID dest, int link_latency);
+ void makeLink(SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency);
+ SwitchID createSwitch();
+ void makeTopology();
+ void linkTopology();
+
+
+ // Private copy constructor and assignment operator
+ SimpleNetwork(const SimpleNetwork& obj);
+ SimpleNetwork& operator=(const SimpleNetwork& obj);
+
+ // Data Members (m_ prefix)
+
+ // vector of queues from the components
+ Vector<Vector<MessageBuffer*> > m_toNetQueues;
+ Vector<Vector<MessageBuffer*> > m_fromNetQueues;
+
+ int m_nodes;
+ int m_virtual_networks;
+ Vector<bool> m_in_use;  // vnet handed out via get*NetQueue
+ Vector<bool> m_ordered; // vnet requested with ordering
+ Vector<Switch*> m_switch_ptr_vector;      // owned switches
+ Vector<MessageBuffer*> m_buffers_to_free; // owned internal-link buffers
+ Vector<Switch*> m_endpoint_switches;      // non-owning: switch feeding each node
+ Topology* m_topology_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const SimpleNetwork& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream a SimpleNetwork by delegating to its print() method, then
+// flushing the stream.
+extern inline
+ostream& operator<<(ostream& out, const SimpleNetwork& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //SIMPLENETWORK_H
diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc
new file mode 100644
index 000000000..3b55d156f
--- /dev/null
+++ b/src/mem/ruby/network/simple/Switch.cc
@@ -0,0 +1,205 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Switch.C
+ *
+ * Description: See Switch.h
+ *
+ * $Id$
+ *
+ */
+
+
+#include "Switch.hh"
+#include "PerfectSwitch.hh"
+#include "MessageBuffer.hh"
+#include "Throttle.hh"
+#include "MessageSizeType.hh"
+#include "Network.hh"
+#include "Protocol.hh"
+
+Switch::Switch(SwitchID sid, SimpleNetwork* network_ptr)
+{
+ m_perfect_switch_ptr = new PerfectSwitch(sid, network_ptr);
+ m_switch_id = sid;
+ m_throttles.setSize(0);
+}
+
// Tears down the internal crossbar, the per-output-port throttles, and
// the intermediary MessageBuffers this switch allocated in addOutPort().
Switch::~Switch()
{
  delete m_perfect_switch_ptr;

  // Delete throttles (one per output port)
  m_throttles.deletePointers();

  // Delete MessageBuffers
  m_buffers_to_free.deletePointers();
}
+
// Registers a set of input queues (one per virtual network); forwarded
// straight to the PerfectSwitch — input ports are not throttled.
void Switch::addInPort(const Vector<MessageBuffer*>& in)
{
  m_perfect_switch_ptr->addInPort(in);
}
+
+void Switch::addOutPort(const Vector<MessageBuffer*>& out, const NetDest& routing_table_entry, int link_latency, int bw_multiplier)
+{
+ Throttle* throttle_ptr = NULL;
+
+ // Create a throttle
+ throttle_ptr = new Throttle(m_switch_id, m_throttles.size(), link_latency, bw_multiplier);
+ m_throttles.insertAtBottom(throttle_ptr);
+
+ // Create one buffer per vnet (these are intermediaryQueues)
+ Vector<MessageBuffer*> intermediateBuffers;
+ for (int i=0; i<out.size(); i++) {
+ MessageBuffer* buffer_ptr = new MessageBuffer;
+ // Make these queues ordered
+ buffer_ptr->setOrdering(true);
+ if(FINITE_BUFFERING) {
+ buffer_ptr->setSize(FINITE_BUFFER_SIZE);
+ }
+ intermediateBuffers.insertAtBottom(buffer_ptr);
+ m_buffers_to_free.insertAtBottom(buffer_ptr);
+ }
+
+ // Hook the queues to the PerfectSwitch
+ m_perfect_switch_ptr->addOutPort(intermediateBuffers, routing_table_entry);
+
+ // Hook the queues to the Throttle
+ throttle_ptr->addLinks(intermediateBuffers, out);
+
+}
+
// Clears routing state; forwarded to the PerfectSwitch, which owns it.
void Switch::clearRoutingTables()
{
  m_perfect_switch_ptr->clearRoutingTables();
}
+
+void Switch::clearBuffers()
+{
+ m_perfect_switch_ptr->clearBuffers();
+ for (int i=0; i<m_throttles.size(); i++) {
+ if (m_throttles[i] != NULL) {
+ m_throttles[i]->clear();
+ }
+ }
+}
+
// Swaps in a new routing table entry on the internal PerfectSwitch.
void Switch::reconfigureOutPort(const NetDest& routing_table_entry)
{
  m_perfect_switch_ptr->reconfigureOutPort(routing_table_entry);
}
+
// Returns the throttle attached to the given output link; the link must
// have been created via addOutPort() (non-NULL is asserted).
const Throttle* Switch::getThrottle(LinkID link_number) const
{
  assert(m_throttles[link_number] != NULL);
  return m_throttles[link_number];
}
+
// Exposes the whole (read-only) throttle vector, e.g. for aggregate stats.
const Vector<Throttle*>* Switch::getThrottles() const
{
  return &m_throttles;
}
+
// Emits this switch's statistics: link counts, average and per-link
// utilization, and a per-link traffic breakdown by message size type.
void Switch::printStats(ostream& out) const
{
  out << "switch_" << m_switch_id << "_inlinks: " << m_perfect_switch_ptr->getInLinks() << endl;
  out << "switch_" << m_switch_id << "_outlinks: " << m_perfect_switch_ptr->getOutLinks() << endl;

  // Average link utilizations
  double average_utilization = 0.0;
  int throttle_count = 0;

  for (int i=0; i<m_throttles.size(); i++) {
    Throttle* throttle_ptr = m_throttles[i];
    if (throttle_ptr != NULL) {
      average_utilization += throttle_ptr->getUtilization();
      throttle_count++;
    }
  }
  // Guard against a switch with no throttled output links.
  average_utilization = (throttle_count == 0) ? 0 : average_utilization / float(throttle_count);

  // Individual link utilizations
  out << "links_utilized_percent_switch_" << m_switch_id << ": " << average_utilization << endl;
  for (int link=0; link<m_throttles.size(); link++) {
    Throttle* throttle_ptr = m_throttles[link];
    if (throttle_ptr != NULL) {
      out << "  links_utilized_percent_switch_" << m_switch_id << "_link_" << link << ": "
          << throttle_ptr->getUtilization() << " bw: " << throttle_ptr->getLinkBandwidth()
          << " base_latency: " << throttle_ptr->getLatency() << endl;
    }
  }
  out << endl;

  // Traffic breakdown: message counts per size type per link; only
  // non-zero rows are printed.
  for (int link=0; link<m_throttles.size(); link++) {
    Throttle* throttle_ptr = m_throttles[link];
    if (throttle_ptr != NULL) {
      const Vector<Vector<int> >& message_counts = throttle_ptr->getCounters();
      for (int int_type=0; int_type<MessageSizeType_NUM; int_type++) {
        MessageSizeType type = MessageSizeType(int_type);
        int sum = message_counts[type].sum();
        if (sum != 0) {
          out << "  outgoing_messages_switch_" << m_switch_id << "_link_" << link << "_" << type
              << ": " << sum << " " << sum * MessageSizeType_to_int(type)
              << " " << message_counts[type] << " base_latency: " << throttle_ptr->getLatency() << endl;
        }
      }
    }
  }
  out << endl;
}
+
+void Switch::clearStats()
+{
+ m_perfect_switch_ptr->clearStats();
+ for (int i=0; i<m_throttles.size(); i++) {
+ if (m_throttles[i] != NULL) {
+ m_throttles[i]->clearStats();
+ }
+ }
+}
+
+void Switch::printConfig(ostream& out) const
+{
+ m_perfect_switch_ptr->printConfig(out);
+ for (int i=0; i<m_throttles.size(); i++) {
+ if (m_throttles[i] != NULL) {
+ m_throttles[i]->printConfig(out);
+ }
+ }
+}
+
void Switch::print(ostream& out) const
{
  // FIXME printing -- placeholder; no per-switch state is printed yet
  out << "[Switch]";
}
+
diff --git a/src/mem/ruby/network/simple/Switch.hh b/src/mem/ruby/network/simple/Switch.hh
new file mode 100644
index 000000000..a408155c0
--- /dev/null
+++ b/src/mem/ruby/network/simple/Switch.hh
@@ -0,0 +1,105 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: The actual modelled switch. It use the perfect switch and a
+ * Throttle object to control and bandwidth and timing *only for
+ * the output port*. So here we have un-realistic modelling,
+ * since the order of PerfectSwitch and Throttle objects get
+ * woke up affect the message timing. A more accurate model would
+ * be having two set of system states, one for this cycle, one for
+ * next cycle. And on the cycle boundary swap the two set of
+ * states.
+ *
+ */
+
+#ifndef Switch_H
+#define Switch_H
+
+#include "Global.hh"
+#include "Vector.hh"
+
+class MessageBuffer;
+class PerfectSwitch;
+class NetDest;
+class SimpleNetwork;
+class Throttle;
+
class Switch {
public:
  // Constructors

  // constructor specifying the number of ports
  Switch(SwitchID sid, SimpleNetwork* network_ptr);

  // Wiring: input ports go straight to the internal PerfectSwitch;
  // each output port additionally gets a Throttle for bandwidth/latency.
  void addInPort(const Vector<MessageBuffer*>& in);
  void addOutPort(const Vector<MessageBuffer*>& out, const NetDest& routing_table_entry, int link_latency, int bw_multiplier);
  const Throttle* getThrottle(LinkID link_number) const;
  const Vector<Throttle*>* getThrottles() const;
  void clearRoutingTables();
  void clearBuffers();
  void reconfigureOutPort(const NetDest& routing_table_entry);

  // Statistics / configuration reporting
  void printStats(ostream& out) const;
  void clearStats();
  void printConfig(ostream& out) const;

  // Destructor
  ~Switch();

  void print(ostream& out) const;
private:

  // Private copy constructor and assignment operator
  Switch(const Switch& obj);
  Switch& operator=(const Switch& obj);

  // Data Members (m_ prefix)
  PerfectSwitch* m_perfect_switch_ptr;       // zero-latency crossbar core
  Vector<Throttle*> m_throttles;             // one throttle per output port
  Vector<MessageBuffer*> m_buffers_to_free;  // intermediary queues owned (and freed) here
  SwitchID m_switch_id;
};
+
// Output operator declaration
ostream& operator<<(ostream& out, const Switch& obj);

// ******************* Definitions *******************

// Output operator definition: delegates to Switch::print() and flushes.
extern inline
ostream& operator<<(ostream& out, const Switch& obj)
{
  obj.print(out);
  out << flush;
  return out;
}
+
+#endif //Switch_H
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
new file mode 100644
index 000000000..2f6e68afd
--- /dev/null
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -0,0 +1,291 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: see Throttle.h
+ *
+ */
+
+#include "Throttle.hh"
+#include "MessageBuffer.hh"
+#include "Network.hh"
+#include "System.hh"
+#include "NetworkMessage.hh"
+#include "Protocol.hh"
+
// Tuning knobs for the bandwidth-adaptive ("bash") predictor and the
// round-robin priority inversion in Throttle::wakeup().
const int HIGH_RANGE = 256;                // saturation cap for m_bash_counter
const int ADJUST_INTERVAL = 50000;         // cycles per bandwidth sample window
const int MESSAGE_SIZE_MULTIPLIER = 1000;  // size-type units -> bandwidth units
//const int BROADCAST_SCALING = 4; // Have a 16p system act like a 64p systems
const int BROADCAST_SCALING = 1;           // 1 disables broadcast inflation
const int PRIORITY_SWITCH_LIMIT = 128;     // wakeups before vnet priority inverts

// Converts a message's size class into bandwidth units (defined below).
static int network_message_to_size(NetworkMessage* net_msg_ptr);

extern std::ostream * debug_cout_ptr;
+
+Throttle::Throttle(int sID, NodeID node, int link_latency, int link_bandwidth_multiplier)
+{
+ init(node, link_latency, link_bandwidth_multiplier);
+ m_sID = sID;
+}
+
+Throttle::Throttle(NodeID node, int link_latency, int link_bandwidth_multiplier)
+{
+ init(node, link_latency, link_bandwidth_multiplier);
+ m_sID = 0;
+}
+
// Shared constructor body: records the link parameters, resets the
// bandwidth-adaptive predictor state, and clears all statistics.
void Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier)
{
  m_node = node;
  m_vnets = 0;  // virtual networks are registered later via addLinks()

  ASSERT(link_bandwidth_multiplier > 0);
  m_link_bandwidth_multiplier = link_bandwidth_multiplier;
  m_link_latency = link_latency;

  // Start the "bash" predictor fully saturated.
  m_bash_counter = HIGH_RANGE;
  m_bandwidth_since_sample = 0;
  m_last_bandwidth_sample = 0;
  m_wakeups_wo_switch = 0;
  clearStats();
}
+
+void Throttle::clear()
+{
+ for (int counter = 0; counter < m_vnets; counter++) {
+ m_in[counter]->clear();
+ m_out[counter]->clear();
+ }
+}
+
+void Throttle::addLinks(const Vector<MessageBuffer*>& in_vec, const Vector<MessageBuffer*>& out_vec)
+{
+ assert(in_vec.size() == out_vec.size());
+ for (int i=0; i<in_vec.size(); i++) {
+ addVirtualNetwork(in_vec[i], out_vec[i]);
+ }
+
+ m_message_counters.setSize(MessageSizeType_NUM);
+ for (int i=0; i<MessageSizeType_NUM; i++) {
+ m_message_counters[i].setSize(in_vec.size());
+ for (int j=0; j<m_message_counters[i].size(); j++) {
+ m_message_counters[i][j] = 0;
+ }
+ }
+
+ if (g_PRINT_TOPOLOGY) {
+ m_out_link_vec.insertAtBottom(out_vec);
+ }
+}
+
// Registers one (in, out) queue pair as virtual network index m_vnets.
// NOTE: m_vnets serves as the new slot's index and is only incremented
// at the end — keep that ordering.
void Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
{
  m_units_remaining.insertAtBottom(0);
  m_in.insertAtBottom(in_ptr);
  m_out.insertAtBottom(out_ptr);

  // Set consumer and description; the throttle wakes when a message arrives
  m_in[m_vnets]->setConsumer(this);
  string desc = "[Queue to Throttle " + NodeIDToString(m_sID) + " " + NodeIDToString(m_node) + "]";
  m_in[m_vnets]->setDescription(desc);
  m_vnets++;
}
+
// Moves messages from the per-vnet input queues to the output queues,
// spending at most getLinkBandwidth() bandwidth units this cycle.  Also
// feeds the "bash" bandwidth predictor and reschedules itself while
// work remains.
void Throttle::wakeup()
{
  // Limits the number of message sent to a limited number of bytes/cycle.
  assert(getLinkBandwidth() > 0);
  int bw_remaining = getLinkBandwidth();

  // Give the highest numbered link priority most of the time
  m_wakeups_wo_switch++;
  int highest_prio_vnet = m_vnets-1;
  int lowest_prio_vnet = 0;
  int counter = 1;
  bool schedule_wakeup = false;

  // invert priorities to avoid starvation seen in the component network;
  // 'counter' (+1/-1) sets the iteration direction of the vnet loop below
  if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
    m_wakeups_wo_switch = 0;
    highest_prio_vnet = 0;
    lowest_prio_vnet = m_vnets-1;
    counter = -1;
  }

  // Walk the vnets from highest to lowest priority in the chosen direction.
  for (int vnet = highest_prio_vnet; (vnet*counter) >= (counter*lowest_prio_vnet); vnet -= counter) {

    assert(m_out[vnet] != NULL);
    assert(m_in[vnet] != NULL);
    assert(m_units_remaining[vnet] >= 0);

    // Keep moving while bandwidth remains, there is (partial) work to do,
    // and the output queue can accept a message.
    while ((bw_remaining > 0) && ((m_in[vnet]->isReady()) || (m_units_remaining[vnet] > 0)) && m_out[vnet]->areNSlotsAvailable(1)) {

      // See if we are done transferring the previous message on this virtual network
      if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {

        // Find the size of the message we are moving
        MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
        NetworkMessage* net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
        m_units_remaining[vnet] += network_message_to_size(net_msg_ptr);

        DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
        DEBUG_MSG(NETWORK_COMP,HighPrio,"throttle: " + int_to_string(m_node)
                  + " my bw " + int_to_string(getLinkBandwidth())
                  + " bw spent enqueueing net msg " + int_to_string(m_units_remaining[vnet])
                  + " time: " + int_to_string(g_eventQueue_ptr->getTime()) + ".");

        // Move the message
        m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
        m_in[vnet]->pop();

        // Count the message
        m_message_counters[net_msg_ptr->getMessageSize()][vnet]++;

        DEBUG_MSG(NETWORK_COMP,LowPrio,*m_out[vnet]);
        DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
      }

      // Calculate the amount of bandwidth we spent on this message:
      // whatever the message still needed comes out of bw_remaining;
      // any unfinished remainder carries over to the next wakeup.
      int diff = m_units_remaining[vnet] - bw_remaining;
      m_units_remaining[vnet] = max(0, diff);
      bw_remaining = max(0, -diff);
    }

    // Bandwidth left but the output queue is full: retry next cycle.
    if ((bw_remaining > 0) && ((m_in[vnet]->isReady()) || (m_units_remaining[vnet] > 0)) && !m_out[vnet]->areNSlotsAvailable(1)) {
      DEBUG_MSG(NETWORK_COMP,LowPrio,vnet);
      schedule_wakeup = true; // schedule me to wakeup again because I'm waiting for my output queue to become available
    }
  }

  // We should only wake up when we use the bandwidth
  // assert(bw_remaining != getLinkBandwidth()); // This is only mostly true

  // Record that we used some or all of the link bandwidth this cycle
  double ratio = 1.0-(double(bw_remaining)/double(getLinkBandwidth()));
  // If ratio = 0, we used no bandwidth, if ratio = 1, we used all
  linkUtilized(ratio);

  // Sample the link bandwidth utilization over a number of cycles
  int bw_used = getLinkBandwidth()-bw_remaining;
  m_bandwidth_since_sample += bw_used;

  // FIXME - comment out the bash specific code for faster performance
  // Start Bash code
  // Update the predictor: one step per elapsed ADJUST_INTERVAL window,
  // raising/lowering m_bash_counter based on the window's utilization.
  Time current_time = g_eventQueue_ptr->getTime();
  while ((current_time - m_last_bandwidth_sample) > ADJUST_INTERVAL) {
    double utilization = m_bandwidth_since_sample/double(ADJUST_INTERVAL * getLinkBandwidth());

    if (utilization > g_bash_bandwidth_adaptive_threshold) {
      // Used more bandwidth
      m_bash_counter++;
    } else {
      // Used less bandwidth
      m_bash_counter--;
    }

    // Make sure we don't overflow
    m_bash_counter = min(HIGH_RANGE, m_bash_counter);
    m_bash_counter = max(0, m_bash_counter);

    // Reset samples
    m_last_bandwidth_sample += ADJUST_INTERVAL;
    m_bandwidth_since_sample = 0;
  }
  // End Bash code

  if ((bw_remaining > 0) && !schedule_wakeup) {
    // We have extra bandwidth and our output buffer was available, so we must not have anything else to do until another message arrives.
    DEBUG_MSG(NETWORK_COMP,LowPrio,*this);
    DEBUG_MSG(NETWORK_COMP,LowPrio,"not scheduled again");
  } else {
    DEBUG_MSG(NETWORK_COMP,LowPrio,*this);
    DEBUG_MSG(NETWORK_COMP,LowPrio,"scheduled again");
    // We are out of bandwidth for this cycle, so wakeup next cycle and continue
    g_eventQueue_ptr->scheduleEvent(this, 1);
  }
}
+
+bool Throttle::broadcastBandwidthAvailable(int rand) const
+{
+ bool result = !(m_bash_counter > ((HIGH_RANGE/4) + (rand % (HIGH_RANGE/2))));
+ return result;
+}
+
// Prints the average link utilization (percent) since the last clearStats().
void Throttle::printStats(ostream& out) const
{
  out << "utilized_percent: " << getUtilization() << endl;
}
+
+void Throttle::clearStats()
+{
+ m_ruby_start = g_eventQueue_ptr->getTime();
+ m_links_utilized = 0.0;
+
+ for (int i=0; i<m_message_counters.size(); i++) {
+ for (int j=0; j<m_message_counters[i].size(); j++) {
+ m_message_counters[i][j] = 0;
+ }
+ }
+}
+
void Throttle::printConfig(ostream& out) const
{
  // Intentionally empty: the throttle currently reports no per-instance
  // configuration.
}
+
+double Throttle::getUtilization() const
+{
+ return (100.0 * double(m_links_utilized)) / (double(g_eventQueue_ptr->getTime()-m_ruby_start));
+}
+
// One-line summary: owning switch id, node id, and configured bandwidth.
void Throttle::print(ostream& out) const
{
  out << "[Throttle: " << m_sID << " " << m_node << " bw: " << getLinkBandwidth() << "]";
}
+
+// Helper function
+
+static
+int network_message_to_size(NetworkMessage* net_msg_ptr)
+{
+ assert(net_msg_ptr != NULL);
+
+ // Artificially increase the size of broadcast messages
+ if (BROADCAST_SCALING > 1) {
+ if (net_msg_ptr->getDestination().isBroadcast()) {
+ return (MessageSizeType_to_int(net_msg_ptr->getMessageSize()) * MESSAGE_SIZE_MULTIPLIER * BROADCAST_SCALING);
+ }
+ }
+ return (MessageSizeType_to_int(net_msg_ptr->getMessageSize()) * MESSAGE_SIZE_MULTIPLIER);
+}
diff --git a/src/mem/ruby/network/simple/Throttle.hh b/src/mem/ruby/network/simple/Throttle.hh
new file mode 100644
index 000000000..67cfabcdc
--- /dev/null
+++ b/src/mem/ruby/network/simple/Throttle.hh
@@ -0,0 +1,124 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: The class to implement bandwidth and latency throttle. An
+ * instance of consumer class that can be woke up. It is only used
+ * to control bandwidth at output port of a switch. And the
+ * throttle is added *after* the output port, means the message is
+ * put in the output port of the PerfectSwitch (a
+ * intermediateBuffers) first, then go through the Throttle.
+ *
+ */
+
+#ifndef THROTTLE_H
+#define THROTTLE_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "Consumer.hh"
+#include "NodeID.hh"
+#include "RubyConfig.hh"
+
+class MessageBuffer;
+
+class Throttle : public Consumer {
+public:
+ // Constructors
+ Throttle(int sID, NodeID node, int link_latency, int link_bandwidth_multiplier);
+ Throttle(NodeID node, int link_latency, int link_bandwidth_multiplier);
+
+ // Destructor
+ ~Throttle() {}
+
+ // Public Methods
+ void addLinks(const Vector<MessageBuffer*>& in_vec, const Vector<MessageBuffer*>& out_vec);
+ void wakeup();
+ bool broadcastBandwidthAvailable(int rand) const;
+
+ void printStats(ostream& out) const;
+ void clearStats();
+ void printConfig(ostream& out) const;
+ double getUtilization() const; // The average utilization (a percent) since last clearStats()
+ int getLinkBandwidth() const { return g_endpoint_bandwidth * m_link_bandwidth_multiplier; }
+ int getLatency() const { return m_link_latency; }
+
+ const Vector<Vector<int> >& getCounters() const { return m_message_counters; }
+
+ void clear();
+
+ void print(ostream& out) const;
+
+private:
+ // Private Methods
+ void init(NodeID node, int link_latency, int link_bandwidth_multiplier);
+ void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr);
+ void linkUtilized(double ratio) { m_links_utilized += ratio; }
+
+ // Private copy constructor and assignment operator
+ Throttle(const Throttle& obj);
+ Throttle& operator=(const Throttle& obj);
+
+ // Data Members (m_ prefix)
+ Vector<MessageBuffer*> m_in;
+ Vector<MessageBuffer*> m_out;
+ Vector<Vector<int> > m_message_counters;
+ int m_vnets;
+ Vector<int> m_units_remaining;
+ int m_sID;
+ NodeID m_node;
+ int m_bash_counter;
+ int m_bandwidth_since_sample;
+ Time m_last_bandwidth_sample;
+ int m_link_bandwidth_multiplier;
+ int m_link_latency;
+ int m_wakeups_wo_switch;
+
+ // For tracking utilization
+ Time m_ruby_start;
+ double m_links_utilized;
+};
+
// Output operator declaration
ostream& operator<<(ostream& out, const Throttle& obj);

// ******************* Definitions *******************

// Output operator definition: delegates to Throttle::print() and flushes.
extern inline
ostream& operator<<(ostream& out, const Throttle& obj)
{
  obj.print(out);
  out << flush;
  return out;
}
+
+#endif //THROTTLE_H
diff --git a/src/mem/ruby/network/simple/Topology.cc b/src/mem/ruby/network/simple/Topology.cc
new file mode 100644
index 000000000..db886052f
--- /dev/null
+++ b/src/mem/ruby/network/simple/Topology.cc
@@ -0,0 +1,801 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Topology.C
+ *
+ * Description: See Topology.h
+ *
+ * $Id$
+ *
+ * */
+
+#include "Topology.hh"
+#include "NetDest.hh"
+#include "Network.hh"
+#include "TopologyType.hh"
+#include "RubyConfig.hh"
+#include "util.hh"
+#include "MachineType.hh"
+#include "Protocol.hh"
+#include <string>
+
static const int INFINITE_LATENCY = 10000; // Yes, this is a big hack
static const int DEFAULT_BW_MULTIPLIER = 1; // Just to be consistent with above :)

// Note: In this file, we use the first 2*m_nodes SwitchIDs to
// represent the input and output endpoint links.  These really are
// not 'switches', as they will not have a Switch object allocated for
// them. The first m_nodes SwitchIDs are the links into the network,
// the second m_nodes set of SwitchIDs represent the output queues
// of the network.

// Helper functions based on chapter 29 of Cormen et al.
static Matrix extend_shortest_path(const Matrix& current_dist, Matrix& latencies, Matrix& inter_switches);
static Matrix shortest_path(const Matrix& weights, Matrix& latencies, Matrix& inter_switches);
static bool link_is_shortest_path_to_node(SwitchID src, SwitchID next, SwitchID final, const Matrix& weights, const Matrix& dist);
static NetDest shortest_path_to_node(SwitchID src, SwitchID next, const Matrix& weights, const Matrix& dist);
+
+
+Topology::Topology(Network* network_ptr, int number_of_nodes)
+{
+ m_network_ptr = network_ptr;
+ m_nodes = number_of_nodes;
+ m_number_of_switches = 0;
+ init();
+}
+
+void Topology::init()
+{
+ if (m_nodes == 1) {
+ SwitchID id = newSwitchID();
+ addLink(0, id, NETWORK_LINK_LATENCY);
+ addLink(id, 1, NETWORK_LINK_LATENCY);
+ return;
+ }
+
+ // topology-specific set-up
+ TopologyType topology = string_to_TopologyType(g_NETWORK_TOPOLOGY);
+ switch (topology) {
+ case TopologyType_TORUS_2D:
+ make2DTorus();
+ break;
+ case TopologyType_HIERARCHICAL_SWITCH:
+ makeHierarchicalSwitch(FAN_OUT_DEGREE);
+ break;
+ case TopologyType_CROSSBAR:
+ makeHierarchicalSwitch(1024);
+ break;
+ case TopologyType_PT_TO_PT:
+ makePtToPt();
+ break;
+ case TopologyType_FILE_SPECIFIED:
+ makeFileSpecified();
+ break;
+ default:
+ ERROR_MSG("Unexpected typology type")
+ }
+
+ // initialize component latencies record
+ m_component_latencies.setSize(0);
+ m_component_inter_switches.setSize(0);
+}
+
+void Topology::makeSwitchesPerChip(Vector< Vector < SwitchID > > &nodePairs, Vector<int> &latencies, Vector<int> &bw_multis, int numberOfChipSwitches)
+{
+
+ Vector < SwitchID > nodes; // temporary buffer
+ nodes.setSize(2);
+
+ Vector<bool> endpointConnectionExist; // used to ensure all endpoints are connected to the network
+ endpointConnectionExist.setSize(m_nodes);
+ // initialize endpoint check vector
+ for (int k = 0; k < endpointConnectionExist.size(); k++) {
+ endpointConnectionExist[k] = false;
+ }
+
+ Vector<int> componentCount;
+ componentCount.setSize(MachineType_NUM);
+ for (MachineType mType = MachineType_FIRST; mType < MachineType_NUM; ++mType) {
+ componentCount[mType] = 0;
+ }
+
+ // components to/from network links
+ for (int chip = 0; chip < RubyConfig::numberOfChips(); chip++) {
+ for (MachineType mType = MachineType_FIRST; mType < MachineType_NUM; ++mType) {
+ for (int component = 0; component < MachineType_chip_count(mType, chip); component++) {
+
+ int latency = -1;
+ int bw_multiplier = -1; // internal link bw multiplier of the global bandwidth
+ if (mType != MachineType_Directory) {
+ latency = ON_CHIP_LINK_LATENCY; // internal link latency
+ bw_multiplier = 10; // internal link bw multiplier of the global bandwidth
+ } else {
+ latency = NETWORK_LINK_LATENCY; // local memory latency
+ bw_multiplier = 1; // local memory link bw multiplier of the global bandwidth
+ }
+ nodes[0] = MachineType_base_number(mType)+componentCount[mType];
+ nodes[1] = chip+m_nodes*2; // this is the chip's internal switch id #
+
+ // insert link
+ nodePairs.insertAtBottom(nodes);
+ latencies.insertAtBottom(latency);
+ //bw_multis.insertAtBottom(bw_multiplier);
+ bw_multis.insertAtBottom(componentCount[mType]+MachineType_base_number((MachineType)mType));
+
+ // opposite direction link
+ Vector < SwitchID > otherDirectionNodes;
+ otherDirectionNodes.setSize(2);
+ otherDirectionNodes[0] = nodes[1];
+ otherDirectionNodes[1] = nodes[0]+m_nodes;
+ nodePairs.insertAtBottom(otherDirectionNodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+
+ assert(!endpointConnectionExist[nodes[0]]);
+ endpointConnectionExist[nodes[0]] = true;
+ componentCount[mType]++;
+ }
+ }
+ }
+
+ // make sure all enpoints are connected in the soon to be created network
+ for (int k = 0; k < endpointConnectionExist.size(); k++) {
+ if (endpointConnectionExist[k] == false) {
+ cerr << "Error: Unconnected Endpoint: " << k << endl;
+ exit(1);
+ }
+ }
+
+ // secondary check to ensure we saw the correct machine counts
+ for (MachineType mType = MachineType_FIRST; mType < MachineType_NUM; ++mType) {
+ assert(componentCount[mType] == MachineType_base_count((MachineType)mType));
+ }
+
+}
+
+// 2D torus topology
+//
+// Build the network as a 2D torus of inter-chip switches: one torus
+// switch per machine-node grouping, each wired to its four neighbors
+// with wraparound in both dimensions.  Per-chip endpoint-to-switch
+// links are created first by makeSwitchesPerChip(); this routine then
+// adds the inter-switch torus links.  lengthOfSide is the integer
+// square root of the switch count, so the count is assumed to be a
+// perfect square.
+void Topology::make2DTorus()
+{
+  Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
+  Vector<int> latencies; // link latencies for each link extracted
+  Vector<int> bw_multis; // bw multipliers for each link extracted
+
+  Vector < SwitchID > nodes; // temporary buffer
+  nodes.setSize(2);
+
+  // number of inter-chip switches
+  int numberOfTorusSwitches = m_nodes/MachineType_base_level(MachineType_NUM);
+  // one switch per machine node grouping
+  Vector<SwitchID> torusSwitches;
+  for(int i=0; i<numberOfTorusSwitches; i++){
+    SwitchID new_switch = newSwitchID(); // torus switch ids start at 2*m_nodes
+    torusSwitches.insertAtBottom(new_switch);
+  }
+
+  makeSwitchesPerChip(nodePairs, latencies, bw_multis, numberOfTorusSwitches);
+
+  int lengthOfSide = (int)sqrt((double)numberOfTorusSwitches);
+
+  // Now connect the inter-chip torus links
+
+  int latency = NETWORK_LINK_LATENCY; // external link latency
+  int bw_multiplier = 1; // external link bw multiplier of the global bandwidth
+
+  for(int i=0; i<numberOfTorusSwitches; i++){
+    nodes[0] = torusSwitches[i]; // current switch
+
+    // left: wrap to the end of the row when on the left edge
+    if(nodes[0]%lengthOfSide == 0){ // determine left neighbor
+      nodes[1] = nodes[0] - 1 + lengthOfSide;
+    } else {
+      nodes[1] = nodes[0] - 1;
+    }
+    nodePairs.insertAtBottom(nodes);
+    latencies.insertAtBottom(latency);
+    bw_multis.insertAtBottom(bw_multiplier);
+
+    // right: wrap to the start of the row when on the right edge
+    if((nodes[0] + 1)%lengthOfSide == 0){ // determine right neighbor
+      nodes[1] = nodes[0] + 1 - lengthOfSide;
+    } else {
+      nodes[1] = nodes[0] + 1;
+    }
+    nodePairs.insertAtBottom(nodes);
+    latencies.insertAtBottom(latency);
+    bw_multis.insertAtBottom(bw_multiplier);
+
+    // top: torus switch ids start at 2*m_nodes, so an id below that
+    // after subtracting a row means we are in the top row and must wrap
+    if(nodes[0] - lengthOfSide < 2*m_nodes){ // determine if node is on the top
+      nodes[1] = nodes[0] - lengthOfSide + (lengthOfSide*lengthOfSide);
+    } else {
+      nodes[1] = nodes[0] - lengthOfSide;
+    }
+    nodePairs.insertAtBottom(nodes);
+    latencies.insertAtBottom(latency);
+    bw_multis.insertAtBottom(bw_multiplier);
+
+    // bottom: wrap to the top row when past the last torus switch id
+    if(nodes[0] + lengthOfSide >= 2*m_nodes+numberOfTorusSwitches){ // determine if node is on the bottom
+      // sorin: bad bug if this is a > instead of a >=
+      nodes[1] = nodes[0] + lengthOfSide - (lengthOfSide*lengthOfSide);
+    } else {
+      nodes[1] = nodes[0] + lengthOfSide;
+    }
+    nodePairs.insertAtBottom(nodes);
+    latencies.insertAtBottom(latency);
+    bw_multis.insertAtBottom(bw_multiplier);
+
+  }
+
+  // add links
+  ASSERT(nodePairs.size() == latencies.size() && latencies.size() == bw_multis.size())
+  for (int k = 0; k < nodePairs.size(); k++) {
+    ASSERT(nodePairs[k].size() == 2);
+    addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k]);
+  }
+
+}
+
+// hierarchical switch topology
+//
+// Builds two switch trees with fan_out_degree children per switch: an
+// "up" tree from the m_nodes input ports to a single root switch, and a
+// "down" tree from that root back out to the m_nodes output ports
+// (which are numbered m_nodes..2*m_nodes-1).
+void Topology::makeHierarchicalSwitch(int fan_out_degree)
+{
+  // Make a row of switches with only one input. This extra row makes
+  // sure the links out of the nodes have latency and limited
+  // bandwidth.
+
+  // number of inter-chip switches, i.e. the last row of switches
+  Vector<SwitchID> last_level;
+  for (int i=0; i<m_nodes; i++) {
+    SwitchID new_switch = newSwitchID(); // internal switch id #
+    addLink(i, new_switch, NETWORK_LINK_LATENCY);
+    last_level.insertAtBottom(new_switch);
+  }
+
+  // Create Hierarchical Switches
+
+  // start from the bottom level and work up to root
+  Vector<SwitchID> next_level;
+  while(last_level.size() > 1) {
+    for (int i=0; i<last_level.size(); i++) {
+      // allocate a new parent switch for every fan_out_degree children
+      if ((i % fan_out_degree) == 0) {
+        next_level.insertAtBottom(newSwitchID());
+      }
+      // Add this link to the last switch we created
+      addLink(last_level[i], next_level[next_level.size()-1], NETWORK_LINK_LATENCY);
+    }
+
+    // Make the current level the last level to get ready for next
+    // iteration
+    last_level = next_level;
+    next_level.clear();
+  }
+
+  SwitchID root_switch = last_level[0];
+
+  // the "down" side starts from the output ports, ids m_nodes..2*m_nodes-1
+  Vector<SwitchID> out_level;
+  for (int i=0; i<m_nodes; i++) {
+    out_level.insertAtBottom(m_nodes+i);
+  }
+
+  // Build the down network from the endpoints to the root
+  while(out_level.size() != 1) {
+
+    // A level of switches
+    for (int i=0; i<out_level.size(); i++) {
+      if ((i % fan_out_degree) == 0) {
+        // reuse the root switch for the final (smallest) level
+        if (out_level.size() > fan_out_degree) {
+          next_level.insertAtBottom(newSwitchID());
+        } else {
+          next_level.insertAtBottom(root_switch);
+        }
+      }
+      // Add this link to the last switch we created
+      addLink(next_level[next_level.size()-1], out_level[i], NETWORK_LINK_LATENCY);
+    }
+
+    // Make the current level the last level to get ready for next
+    // iteration
+    out_level = next_level;
+    next_level.clear();
+  }
+}
+
+// one internal node per chip, point to point links between chips
+//
+// Each chip gets an intra-chip switch (id chip+2*m_nodes) and an
+// inter-chip switch (id chip+2*m_nodes+numberOfChips()); the inter-chip
+// switches are then fully connected to one another.
+void Topology::makePtToPt()
+{
+  Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
+  Vector<int> latencies; // link latencies for each link extracted
+  Vector<int> bw_multis; // bw multipliers for each link extracted
+
+  Vector < SwitchID > nodes;
+  nodes.setSize(2);
+
+  // number of inter-chip switches
+  int numberOfChipSwitches = m_nodes/MachineType_base_level(MachineType_NUM);
+  // two switches per machine node grouping
+  // one intra-chip switch and one inter-chip switch per chip
+  for(int i=0; i<numberOfChipSwitches; i++){
+    // return values are discarded on purpose: these calls just reserve
+    // two consecutive switch ids per chip via m_number_of_switches
+    SwitchID new_switch = newSwitchID();
+    new_switch = newSwitchID();
+  }
+
+  makeSwitchesPerChip(nodePairs, latencies, bw_multis, numberOfChipSwitches);
+
+  // connect intra-chip switch to inter-chip switch
+  for (int chip = 0; chip < RubyConfig::numberOfChips(); chip++) {
+
+    int latency = ON_CHIP_LINK_LATENCY; // internal link latency
+    int bw_multiplier = 10; // internal (on-chip) link bw multiplier of the global bandwidth
+
+    nodes[0] = chip+m_nodes*2;
+    nodes[1] = chip+m_nodes*2+RubyConfig::numberOfChips();
+
+    // insert link
+    nodePairs.insertAtBottom(nodes);
+    latencies.insertAtBottom(latency);
+    bw_multis.insertAtBottom(bw_multiplier);
+
+    // opposite direction link
+    Vector < SwitchID > otherDirectionNodes;
+    otherDirectionNodes.setSize(2);
+    otherDirectionNodes[0] = nodes[1];
+    otherDirectionNodes[1] = nodes[0];
+    nodePairs.insertAtBottom(otherDirectionNodes);
+    latencies.insertAtBottom(latency);
+    bw_multis.insertAtBottom(bw_multiplier);
+  }
+
+  // point-to-point network between chips
+  for (int chip = 0; chip < RubyConfig::numberOfChips(); chip++) {
+    for (int other_chip = chip+1; other_chip < RubyConfig::numberOfChips(); other_chip++) {
+
+      int latency = NETWORK_LINK_LATENCY; // external link latency
+      int bw_multiplier = 1; // external link bw multiplier of the global bandwidth
+
+      nodes[0] = chip+m_nodes*2+RubyConfig::numberOfChips();
+      nodes[1] = other_chip+m_nodes*2+RubyConfig::numberOfChips();
+
+      // insert link
+      nodePairs.insertAtBottom(nodes);
+      latencies.insertAtBottom(latency);
+      bw_multis.insertAtBottom(bw_multiplier);
+
+      // opposite direction link
+      Vector < SwitchID > otherDirectionNodes;
+      otherDirectionNodes.setSize(2);
+      otherDirectionNodes[0] = nodes[1];
+      otherDirectionNodes[1] = nodes[0];
+      nodePairs.insertAtBottom(otherDirectionNodes);
+      latencies.insertAtBottom(latency);
+      bw_multis.insertAtBottom(bw_multiplier);
+    }
+  }
+
+  // add links
+  ASSERT(nodePairs.size() == latencies.size() && latencies.size() == bw_multis.size())
+  for (int k = 0; k < nodePairs.size(); k++) {
+    ASSERT(nodePairs[k].size() == 2);
+    addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k]);
+  }
+}
+
+// make a network as described by the networkFile
+//
+// The file name is derived from the cache design and machine
+// configuration.  Each line describes one link as space-separated
+// "label:value" tokens (ext_node, int_node, link_latency, plus optional
+// bw_multiplier / link_weight and sanity-check fields such as
+// processors / procs_per_chip).  For every valid line both directions
+// of the link are recorded; the reverse direction of an endpoint link
+// targets the endpoint's output-port id (+m_nodes).
+void Topology::makeFileSpecified()
+{
+
+  Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
+  Vector<int> latencies; // link latencies for each link extracted
+  Vector<int> bw_multis; // bw multipliers for each link extracted
+  Vector<int> weights; // link weights used to enforce e-cube deadlock free routing
+  Vector< SwitchID > int_network_switches; // internal switches extracted from the file
+  Vector<bool> endpointConnectionExist; // used to ensure all endpoints are connected to the network
+
+  endpointConnectionExist.setSize(m_nodes);
+
+  // initialize endpoint check vector
+  for (int k = 0; k < endpointConnectionExist.size(); k++) {
+    endpointConnectionExist[k] = false;
+  }
+
+  string filename = "network/simple/Network_Files/";
+  filename = filename+g_CACHE_DESIGN
+    +"_Procs-"+int_to_string(RubyConfig::numberOfProcessors())
+    +"_ProcsPerChip-"+int_to_string(RubyConfig::numberOfProcsPerChip())
+    +"_L2Banks-"+int_to_string(RubyConfig::numberOfL2Cache())
+    +"_Memories-"+int_to_string(RubyConfig::numberOfMemories())
+    +".txt";
+
+  if (g_SIMICS) {
+    filename = "../../../ruby/"+filename;
+  }
+  ifstream networkFile( filename.c_str() , ios::in);
+  if (!networkFile.is_open()) {
+    cerr << "Error: Could not open network file: " << filename << endl;
+    cerr << "Probably no network file exists for " << RubyConfig::numberOfProcessors()
+         << " processors and " << RubyConfig::numberOfProcsPerChip() << " procs per chip " << endl;
+    exit(1);
+  }
+
+  string line = "";
+
+  // NOTE(review): eof() is tested before the getline below, so the final
+  // iteration may parse an empty line; that case is tolerated because a
+  // line with no recognized labels leaves varsFound == 0.
+  while (!networkFile.eof()) {
+
+    Vector < SwitchID > nodes;
+    nodes.setSize(2);
+    int latency = -1; // null latency
+    int weight = -1; // null weight
+    int bw_multiplier = DEFAULT_BW_MULTIPLIER; // default multiplier in case the network file doesn't define it
+    int i = 0; // node pair index
+    int varsFound = 0; // number of varsFound on the line
+    int internalNodes = 0; // used to determine if the link is between 2 internal nodes
+    std::getline(networkFile, line, '\n');
+    // string_split consumes tokens from its first argument
+    string varStr = string_split(line, ' ');
+
+    // parse the current line in the file
+    while (varStr != "") {
+      string label = string_split(varStr, ':');
+
+      // valid node labels
+      if (label == "ext_node" || label == "int_node") {
+        ASSERT(i < 2); // one link between 2 switches per line
+        varsFound++;
+        bool isNewIntSwitch = true;
+        if (label == "ext_node") { // input link to node
+          MachineType machine = string_to_MachineType(string_split(varStr, ':'));
+          string nodeStr = string_split(varStr, ':');
+          if (string_split(varStr, ':') == "bank") {
+            nodes[i] = MachineType_base_number(machine)
+              + atoi(nodeStr.c_str())
+              + atoi((string_split(varStr, ':')).c_str())*RubyConfig::numberOfChips();
+          } else {
+            nodes[i] = MachineType_base_number(machine)
+              + atoi(nodeStr.c_str());
+          }
+          // in nodes should be numbered 0 to m_nodes-1
+          ASSERT(nodes[i] >= 0 && nodes[i] < m_nodes);
+          isNewIntSwitch = false;
+          endpointConnectionExist[nodes[i]] = true;
+        }
+        if (label == "int_node") { // interior node
+          nodes[i] = atoi((string_split(varStr, ':')).c_str())+m_nodes*2;
+          // in nodes should be numbered >= m_nodes*2
+          ASSERT(nodes[i] >= m_nodes*2);
+          // count this switch only the first time it appears in the file
+          for (int k = 0; k < int_network_switches.size(); k++) {
+            if (int_network_switches[k] == nodes[i]) {
+              isNewIntSwitch = false;
+            }
+          }
+          if (isNewIntSwitch) { // if internal switch
+            m_number_of_switches++;
+            int_network_switches.insertAtBottom(nodes[i]);
+          }
+          internalNodes++;
+        }
+        i++;
+      } else if (label == "link_latency") {
+        latency = atoi((string_split(varStr, ':')).c_str());
+        varsFound++;
+      } else if (label == "bw_multiplier") { // not necessary, defaults to DEFAULT_BW_MULTIPLIER
+        bw_multiplier = atoi((string_split(varStr, ':')).c_str());
+      } else if (label == "link_weight") { // not necessary, defaults to link_latency
+        weight = atoi((string_split(varStr, ':')).c_str());
+      } else if (label == "processors") {
+        ASSERT(atoi((string_split(varStr, ':')).c_str()) == RubyConfig::numberOfProcessors());
+      } else if (label == "bw_unit") {
+        ASSERT(atoi((string_split(varStr, ':')).c_str()) == g_endpoint_bandwidth);
+      } else if (label == "procs_per_chip") {
+        ASSERT(atoi((string_split(varStr, ':')).c_str()) == RubyConfig::numberOfProcsPerChip());
+      } else if (label == "L2banks") {
+        ASSERT(atoi((string_split(varStr, ':')).c_str()) == RubyConfig::numberOfL2Cache());
+      } else if (label == "memories") {
+        ASSERT(atoi((string_split(varStr, ':')).c_str()) == RubyConfig::numberOfMemories());
+      } else {
+        cerr << "Error: Unexpected Identifier: " << label << endl;
+        exit(1);
+      }
+      varStr = string_split(line, ' ');
+    }
+    if (varsFound == 3) { // all three necessary link variables were found so add the link
+      nodePairs.insertAtBottom(nodes);
+      latencies.insertAtBottom(latency);
+      if (weight != -1) {
+        weights.insertAtBottom(weight);
+      } else {
+        weights.insertAtBottom(latency); // weight defaults to the latency
+      }
+      bw_multis.insertAtBottom(bw_multiplier);
+      Vector < SwitchID > otherDirectionNodes;
+      otherDirectionNodes.setSize(2);
+      otherDirectionNodes[0] = nodes[1];
+      if (internalNodes == 2) { // this is an internal link
+        otherDirectionNodes[1] = nodes[0];
+      } else {
+        // reverse direction of an endpoint link targets the output port
+        otherDirectionNodes[1] = nodes[0]+m_nodes;
+      }
+      nodePairs.insertAtBottom(otherDirectionNodes);
+      latencies.insertAtBottom(latency);
+      if (weight != -1) {
+        weights.insertAtBottom(weight);
+      } else {
+        weights.insertAtBottom(latency);
+      }
+      bw_multis.insertAtBottom(bw_multiplier);
+    } else {
+      if (varsFound != 0) { // if this is not a valid link, then no vars should have been found
+        cerr << "Error in line: " << line << endl;
+        exit(1);
+      }
+    }
+  } // end of file
+
+  // makes sure all endpoints are connected in the soon to be created network
+  for (int k = 0; k < endpointConnectionExist.size(); k++) {
+    if (endpointConnectionExist[k] == false) {
+      cerr << "Error: Unconnected Endpoint: " << k << endl;
+      exit(1);
+    }
+  }
+
+  ASSERT(nodePairs.size() == latencies.size() && latencies.size() == bw_multis.size() && latencies.size() == weights.size())
+  for (int k = 0; k < nodePairs.size(); k++) {
+    ASSERT(nodePairs[k].size() == 2);
+    addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k], weights[k]);
+  }
+
+  networkFile.close();
+}
+
+// Compute all-pairs shortest paths over the links recorded by addLink()
+// and create the actual network links.  For every directly-connected
+// (i,j) pair, the link's routing-table entry is the set of machines
+// best reached through that link.
+void Topology::createLinks(bool isReconfiguration)
+{
+  // Find maximum switchID
+
+  SwitchID max_switch_id = 0;
+  for (int i=0; i<m_links_src_vector.size(); i++) {
+    max_switch_id = max(max_switch_id, m_links_src_vector[i]);
+    max_switch_id = max(max_switch_id, m_links_dest_vector[i]);
+  }
+
+  // Initialize weight vector
+  Matrix topology_weights;
+  Matrix topology_latency;
+  Matrix topology_bw_multis;
+  int num_switches = max_switch_id+1;
+  topology_weights.setSize(num_switches);
+  topology_latency.setSize(num_switches);
+  topology_bw_multis.setSize(num_switches);
+  m_component_latencies.setSize(num_switches); // FIXME setting the size of a member variable here is a HACK!
+  m_component_inter_switches.setSize(num_switches); // FIXME setting the size of a member variable here is a HACK!
+  for(int i=0; i<topology_weights.size(); i++) {
+    topology_weights[i].setSize(num_switches);
+    topology_latency[i].setSize(num_switches);
+    topology_bw_multis[i].setSize(num_switches);
+    m_component_latencies[i].setSize(num_switches);
+    m_component_inter_switches[i].setSize(num_switches); // FIXME setting the size of a member variable here is a HACK!
+    for(int j=0; j<topology_weights[i].size(); j++) {
+      topology_weights[i][j] = INFINITE_LATENCY;
+      topology_latency[i][j] = -1; // initialize to an invalid value
+      topology_bw_multis[i][j] = -1; // initialize to an invalid value
+      m_component_latencies[i][j] = -1; // initialize to an invalid value
+      m_component_inter_switches[i][j] = 0; // initially assume direct connections / no intermediate switches between components
+    }
+  }
+
+  // Set identity weights to zero
+  for(int i=0; i<topology_weights.size(); i++) {
+    topology_weights[i][i] = 0;
+  }
+
+  // Fill in the topology weights and bandwidth multipliers
+  for (int i=0; i<m_links_src_vector.size(); i++) {
+    topology_weights[m_links_src_vector[i]][m_links_dest_vector[i]] = m_links_weight_vector[i];
+    topology_latency[m_links_src_vector[i]][m_links_dest_vector[i]] = m_links_latency_vector[i];
+    m_component_latencies[m_links_src_vector[i]][m_links_dest_vector[i]] = m_links_latency_vector[i]; // initialize to latency vector
+    topology_bw_multis[m_links_src_vector[i]][m_links_dest_vector[i]] = m_bw_multiplier_vector[i];
+  }
+
+  // Walk topology and hookup the links
+  Matrix dist = shortest_path(topology_weights, m_component_latencies, m_component_inter_switches);
+  for(int i=0; i<topology_weights.size(); i++) {
+    for(int j=0; j<topology_weights[i].size(); j++) {
+      int weight = topology_weights[i][j];
+      int bw_multiplier = topology_bw_multis[i][j];
+      int latency = topology_latency[i][j];
+      // weight > 0 skips the zero diagonal; != INFINITE_LATENCY skips
+      // pairs with no direct link
+      if (weight > 0 && weight != INFINITE_LATENCY) {
+        NetDest destination_set = shortest_path_to_node(i, j, topology_weights, dist);
+        assert(latency != -1);
+        makeLink(i, j, destination_set, latency, weight, bw_multiplier, isReconfiguration);
+      }
+    }
+  }
+}
+
+// Allocate the next free switch id.  Switch ids follow the 2*m_nodes
+// endpoint ids (m_nodes input ports + m_nodes output ports), so the
+// first switch created gets id 2*m_nodes.
+SwitchID Topology::newSwitchID()
+{
+  m_number_of_switches++;
+  return m_number_of_switches-1+m_nodes+m_nodes;
+}
+
+// Record a link using the default bandwidth multiplier and a weight
+// equal to the latency.
+void Topology::addLink(SwitchID src, SwitchID dest, int link_latency)
+{
+  addLink(src, dest, link_latency, DEFAULT_BW_MULTIPLIER, link_latency);
+}
+
+// Record a link with an explicit bandwidth multiplier; the routing
+// weight defaults to the latency.
+void Topology::addLink(SwitchID src, SwitchID dest, int link_latency, int bw_multiplier)
+{
+  addLink(src, dest, link_latency, bw_multiplier, link_latency);
+}
+
+// Record one directed link in the parallel link vectors; the actual
+// network link is created later by createLinks().
+void Topology::addLink(SwitchID src, SwitchID dest, int link_latency, int bw_multiplier, int link_weight)
+{
+  // ids may refer to endpoints [0, 2*m_nodes) or allocated switches
+  ASSERT(src <= m_number_of_switches+m_nodes+m_nodes);
+  ASSERT(dest <= m_number_of_switches+m_nodes+m_nodes);
+  m_links_src_vector.insertAtBottom(src);
+  m_links_dest_vector.insertAtBottom(dest);
+  m_links_latency_vector.insertAtBottom(link_latency);
+  m_links_weight_vector.insertAtBottom(link_weight);
+  m_bw_multiplier_vector.insertAtBottom(bw_multiplier);
+}
+
+// Hand one computed link to the Network object.  Id ranges: [0,m_nodes)
+// are input ports, [m_nodes,2*m_nodes) are output ports, and ids
+// >= 2*m_nodes are switches; switch ids are rebased to start at zero
+// (the -(2*m_nodes) offsets) before being passed to the network.
+void Topology::makeLink(SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+{
+  // Make sure we're not trying to connect two end-point nodes directly together
+  assert((src >= 2*m_nodes) || (dest >= 2*m_nodes));
+
+  if (src < m_nodes) {
+    // input port -> switch
+    m_network_ptr->makeInLink(src, dest-(2*m_nodes), routing_table_entry, link_latency, bw_multiplier, isReconfiguration);
+  } else if (dest < 2*m_nodes) {
+    // switch -> output port
+    assert(dest >= m_nodes);
+    NodeID node = dest-m_nodes;
+    m_network_ptr->makeOutLink(src-(2*m_nodes), node, routing_table_entry, link_latency, link_weight, bw_multiplier, isReconfiguration);
+  } else {
+    // switch -> switch
+    assert((src >= 2*m_nodes) && (dest >= 2*m_nodes));
+    m_network_ptr->makeInternalLink(src-(2*m_nodes), dest-(2*m_nodes), routing_table_entry, link_latency, link_weight, bw_multiplier, isReconfiguration);
+  }
+}
+
+// Print the network-only latency between every ordered pair of
+// machines.  Must run after createLinks() has filled in
+// m_component_latencies (hence the size assertion).
+void Topology::printConfig(ostream& out) const
+{
+  assert(m_component_latencies.size() > 0);
+
+  out << "--- Begin Topology Print ---" << endl;
+  out << endl;
+  out << "Topology print ONLY indicates the _NETWORK_ latency between two machines" << endl;
+  out << "It does NOT include the latency within the machines" << endl;
+  out << endl;
+  for (int m=0; m<MachineType_NUM; m++) {
+    for (int i=0; i<MachineType_base_count((MachineType)m); i++) {
+      MachineID cur_mach = {(MachineType)m, i};
+      out << cur_mach << " Network Latencies" << endl;
+      for (int n=0; n<MachineType_NUM; n++) {
+        for (int j=0; j<MachineType_base_count((MachineType)n); j++) {
+          MachineID dest_mach = {(MachineType)n, j};
+          if (cur_mach != dest_mach) {
+            // destination indices are offset by
+            // MachineType_base_number(MachineType_NUM) because the
+            // destination side uses the output-port ("destination") ids
+            int link_latency = m_component_latencies[MachineType_base_number((MachineType)m)+i][MachineType_base_number(MachineType_NUM)+MachineType_base_number((MachineType)n)+j];
+            int intermediate_switches = m_component_inter_switches[MachineType_base_number((MachineType)m)+i][MachineType_base_number(MachineType_NUM)+MachineType_base_number((MachineType)n)+j];
+            out << " " << cur_mach << " -> " << dest_mach << " net_lat: "
+                << link_latency+intermediate_switches << endl; // NOTE switches are assumed to have single cycle latency
+          }
+        }
+      }
+      out << endl;
+    }
+  }
+
+  out << "--- End Topology Print ---" << endl;
+}
+
+/**************************************************************************/
+
+// The following all-pairs shortest path algorithm is based on the
+// discussion from Cormen et al., Chapter 26.1.
+
+// Relax the all-pairs distance matrix in place until no entry changes
+// (the EXTEND-SHORTEST-PATHS formulation from Cormen et al. 26.1, see
+// the comment above).  Side effects: latencies[i][j] accumulates the
+// total link latency along the chosen path and inter_switches[i][j]
+// counts the intermediate switches on it.
+static void extend_shortest_path(Matrix& current_dist, Matrix& latencies, Matrix& inter_switches)
+{
+  bool change = true;
+  int nodes = current_dist.size();
+
+  while (change) {
+    change = false;
+    for (int i=0; i<nodes; i++) {
+      for (int j=0; j<nodes; j++) {
+        int minimum = current_dist[i][j];
+        int previous_minimum = minimum;
+        int intermediate_switch = -1;
+        // try every k as an intermediate hop; remember the last k that
+        // improved the running minimum
+        for (int k=0; k<nodes; k++) {
+          minimum = min(minimum, current_dist[i][k] + current_dist[k][j]);
+          if (previous_minimum != minimum) {
+            intermediate_switch = k;
+            inter_switches[i][j] = inter_switches[i][k] + inter_switches[k][j] + 1;
+          }
+          previous_minimum = minimum;
+        }
+        if (current_dist[i][j] != minimum) {
+          change = true;
+          current_dist[i][j] = minimum;
+          assert(intermediate_switch >= 0);
+          assert(intermediate_switch < latencies[i].size());
+          latencies[i][j] = latencies[i][intermediate_switch] + latencies[intermediate_switch][j];
+        }
+      }
+    }
+  }
+}
+
+// Return the all-pairs shortest-path distance matrix for the given
+// weights; latencies and inter_switches are filled in as side effects
+// by extend_shortest_path().
+static Matrix shortest_path(const Matrix& weights, Matrix& latencies, Matrix& inter_switches)
+{
+  Matrix dist = weights;
+  extend_shortest_path(dist, latencies, inter_switches);
+  return dist;
+}
+
+// True iff taking the direct (src -> next) link lies on some shortest
+// path from src to final.
+static bool link_is_shortest_path_to_node(SwitchID src, SwitchID next, SwitchID final,
+                                          const Matrix& weights, const Matrix& dist)
+{
+  return (weights[src][next] + dist[next][final] == dist[src][final]);
+}
+
+// Build the routing-table entry for the (src -> next) link: the set of
+// all machines whose destination switch is reached on a shortest path
+// through that link.
+static NetDest shortest_path_to_node(SwitchID src, SwitchID next,
+                                     const Matrix& weights, const Matrix& dist)
+{
+  NetDest result;
+  int d = 0;
+  int machines;
+  int max_machines;
+
+  machines = MachineType_NUM;
+  max_machines = MachineType_base_number(MachineType_NUM);
+
+  for (int m=0; m<machines; m++) {
+    for (int i=0; i<MachineType_base_count((MachineType)m); i++) {
+      // we use "d+max_machines" below since the "destination" switches for the machines are numbered
+      // [MachineType_base_number(MachineType_NUM)...2*MachineType_base_number(MachineType_NUM)-1]
+      // for the component network
+      if (link_is_shortest_path_to_node(src, next,
+                                        d+max_machines,
+                                        weights, dist)) {
+        MachineID mach = {(MachineType)m, i};
+        result.add(mach);
+      }
+      d++;
+    }
+  }
+
+  DEBUG_MSG(NETWORK_COMP, MedPrio, "returning shortest path");
+  DEBUG_EXPR(NETWORK_COMP, MedPrio, (src-(2*max_machines)));
+  DEBUG_EXPR(NETWORK_COMP, MedPrio, (next-(2*max_machines)));
+  DEBUG_EXPR(NETWORK_COMP, MedPrio, src);
+  DEBUG_EXPR(NETWORK_COMP, MedPrio, next);
+  DEBUG_EXPR(NETWORK_COMP, MedPrio, result);
+  DEBUG_NEWLINE(NETWORK_COMP, MedPrio);
+
+  return result;
+}
+
diff --git a/src/mem/ruby/network/simple/Topology.hh b/src/mem/ruby/network/simple/Topology.hh
new file mode 100644
index 000000000..bfc503087
--- /dev/null
+++ b/src/mem/ruby/network/simple/Topology.hh
@@ -0,0 +1,126 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Topology.h
+ *
+ * Description: The topology here is configurable; it can be a hierarchical
+ *              (default) tree of switches, a 2D torus, or a 2D torus with
+ *              half the switches killed. Each input port has a
+ *              one-input-one-output switch connected just to control latency
+ *              and bandwidth, since we don't control bandwidth on input ports.
+ * Basically, the class has a vector of nodes and edges. First
+ * 2*m_nodes elements in the node vector are input and output
+ * ports. Edges are represented in two vectors of src and dest
+ * nodes. All edges have latency.
+ *
+ * $Id$
+ *
+ * */
+
+#ifndef TOPOLOGY_H
+#define TOPOLOGY_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "NodeID.hh"
+
+class Network;
+class NetDest;
+
+typedef Vector < Vector <int> > Matrix;
+
+class Topology {
+public:
+  // Constructors
+  Topology(Network* network_ptr, int number_of_nodes);
+
+  // Destructor
+  ~Topology() {}
+
+  // Public Methods
+  int numSwitches() const { return m_number_of_switches; }
+  // Compute all-pairs shortest paths over the recorded links and create
+  // the actual network links (in/out/internal) on m_network_ptr.
+  void createLinks(bool isReconfiguration);
+
+  void printStats(ostream& out) const {}
+  void clearStats() {}
+  // Print the network-only latency between every pair of machines.
+  void printConfig(ostream& out) const;
+  void print(ostream& out) const { out << "[Topology]"; }
+
+private:
+  // Private Methods
+  void init();
+  // Returns the next free switch id (switch ids start at 2*m_nodes).
+  SwitchID newSwitchID();
+  // Record a directed link; omitted arguments default to
+  // bw_multiplier = DEFAULT_BW_MULTIPLIER and link_weight = link_latency.
+  void addLink(SwitchID src, SwitchID dest, int link_latency);
+  void addLink(SwitchID src, SwitchID dest, int link_latency, int bw_multiplier);
+  void addLink(SwitchID src, SwitchID dest, int link_latency, int bw_multiplier, int link_weight);
+  // Hand one link to the Network object, rebasing switch ids to zero.
+  void makeLink(SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int weight, int bw_multiplier, bool isReconfiguration);
+
+  // Topology builders: each populates the link vectors below.
+  void makeHierarchicalSwitch(int fan_out_degree);
+  void make2DTorus();
+  void makePtToPt();
+  void makeFileSpecified();
+
+  void makeSwitchesPerChip(Vector< Vector < SwitchID > > &nodePairs, Vector<int> &latencies, Vector<int> &bw_multis, int numberOfChips);
+
+  string getDesignStr();
+  // Private copy constructor and assignment operator
+  Topology(const Topology& obj);
+  Topology& operator=(const Topology& obj);
+
+  // Data Members (m_ prefix)
+  Network* m_network_ptr; // network being configured (not owned)
+  NodeID m_nodes; // number of endpoint nodes
+  int m_number_of_switches;
+
+  // Parallel vectors: entry i describes one directed link.
+  Vector<SwitchID> m_links_src_vector;
+  Vector<SwitchID> m_links_dest_vector;
+  Vector<int> m_links_latency_vector;
+  Vector<int> m_links_weight_vector;
+  Vector<int> m_bw_multiplier_vector;
+
+  // Filled in by createLinks(); indexed by switch/endpoint id.
+  Matrix m_component_latencies;
+  Matrix m_component_inter_switches;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Topology& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition: delegates to Topology::print() (which
+// emits the fixed "[Topology]" tag) and flushes the stream.
+extern inline
+ostream& operator<<(ostream& out, const Topology& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif
diff --git a/src/mem/ruby/profiler/AccessTraceForAddress.cc b/src/mem/ruby/profiler/AccessTraceForAddress.cc
new file mode 100644
index 000000000..13aa3bc59
--- /dev/null
+++ b/src/mem/ruby/profiler/AccessTraceForAddress.cc
@@ -0,0 +1,126 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "AccessTraceForAddress.hh"
+#include "Histogram.hh"
+
+// Default constructor.
+// NOTE(review): only the histogram pointer is initialized here; the
+// counters (m_total, m_loads, ...) are left uninitialized, unlike the
+// Address constructor below -- confirm callers always use that one
+// before reading counters.
+AccessTraceForAddress::AccessTraceForAddress()
+{
+  m_histogram_ptr = NULL;
+}
+
+// Construct a trace for a specific address with all counters zeroed.
+AccessTraceForAddress::AccessTraceForAddress(const Address& addr)
+{
+  m_addr = addr;
+  m_total = 0;
+  m_loads = 0;
+  m_stores = 0;
+  m_atomics = 0;
+  m_user = 0;
+  m_sharing = 0;
+  m_histogram_ptr = NULL; // allocated lazily by addSample()
+}
+
+AccessTraceForAddress::~AccessTraceForAddress()
+{
+  // free the lazily-allocated histogram, if any
+  if (m_histogram_ptr != NULL) {
+    delete m_histogram_ptr;
+    m_histogram_ptr = NULL;
+  }
+}
+
+// Print either the per-access counters (total; loads/stores/atomics;
+// user vs. non-user; sharing misses; count of distinct processors that
+// touched the address) or, if addSample() converted this trace to a
+// histogram, the histogram instead.
+void AccessTraceForAddress::print(ostream& out) const
+{
+  out << m_addr;
+
+  if (m_histogram_ptr == NULL) {
+    out << " " << m_total;
+    out << " | " << m_loads;
+    out << " " << m_stores;
+    out << " " << m_atomics;
+    out << " | " << m_user;
+    out << " " << m_total-m_user;
+    out << " | " << m_sharing;
+    out << " | " << m_touched_by.count();
+  } else {
+    // histogram mode and counter mode are mutually exclusive
+    assert(m_total == 0);
+    out << " " << (*m_histogram_ptr);
+  }
+}
+
+// Record one access: bump the aggregate and per-type counters and note
+// which processor touched the address.
+void AccessTraceForAddress::update(CacheRequestType type, AccessModeType access_mode, NodeID cpu, bool sharing_miss)
+{
+  m_touched_by.add(cpu);
+  m_total++;
+  if(type == CacheRequestType_ATOMIC) {
+    m_atomics++;
+  } else if(type == CacheRequestType_LD){
+    m_loads++;
+  } else if (type == CacheRequestType_ST){
+    m_stores++;
+  } else {
+    // other request types are counted in m_total only
+    // ERROR_MSG("Trying to add invalid access to trace");
+  }
+
+  if (access_mode == AccessModeType_UserMode) {
+    m_user++;
+  }
+
+  if (sharing_miss) {
+    m_sharing++;
+  }
+}
+
+// Total number of accesses: the plain counter normally, or the
+// histogram's total if this trace was built via addSample().
+int AccessTraceForAddress::getTotal() const
+{
+  if (m_histogram_ptr == NULL) {
+    return m_total;
+  } else {
+    return m_histogram_ptr->getTotal();
+  }
+}
+
+// Add a value to the histogram form of this trace.  Only legal when no
+// per-access updates were recorded (asserts m_total == 0); the
+// histogram is allocated on first use.
+void AccessTraceForAddress::addSample(int value)
+{
+  assert(m_total == 0);
+  if (m_histogram_ptr == NULL) {
+    m_histogram_ptr = new Histogram;
+  }
+  m_histogram_ptr->add(value);
+}
+
+// Comparison helper for ordering traces.
+// NOTE(review): despite the "less_then_eq" name, this returns
+// n1->getTotal() > n2->getTotal(), i.e. it sorts traces by DESCENDING
+// total access count.
+bool node_less_then_eq(const AccessTraceForAddress* n1, const AccessTraceForAddress* n2)
+{
+  return (n1->getTotal() > n2->getTotal());
+}
diff --git a/src/mem/ruby/profiler/AccessTraceForAddress.hh b/src/mem/ruby/profiler/AccessTraceForAddress.hh
new file mode 100644
index 000000000..8ed47c2ee
--- /dev/null
+++ b/src/mem/ruby/profiler/AccessTraceForAddress.hh
@@ -0,0 +1,104 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef ACCESSTRACEFORADDRESS_H
+#define ACCESSTRACEFORADDRESS_H
+
+#include "Global.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "CacheRequestType.hh"
+#include "AccessModeType.hh"
+#include "NodeID.hh"
+#include "Set.hh"
+class Histogram;
+
+// Per-address profiling record.  Operates in one of two mutually
+// exclusive modes: counter mode (update(): per-type access counts,
+// user/sharing tallies, touching-node set) or sample mode
+// (addSample(): values accumulated into a lazily-allocated Histogram).
+class AccessTraceForAddress {
+public:
+ // Constructors
+ AccessTraceForAddress();
+ explicit AccessTraceForAddress(const Address& addr);
+
+ // Destructor
+ ~AccessTraceForAddress();
+
+ // Public Methods
+
+ // Record one counter-mode access (type, mode, touching node,
+ // whether it was a sharing miss).
+ void update(CacheRequestType type, AccessModeType access_mode, NodeID cpu, bool sharing_miss);
+ // Total events: access count in counter mode, histogram total in
+ // sample mode.
+ int getTotal() const;
+ int getSharing() const { return m_sharing; }
+ int getTouchedBy() const { return m_touched_by.count(); }
+ const Address& getAddress() const { return m_addr; }
+ // Record one sample-mode value; invalid after any update() call.
+ void addSample(int value);
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // AccessTraceForAddress(const AccessTraceForAddress& obj);
+ // AccessTraceForAddress& operator=(const AccessTraceForAddress& obj);
+
+ // Data Members (m_ prefix)
+
+ Address m_addr;
+ uint64 m_loads;
+ uint64 m_stores;
+ uint64 m_atomics;
+ uint64 m_total;     // counter-mode accesses; stays 0 in sample mode
+ uint64 m_user;      // subset of m_total made in user mode
+ uint64 m_sharing;   // subset of m_total that were sharing misses
+ Set m_touched_by;   // set of nodes that accessed this address
+ Histogram* m_histogram_ptr; // NULL until first addSample()
+};
+
+bool node_less_then_eq(const AccessTraceForAddress* n1, const AccessTraceForAddress* n2);
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const AccessTraceForAddress& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const AccessTraceForAddress& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //ACCESSTRACEFORADDRESS_H
diff --git a/src/mem/ruby/profiler/AddressProfiler.cc b/src/mem/ruby/profiler/AddressProfiler.cc
new file mode 100644
index 000000000..2d14b458f
--- /dev/null
+++ b/src/mem/ruby/profiler/AddressProfiler.cc
@@ -0,0 +1,310 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * AddressProfiler.C
+ *
+ * Description: See AddressProfiler.h
+ *
+ * $Id$
+ *
+ */
+
+#include "AddressProfiler.hh"
+#include "CacheMsg.hh"
+#include "AccessTraceForAddress.hh"
+#include "PrioHeap.hh"
+#include "Map.hh"
+#include "System.hh"
+#include "Profiler.hh"
+
+// Helper functions
+static AccessTraceForAddress& lookupTraceForAddress(const Address& addr, Map<Address, AccessTraceForAddress>* record_map);
+static void printSorted(ostream& out, const Map<Address, AccessTraceForAddress>* record_map, string description);
+
+// Allocate the five per-address trace maps and zero all statistics.
+AddressProfiler::AddressProfiler()
+{
+ m_dataAccessTrace = new Map<Address, AccessTraceForAddress>;
+ m_macroBlockAccessTrace = new Map<Address, AccessTraceForAddress>;
+ m_programCounterAccessTrace = new Map<Address, AccessTraceForAddress>;
+ m_retryProfileMap = new Map<Address, AccessTraceForAddress>;
+ m_persistentPredictionProfileMap = new Map<Address, AccessTraceForAddress>;
+ clearStats();
+}
+
+// Free the maps allocated in the constructor.
+AddressProfiler::~AddressProfiler()
+{
+ delete m_dataAccessTrace;
+ delete m_macroBlockAccessTrace;
+ delete m_programCounterAccessTrace;
+ delete m_retryProfileMap;
+ delete m_persistentPredictionProfileMap;
+}
+
+// Dump all collected statistics.  Sections are conditional: hot-line
+// reports when PROFILE_HOT_LINES is set, all-instruction PC profile
+// when PROFILE_ALL_INSTRUCTIONS is set, and retry / persistent-
+// prediction sections only when those histograms are non-empty.
+void AddressProfiler::printStats(ostream& out) const
+{
+ if (PROFILE_HOT_LINES) {
+ out << endl;
+ out << "AddressProfiler Stats" << endl;
+ out << "---------------------" << endl;
+
+ out << endl;
+ out << "sharing_misses: " << m_sharing_miss_counter << endl;
+ out << "getx_sharing_histogram: " << m_getx_sharing_histogram << endl;
+ out << "gets_sharing_histogram: " << m_gets_sharing_histogram << endl;
+
+ out << endl;
+ out << "Hot Data Blocks" << endl;
+ out << "---------------" << endl;
+ out << endl;
+ printSorted(out, m_dataAccessTrace, "block_address");
+
+ out << endl;
+ out << "Hot MacroData Blocks" << endl;
+ out << "--------------------" << endl;
+ out << endl;
+ printSorted(out, m_macroBlockAccessTrace, "macroblock_address");
+
+ out << "Hot Instructions" << endl;
+ out << "----------------" << endl;
+ out << endl;
+ printSorted(out, m_programCounterAccessTrace, "pc_address");
+ }
+
+ if (PROFILE_ALL_INSTRUCTIONS){
+ out << endl;
+ out << "All Instructions Profile:" << endl;
+ out << "-------------------------" << endl;
+ out << endl;
+ printSorted(out, m_programCounterAccessTrace, "pc_address");
+ out << endl;
+ }
+
+ if (m_retryProfileHisto.size() > 0) {
+ out << "Retry Profile" << endl;
+ out << "-------------" << endl;
+ out << endl;
+ out << "retry_histogram_absolute: " << m_retryProfileHisto << endl;
+ out << "retry_histogram_write: " << m_retryProfileHistoWrite << endl;
+ out << "retry_histogram_read: " << m_retryProfileHistoRead << endl;
+
+ out << "retry_histogram_percent: ";
+ m_retryProfileHisto.printPercent(out);
+ out << endl;
+
+ // Normalize by instructions executed (global profiler count).
+ out << "retry_histogram_per_instruction: ";
+ m_retryProfileHisto.printWithMultiplier(out, 1.0 / double(g_system_ptr->getProfiler()->getTotalInstructionsExecuted()));
+ out << endl;
+
+ printSorted(out, m_retryProfileMap, "block_address");
+ out << endl;
+ }
+
+ if (m_persistentPredictionProfileHisto.size() > 0) {
+ out << "Persistent Prediction Profile" << endl;
+ out << "-------------" << endl;
+ out << endl;
+ out << "persistent prediction_histogram: " << m_persistentPredictionProfileHisto << endl;
+
+ out << "persistent prediction_histogram_percent: ";
+ m_persistentPredictionProfileHisto.printPercent(out);
+ out << endl;
+
+ out << "persistentPrediction_histogram_per_instruction: ";
+ m_persistentPredictionProfileHisto.printWithMultiplier(out, 1.0 / double(g_system_ptr->getProfiler()->getTotalInstructionsExecuted()));
+ out << endl;
+
+ printSorted(out, m_persistentPredictionProfileMap, "block_address");
+ out << endl;
+ }
+}
+
+// Reset every counter, trace map, and histogram so a new measurement
+// interval starts from zero.  Also called from the constructor.
+void AddressProfiler::clearStats()
+{
+ // Clear the maps
+ m_sharing_miss_counter = 0;
+ m_dataAccessTrace->clear();
+ m_macroBlockAccessTrace->clear();
+ m_programCounterAccessTrace->clear();
+ m_retryProfileMap->clear();
+ m_retryProfileHisto.clear();
+ m_retryProfileHistoRead.clear();
+ m_retryProfileHistoWrite.clear();
+ // Bug fix: the persistent-prediction map and histogram were the
+ // only statistics not reset here, so data recorded before a
+ // clearStats() call leaked into the next measurement interval.
+ m_persistentPredictionProfileMap->clear();
+ m_persistentPredictionProfileHisto.clear();
+ m_getx_sharing_histogram.clear();
+ m_gets_sharing_histogram.clear();
+}
+
+// Profile a GETX (exclusive/store) request: count how many OTHER
+// nodes (owner + sharers, minus the requestor) must be contacted,
+// histogram that indirection count, and record the access as a store.
+void AddressProfiler::profileGetX(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor)
+{
+ Set indirection_set;
+ indirection_set.addSet(sharers);
+ indirection_set.addSet(owner);
+ indirection_set.remove(requestor);
+ int num_indirections = indirection_set.count();
+
+ m_getx_sharing_histogram.add(num_indirections);
+ // Any required indirection makes this a sharing miss.
+ bool indirection_miss = (num_indirections > 0);
+
+ addTraceSample(datablock, PC, CacheRequestType_ST, AccessModeType(0), requestor, indirection_miss);
+}
+
+// Profile a GETS (shared/load) request: unlike GETX, only the owner
+// counts as an indirection (sharers can keep their copies).  Records
+// the access as a load.
+void AddressProfiler::profileGetS(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor)
+{
+ Set indirection_set;
+ indirection_set.addSet(owner);
+ indirection_set.remove(requestor);
+ int num_indirections = indirection_set.count();
+
+ m_gets_sharing_histogram.add(num_indirections);
+ bool indirection_miss = (num_indirections > 0);
+
+ addTraceSample(datablock, PC, CacheRequestType_LD, AccessModeType(0), requestor, indirection_miss);
+}
+
+void AddressProfiler::addTraceSample(Address data_addr, Address pc_addr, CacheRequestType type, AccessModeType access_mode, NodeID id, bool sharing_miss)
+{
+ if (PROFILE_HOT_LINES) {
+ if (sharing_miss) {
+ m_sharing_miss_counter++;
+ }
+
+ // record data address trace info
+ data_addr.makeLineAddress();
+ lookupTraceForAddress(data_addr, m_dataAccessTrace).update(type, access_mode, id, sharing_miss);
+
+ // record macro data address trace info
+ Address macro_addr(data_addr.maskLowOrderBits(10)); // 6 for datablock, 4 to make it 16x more coarse
+ lookupTraceForAddress(macro_addr, m_macroBlockAccessTrace).update(type, access_mode, id, sharing_miss);
+
+ // record program counter address trace info
+ lookupTraceForAddress(pc_addr, m_programCounterAccessTrace).update(type, access_mode, id, sharing_miss);
+ }
+
+ if (PROFILE_ALL_INSTRUCTIONS) {
+ // This code is used if the address profiler is an all-instructions profiler
+ // record program counter address trace info
+ lookupTraceForAddress(pc_addr, m_programCounterAccessTrace).update(type, access_mode, id, sharing_miss);
+ }
+}
+
+// Record a retry count for an address: always histogrammed (overall
+// plus read/write split); per-address samples are kept only for
+// addresses that actually retried (count > 1).
+void AddressProfiler::profileRetry(const Address& data_addr, AccessType type, int count)
+{
+ m_retryProfileHisto.add(count);
+ if (type == AccessType_Read) {
+ m_retryProfileHistoRead.add(count);
+ } else {
+ m_retryProfileHistoWrite.add(count);
+ }
+ if (count > 1) {
+ lookupTraceForAddress(data_addr, m_retryProfileMap).addSample(count);
+ }
+}
+
+// Record one persistent-prediction event for an address.
+// NOTE(review): the 'type' parameter is currently unused — there is
+// no read/write split here, unlike profileRetry().
+void AddressProfiler::profilePersistentPrediction(const Address& data_addr, AccessType type)
+{
+ m_persistentPredictionProfileHisto.add(1);
+ lookupTraceForAddress(data_addr, m_persistentPredictionProfileMap).addSample(1);
+}
+
+// ***** Normal Functions ******
+
+// Print the records of a trace map sorted by total accesses.  The top
+// 'records_printed' entries are printed individually; the remainder
+// contribute only to summary histograms.  Also builds a
+// touched-by-N-processors distribution (plain and access-weighted).
+static void printSorted(ostream& out, const Map<Address, AccessTraceForAddress>* record_map, string description)
+{
+ const int records_printed = 100;
+
+ uint64 misses = 0;
+ // node_less_then_eq orders by descending total, so extractMin()
+ // below yields the hottest records first.
+ PrioHeap<AccessTraceForAddress*> heap;
+ Vector<Address> keys = record_map->keys();
+ for(int i=0; i<keys.size(); i++){
+ AccessTraceForAddress* record = &(record_map->lookup(keys[i]));
+ misses += record->getTotal();
+ heap.insert(record);
+ }
+
+ out << "Total_entries_" << description << ": " << keys.size() << endl;
+ if (PROFILE_ALL_INSTRUCTIONS)
+ out << "Total_Instructions_" << description << ": " << misses << endl;
+ else
+ out << "Total_data_misses_" << description << ": " << misses << endl;
+
+ out << "total | load store atomic | user supervisor | sharing | touched-by" << endl;
+
+ Histogram remaining_records(1, 100);
+ Histogram all_records(1, 100);
+ Histogram remaining_records_log(-1);
+ Histogram all_records_log(-1);
+
+ // Allows us to track how many lines where touched by n processors
+ Vector<int64> m_touched_vec;
+ Vector<int64> m_touched_weighted_vec;
+ m_touched_vec.setSize(RubyConfig::numberOfProcessors()+1);
+ m_touched_weighted_vec.setSize(RubyConfig::numberOfProcessors()+1);
+ for (int i=0; i<m_touched_vec.size(); i++) {
+ m_touched_vec[i] = 0;
+ m_touched_weighted_vec[i] = 0;
+ }
+
+ // Print the hottest records, each with its share of total misses.
+ int counter = 0;
+ while((heap.size() > 0) && (counter < records_printed)) {
+ AccessTraceForAddress* record = heap.extractMin();
+ double percent = 100.0*(record->getTotal()/double(misses));
+ out << description << " | " << percent << " % " << *record << endl;
+ all_records.add(record->getTotal());
+ all_records_log.add(record->getTotal());
+ counter++;
+ m_touched_vec[record->getTouchedBy()]++;
+ m_touched_weighted_vec[record->getTouchedBy()] += record->getTotal();
+ }
+
+ // Fold the remaining (unprinted) records into the summaries only.
+ while(heap.size() > 0) {
+ AccessTraceForAddress* record = heap.extractMin();
+ all_records.add(record->getTotal());
+ remaining_records.add(record->getTotal());
+ all_records_log.add(record->getTotal());
+ remaining_records_log.add(record->getTotal());
+ m_touched_vec[record->getTouchedBy()]++;
+ m_touched_weighted_vec[record->getTouchedBy()] += record->getTotal();
+ }
+ out << endl;
+ out << "all_records_" << description << ": " << all_records << endl;
+ out << "all_records_log_" << description << ": " << all_records_log << endl;
+ out << "remaining_records_" << description << ": " << remaining_records << endl;
+ out << "remaining_records_log_" << description << ": " << remaining_records_log << endl;
+ out << "touched_by_" << description << ": " << m_touched_vec << endl;
+ out << "touched_by_weighted_" << description << ": " << m_touched_weighted_vec << endl;
+ out << endl;
+}
+
+// Find-or-create: return the trace record for 'addr', inserting a
+// fresh record into the map on first access.
+static AccessTraceForAddress& lookupTraceForAddress(const Address& addr, Map<Address, AccessTraceForAddress>* record_map)
+{
+ if(record_map->exist(addr) == false){
+ record_map->add(addr, AccessTraceForAddress(addr));
+ }
+ return record_map->lookup(addr);
+}
diff --git a/src/mem/ruby/profiler/AddressProfiler.hh b/src/mem/ruby/profiler/AddressProfiler.hh
new file mode 100644
index 000000000..4895a7857
--- /dev/null
+++ b/src/mem/ruby/profiler/AddressProfiler.hh
@@ -0,0 +1,109 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * AddressProfiler.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef ADDRESSPROFILER_H
+#define ADDRESSPROFILER_H
+
+#include "Global.hh"
+#include "NodeID.hh"
+#include "Histogram.hh"
+#include "Address.hh"
+#include "CacheMsg.hh"
+#include "AccessType.hh"
+
+class AccessTraceForAddress;
+class Set;
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+
+// Collects per-address access statistics: hot data/macroblock/PC
+// traces, GETX/GETS sharing-indirection histograms, retry counts, and
+// persistent-prediction events.  Non-copyable.
+class AddressProfiler {
+public:
+ // Constructors
+ AddressProfiler();
+
+ // Destructor
+ ~AddressProfiler();
+
+ // Public Methods
+ void printStats(ostream& out) const;
+ void clearStats();
+
+ // Record one access in the data/macroblock/PC traces.
+ void addTraceSample(Address data_addr, Address pc_addr, CacheRequestType type, AccessModeType access_mode, NodeID id, bool sharing_miss);
+ void profilePersistentPrediction(const Address& data_addr, AccessType type);
+ // Record a retry count (per-address samples kept when count > 1).
+ void profileRetry(const Address& data_addr, AccessType type, int count);
+ // Profile coherence requests; owner/sharers determine indirections.
+ void profileGetX(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor);
+ void profileGetS(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor);
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ AddressProfiler(const AddressProfiler& obj);
+ AddressProfiler& operator=(const AddressProfiler& obj);
+
+ // Data Members (m_ prefix)
+ int64 m_sharing_miss_counter;
+
+ // Maps are heap-allocated (owned); created in ctor, freed in dtor.
+ Map<Address, AccessTraceForAddress>* m_dataAccessTrace;
+ Map<Address, AccessTraceForAddress>* m_macroBlockAccessTrace;
+ Map<Address, AccessTraceForAddress>* m_programCounterAccessTrace;
+ Map<Address, AccessTraceForAddress>* m_retryProfileMap;
+ Map<Address, AccessTraceForAddress>* m_persistentPredictionProfileMap;
+ Histogram m_persistentPredictionProfileHisto;
+ Histogram m_retryProfileHisto;
+ Histogram m_retryProfileHistoWrite;
+ Histogram m_retryProfileHistoRead;
+ Histogram m_getx_sharing_histogram;
+ Histogram m_gets_sharing_histogram;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const AddressProfiler& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const AddressProfiler& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //ADDRESSPROFILER_H
diff --git a/src/mem/ruby/profiler/CacheProfiler.cc b/src/mem/ruby/profiler/CacheProfiler.cc
new file mode 100644
index 000000000..666a99632
--- /dev/null
+++ b/src/mem/ruby/profiler/CacheProfiler.cc
@@ -0,0 +1,151 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * CacheProfiler.C
+ *
+ * Description: See CacheProfiler.h
+ *
+ * $Id$
+ *
+ */
+
+#include "CacheProfiler.hh"
+#include "AccessTraceForAddress.hh"
+#include "PrioHeap.hh"
+#include "System.hh"
+#include "Profiler.hh"
+#include "Vector.hh"
+
+// Construct a cache profiler labeled by 'description'; sizes the
+// per-request-type counter vector to the number of request types and
+// zeroes all stats.  (Histogram binsize -1 presumably selects
+// log-scale binning, as in printSorted's *_log histograms — confirm
+// against Histogram.)
+CacheProfiler::CacheProfiler(string description)
+ : m_requestSize(-1)
+{
+ m_description = description;
+ m_requestTypeVec_ptr = new Vector<int>;
+ m_requestTypeVec_ptr->setSize(int(GenericRequestType_NUM));
+
+ clearStats();
+}
+
+// Free the request-type counter vector allocated in the constructor.
+CacheProfiler::~CacheProfiler()
+{
+ delete m_requestTypeVec_ptr;
+}
+
+// Dump miss/prefetch totals, per-instruction and per-transaction
+// rates, and percentage breakdowns by request type and access mode.
+// NOTE(review): if trans_executed or inst_executed is 0, the ratio
+// prints inf/nan (only the instructions_per_misses case is guarded).
+void CacheProfiler::printStats(ostream& out) const
+{
+ out << m_description << " cache stats: " << endl;
+ string description = " " + m_description;
+
+ out << description << "_total_misses: " << m_misses << endl;
+ out << description << "_total_demand_misses: " << m_demand_misses << endl;
+ out << description << "_total_prefetches: " << m_prefetches << endl;
+ out << description << "_total_sw_prefetches: " << m_sw_prefetches << endl;
+ out << description << "_total_hw_prefetches: " << m_hw_prefetches << endl;
+
+ double trans_executed = double(g_system_ptr->getProfiler()->getTotalTransactionsExecuted());
+ double inst_executed = double(g_system_ptr->getProfiler()->getTotalInstructionsExecuted());
+
+ out << description << "_misses_per_transaction: " << double(m_misses) / trans_executed << endl;
+ out << description << "_misses_per_instruction: " << double(m_misses) / inst_executed << endl;
+ out << description << "_instructions_per_misses: ";
+ if (m_misses > 0) {
+ out << inst_executed / double(m_misses) << endl;
+ } else {
+ out << "NaN" << endl;
+ }
+ out << endl;
+
+ int requests = 0;
+
+ for(int i=0; i<int(GenericRequestType_NUM); i++) {
+ requests += m_requestTypeVec_ptr->ref(i);
+ }
+
+ // Every miss must have been attributed to exactly one request type.
+ assert(m_misses == requests);
+
+ if (requests > 0) {
+ for(int i=0; i<int(GenericRequestType_NUM); i++){
+ if (m_requestTypeVec_ptr->ref(i) > 0) {
+ out << description << "_request_type_" << GenericRequestType_to_string(GenericRequestType(i)) << ": "
+ << (100.0 * double((m_requestTypeVec_ptr->ref(i)))) / double(requests)
+ << "%" << endl;
+ }
+ }
+
+ out << endl;
+
+ for(int i=0; i<AccessModeType_NUM; i++){
+ if (m_accessModeTypeHistogram[i] > 0) {
+ out << description << "_access_mode_type_" << (AccessModeType) i << ": " << m_accessModeTypeHistogram[i]
+ << " " << (100.0 * m_accessModeTypeHistogram[i]) / requests << "%" << endl;
+ }
+ }
+ }
+
+ out << description << "_request_size: " << m_requestSize << endl;
+ out << endl;
+
+}
+
+// Zero all counters and histograms; also called by the constructor.
+void CacheProfiler::clearStats()
+{
+ for(int i=0; i<int(GenericRequestType_NUM); i++) {
+ m_requestTypeVec_ptr->ref(i) = 0;
+ }
+ m_requestSize.clear();
+ m_misses = 0;
+ m_demand_misses = 0;
+ m_prefetches = 0;
+ m_sw_prefetches = 0;
+ m_hw_prefetches = 0;
+ for(int i=0; i<AccessModeType_NUM; i++){
+ m_accessModeTypeHistogram[i] = 0;
+ }
+}
+
+// Record one cache miss: count it overall, per request type, per
+// access mode, and by message size, then classify it as a demand
+// miss, software prefetch, or hardware prefetch via the prefetch bit.
+void CacheProfiler::addStatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit)
+{
+ m_misses++;
+
+ m_requestTypeVec_ptr->ref(requestType)++;
+
+ m_accessModeTypeHistogram[type]++;
+ m_requestSize.add(msgSize);
+ if (pfBit == PrefetchBit_No) {
+ m_demand_misses++;
+ } else if (pfBit == PrefetchBit_Yes) {
+ m_prefetches++;
+ m_sw_prefetches++;
+ } else { // must be L1_HW || L2_HW prefetch
+ m_prefetches++;
+ m_hw_prefetches++;
+ }
+}
+
diff --git a/src/mem/ruby/profiler/CacheProfiler.hh b/src/mem/ruby/profiler/CacheProfiler.hh
new file mode 100644
index 000000000..2d538d0e3
--- /dev/null
+++ b/src/mem/ruby/profiler/CacheProfiler.hh
@@ -0,0 +1,100 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * CacheProfiler.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef CACHEPROFILER_H
+#define CACHEPROFILER_H
+
+#include "Global.hh"
+#include "NodeID.hh"
+#include "Histogram.hh"
+#include "AccessModeType.hh"
+#include "PrefetchBit.hh"
+#include "GenericRequestType.hh"
+
+template <class TYPE> class Vector;
+
+// Collects miss statistics for one cache (identified by a free-form
+// description string): totals, request-type and access-mode
+// breakdowns, request-size histogram, prefetch classification.
+// Non-copyable.
+class CacheProfiler {
+public:
+ // Constructors
+ CacheProfiler(string description);
+
+ // Destructor
+ ~CacheProfiler();
+
+ // Public Methods
+ void printStats(ostream& out) const;
+ void clearStats();
+
+ // Record one miss; pfBit distinguishes demand / SW / HW prefetch.
+ void addStatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit);
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ CacheProfiler(const CacheProfiler& obj);
+ CacheProfiler& operator=(const CacheProfiler& obj);
+
+ // Data Members (m_ prefix)
+ string m_description;
+ Histogram m_requestSize;
+ int64 m_misses;
+ int64 m_demand_misses;
+ int64 m_prefetches;       // demand + SW + HW prefetch classification
+ int64 m_sw_prefetches;
+ int64 m_hw_prefetches;
+ int64 m_accessModeTypeHistogram[AccessModeType_NUM];
+
+ // Heap-allocated counter per GenericRequestType (owned).
+ Vector < int >* m_requestTypeVec_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const CacheProfiler& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const CacheProfiler& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //CACHEPROFILER_H
diff --git a/src/mem/ruby/profiler/Profiler.cc b/src/mem/ruby/profiler/Profiler.cc
new file mode 100644
index 000000000..46c6c37bc
--- /dev/null
+++ b/src/mem/ruby/profiler/Profiler.cc
@@ -0,0 +1,2294 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file has been modified by Kevin Moore and Dan Nussbaum of the
+ Scalable Systems Research Group at Sun Microsystems Laboratories
+ (http://research.sun.com/scalable/) to support the Adaptive
+ Transactional Memory Test Platform (ATMTP).
+
+ Please send email to atmtp-interest@sun.com with feedback, questions, or
+ to request future announcements about ATMTP.
+
+ ----------------------------------------------------------------------
+
+ File modification date: 2008-02-23
+
+ ----------------------------------------------------------------------
+*/
+
+/*
+ * Profiler.cc
+ *
+ * Description: See Profiler.hh
+ *
+ * $Id$
+ *
+ */
+
+#include "Profiler.hh"
+#include "CacheProfiler.hh"
+#include "AddressProfiler.hh"
+#include "System.hh"
+#include "Network.hh"
+#include "PrioHeap.hh"
+#include "CacheMsg.hh"
+#include "Driver.hh"
+#include "Protocol.hh"
+#include "util.hh"
+#include "Map.hh"
+#include "Debug.hh"
+#include "MachineType.hh"
+// #include "TransactionInterfaceManager.hh"
+#include "interface.hh"
+//#include "XactVisualizer.hh" //gem5:Arka for decomissioning log_tm
+//#include "XactProfiler.hh" //gem5:Arka for decomissioning log_tm
+
+// extern "C" {
+// #include "Rock.hh"
+// }
+
+// Allows use of times() library call, which determines virtual runtime
+#include <sys/times.h>
+
+extern std::ostream * debug_cout_ptr;
+extern std::ostream * xact_cout_ptr;
+
+static double process_memory_total();
+static double process_memory_resident();
+
+// Builds the global Ruby profiler: allocates the per-cache profilers, the
+// address profilers, and the transactional-memory (XACT_MEM) bookkeeping
+// maps, zeroes the memory-controller counters, and finally calls
+// clearStats() to put every remaining counter into a known state.
+Profiler::Profiler()
+  : m_conflicting_histogram(-1)
+{
+  m_requestProfileMap_ptr = new Map<string, int>;
+  m_L1D_cache_profiler_ptr = new CacheProfiler("L1D_cache");
+  m_L1I_cache_profiler_ptr = new CacheProfiler("L1I_cache");
+
+  m_L2_cache_profiler_ptr = new CacheProfiler("L2_cache");
+
+  m_address_profiler_ptr = new AddressProfiler;
+  // The instruction profiler is expensive, so it is only allocated when
+  // PROFILE_ALL_INSTRUCTIONS is configured; otherwise it stays NULL.
+  m_inst_profiler_ptr = NULL;
+  if (PROFILE_ALL_INSTRUCTIONS) {
+    m_inst_profiler_ptr = new AddressProfiler;
+  }
+
+  //m_xact_profiler_ptr = new XactProfiler; //gem5:Arka for decommissioning of log_tm
+
+  m_conflicting_map_ptr = new Map<Address, Time>;
+
+  // Wall-clock start is captured once here and deliberately NOT reset in
+  // clearStats(), so elapsed-time stats cover the whole run.
+  m_real_time_start_time = time(NULL); // Not reset in clearStats()
+  m_stats_period = 1000000; // Default
+  // Default sinks; setPeriodicStatsFile() may replace the first with a
+  // heap-allocated ofstream (the destructor checks for &cerr before delete).
+  m_periodic_output_file_ptr = &cerr;
+  m_xact_visualizer_ptr = &cout;
+
+  //---- begin XACT_MEM code
+  // Per-transaction-ID and per-address bookkeeping for transactional
+  // memory statistics (commits, aborts, nacks, read/write sets, filters).
+  m_xactExceptionMap_ptr = new Map<int, int>;
+  m_procsInXactMap_ptr = new Map<int, int>;
+  m_abortIDMap_ptr = new Map<int, int>;
+  m_commitIDMap_ptr = new Map<int, int>;
+  m_xactRetryIDMap_ptr = new Map<int, int>;
+  m_xactCyclesIDMap_ptr = new Map<int, int>;
+  m_xactReadSetIDMap_ptr = new Map<int, int>;
+  m_xactWriteSetIDMap_ptr = new Map<int, int>;
+  m_xactLoadMissIDMap_ptr = new Map<int, int>;
+  m_xactStoreMissIDMap_ptr = new Map<int, int>;
+  m_xactInstrCountIDMap_ptr = new Map<int, integer_t>;
+  m_abortPCMap_ptr = new Map<Address, int>;
+  m_abortAddressMap_ptr = new Map<Address, int>;
+  m_nackXIDMap_ptr = new Map<int, int>;
+  // Maps a transaction ID to a heap-allocated inner map of (nacking XID ->
+  // count); the inner maps are owned by this object.
+  m_nackXIDPairMap_ptr = new Map<int, Map<int, int> * >;
+  m_nackPCMap_ptr = new Map<Address, int>;
+  m_watch_address_list_ptr = new Map<Address, int>;
+  m_readSetMatch_ptr = new Map<Address, int>;
+  m_readSetNoMatch_ptr = new Map<Address, int>;
+  m_writeSetMatch_ptr = new Map<Address, int>;
+  m_writeSetNoMatch_ptr = new Map<Address, int>;
+  m_xactReadFilterBitsSetOnCommit = new Map<int, Histogram>;
+  m_xactReadFilterBitsSetOnAbort = new Map<int, Histogram>;
+  m_xactWriteFilterBitsSetOnCommit = new Map<int, Histogram>;
+  m_xactWriteFilterBitsSetOnAbort = new Map<int, Histogram>;
+  //---- end XACT_MEM code
+
+  // for MemoryControl:
+  // Plain counters incremented by the memory controller hooks; reported in
+  // printStats() under "Memory control:".
+  m_memReq = 0;
+  m_memBankBusy = 0;
+  m_memBusBusy = 0;
+  m_memReadWriteBusy = 0;
+  m_memDataBusBusy = 0;
+  m_memTfawBusy = 0;
+  m_memRefresh = 0;
+  m_memRead = 0;
+  m_memWrite = 0;
+  m_memWaitCycles = 0;
+  m_memInputQ = 0;
+  m_memBankQ = 0;
+  m_memArbWait = 0;
+  m_memRandBusy = 0;
+  m_memNotOld = 0;
+
+
+  // One access counter per physical DRAM bank across all ranks/DIMMs.
+  int totalBanks = RubyConfig::banksPerRank()
+    * RubyConfig::ranksPerDimm()
+    * RubyConfig::dimmsPerChannel();
+  m_memBankCount.setSize(totalBanks);
+
+  clearStats();
+}
+
+// Releases everything allocated in the constructor.  The original version
+// freed only the cache/address profilers and two maps, leaking every
+// XACT_MEM map (and the inner maps held by m_nackXIDPairMap_ptr); all of
+// those are now deleted as well.
+Profiler::~Profiler()
+{
+  // &cerr is the static default sink and is not owned by the profiler.
+  if (m_periodic_output_file_ptr != &cerr) {
+    delete m_periodic_output_file_ptr;
+  }
+  delete m_address_profiler_ptr;
+  delete m_inst_profiler_ptr;  // NULL unless PROFILE_ALL_INSTRUCTIONS; deleting NULL is a no-op
+  delete m_L1D_cache_profiler_ptr;
+  delete m_L1I_cache_profiler_ptr;
+  delete m_L2_cache_profiler_ptr;
+  //delete m_xact_profiler_ptr; //gem5:Arka for decommissioning of log_tm
+  delete m_requestProfileMap_ptr;
+  delete m_conflicting_map_ptr;
+
+  //---- begin XACT_MEM code
+  delete m_xactExceptionMap_ptr;
+  delete m_procsInXactMap_ptr;
+  delete m_abortIDMap_ptr;
+  delete m_commitIDMap_ptr;
+  delete m_xactRetryIDMap_ptr;
+  delete m_xactCyclesIDMap_ptr;
+  delete m_xactReadSetIDMap_ptr;
+  delete m_xactWriteSetIDMap_ptr;
+  delete m_xactLoadMissIDMap_ptr;
+  delete m_xactStoreMissIDMap_ptr;
+  delete m_xactInstrCountIDMap_ptr;
+  delete m_abortPCMap_ptr;
+  delete m_abortAddressMap_ptr;
+  delete m_nackXIDMap_ptr;
+  // The pair map owns its heap-allocated inner maps; free those first.
+  if (m_nackXIDPairMap_ptr != NULL) {
+    Vector<int> xids = m_nackXIDPairMap_ptr->keys();
+    for (int i = 0; i < xids.size(); i++) {
+      delete m_nackXIDPairMap_ptr->lookup(xids[i]);
+    }
+    delete m_nackXIDPairMap_ptr;
+  }
+  delete m_nackPCMap_ptr;
+  delete m_watch_address_list_ptr;
+  delete m_readSetMatch_ptr;
+  delete m_readSetNoMatch_ptr;
+  delete m_writeSetMatch_ptr;
+  delete m_writeSetNoMatch_ptr;
+  delete m_xactReadFilterBitsSetOnCommit;
+  delete m_xactReadFilterBitsSetOnAbort;
+  delete m_xactWriteFilterBitsSetOnCommit;
+  delete m_xactWriteFilterBitsSetOnAbort;
+  //---- end XACT_MEM code
+}
+
+// Event-queue callback fired every m_stats_period Ruby cycles (scheduled by
+// setPeriodicStatsFile/Interval and re-armed at the end of this function).
+// Appends a snapshot of the headline counters to *m_periodic_output_file_ptr.
+void Profiler::wakeup()
+{
+  // FIXME - avoid the repeated code
+
+  Vector<integer_t> perProcInstructionCount;
+  perProcInstructionCount.setSize(RubyConfig::numberOfProcessors());
+
+  Vector<integer_t> perProcCycleCount;
+  perProcCycleCount.setSize(RubyConfig::numberOfProcessors());
+
+  // Counts are taken relative to the values latched when stats were last
+  // cleared (m_*_executed_at_start).
+  for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
+    perProcInstructionCount[i] = g_system_ptr->getDriver()->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
+    perProcCycleCount[i] = g_system_ptr->getDriver()->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
+    // The +1 allows us to avoid division by zero
+  }
+
+  integer_t total_misses = m_perProcTotalMisses.sum();
+  integer_t instruction_executed = perProcInstructionCount.sum();
+  integer_t simics_cycles_executed = perProcCycleCount.sum();
+  integer_t transactions_started = m_perProcStartTransaction.sum();
+  integer_t transactions_ended = m_perProcEndTransaction.sum();
+
+  // One "key: total per-proc-vector" line per statistic, matching the
+  // format used by printStats() so post-processing scripts can share code.
+  (*m_periodic_output_file_ptr) << "ruby_cycles: " << g_eventQueue_ptr->getTime()-m_ruby_start << endl;
+  (*m_periodic_output_file_ptr) << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl;
+  (*m_periodic_output_file_ptr) << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
+  (*m_periodic_output_file_ptr) << "simics_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
+  (*m_periodic_output_file_ptr) << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl;
+  (*m_periodic_output_file_ptr) << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
+  (*m_periodic_output_file_ptr) << "L1TBE_usage: " << m_L1tbeProfile << endl;
+  (*m_periodic_output_file_ptr) << "L2TBE_usage: " << m_L2tbeProfile << endl;
+  (*m_periodic_output_file_ptr) << "mbytes_resident: " << process_memory_resident() << endl;
+  (*m_periodic_output_file_ptr) << "mbytes_total: " << process_memory_total() << endl;
+  if (process_memory_total() > 0) {
+    (*m_periodic_output_file_ptr) << "resident_ratio: " << process_memory_resident()/process_memory_total() << endl;
+  }
+  (*m_periodic_output_file_ptr) << "miss_latency: " << m_allMissLatencyHistogram << endl;
+
+  *m_periodic_output_file_ptr << endl;
+
+  if (PROFILE_ALL_INSTRUCTIONS) {
+    m_inst_profiler_ptr->printStats(*m_periodic_output_file_ptr);
+  }
+
+  //g_system_ptr->getNetwork()->printStats(*m_periodic_output_file_ptr);
+  // Re-arm so the next snapshot fires one period from now.
+  g_eventQueue_ptr->scheduleEvent(this, m_stats_period);
+}
+
+// Redirects the periodic statistics dumps (produced by wakeup()) to the
+// named file and schedules the first dump one cycle from now.
+void Profiler::setPeriodicStatsFile(const string& filename)
+{
+  cout << "Recording periodic statistics to file '" << filename << "' every "
+       << m_stats_period << " Ruby cycles" << endl;
+
+  // Free any previously installed file; &cerr is the static default sink
+  // and must never be deleted.
+  if (m_periodic_output_file_ptr != &cerr) {
+    delete m_periodic_output_file_ptr;
+  }
+
+  // NOTE(review): the ofstream's open() success is not checked — a bad
+  // path silently discards all periodic stats.
+  m_periodic_output_file_ptr = new ofstream(filename.c_str());
+  g_eventQueue_ptr->scheduleEvent(this, 1);
+}
+
+// Changes the interval between periodic statistics dumps and schedules the
+// next dump one cycle from now.
+void Profiler::setPeriodicStatsInterval(integer_t period)
+{
+  // Assign before printing: the original printed the stale m_stats_period,
+  // announcing the old interval instead of the one just requested.
+  m_stats_period = period;
+  cout << "Recording periodic statistics every " << m_stats_period << " Ruby cycles" << endl;
+  g_eventQueue_ptr->scheduleEvent(this, 1);
+}
+
+// Dumps the profiler's configurable parameters in the standard
+// "section header + key: value" layout used by the other Ruby components.
+void Profiler::printConfig(ostream& out) const
+{
+  out << endl
+      << "Profiler Configuration" << endl
+      << "----------------------" << endl
+      << "periodic_stats_period: " << m_stats_period << endl;
+}
+
+// Minimal identity tag emitted by the generic component-printing machinery
+// (see the stream operator idiom used throughout Ruby).
+void Profiler::print(ostream& out) const
+{
+  out << "[Profiler]";
+}
+
+void Profiler::printStats(ostream& out, bool short_stats)
+{
+ out << endl;
+ if (short_stats) {
+ out << "SHORT ";
+ }
+ out << "Profiler Stats" << endl;
+ out << "--------------" << endl;
+
+ time_t real_time_current = time(NULL);
+ double seconds = difftime(real_time_current, m_real_time_start_time);
+ double minutes = seconds/60.0;
+ double hours = minutes/60.0;
+ double days = hours/24.0;
+ Time ruby_cycles = g_eventQueue_ptr->getTime()-m_ruby_start;
+
+ if (!short_stats) {
+ out << "Elapsed_time_in_seconds: " << seconds << endl;
+ out << "Elapsed_time_in_minutes: " << minutes << endl;
+ out << "Elapsed_time_in_hours: " << hours << endl;
+ out << "Elapsed_time_in_days: " << days << endl;
+ out << endl;
+ }
+
+ // print the virtual runtimes as well
+ struct tms vtime;
+ times(&vtime);
+ seconds = (vtime.tms_utime + vtime.tms_stime) / 100.0;
+ minutes = seconds / 60.0;
+ hours = minutes / 60.0;
+ days = hours / 24.0;
+ out << "Virtual_time_in_seconds: " << seconds << endl;
+ out << "Virtual_time_in_minutes: " << minutes << endl;
+ out << "Virtual_time_in_hours: " << hours << endl;
+ out << "Virtual_time_in_days: " << hours << endl;
+ out << endl;
+
+ out << "Ruby_current_time: " << g_eventQueue_ptr->getTime() << endl;
+ out << "Ruby_start_time: " << m_ruby_start << endl;
+ out << "Ruby_cycles: " << ruby_cycles << endl;
+ out << endl;
+
+ if (!short_stats) {
+ out << "mbytes_resident: " << process_memory_resident() << endl;
+ out << "mbytes_total: " << process_memory_total() << endl;
+ if (process_memory_total() > 0) {
+ out << "resident_ratio: " << process_memory_resident()/process_memory_total() << endl;
+ }
+ out << endl;
+
+ if(m_num_BA_broadcasts + m_num_BA_unicasts != 0){
+ out << endl;
+ out << "Broadcast_percent: " << (float)m_num_BA_broadcasts/(m_num_BA_broadcasts+m_num_BA_unicasts) << endl;
+ }
+ }
+
+ Vector<integer_t> perProcInstructionCount;
+ Vector<integer_t> perProcCycleCount;
+ Vector<double> perProcCPI;
+ Vector<double> perProcMissesPerInsn;
+ Vector<double> perProcInsnPerTrans;
+ Vector<double> perProcCyclesPerTrans;
+ Vector<double> perProcMissesPerTrans;
+
+ perProcInstructionCount.setSize(RubyConfig::numberOfProcessors());
+ perProcCycleCount.setSize(RubyConfig::numberOfProcessors());
+ perProcCPI.setSize(RubyConfig::numberOfProcessors());
+ perProcMissesPerInsn.setSize(RubyConfig::numberOfProcessors());
+
+ perProcInsnPerTrans.setSize(RubyConfig::numberOfProcessors());
+ perProcCyclesPerTrans.setSize(RubyConfig::numberOfProcessors());
+ perProcMissesPerTrans.setSize(RubyConfig::numberOfProcessors());
+
+ for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
+ perProcInstructionCount[i] = g_system_ptr->getDriver()->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
+ perProcCycleCount[i] = g_system_ptr->getDriver()->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
+ // The +1 allows us to avoid division by zero
+ perProcCPI[i] = double(ruby_cycles)/perProcInstructionCount[i];
+ perProcMissesPerInsn[i] = 1000.0 * (double(m_perProcTotalMisses[i]) / double(perProcInstructionCount[i]));
+
+ int trans = m_perProcEndTransaction[i];
+ if (trans == 0) {
+ perProcInsnPerTrans[i] = 0;
+ perProcCyclesPerTrans[i] = 0;
+ perProcMissesPerTrans[i] = 0;
+ } else {
+ perProcInsnPerTrans[i] = perProcInstructionCount[i] / double(trans);
+ perProcCyclesPerTrans[i] = ruby_cycles / double(trans);
+ perProcMissesPerTrans[i] = m_perProcTotalMisses[i] / double(trans);
+ }
+ }
+
+ integer_t total_misses = m_perProcTotalMisses.sum();
+ integer_t user_misses = m_perProcUserMisses.sum();
+ integer_t supervisor_misses = m_perProcSupervisorMisses.sum();
+ integer_t instruction_executed = perProcInstructionCount.sum();
+ integer_t simics_cycles_executed = perProcCycleCount.sum();
+ integer_t transactions_started = m_perProcStartTransaction.sum();
+ integer_t transactions_ended = m_perProcEndTransaction.sum();
+
+ double instructions_per_transaction = (transactions_ended != 0) ? double(instruction_executed) / double(transactions_ended) : 0;
+ double cycles_per_transaction = (transactions_ended != 0) ? (RubyConfig::numberOfProcessors() * double(ruby_cycles)) / double(transactions_ended) : 0;
+ double misses_per_transaction = (transactions_ended != 0) ? double(total_misses) / double(transactions_ended) : 0;
+
+ out << "Total_misses: " << total_misses << endl;
+ out << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl;
+ out << "user_misses: " << user_misses << " " << m_perProcUserMisses << endl;
+ out << "supervisor_misses: " << supervisor_misses << " " << m_perProcSupervisorMisses << endl;
+ out << endl;
+ out << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
+ out << "simics_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
+ out << "cycles_per_instruction: " << (RubyConfig::numberOfProcessors()*double(ruby_cycles))/double(instruction_executed) << " " << perProcCPI << endl;
+ out << "misses_per_thousand_instructions: " << 1000.0 * (double(total_misses) / double(instruction_executed)) << " " << perProcMissesPerInsn << endl;
+ out << endl;
+ out << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl;
+ out << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
+ out << "instructions_per_transaction: " << instructions_per_transaction << " " << perProcInsnPerTrans << endl;
+ out << "cycles_per_transaction: " << cycles_per_transaction << " " << perProcCyclesPerTrans << endl;
+ out << "misses_per_transaction: " << misses_per_transaction << " " << perProcMissesPerTrans << endl;
+
+ out << endl;
+
+ m_L1D_cache_profiler_ptr->printStats(out);
+ m_L1I_cache_profiler_ptr->printStats(out);
+ m_L2_cache_profiler_ptr->printStats(out);
+
+ out << endl;
+
+ if (m_memReq || m_memRefresh) { // if there's a memory controller at all
+ long long int total_stalls = m_memInputQ + m_memBankQ + m_memWaitCycles;
+ double stallsPerReq = total_stalls * 1.0 / m_memReq;
+ out << "Memory control:" << endl;
+ out << " memory_total_requests: " << m_memReq << endl; // does not include refreshes
+ out << " memory_reads: " << m_memRead << endl;
+ out << " memory_writes: " << m_memWrite << endl;
+ out << " memory_refreshes: " << m_memRefresh << endl;
+ out << " memory_total_request_delays: " << total_stalls << endl;
+ out << " memory_delays_per_request: " << stallsPerReq << endl;
+ out << " memory_delays_in_input_queue: " << m_memInputQ << endl;
+ out << " memory_delays_behind_head_of_bank_queue: " << m_memBankQ << endl;
+ out << " memory_delays_stalled_at_head_of_bank_queue: " << m_memWaitCycles << endl;
+ // Note: The following "memory stalls" entries are a breakdown of the
+ // cycles which already showed up in m_memWaitCycles. The order is
+ // significant; it is the priority of attributing the cycles.
+ // For example, bank_busy is before arbitration because if the bank was
+ // busy, we didn't even check arbitration.
+ // Note: "not old enough" means that since we grouped waiting heads-of-queues
+ // into batches to avoid starvation, a request in a newer batch
+ // didn't try to arbitrate yet because there are older requests waiting.
+ out << " memory_stalls_for_bank_busy: " << m_memBankBusy << endl;
+ out << " memory_stalls_for_random_busy: " << m_memRandBusy << endl;
+ out << " memory_stalls_for_anti_starvation: " << m_memNotOld << endl;
+ out << " memory_stalls_for_arbitration: " << m_memArbWait << endl;
+ out << " memory_stalls_for_bus: " << m_memBusBusy << endl;
+ out << " memory_stalls_for_tfaw: " << m_memTfawBusy << endl;
+ out << " memory_stalls_for_read_write_turnaround: " << m_memReadWriteBusy << endl;
+ out << " memory_stalls_for_read_read_turnaround: " << m_memDataBusBusy << endl;
+ out << " accesses_per_bank: ";
+ for (int bank=0; bank < m_memBankCount.size(); bank++) {
+ out << m_memBankCount[bank] << " ";
+ //if ((bank % 8) == 7) out << " " << endl;
+ }
+ out << endl;
+ out << endl;
+ }
+
+ if (!short_stats) {
+ out << "Busy Controller Counts:" << endl;
+ for(int i=0; i < MachineType_NUM; i++) {
+ for(int j=0; j < MachineType_base_count((MachineType)i); j++) {
+ MachineID machID;
+ machID.type = (MachineType)i;
+ machID.num = j;
+ out << machID << ":" << m_busyControllerCount[i][j] << " ";
+ if ((j+1)%8 == 0) {
+ out << endl;
+ }
+ }
+ out << endl;
+ }
+ out << endl;
+
+ out << "Busy Bank Count:" << m_busyBankCount << endl;
+ out << endl;
+
+ out << "L1TBE_usage: " << m_L1tbeProfile << endl;
+ out << "L2TBE_usage: " << m_L2tbeProfile << endl;
+ out << "StopTable_usage: " << m_stopTableProfile << endl;
+ out << "sequencer_requests_outstanding: " << m_sequencer_requests << endl;
+ out << "store_buffer_size: " << m_store_buffer_size << endl;
+ out << "unique_blocks_in_store_buffer: " << m_store_buffer_blocks << endl;
+ out << endl;
+ }
+
+ if (!short_stats) {
+ out << "All Non-Zero Cycle Demand Cache Accesses" << endl;
+ out << "----------------------------------------" << endl;
+ out << "miss_latency: " << m_allMissLatencyHistogram << endl;
+ for(int i=0; i<m_missLatencyHistograms.size(); i++) {
+ if (m_missLatencyHistograms[i].size() > 0) {
+ out << "miss_latency_" << CacheRequestType(i) << ": " << m_missLatencyHistograms[i] << endl;
+ }
+ }
+ for(int i=0; i<m_machLatencyHistograms.size(); i++) {
+ if (m_machLatencyHistograms[i].size() > 0) {
+ out << "miss_latency_" << GenericMachineType(i) << ": " << m_machLatencyHistograms[i] << endl;
+ }
+ }
+ out << "miss_latency_L2Miss: " << m_L2MissLatencyHistogram << endl;
+
+ out << endl;
+
+ out << "All Non-Zero Cycle SW Prefetch Requests" << endl;
+ out << "------------------------------------" << endl;
+ out << "prefetch_latency: " << m_allSWPrefetchLatencyHistogram << endl;
+ for(int i=0; i<m_SWPrefetchLatencyHistograms.size(); i++) {
+ if (m_SWPrefetchLatencyHistograms[i].size() > 0) {
+ out << "prefetch_latency_" << CacheRequestType(i) << ": " << m_SWPrefetchLatencyHistograms[i] << endl;
+ }
+ }
+ for(int i=0; i<m_SWPrefetchMachLatencyHistograms.size(); i++) {
+ if (m_SWPrefetchMachLatencyHistograms[i].size() > 0) {
+ out << "prefetch_latency_" << GenericMachineType(i) << ": " << m_SWPrefetchMachLatencyHistograms[i] << endl;
+ }
+ }
+ out << "prefetch_latency_L2Miss:" << m_SWPrefetchL2MissLatencyHistogram << endl;
+
+ out << "multicast_retries: " << m_multicast_retry_histogram << endl;
+ out << "gets_mask_prediction_count: " << m_gets_mask_prediction << endl;
+ out << "getx_mask_prediction_count: " << m_getx_mask_prediction << endl;
+ out << "explicit_training_mask: " << m_explicit_training_mask << endl;
+ out << endl;
+
+ if (m_all_sharing_histogram.size() > 0) {
+ out << "all_sharing: " << m_all_sharing_histogram << endl;
+ out << "read_sharing: " << m_read_sharing_histogram << endl;
+ out << "write_sharing: " << m_write_sharing_histogram << endl;
+
+ out << "all_sharing_percent: "; m_all_sharing_histogram.printPercent(out); out << endl;
+ out << "read_sharing_percent: "; m_read_sharing_histogram.printPercent(out); out << endl;
+ out << "write_sharing_percent: "; m_write_sharing_histogram.printPercent(out); out << endl;
+
+ int64 total_miss = m_cache_to_cache + m_memory_to_cache;
+ out << "all_misses: " << total_miss << endl;
+ out << "cache_to_cache_misses: " << m_cache_to_cache << endl;
+ out << "memory_to_cache_misses: " << m_memory_to_cache << endl;
+ out << "cache_to_cache_percent: " << 100.0 * (double(m_cache_to_cache) / double(total_miss)) << endl;
+ out << "memory_to_cache_percent: " << 100.0 * (double(m_memory_to_cache) / double(total_miss)) << endl;
+ out << endl;
+ }
+
+ if (m_conflicting_histogram.size() > 0) {
+ out << "conflicting_histogram: " << m_conflicting_histogram << endl;
+ out << "conflicting_histogram_percent: "; m_conflicting_histogram.printPercent(out); out << endl;
+ out << endl;
+ }
+
+ if (m_outstanding_requests.size() > 0) {
+ out << "outstanding_requests: "; m_outstanding_requests.printPercent(out); out << endl;
+ if (m_outstanding_persistent_requests.size() > 0) {
+ out << "outstanding_persistent_requests: "; m_outstanding_persistent_requests.printPercent(out); out << endl;
+ }
+ out << endl;
+ }
+ }
+
+ if (XACT_MEMORY){
+ // Transactional Memory stats
+ out << "Transactional Memory Stats:" << endl;
+ out << "------- xact --------" << endl;
+ out << "xact_size_dist: " << m_xactSizes << endl;
+ out << "xact_instr_count: " << m_xactInstrCount << endl;
+ out << "xact_time_dist: " << m_xactCycles << endl;
+ out << "xact_log_size_dist: " << m_xactLogs << endl;
+ out << "xact_read_set_size_dist: " << m_xactReads << endl;
+ out << "xact_write_set_size_dist: " << m_xactWrites << endl;
+ out << "xact_overflow_read_lines_dist: " << m_xactOverflowReads << endl;
+ out << "xact_overflow_write_lines_dist: " << m_xactOverflowWrites << endl;
+ out << "xact_overflow_read_set_size_dist: " << m_xactOverflowTotalReads << endl;
+ out << "xact_overflow_write_set_size_dist: " << m_xactOverflowTotalWrites << endl;
+ out << "xact_miss_load_dist: " << m_xactLoadMisses << endl;
+ out << "xact_miss_store_dist: " << m_xactStoreMisses << endl;
+ out << "xact_nacked: " << m_xactNacked << endl;
+ out << "xact_retries: " << m_xactRetries << endl;
+ out << "xact_abort_delays: " << m_abortDelays << endl;
+ out << "xact_aborts: " << m_transactionAborts << endl;
+ if (ATMTP_ENABLED) {
+ out << "xact_log_overflows: " << m_transactionLogOverflows << endl;
+ out << "xact_cache_overflows: " << m_transactionCacheOverflows << endl;
+ out << "xact_unsup_inst_aborts: " << m_transactionUnsupInsts << endl;
+ out << "xact_save_rest_aborts: " << m_transactionSaveRestAborts << endl;
+ }
+ out << "xact_writebacks: " << m_transWBs << endl;
+ out << "xact_extra_wbs: " << m_extraWBs << endl;
+ out << "xact_handler_startup_delay: " << m_abortStarupDelay << endl;
+ out << "xact_handler_per_block_delay: " << m_abortPerBlockDelay << endl;
+ out << "xact_inferred_aborts: " << m_inferredAborts << endl;
+ //out << "xact_histogram: " << m_procsInXact << endl;
+
+ if (!short_stats) {
+ Vector<int> nackedXIDKeys = m_nackXIDMap_ptr->keys();
+ nackedXIDKeys.sortVector();
+ out << endl;
+ int total_nacks = 0;
+ out << "------- xact Nacks by XID --------" << endl;
+ for(int i=0; i<nackedXIDKeys.size(); i++) {
+ int key = nackedXIDKeys[i];
+ int count = m_nackXIDMap_ptr->lookup(key);
+ total_nacks += count;
+ out << "xact " << key << " "
+ << setw(6) << dec << count
+ << endl;
+ }
+ out << "Total Nacks: " << total_nacks << endl;
+ out << "---------------" << endl;
+ out << endl;
+
+ // Print XID Nack Pairs
+ Vector<int> nackedXIDPairKeys = m_nackXIDPairMap_ptr->keys();
+ nackedXIDPairKeys.sortVector();
+ out << endl;
+ total_nacks = 0;
+ out << "------- xact Nacks by XID Pairs --------" << endl;
+ for(int i=0; i<nackedXIDPairKeys.size(); i++) {
+ int key = nackedXIDPairKeys[i];
+ Map<int, int> * my_map = m_nackXIDPairMap_ptr->lookup(key);
+ Vector<int> my_keys = my_map->keys();
+ my_keys.sortVector();
+ for(int j=0; j<my_keys.size(); j++){
+ int nid = my_keys[j];
+ int count = my_map->lookup(nid);
+ total_nacks += count;
+ out << "xact " << key << " nacked by xact " << nid << " "
+ << setw(6) << dec << count
+ << endl;
+ }
+ }
+ out << "Total Nacks: " << total_nacks << endl;
+ out << "---------------" << endl;
+ out << endl;
+
+
+ Vector<Address> nackedPCKeys = m_nackPCMap_ptr->keys();
+ nackedPCKeys.sortVector();
+ out << endl;
+ out << "------- xact Nacks by PC --------" << endl;
+ for(int i=0; i<nackedPCKeys.size(); i++) {
+ Address key = nackedPCKeys[i];
+ int count = m_nackPCMap_ptr->lookup(key);
+ out << "xact_Nack " << key << " "
+ << setw(4) << dec << count
+ << endl;
+ }
+ out << "---------------" << endl;
+ out << endl;
+
+
+ Vector<int> xactExceptionKeys = m_xactExceptionMap_ptr->keys();
+ xactExceptionKeys.sortVector();
+ out << "------- xact exceptions --------" << endl;
+ for(int i=0; i<xactExceptionKeys.size(); i++) {
+ int key = xactExceptionKeys[i];
+ int count = m_xactExceptionMap_ptr->lookup(key);
+ out << "xact_exception("
+ << hex << key << "):"
+ << setw(4) << dec << count
+ << endl;
+ }
+ out << endl;
+ out << "---------------" << endl;
+ out << endl;
+
+ Vector<int> abortIDKeys = m_abortIDMap_ptr->keys();
+ abortIDKeys.sortVector();
+ out << "------- xact abort by XID --------" << endl;
+ for(int i=0; i<abortIDKeys.size(); i++) {
+ int count = m_abortIDMap_ptr->lookup(abortIDKeys[i]);
+ out << "xact_aborts("
+ << dec << abortIDKeys[i] << "):"
+ << setw(7) << count
+ << endl;
+ }
+ out << endl;
+ out << "---------------" << endl;
+ out << endl;
+
+ Vector<Address> abortedPCKeys = m_abortPCMap_ptr->keys();
+ abortedPCKeys.sortVector();
+ out << endl;
+ out << "------- xact Aborts by PC --------" << endl;
+ for(int i=0; i<abortedPCKeys.size(); i++) {
+ Address key = abortedPCKeys[i];
+ int count = m_abortPCMap_ptr->lookup(key);
+ out << "xact_abort_pc " << key
+ << setw(4) << dec << count
+ << endl;
+ }
+ out << "---------------" << endl;
+ out << endl;
+
+ Vector<Address> abortedAddrKeys = m_abortAddressMap_ptr->keys();
+ abortedAddrKeys.sortVector();
+ out << endl;
+ out << "------- xact Aborts by Address --------" << endl;
+ for(int i=0; i<abortedAddrKeys.size(); i++) {
+ Address key = abortedAddrKeys[i];
+ int count = m_abortAddressMap_ptr->lookup(key);
+ out << "xact_abort_address " << key
+ << setw(4) << dec << count
+ << endl;
+ }
+ out << "---------------" << endl;
+ out << endl;
+ } // !short_stats
+
+ Vector<int> commitIDKeys = m_commitIDMap_ptr->keys();
+ commitIDKeys.sortVector();
+ out << "------- xact Commit Stats by XID --------" << endl;
+ for(int i=0; i<commitIDKeys.size(); i++) {
+ int count = m_commitIDMap_ptr->lookup(commitIDKeys[i]);
+ double retry_count = (double)m_xactRetryIDMap_ptr->lookup(commitIDKeys[i]) / count;
+ double cycles_count = (double)m_xactCyclesIDMap_ptr->lookup(commitIDKeys[i]) / count;
+ double readset_count = (double)m_xactReadSetIDMap_ptr->lookup(commitIDKeys[i]) / count;
+ double writeset_count = (double)m_xactWriteSetIDMap_ptr->lookup(commitIDKeys[i]) / count;
+ double loadmiss_count = (double)m_xactLoadMissIDMap_ptr->lookup(commitIDKeys[i]) / count;
+ double storemiss_count = (double)m_xactStoreMissIDMap_ptr->lookup(commitIDKeys[i]) / count;
+ double instr_count = (double)m_xactInstrCountIDMap_ptr->lookup(commitIDKeys[i]) / count;
+ out << "xact_stats id: "
+ << dec << commitIDKeys[i]
+ << " count: " << setw(7) << count
+ << " Cycles: " << setw(7) << cycles_count
+ << " Instr: " << setw(7) << instr_count
+ << " ReadSet: " << setw(7) << readset_count
+ << " WriteSet: " << setw(7) << writeset_count
+ << " LoadMiss: " << setw(7) << loadmiss_count
+ << " StoreMiss: " << setw(7) << storemiss_count
+ << " Retry Count: " << setw(7) << retry_count
+ << endl;
+ }
+ out << endl;
+ out << "---------------" << endl;
+ out << endl;
+
+ if (!short_stats) {
+ Vector<int> procsInXactKeys = m_procsInXactMap_ptr->keys();
+ procsInXactKeys.sortVector();
+ out << "------- xact histogram --------" << endl;
+ for(int i=0; i<procsInXactKeys.size(); i++) {
+ int count = m_procsInXactMap_ptr->lookup(procsInXactKeys[i]);
+ int key = procsInXactKeys[i];
+ out << "xact_histogram("
+ << dec << key << "):"
+ << setw(8) << count
+ << endl;
+ }
+ out << endl;
+ out << "---------------" << endl;
+ out << endl;
+
+ // Read/Write set Bloom filter stats
+ //int false_reads = 0;
+ long long int false_reads = m_readSetNoMatch;
+ Vector<Address> fp_read_keys = m_readSetNoMatch_ptr->keys();
+ out << "------- xact read set false positives -------" << endl;
+ for(int i=0; i < fp_read_keys.size(); ++i){
+ int count = m_readSetNoMatch_ptr->lookup(fp_read_keys[i]);
+ //out << "read_false_positive( " << fp_read_keys[i] << " ): "
+ // << setw(8) << dec << count << endl;
+ false_reads += count;
+ }
+ out << "Total read set false positives : " << setw(8) << false_reads << endl;
+ out << "-----------------------" << endl;
+ out << endl;
+
+ //int matching_reads = 0;
+ long long int matching_reads = m_readSetMatch;
+ long long int empty_checks = m_readSetEmptyChecks;
+ Vector<Address> read_keys = m_readSetMatch_ptr->keys();
+ out << "------- xact read set matches -------" << endl;
+ for(int i=0; i < read_keys.size(); ++i){
+ int count = m_readSetMatch_ptr->lookup(read_keys[i]);
+ //out << "read_match( " << read_keys[i] << " ): "
+ // << setw(8) << dec << count << endl;
+ matching_reads += count;
+ }
+ out << "Total read set matches : " << setw(8) << matching_reads << endl;
+ out << "Total read set empty checks : " << setw(8) << empty_checks << endl;
+ double false_positive_pct = 0.0;
+ if((false_reads + matching_reads)> 0){
+ false_positive_pct = (1.0*false_reads)/(false_reads+matching_reads)*100.0;
+ }
+ out << "Read set false positives rate : " << false_positive_pct << "%" << endl;
+ out << "-----------------------" << endl;
+ out << endl;
+
+ // for write set
+ //int false_writes = 0;
+ long long int false_writes = m_writeSetNoMatch;
+ Vector<Address> fp_write_keys = m_writeSetNoMatch_ptr->keys();
+ out << "------- xact write set false positives -------" << endl;
+ for(int i=0; i < fp_write_keys.size(); ++i){
+ int count = m_writeSetNoMatch_ptr->lookup(fp_write_keys[i]);
+ //out << "write_false_positive( " << fp_write_keys[i] << " ): "
+ // << setw(8) << dec << count << endl;
+ false_writes += count;
+ }
+ out << "Total write set false positives : " << setw(8) << false_writes << endl;
+ out << "-----------------------" << endl;
+ out << endl;
+
+ //int matching_writes = 0;
+ long long int matching_writes = m_writeSetMatch;
+ empty_checks = m_writeSetEmptyChecks;
+ Vector<Address> write_keys = m_writeSetMatch_ptr->keys();
+ out << "------- xact write set matches -------" << endl;
+ for(int i=0; i < write_keys.size(); ++i){
+ int count = m_writeSetMatch_ptr->lookup(write_keys[i]);
+ //out << "write_match( " << write_keys[i] << " ): "
+ // << setw(8) << dec << count << endl;
+ matching_writes += count;
+ }
+ out << "Total write set matches : " << setw(8) << matching_writes << endl;
+ out << "Total write set empty checks : " << setw(8) << empty_checks << endl;
+ false_positive_pct = 0.0;
+ if((matching_writes+false_writes) > 0){
+ false_positive_pct = (1.0*false_writes)/(false_writes+matching_writes)*100.0;
+ }
+ out << "Write set false positives rate : " << false_positive_pct << "%" << endl;
+ out << "-----------------------" << endl;
+ out << endl;
+
+ out << "----- Xact Signature Stats ------" << endl;
+ Vector<int> xids = m_xactReadFilterBitsSetOnCommit->keys();
+ for(int i=0; i < xids.size(); ++i){
+ int xid = xids[i];
+ out << "xid " << xid << " Read set bits set on commit: " << (m_xactReadFilterBitsSetOnCommit->lookup(xid)) << endl;
+ }
+ xids = m_xactWriteFilterBitsSetOnCommit->keys();
+ for(int i=0; i < xids.size(); ++i){
+ int xid = xids[i];
+ out << "xid " << xid << " Write set bits set on commit: " << (m_xactWriteFilterBitsSetOnCommit->lookup(xid)) << endl;
+ }
+ xids = m_xactReadFilterBitsSetOnAbort->keys();
+ for(int i=0; i < xids.size(); ++i){
+ int xid = xids[i];
+ out << "xid " << xid << " Read set bits set on abort: " << (m_xactReadFilterBitsSetOnAbort->lookup(xid)) << endl;
+ }
+ xids = m_xactWriteFilterBitsSetOnAbort->keys();
+ for(int i=0; i < xids.size(); ++i){
+ int xid = xids[i];
+ out << "xid " << xid << " Write set bits set on abort: " << (m_xactWriteFilterBitsSetOnAbort->lookup(xid)) << endl;
+ }
+ out << endl;
+
+ cout << "------- WATCHPOINTS --------" << endl;
+ cout << "False Triggers : " << m_watchpointsFalsePositiveTrigger << endl;
+ cout << "True Triggers : " << m_watchpointsTrueTrigger << endl;
+ cout << "Total Triggers : " << m_watchpointsTrueTrigger + m_watchpointsFalsePositiveTrigger << endl;
+ cout << "---------------" << endl;
+ cout << endl;
+ } // !short_stats
+ //m_xact_profiler_ptr->printStats(out, short_stats); // gem5:Arka for decomissioning of log_tm
+ } // XACT_MEMORY
+
+ if (!short_stats) {
+ out << "Request vs. System State Profile" << endl;
+ out << "--------------------------------" << endl;
+ out << endl;
+
+ Vector<string> requestProfileKeys = m_requestProfileMap_ptr->keys();
+ requestProfileKeys.sortVector();
+
+ for(int i=0; i<requestProfileKeys.size(); i++) {
+ int temp_int = m_requestProfileMap_ptr->lookup(requestProfileKeys[i]);
+ double percent = (100.0*double(temp_int))/double(m_requests);
+ while (requestProfileKeys[i] != "") {
+ out << setw(10) << string_split(requestProfileKeys[i], ':');
+ }
+ out << setw(11) << temp_int;
+ out << setw(14) << percent << endl;
+ }
+ out << endl;
+
+ out << "filter_action: " << m_filter_action_histogram << endl;
+
+ if (!PROFILE_ALL_INSTRUCTIONS) {
+ m_address_profiler_ptr->printStats(out);
+ }
+
+ if (PROFILE_ALL_INSTRUCTIONS) {
+ m_inst_profiler_ptr->printStats(out);
+ }
+
+ out << endl;
+ out << "Message Delayed Cycles" << endl;
+ out << "----------------------" << endl;
+ out << "Total_delay_cycles: " << m_delayedCyclesHistogram << endl;
+ out << "Total_nonPF_delay_cycles: " << m_delayedCyclesNonPFHistogram << endl;
+ for (int i = 0; i < m_delayedCyclesVCHistograms.size(); i++) {
+ out << " virtual_network_" << i << "_delay_cycles: " << m_delayedCyclesVCHistograms[i] << endl;
+ }
+
+ printResourceUsage(out);
+ }
+
+}
+
+// Dump host-process resource usage (page size plus getrusage(RUSAGE_SELF)
+// fields) in the same "key: value" format as the rest of the stats output.
+void Profiler::printResourceUsage(ostream& out) const
+{
+  out << endl;
+  out << "Resource Usage" << endl;
+  out << "--------------" << endl;
+
+  integer_t pagesize = getpagesize(); // page size in bytes
+  out << "page_size: " << pagesize << endl;
+
+  rusage usage;
+  getrusage (RUSAGE_SELF, &usage);
+
+  // Note: only whole seconds are reported; tv_usec is dropped.
+  out << "user_time: " << usage.ru_utime.tv_sec << endl;
+  out << "system_time: " << usage.ru_stime.tv_sec << endl;
+  out << "page_reclaims: " << usage.ru_minflt << endl;
+  out << "page_faults: " << usage.ru_majflt << endl;
+  out << "swaps: " << usage.ru_nswap << endl;
+  out << "block_inputs: " << usage.ru_inblock << endl;
+  out << "block_outputs: " << usage.ru_oublock << endl;
+}
+
+// Reset every statistic gathered by the profiler and restart the measurement
+// interval at the current event-queue time.  Counters are zeroed, histograms
+// cleared, and per-processor baselines re-snapshotted, so all subsequent
+// stats are relative to this call.
+void Profiler::clearStats()
+{
+  m_num_BA_unicasts = 0;
+  m_num_BA_broadcasts = 0;
+
+  m_ruby_start = g_eventQueue_ptr->getTime();
+
+  // Snapshot per-processor instruction/cycle counts so later totals can be
+  // reported relative to this point; before the system exists, use zero.
+  m_instructions_executed_at_start.setSize(RubyConfig::numberOfProcessors());
+  m_cycles_executed_at_start.setSize(RubyConfig::numberOfProcessors());
+  for (int i=0; i < RubyConfig::numberOfProcessors(); i++) {
+    if (g_system_ptr == NULL) {
+      m_instructions_executed_at_start[i] = 0;
+      m_cycles_executed_at_start[i] = 0;
+    } else {
+      m_instructions_executed_at_start[i] = g_system_ptr->getDriver()->getInstructionCount(i);
+      m_cycles_executed_at_start[i] = g_system_ptr->getDriver()->getCycleCount(i);
+    }
+  }
+
+  // Per-processor miss/transaction counters.
+  m_perProcTotalMisses.setSize(RubyConfig::numberOfProcessors());
+  m_perProcUserMisses.setSize(RubyConfig::numberOfProcessors());
+  m_perProcSupervisorMisses.setSize(RubyConfig::numberOfProcessors());
+  m_perProcStartTransaction.setSize(RubyConfig::numberOfProcessors());
+  m_perProcEndTransaction.setSize(RubyConfig::numberOfProcessors());
+
+  for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
+    m_perProcTotalMisses[i] = 0;
+    m_perProcUserMisses[i] = 0;
+    m_perProcSupervisorMisses[i] = 0;
+    m_perProcStartTransaction[i] = 0;
+    m_perProcEndTransaction[i] = 0;
+  }
+
+  // One busy counter per controller instance of each machine type.
+  m_busyControllerCount.setSize(MachineType_NUM); // all machines
+  for(int i=0; i < MachineType_NUM; i++) {
+    m_busyControllerCount[i].setSize(MachineType_base_count((MachineType)i));
+    for(int j=0; j < MachineType_base_count((MachineType)i); j++) {
+      m_busyControllerCount[i][j] = 0;
+    }
+  }
+  m_busyBankCount = 0;
+
+  // Network message-delay histograms (total, non-prefetch, per virtual net).
+  m_delayedCyclesHistogram.clear();
+  m_delayedCyclesNonPFHistogram.clear();
+  m_delayedCyclesVCHistograms.setSize(NUMBER_OF_VIRTUAL_NETWORKS);
+  for (int i = 0; i < NUMBER_OF_VIRTUAL_NETWORKS; i++) {
+    m_delayedCyclesVCHistograms[i].clear();
+  }
+
+  m_gets_mask_prediction.clear();
+  m_getx_mask_prediction.clear();
+  m_explicit_training_mask.clear();
+
+  // Latency histograms are (re)sized and cleared with 200 buckets.
+  m_missLatencyHistograms.setSize(CacheRequestType_NUM);
+  for(int i=0; i<m_missLatencyHistograms.size(); i++) {
+    m_missLatencyHistograms[i].clear(200);
+  }
+  m_machLatencyHistograms.setSize(GenericMachineType_NUM+1);
+  for(int i=0; i<m_machLatencyHistograms.size(); i++) {
+    m_machLatencyHistograms[i].clear(200);
+  }
+  m_allMissLatencyHistogram.clear(200);
+  m_L2MissLatencyHistogram.clear(200);
+
+  m_SWPrefetchLatencyHistograms.setSize(CacheRequestType_NUM);
+  for(int i=0; i<m_SWPrefetchLatencyHistograms.size(); i++) {
+    m_SWPrefetchLatencyHistograms[i].clear(200);
+  }
+  m_SWPrefetchMachLatencyHistograms.setSize(GenericMachineType_NUM+1);
+  for(int i=0; i<m_SWPrefetchMachLatencyHistograms.size(); i++) {
+    m_SWPrefetchMachLatencyHistograms[i].clear(200);
+  }
+  m_allSWPrefetchLatencyHistogram.clear(200);
+  m_SWPrefetchL2MissLatencyHistogram.clear(200);
+
+  m_multicast_retry_histogram.clear();
+
+  m_L1tbeProfile.clear();
+  m_L2tbeProfile.clear();
+  m_stopTableProfile.clear();
+  m_filter_action_histogram.clear();
+
+  m_sequencer_requests.clear();
+  m_store_buffer_size.clear();
+  m_store_buffer_blocks.clear();
+  m_read_sharing_histogram.clear();
+  m_write_sharing_histogram.clear();
+  m_all_sharing_histogram.clear();
+  m_cache_to_cache = 0;
+  m_memory_to_cache = 0;
+
+  m_predictions = 0;
+  m_predictionOpportunities = 0;
+  m_goodPredictions = 0;
+
+  // clear HashMaps
+  m_requestProfileMap_ptr->clear();
+
+  // count requests profiled
+  m_requests = 0;
+
+  // Conflicting requests
+  m_conflicting_map_ptr->clear();
+  m_conflicting_histogram.clear();
+
+  m_outstanding_requests.clear();
+  m_outstanding_persistent_requests.clear();
+
+  m_L1D_cache_profiler_ptr->clearStats();
+  m_L1I_cache_profiler_ptr->clearStats();
+  m_L2_cache_profiler_ptr->clearStats();
+  //m_xact_profiler_ptr->clearStats(); //gem5:Arka for decomissiong of log_tm
+
+  //---- begin XACT_MEM code
+  // The maps below are allocated by the constructor; assert they exist
+  // before clearing them.
+  ASSERT(m_xactExceptionMap_ptr != NULL);
+  ASSERT(m_procsInXactMap_ptr != NULL);
+  ASSERT(m_abortIDMap_ptr != NULL);
+  ASSERT(m_abortPCMap_ptr != NULL);
+  ASSERT( m_nackXIDMap_ptr != NULL);
+  ASSERT(m_nackPCMap_ptr != NULL);
+
+  // -1 marks the abort-delay constants as "not yet set" (see
+  // profileAbortDelayConstants).
+  m_abortStarupDelay = -1;
+  m_abortPerBlockDelay = -1;
+  m_transWBs = 0;
+  m_extraWBs = 0;
+  m_transactionAborts = 0;
+  m_transactionLogOverflows = 0;
+  m_transactionCacheOverflows = 0;
+  m_transactionUnsupInsts = 0;
+  m_transactionSaveRestAborts = 0;
+  m_inferredAborts = 0;
+  m_xactNacked = 0;
+
+  m_xactLogs.clear();
+  m_xactCycles.clear();
+  m_xactReads.clear();
+  m_xactWrites.clear();
+  m_xactSizes.clear();
+  m_abortDelays.clear();
+  m_xactRetries.clear();
+  m_xactOverflowReads.clear();
+  m_xactOverflowWrites.clear();
+  m_xactLoadMisses.clear();
+  m_xactStoreMisses.clear();
+  m_xactOverflowTotalReads.clear();
+  m_xactOverflowTotalWrites.clear();
+
+  m_xactExceptionMap_ptr->clear();
+  m_procsInXactMap_ptr->clear();
+  m_abortIDMap_ptr->clear();
+  m_commitIDMap_ptr->clear();
+  m_xactRetryIDMap_ptr->clear();
+  m_xactCyclesIDMap_ptr->clear();
+  m_xactReadSetIDMap_ptr->clear();
+  m_xactWriteSetIDMap_ptr->clear();
+  m_xactLoadMissIDMap_ptr->clear();
+  m_xactStoreMissIDMap_ptr->clear();
+  m_xactInstrCountIDMap_ptr->clear();
+  m_abortPCMap_ptr->clear();
+  m_abortAddressMap_ptr->clear();
+  m_nackXIDMap_ptr->clear();
+  m_nackXIDPairMap_ptr->clear();
+  m_nackPCMap_ptr->clear();
+
+  m_xactReadFilterBitsSetOnCommit->clear();
+  m_xactReadFilterBitsSetOnAbort->clear();
+  m_xactWriteFilterBitsSetOnCommit->clear();
+  m_xactWriteFilterBitsSetOnAbort->clear();
+
+  m_readSetEmptyChecks = 0;
+  m_readSetMatch = 0;
+  m_readSetNoMatch = 0;
+  m_writeSetEmptyChecks = 0;
+  m_writeSetMatch = 0;
+  m_writeSetNoMatch = 0;
+
+  m_xact_visualizer_last = 0;
+  m_watchpointsFalsePositiveTrigger = 0;
+  m_watchpointsTrueTrigger = 0;
+  //---- end XACT_MEM code
+
+  // for MemoryControl:
+  m_memReq = 0;
+  m_memBankBusy = 0;
+  m_memBusBusy = 0;
+  m_memTfawBusy = 0;
+  m_memReadWriteBusy = 0;
+  m_memDataBusBusy = 0;
+  m_memRefresh = 0;
+  m_memRead = 0;
+  m_memWrite = 0;
+  m_memWaitCycles = 0;
+  m_memInputQ = 0;
+  m_memBankQ = 0;
+  m_memArbWait = 0;
+  m_memRandBusy = 0;
+  m_memNotOld = 0;
+
+  for (int bank=0; bank < m_memBankCount.size(); bank++) {
+    m_memBankCount[bank] = 0;
+  }
+
+  // Flush the prefetches through the system - used so that there are no outstanding requests after stats are cleared
+  //g_eventQueue_ptr->triggerAllEvents();
+
+  // update the start time
+  m_ruby_start = g_eventQueue_ptr->getTime();
+}
+
+// Route a demand-request sample to the appropriate cache profiler.  In a
+// two-level hierarchy the sample goes to the L1 I- or D-profiler (and, for
+// CMP, to the address trace); otherwise it is recorded as an L2 sample.
+void Profiler::addPrimaryStatSample(const CacheMsg& msg, NodeID id)
+{
+  if (Protocol::m_TwoLevelCache) {
+    if (msg.getType() == CacheRequestType_IFETCH) {
+      addL1IStatSample(msg, id);
+    } else {
+      addL1DStatSample(msg, id);
+    }
+    // profile the address after an L1 miss (outside of the processor for CMP)
+    if (Protocol::m_CMP) {
+      addAddressTraceSample(msg, id);
+    }
+  } else {
+    addL2StatSample(CacheRequestType_to_GenericRequestType(msg.getType()),
+                    msg.getAccessMode(), msg.getSize(), msg.getPrefetch(), id);
+    addAddressTraceSample(msg, id);
+  }
+}
+
+// Record the interval since the last conflicting request to this cache line
+// (or since the start of the stats interval for a first conflict), then
+// remember the current time for the next conflict on the same line.
+void Profiler::profileConflictingRequests(const Address& addr)
+{
+  assert(addr == line_address(addr));
+  Time last_time = m_ruby_start;
+  if (m_conflicting_map_ptr->exist(addr)) {
+    // BUGFIX: this statement previously re-declared 'last_time', shadowing
+    // the outer variable, so the looked-up timestamp was discarded and every
+    // sample measured from m_ruby_start instead of the previous conflict.
+    last_time = m_conflicting_map_ptr->lookup(addr);
+  }
+  Time current_time = g_eventQueue_ptr->getTime();
+  assert (current_time - last_time > 0);
+  m_conflicting_histogram.add(current_time - last_time);
+  m_conflicting_map_ptr->add(addr, current_time);
+}
+
+// Convenience overload: convert the CacheRequestType to the generic enum and
+// forward to the GenericRequestType overload.
+void Profiler::addSecondaryStatSample(CacheRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id)
+{
+  addSecondaryStatSample(CacheRequestType_to_GenericRequestType(requestType), type, msgSize, pfBit, id);
+}
+
+// Secondary-level (L2) samples are recorded via addL2StatSample.
+void Profiler::addSecondaryStatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id)
+{
+  addL2StatSample(requestType, type, msgSize, pfBit, id);
+}
+
+// Count an L2 miss for processor 'id', split by supervisor vs. user access
+// mode, and forward the sample to the L2 cache profiler.
+void Profiler::addL2StatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id)
+{
+  m_perProcTotalMisses[id]++;
+  if (type == AccessModeType_SupervisorMode) {
+    m_perProcSupervisorMisses[id]++;
+  } else {
+    m_perProcUserMisses[id]++;
+  }
+  m_L2_cache_profiler_ptr->addStatSample(requestType, type, msgSize, pfBit);
+}
+
+// Forward an L1 data-cache sample to its profiler.  'id' is currently unused.
+void Profiler::addL1DStatSample(const CacheMsg& msg, NodeID id)
+{
+  m_L1D_cache_profiler_ptr->addStatSample(CacheRequestType_to_GenericRequestType(msg.getType()),
+                                          msg.getAccessMode(), msg.getSize(), msg.getPrefetch());
+}
+
+// Forward an L1 instruction-cache sample to its profiler.  'id' is unused.
+void Profiler::addL1IStatSample(const CacheMsg& msg, NodeID id)
+{
+  m_L1I_cache_profiler_ptr->addStatSample(CacheRequestType_to_GenericRequestType(msg.getType()),
+                                          msg.getAccessMode(), msg.getSize(), msg.getPrefetch());
+}
+
+// Feed a non-IFETCH request into the address profiler's trace.  Instruction
+// fetches are deliberately excluded.
+void Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
+{
+  if (msg.getType() != CacheRequestType_IFETCH) {
+
+    // Note: The following line should be commented out if you want to
+    // use the special profiling that is part of the GS320 protocol
+
+    // NOTE: Unless PROFILE_HOT_LINES or PROFILE_ALL_INSTRUCTIONS are enabled, nothing will be profiled by the AddressProfiler
+    m_address_profiler_ptr->addTraceSample(msg.getAddress(), msg.getProgramCounter(), msg.getType(), msg.getAccessMode(), id, false);
+  }
+}
+
+// Tally how many remote nodes a request had to contact.  Reads contact the
+// owner only; writes must also reach every sharer.  Zero contacts means the
+// data came from memory, otherwise from another cache.
+void Profiler::profileSharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner)
+{
+  // Begin with the owner; a write additionally invalidates all sharers.
+  Set set_contacted(owner);
+  if (type == AccessType_Write) {
+    set_contacted.addSet(sharers);
+  }
+  // The requestor itself never counts as a remote contact.
+  set_contacted.remove(requestor);
+
+  const int number_contacted = set_contacted.count();
+
+  if (type == AccessType_Write) {
+    m_write_sharing_histogram.add(number_contacted);
+  } else {
+    m_read_sharing_histogram.add(number_contacted);
+  }
+  m_all_sharing_histogram.add(number_contacted);
+
+  if (number_contacted > 0) {
+    m_cache_to_cache++;
+  } else {
+    m_memory_to_cache++;
+  }
+}
+
+// Record the delay of one network message, both per virtual network and in
+// aggregate.  Virtual network 0 is excluded from the "non-PF" histogram
+// (presumably it carries prefetch traffic -- confirm against the network
+// configuration).
+void Profiler::profileMsgDelay(int virtualNetwork, int delayCycles) {
+  assert(virtualNetwork < m_delayedCyclesVCHistograms.size());
+  m_delayedCyclesVCHistograms[virtualNetwork].add(delayCycles);
+  m_delayedCyclesHistogram.add(delayCycles);
+  if (virtualNetwork != 0) {
+    m_delayedCyclesNonPFHistogram.add(delayCycles);
+  }
+}
+
+// Count one original cache request (PUTs included) and bump its per-string
+// bucket in the request-profile map, creating the bucket on first sight.
+void Profiler::profileRequest(const string& requestStr)
+{
+  m_requests++;
+
+  if (!m_requestProfileMap_ptr->exist(requestStr)) {
+    m_requestProfileMap_ptr->add(requestStr, 1);
+  } else {
+    (m_requestProfileMap_ptr->lookup(requestStr))++;
+  }
+}
+
+// Track prediction accuracy: every call is an opportunity; calls with
+// wasPredicted set count as predictions, and of those, wasGood marks the
+// correct ones.
+void Profiler::recordPrediction(bool wasGood, bool wasPredicted)
+{
+  m_predictionOpportunities++;
+  if (!wasPredicted) {
+    return;  // opportunity passed up -- nothing else to count
+  }
+  m_predictions++;
+  if (wasGood) {
+    m_goodPredictions++;
+  }
+}
+
+// Histogram one filter action code.
+void Profiler::profileFilterAction(int action)
+{
+  m_filter_action_histogram.add(action);
+}
+
+// Histogram the retry count of a multicast; 'addr' is currently unused.
+void Profiler::profileMulticastRetry(const Address& addr, int count)
+{
+  m_multicast_retry_histogram.add(count);
+}
+
+// Count a transaction start on the given cpu.
+void Profiler::startTransaction(int cpu)
+{
+  m_perProcStartTransaction[cpu]++;
+}
+
+// Count a transaction end on the given cpu.
+void Profiler::endTransaction(int cpu)
+{
+  m_perProcEndTransaction[cpu]++;
+}
+
+// Count one busy cycle for the controller identified by (machine type, num).
+void Profiler::controllerBusy(MachineID machID)
+{
+  m_busyControllerCount[(int)machID.type][(int)machID.num]++;
+}
+
+// Histogram the time a prefetch spent waiting.
+void Profiler::profilePFWait(Time waitTime)
+{
+  m_prefetchWaitHistogram.add(waitTime);
+}
+
+// Count one busy-bank event.
+void Profiler::bankBusy()
+{
+  m_busyBankCount++;
+}
+
+// non-zero cycle demand request
+// Record a demand-miss latency 't' overall, by request type, and by the
+// machine that responded.  Directory responders (and GenericMachineType_NUM,
+// presumably meaning "unknown" -- confirm) also count as L2 misses.
+void Profiler::missLatency(Time t, CacheRequestType type, GenericMachineType respondingMach)
+{
+  m_allMissLatencyHistogram.add(t);
+  m_missLatencyHistograms[type].add(t);
+  m_machLatencyHistograms[respondingMach].add(t);
+  if(respondingMach == GenericMachineType_Directory || respondingMach == GenericMachineType_NUM) {
+    m_L2MissLatencyHistogram.add(t);
+  }
+}
+
+// non-zero cycle prefetch request
+// Same bucketing as missLatency, but for software-prefetch requests.
+void Profiler::swPrefetchLatency(Time t, CacheRequestType type, GenericMachineType respondingMach)
+{
+  m_allSWPrefetchLatencyHistogram.add(t);
+  m_SWPrefetchLatencyHistograms[type].add(t);
+  m_SWPrefetchMachLatencyHistograms[respondingMach].add(t);
+  if(respondingMach == GenericMachineType_Directory || respondingMach == GenericMachineType_NUM) {
+    m_SWPrefetchL2MissLatencyHistogram.add(t);
+  }
+}
+
+// Emit one formatted line of the state-transition debug trace (time, node,
+// component, event, and per-processor "state>next_state" columns), but only
+// once the configured debug start time has been reached.
+void Profiler::profileTransition(const string& component, NodeID id, NodeID version, Address addr,
+                                 const string& state, const string& event,
+                                 const string& next_state, const string& note)
+{
+  // Fixed column widths for the trace output.
+  const int EVENT_SPACES = 20;
+  const int ID_SPACES = 3;
+  const int TIME_SPACES = 7;
+  const int COMP_SPACES = 10;
+  const int STATE_SPACES = 6;
+
+  if ((g_debug_ptr->getDebugTime() > 0) &&
+      (g_eventQueue_ptr->getTime() >= g_debug_ptr->getDebugTime())) {
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << id << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << version << " ";
+    (* debug_cout_ptr) << setw(COMP_SPACES) << component;
+    (* debug_cout_ptr) << setw(EVENT_SPACES) << event << " ";
+    // Only the transitioning processor's column shows "state>next_state".
+    for (int i=0; i < RubyConfig::numberOfProcessors(); i++) {
+
+      if (i == id) {
+        (* debug_cout_ptr).flags(ios::right);
+        (* debug_cout_ptr) << setw(STATE_SPACES) << state;
+        (* debug_cout_ptr) << ">";
+        (* debug_cout_ptr).flags(ios::left);
+        (* debug_cout_ptr) << setw(STATE_SPACES) << next_state;
+      } else {
+        // cout << setw(STATE_SPACES) << " " << " " << setw(STATE_SPACES) << " ";
+      }
+    }
+    (* debug_cout_ptr) << " " << addr << " " << note;
+
+    (* debug_cout_ptr) << endl;
+  }
+}
+
+// Helper function
+// Returns the process's total virtual size in MB, read from the first field
+// of /proc/self/statm (Linux-specific).  NOTE(review): assumes a 4 kB page
+// size rather than calling getpagesize() -- confirm on non-4kB-page hosts.
+// If the file cannot be opened, the counters stay 0 and 0.0 is returned.
+static double process_memory_total()
+{
+  const double MULTIPLIER = 4096.0/(1024.0*1024.0); // 4kB page size, 1024*1024 bytes per MB,
+  ifstream proc_file;
+  proc_file.open("/proc/self/statm");
+  int total_size_in_pages = 0;
+  int res_size_in_pages = 0;  // read but unused here; see process_memory_resident
+  proc_file >> total_size_in_pages;
+  proc_file >> res_size_in_pages;
+  return double(total_size_in_pages)*MULTIPLIER; // size in megabytes
+}
+
+// Returns the process's resident set size in MB, read from the second field
+// of /proc/self/statm (Linux-specific).  NOTE(review): assumes a 4 kB page
+// size rather than calling getpagesize() -- confirm on non-4kB-page hosts.
+static double process_memory_resident()
+{
+  const double MULTIPLIER = 4096.0/(1024.0*1024.0); // 4kB page size, 1024*1024 bytes per MB,
+  ifstream proc_file;
+  proc_file.open("/proc/self/statm");
+  int total_size_in_pages = 0;  // read but unused here; see process_memory_total
+  int res_size_in_pages = 0;
+  proc_file >> total_size_in_pages;
+  proc_file >> res_size_in_pages;
+  return double(res_size_in_pages)*MULTIPLIER; // size in megabytes
+}
+
+// Histogram the size of a GetX destination-mask prediction.
+void Profiler::profileGetXMaskPrediction(const Set& pred_set)
+{
+  m_getx_mask_prediction.add(pred_set.count());
+}
+
+// Histogram the size of a GetS destination-mask prediction.
+void Profiler::profileGetSMaskPrediction(const Set& pred_set)
+{
+  m_gets_mask_prediction.add(pred_set.count());
+}
+
+// Histogram the size of an explicit predictor-training mask.
+void Profiler::profileTrainingMask(const Set& pred_set)
+{
+  m_explicit_training_mask.add(pred_set.count());
+}
+
+// Total instructions executed across all processors since the last
+// clearStats() baseline.  Never returns 0, so callers may divide by it.
+int64 Profiler::getTotalInstructionsExecuted() const
+{
+  int64 sum = 1; // Starting at 1 allows us to avoid division by zero
+  for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
+    sum += (g_system_ptr->getDriver()->getInstructionCount(i) - m_instructions_executed_at_start[i]);
+  }
+  return sum;
+}
+
+// Total transactions completed across all processors, clamped to a minimum
+// of 1 so callers may divide by the result without a zero check.
+int64 Profiler::getTotalTransactionsExecuted() const
+{
+  const int64 total = m_perProcEndTransaction.sum();
+  return (total > 0) ? total : 1; // avoid division-by-zero downstream
+}
+
+
+// The following case statement converts CacheRequestTypes to GenericRequestTypes
+// allowing all profiling to be done with a single enum type instead of slow strings
+GenericRequestType Profiler::CacheRequestType_to_GenericRequestType(const CacheRequestType& type) {
+  switch (type) {
+  case CacheRequestType_LD:
+    return GenericRequestType_LD;
+  case CacheRequestType_ST:
+    return GenericRequestType_ST;
+  case CacheRequestType_ATOMIC:
+    return GenericRequestType_ATOMIC;
+  case CacheRequestType_IFETCH:
+    return GenericRequestType_IFETCH;
+  case CacheRequestType_LD_XACT:
+    return GenericRequestType_LD_XACT;
+  case CacheRequestType_LDX_XACT:
+    return GenericRequestType_LDX_XACT;
+  case CacheRequestType_ST_XACT:
+    return GenericRequestType_ST_XACT;
+  case CacheRequestType_NULL:
+    return GenericRequestType_NULL;
+  default:
+    ERROR_MSG("Unexpected cache request type");
+    // ERROR_MSG aborts the run, but the compiler cannot see that, so the
+    // original code could fall off the end of a non-void function (undefined
+    // behavior / -Wreturn-type).  Return a defined value to close the path.
+    return GenericRequestType_NULL;
+  }
+}
+
+//---- begin Transactional Memory CODE
+// Record the full set of statistics for one committed transaction: global
+// histograms of its log/read/write set sizes, retries, cycles and misses,
+// plus per-xid accumulators keyed by the transaction's static id.
+void Profiler::profileTransaction(int size, int logSize, int readS, int writeS, int overflow_readS, int overflow_writeS, int retries, int useful_cycles, bool nacked, int loadMisses, int storeMisses, int instrCount, int xid){
+  m_xactLogs.add(logSize);
+  m_xactSizes.add(size);
+  m_xactReads.add(readS);
+  m_xactWrites.add(writeS);
+  m_xactRetries.add(retries);
+  m_xactCycles.add(useful_cycles);
+  m_xactLoadMisses.add(loadMisses);
+  m_xactStoreMisses.add(storeMisses);
+  m_xactInstrCount.add(instrCount);
+
+  // was this transaction nacked?
+  if(nacked){
+    m_xactNacked++;
+  }
+
+  // for overflowed transactions
+  if(overflow_readS > 0 || overflow_writeS > 0){
+    m_xactOverflowReads.add(overflow_readS);
+    m_xactOverflowWrites.add(overflow_writeS);
+    m_xactOverflowTotalReads.add(readS);
+    m_xactOverflowTotalWrites.add(writeS);
+  }
+
+  // Record commits by xid
+  // First commit for this xid seeds every per-xid map; later commits
+  // accumulate into the existing entries.
+  if(!m_commitIDMap_ptr->exist(xid)){
+    m_commitIDMap_ptr->add(xid, 1);
+    m_xactRetryIDMap_ptr->add(xid, retries);
+    m_xactCyclesIDMap_ptr->add(xid, useful_cycles);
+    m_xactReadSetIDMap_ptr->add(xid, readS);
+    m_xactWriteSetIDMap_ptr->add(xid, writeS);
+    m_xactLoadMissIDMap_ptr->add(xid, loadMisses);
+    m_xactStoreMissIDMap_ptr->add(xid, storeMisses);
+    m_xactInstrCountIDMap_ptr->add(xid, instrCount);
+  } else {
+    (m_commitIDMap_ptr->lookup(xid))++;
+    (m_xactRetryIDMap_ptr->lookup(xid)) += retries;
+    (m_xactCyclesIDMap_ptr->lookup(xid)) += useful_cycles;
+    (m_xactReadSetIDMap_ptr->lookup(xid)) += readS;
+    (m_xactWriteSetIDMap_ptr->lookup(xid)) += writeS;
+    (m_xactLoadMissIDMap_ptr->lookup(xid)) += loadMisses;
+    (m_xactStoreMissIDMap_ptr->lookup(xid)) += storeMisses;
+    (m_xactInstrCountIDMap_ptr->lookup(xid)) += instrCount;
+  }
+}
+
+// Debug-trace a transaction BEGIN: prints time, processor, thread, xid, PC,
+// the instruction at that PC (via Simics), and open/closed nesting mode.
+void Profiler::profileBeginTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen){
+  //- if(PROFILE_XACT){
+  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 2)){
+    const char* openStr = isOpen ? " OPEN" : " CLOSED";
+    const int ID_SPACES = 3;
+    const int TIME_SPACES = 7;
+    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+    // The actual processor number
+    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << " XACT BEGIN " << xid
+                       << " PC 0x" << hex << pc.getAddress()
+                       << dec
+                       << " *PC 0x" << hex << myInst << dec
+                       << " '" << myInstStr << "'"
+                       << openStr
+                       << endl;
+  }
+}
+
+// Debug-trace a transaction COMMIT; same format as profileBeginTransaction.
+void Profiler::profileCommitTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen){
+  //- if(PROFILE_XACT){
+  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 2)){
+    const char* openStr = isOpen ? " OPEN" : " CLOSED";
+    const int ID_SPACES = 3;
+    const int TIME_SPACES = 7;
+    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+    // The actual processor number
+    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << " XACT COMMIT " << xid
+                       << " PC 0x" << hex << pc.getAddress()
+                       << dec
+                       << " *PC 0x" << hex << myInst << dec
+                       << " '" << myInstStr << "'"
+                       << openStr
+                       << endl;
+  }
+
+}
+
+// for profiling overflows
+// Debug-trace a transactional load that overflowed the L1 (l1_overflow=true)
+// or the L2 read-set tracking.
+void Profiler::profileLoadOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow){
+  //- if(PROFILE_XACT){
+  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+    const int ID_SPACES = 3;
+    const int TIME_SPACES = 7;
+    string overflow_str = " XACT LOAD L1 OVERFLOW ";
+    if(!l1_overflow){
+      overflow_str = " XACT LOAD L2 OVERFLOW ";
+    }
+    // The actual processor number
+    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << overflow_str << xid
+                       << " ADDR " << addr
+                       << endl;
+  }
+}
+
+// for profiling overflows
+// Debug-trace a transactional store that overflowed the L1 (l1_overflow=true)
+// or the L2 write-set tracking.
+void Profiler::profileStoreOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow){
+  //- if(PROFILE_XACT){
+  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+    const int ID_SPACES = 3;
+    const int TIME_SPACES = 7;
+    string overflow_str = " XACT STORE L1 OVERFLOW ";
+    if(!l1_overflow){
+      overflow_str = " XACT STORE L2 OVERFLOW ";
+    }
+    // The actual processor number
+    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << overflow_str << xid
+                       << " ADDR " << addr
+                       << endl;
+  }
+}
+
+// Debug-trace a transactional load: physical/virtual address, PC, the
+// disassembled instruction, and the loaded value read back from memory.
+void Profiler::profileLoadTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
+  //- if(PROFILE_XACT){
+  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 3)){
+    const int ID_SPACES = 3;
+    const int TIME_SPACES = 7;
+    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+    // The actual processor number
+    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << " XACT LOAD " << xid
+                       << " " << addr
+                       << " VA " << logicalAddress
+                       << " PC " << pc
+                       << " *PC 0x" << hex << myInst << dec
+                       << " '" << myInstStr << "'"
+      //<< " VAL 0x" << hex << SIMICS_read_physical_memory(proc_no, SIMICS_translate_data_address(proc_no, logicalAddress), 4) << dec
+                       << " VAL 0x" << hex << g_system_ptr->getDriver()->readPhysicalMemory(proc_no, addr.getAddress(), 4) << dec
+                       << endl;
+  }
+}
+
+// Debug-trace a NON-transactional load (only when PROFILE_NONXACT is set).
+void Profiler::profileLoad(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
+  if(PROFILE_NONXACT){
+    const int ID_SPACES = 3;
+    const int TIME_SPACES = 7;
+    // The actual processor number
+    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << " LOAD " << xid
+                       << " " << addr
+                       << " VA " << logicalAddress
+                       << " PC " << pc
+      //<< " VAL 0x" << hex << SIMICS_read_physical_memory(proc_no, SIMICS_translate_data_address(proc_no, logicalAddress), 4) << dec
+                       << " VAL 0x" << hex << g_system_ptr->getDriver()->readPhysicalMemory(proc_no, addr.getAddress(), 4) << dec
+                       << endl;
+  }
+}
+
+// Debug-trace a transactional store: physical/virtual address, PC, and the
+// disassembled instruction.
+void Profiler::profileStoreTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
+  //- if(PROFILE_XACT){
+  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 3)){
+    const int ID_SPACES = 3;
+    const int TIME_SPACES = 7;
+    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+    // The actual processor number
+    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << " XACT STORE " << xid
+                       << " " << addr
+                       << " VA " << logicalAddress
+                       << " PC " << pc
+                       << " *PC 0x" << hex << myInst << dec
+                       << " '" << myInstStr << "'"
+                       << endl;
+  }
+}
+
+// Debug-trace a NON-transactional store (only when PROFILE_NONXACT is set).
+void Profiler::profileStore(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
+  if(PROFILE_NONXACT){
+    const int ID_SPACES = 3;
+    const int TIME_SPACES = 7;
+    // The actual processor number
+    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << " STORE " << xid
+                       << " " << addr
+                       << " VA " << logicalAddress
+                       << " PC " << pc
+                       << endl;
+  }
+}
+
+// Record a NACK received by transaction 'xid': optionally trace it, then
+// count nacks per xid, per (xid, nacker-xid) pair, and per nacking PC.
+// NOTE(review): the nacker-xid lookup is stubbed out (nid is hard-coded to 0
+// and an unconditional assert(0) fires) -- this path is not yet ported.
+void Profiler::profileNack(NodeID id, int tid, int xid, int thread, int nacking_thread, NodeID nackedBy, Address addr, Address logicalAddress, Address pc, uint64 seq_ts, uint64 nack_ts, bool possibleCycle){
+  int nid = 0; // g_system_ptr->getChip(nackedBy/RubyConfig::numberOfProcsPerChip())->getTransactionInterfaceManager(nackedBy%RubyConfig::numberOfProcsPerChip())->getXID(nacking_thread);
+  assert(0);
+  //- if(PROFILE_XACT){
+  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+    const int ID_SPACES = 3;
+    const int TIME_SPACES = 7;
+    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+    // The actual processor number
+    int proc_no = id*g_NUM_SMT_THREADS + thread;
+    int nack_proc_no = nackedBy*g_NUM_SMT_THREADS + nacking_thread;
+    Address nack_pc = SIMICS_get_program_counter(nack_proc_no);
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << " XACT NACK " << xid
+                       << " by " << nack_proc_no
+                       << " [ " << nackedBy
+                       << ", " << nacking_thread
+                       << " ]"
+                       << " NID: " << nid
+                       << " " << addr
+                       << " VA " << logicalAddress
+                       << " PC " << pc
+                       << " *PC 0x" << hex << myInst << dec
+                       << " '" << myInstStr << "'"
+                       << " NackerPC " << nack_pc
+                       << " my_ts " << seq_ts
+                       << " nack_ts " << nack_ts
+                       << " possible_cycle " << possibleCycle
+                       << endl;
+  }
+
+  // Record nacks by xid
+  if(!m_nackXIDMap_ptr->exist(xid)){
+    m_nackXIDMap_ptr->add(xid, 1);
+  } else {
+    (m_nackXIDMap_ptr->lookup(xid))++;
+  }
+
+  // Record nack ID pairs by xid
+  // Two-level map: xid -> (nacker id -> count); the inner map is allocated
+  // on the first nack seen for this xid.
+  if(!m_nackXIDPairMap_ptr->exist(xid)){
+    Map<int, int> * new_map = new Map<int, int>;
+    new_map->add(nid, 1);
+    m_nackXIDPairMap_ptr->add(xid, new_map);
+  }
+  else{
+    // retrieve existing map
+    Map<int, int> * my_map = m_nackXIDPairMap_ptr->lookup(xid);
+    if(!my_map->exist(nid)){
+      my_map->add(nid, 1);
+    }
+    else{
+      (my_map->lookup(nid))++;
+    }
+  }
+
+  // Record nacks by pc
+  if(!m_nackPCMap_ptr->exist(pc)){
+    m_nackPCMap_ptr->add(pc, 1);
+  } else {
+    (m_nackPCMap_ptr->lookup(pc))++;
+  }
+}
+
+// Debug-trace an exposed-action conflict.  Note: unlike the other tracers,
+// this one prints unconditionally (the PROFILE_XACT guard is commented out).
+void Profiler::profileExposedConflict(NodeID id, int xid, int thread, Address addr, Address pc){
+  //if(PROFILE_XACT){
+  const int ID_SPACES = 3;
+  const int TIME_SPACES = 7;
+  // The actual processor number
+  int proc_no = id*g_NUM_SMT_THREADS + thread;
+  (* debug_cout_ptr).flags(ios::right);
+  (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+  (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " "
+                     << " EXPOSED ACTION CONFLICT " << xid
+                     << " ADDR " << addr
+                     << " PC " << pc
+                     << endl;
+  //}
+}
+
+// Count one inferred abort.
+void Profiler::profileInferredAbort(){
+  m_inferredAborts++;
+}
+
+// Record the configured abort-delay constants: a fixed startup delay plus a
+// per-block delay.  (Member name "m_abortStarupDelay" is misspelled -- sic.)
+void Profiler::profileAbortDelayConstants(int startupDelay, int perBlock){
+  m_abortStarupDelay = startupDelay;
+  m_abortPerBlockDelay = perBlock;
+}
+
+// Record a transaction abort: optionally trace it, then bump the abort
+// counters keyed by xid, aborting PC, and conflicting address, and histogram
+// the abort delay.  NOTE(review): the aborter-xid lookup is stubbed out
+// (abortingXID forced to 0, followed by assert(0)) -- not yet ported.
+void Profiler::profileAbortTransaction(NodeID id, int tid, int xid, int thread, int delay, int abortingThread, int abortingProc, Address addr, Address pc){
+  const int ID_SPACES = 3;
+  const int TIME_SPACES = 7;
+  int abortingXID = -1;
+  // The actual processor number
+  int proc_no = id*g_NUM_SMT_THREADS + thread;
+  // we are passed in physical proc number. Compute logical abort proc_no
+  int logical_abort_proc_no = abortingProc/g_NUM_SMT_THREADS;
+  if(abortingProc >= 0){
+    AbstractChip * c = g_system_ptr->getChip(logical_abort_proc_no/RubyConfig::numberOfProcsPerChip());
+    abortingXID = 0; // c->getTransactionInterfaceManager(logical_abort_proc_no%RubyConfig::numberOfProcsPerChip())->getXID(abortingThread);
+    assert(0);
+  }
+  //- if(PROFILE_XACT){
+  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+    (* debug_cout_ptr).flags(ios::right);
+    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
+                       << " XACT ABORT " << xid
+                       << " caused by " << abortingProc
+                       << " [ " << logical_abort_proc_no
+                       << ", " << abortingThread
+                       << " ]"
+                       << " xid: " << abortingXID << " "
+                       << " address: " << addr
+                       << " delay: " << delay
+                       << " PC " << pc
+                       << " *PC 0x" << hex << myInst << dec
+                       << " '" << myInstStr << "'"
+                       << endl;
+  }
+  m_transactionAborts++;
+
+  // Record aborts by xid
+  if(!m_abortIDMap_ptr->exist(xid)){
+    m_abortIDMap_ptr->add(xid, 1);
+  } else {
+    (m_abortIDMap_ptr->lookup(xid))++;
+  }
+  m_abortDelays.add(delay);
+
+  // Record aborts by pc
+  if(!m_abortPCMap_ptr->exist(pc)){
+    m_abortPCMap_ptr->add(pc, 1);
+  } else {
+    (m_abortPCMap_ptr->lookup(pc))++;
+  }
+
+  // Record aborts by address
+  if(!m_abortAddressMap_ptr->exist(addr)){
+    m_abortAddressMap_ptr->add(addr, 1);
+  } else {
+    (m_abortAddressMap_ptr->lookup(addr))++;
+  }
+}
+
+void Profiler::profileTransWB(){
+ m_transWBs++;
+}
+
+void Profiler::profileExtraWB(){
+ m_extraWBs++;
+}
+
+void Profiler::profileXactChange(int procs, int cycles){
+ if(!m_procsInXactMap_ptr->exist(procs)){
+ m_procsInXactMap_ptr->add(procs, cycles);
+ } else {
+ (m_procsInXactMap_ptr->lookup(procs)) += cycles;
+ }
+}
+
+void Profiler::profileReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
+ // do NOT count instances when signature is empty!
+ if(!bf_filter_result && !perfect_filter_result){
+ m_readSetEmptyChecks++;
+ return;
+ }
+
+ if(bf_filter_result != perfect_filter_result){
+ m_readSetNoMatch++;
+ /*
+ // we have a false positive
+ if(!m_readSetNoMatch_ptr->exist(addr)){
+ m_readSetNoMatch_ptr->add(addr, 1);
+ }
+ else{
+ (m_readSetNoMatch_ptr->lookup(addr))++;
+ }
+ */
+ }
+ else{
+ m_readSetMatch++;
+ /*
+ // Bloom filter agrees with perfect filter
+ if(!m_readSetMatch_ptr->exist(addr)){
+ m_readSetMatch_ptr->add(addr, 1);
+ }
+ else{
+ (m_readSetMatch_ptr->lookup(addr))++;
+ }
+ */
+ }
+}
+
+
+void Profiler::profileRemoteReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
+ if(bf_filter_result != perfect_filter_result){
+ // we have a false positive
+ if(!m_remoteReadSetNoMatch_ptr->exist(addr)){
+ m_remoteReadSetNoMatch_ptr->add(addr, 1);
+ }
+ else{
+ (m_remoteReadSetNoMatch_ptr->lookup(addr))++;
+ }
+ }
+ else{
+ // Bloom filter agrees with perfect filter
+ if(!m_remoteReadSetMatch_ptr->exist(addr)){
+ m_remoteReadSetMatch_ptr->add(addr, 1);
+ }
+ else{
+ (m_remoteReadSetMatch_ptr->lookup(addr))++;
+ }
+ }
+}
+
+void Profiler::profileWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
+ // do NOT count instances when signature is empty!
+ if(!bf_filter_result && !perfect_filter_result){
+ m_writeSetEmptyChecks++;
+ return;
+ }
+
+ if(bf_filter_result != perfect_filter_result){
+ m_writeSetNoMatch++;
+ /*
+ // we have a false positive
+ if(!m_writeSetNoMatch_ptr->exist(addr)){
+ m_writeSetNoMatch_ptr->add(addr, 1);
+ }
+ else{
+ (m_writeSetNoMatch_ptr->lookup(addr))++;
+ }
+ */
+ }
+ else{
+ m_writeSetMatch++;
+ /*
+ // Bloom filter agrees with perfect filter
+ if(!m_writeSetMatch_ptr->exist(addr)){
+ m_writeSetMatch_ptr->add(addr, 1);
+ }
+ else{
+ (m_writeSetMatch_ptr->lookup(addr))++;
+ }
+ */
+ }
+}
+
+
+void Profiler::profileRemoteWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
+ if(bf_filter_result != perfect_filter_result){
+ // we have a false positive
+ if(!m_remoteWriteSetNoMatch_ptr->exist(addr)){
+ m_remoteWriteSetNoMatch_ptr->add(addr, 1);
+ }
+ else{
+ (m_remoteWriteSetNoMatch_ptr->lookup(addr))++;
+ }
+ }
+ else{
+ // Bloom filter agrees with perfect filter
+ if(!m_remoteWriteSetMatch_ptr->exist(addr)){
+ m_remoteWriteSetMatch_ptr->add(addr, 1);
+ }
+ else{
+ (m_remoteWriteSetMatch_ptr->lookup(addr))++;
+ }
+ }
+}
+
+void Profiler::profileTransactionLogOverflow(NodeID id, Address addr, Address pc){
+ if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+ integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+ const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+ (* debug_cout_ptr).flags(ios::right);
+ (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
+ << " XACT LOG OVERFLOW"
+ << " ADDR " << addr
+ << " PC " << pc
+ << " *PC 0x" << hex << myInst << dec
+ << " '" << myInstStr << "'"
+ << endl;
+
+ }
+ m_transactionLogOverflows++;
+}
+
+void Profiler::profileTransactionCacheOverflow(NodeID id, Address addr, Address pc){
+ if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+ integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+ const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+ (* debug_cout_ptr).flags(ios::right);
+ (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
+ << " XACT CACHE OVERFLOW "
+ << " ADDR " << addr
+ << " PC " << pc
+ << " *PC 0x" << hex << myInst << dec
+ << " '" << myInstStr << "'"
+ << endl;
+
+ }
+ m_transactionCacheOverflows++;
+}
+
+void Profiler::profileGetCPS(NodeID id, uint32 cps, Address pc){
+ if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+ integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+ const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+
+ (* debug_cout_ptr).flags(ios::right);
+ (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
+ << " XACT GET CPS"
+ << " PC " << pc
+ << " *PC 0x" << hex << myInst << dec
+ << " '" << myInstStr << "'"
+ << " CPS 0x" << hex << cps << dec
+ << endl;
+ }
+}
+//---- end Transactional Memory CODE
+
+
+void Profiler::profileExceptionStart(bool xact, NodeID id, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc){
+ if(xact){
+ if(!m_xactExceptionMap_ptr->exist(val)){
+ m_xactExceptionMap_ptr->add(val, 1);
+ } else {
+ (m_xactExceptionMap_ptr->lookup(val))++;
+ }
+ }
+
+ if (!xact && !PROFILE_NONXACT) return;
+
+ if(PROFILE_EXCEPTIONS){
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ // The actual processor number
+ int proc_no = id*g_NUM_SMT_THREADS + thread;
+
+ // get the excepting instruction
+ const char * instruction;
+ physical_address_t addr = SIMICS_translate_address( proc_no, Address(pc));
+ if(val != 0x64 && addr != 0x0){
+ // ignore instruction TLB miss
+ instruction = SIMICS_disassemble_physical( proc_no, addr );
+ }
+
+ (* debug_cout_ptr).flags(ios::right);
+ (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << " ]" << " ";
+ if (xact)
+ (* debug_cout_ptr) << " XACT Exception(";
+ else
+ (* debug_cout_ptr) << " Exception(";
+
+ (* debug_cout_ptr) << hex << val << dec << ")_START--Trap Level " << trap_level
+ << "--(PC=0x" << hex << pc << ", " << npc << ")"
+ << dec;
+
+ if(val != 0x64 && addr != 0x0){
+ (* debug_cout_ptr) << " instruction = " << instruction;
+ }
+ else{
+ (* debug_cout_ptr) << " instruction = INSTRUCTION TLB MISS";
+ }
+ (* debug_cout_ptr) << dec << endl;
+ }
+}
+
+void Profiler::profileExceptionDone(bool xact, NodeID id, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc, uinteger_t tpc, uinteger_t tnpc){
+ if (!xact && !PROFILE_NONXACT) return;
+
+ if (PROFILE_EXCEPTIONS){
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ // The actual processor number
+ int proc_no = id*g_NUM_SMT_THREADS + thread;
+
+ // get the excepting instruction
+ const char * instruction;
+ instruction = SIMICS_disassemble_physical( proc_no, SIMICS_translate_address( proc_no, Address(pc) ) );
+
+
+ (* debug_cout_ptr).flags(ios::right);
+ (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << " ]" << " ";
+ if (xact)
+ (* debug_cout_ptr) << " XACT Exception(";
+ else
+ (* debug_cout_ptr) << " Exception(";
+
+ (* debug_cout_ptr) << hex << val << dec << ")_DONE--Trap Level " << trap_level
+ << "--(PC=0x" << hex << pc << ", " << npc << dec << ")"
+ << "--(TPC=0x" << hex << tpc << ", " << tnpc << dec << ")"
+ << endl;
+ }
+}
+
+void Profiler::rubyWatch(int id){
+ int rn_g1 = SIMICS_get_register_number(id, "g1");
+ uint64 tr = SIMICS_read_register(id, rn_g1);
+ Address watch_address = Address(tr);
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+
+ (* debug_cout_ptr).flags(ios::right);
+ (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
+ << "RUBY WATCH "
+ << watch_address
+ << endl;
+
+ if(!m_watch_address_list_ptr->exist(watch_address)){
+ m_watch_address_list_ptr->add(watch_address, 1);
+ }
+}
+
+bool Profiler::watchAddress(Address addr){
+ if (m_watch_address_list_ptr->exist(addr))
+ return true;
+ else
+ return false;
+}
+
+void Profiler::profileReadFilterBitsSet(int xid, int bits, bool isCommit) {
+ if (isCommit) {
+ if(!m_xactReadFilterBitsSetOnCommit->exist(xid)){
+ Histogram hist;
+ hist.add(bits);
+ m_xactReadFilterBitsSetOnCommit->add(xid, hist);
+ }
+ else{
+ (m_xactReadFilterBitsSetOnCommit->lookup(xid)).add(bits);
+ }
+ } else {
+ if(!m_xactReadFilterBitsSetOnAbort->exist(xid)){
+ Histogram hist;
+ hist.add(bits);
+ m_xactReadFilterBitsSetOnAbort->add(xid, hist);
+ }
+ else{
+ (m_xactReadFilterBitsSetOnAbort->lookup(xid)).add(bits);
+ }
+ }
+}
+
+void Profiler::profileWriteFilterBitsSet(int xid, int bits, bool isCommit) {
+ if (isCommit) {
+ if(!m_xactWriteFilterBitsSetOnCommit->exist(xid)){
+ Histogram hist;
+ hist.add(bits);
+ m_xactWriteFilterBitsSetOnCommit->add(xid, hist);
+ }
+ else{
+ (m_xactWriteFilterBitsSetOnCommit->lookup(xid)).add(bits);
+ }
+ } else {
+ if(!m_xactWriteFilterBitsSetOnAbort->exist(xid)){
+ Histogram hist;
+ hist.add(bits);
+ m_xactWriteFilterBitsSetOnAbort->add(xid, hist);
+ }
+ else{
+ (m_xactWriteFilterBitsSetOnAbort->lookup(xid)).add(bits);
+ }
+ }
+}
+/*
+ //gem5:Arka for decomissioning log_tm
+
+void Profiler::setXactVisualizerFile(char * filename){
+ if ( (filename == NULL) ||
+ (!strcmp(filename, "none")) ) {
+ m_xact_visualizer_ptr = &cout;
+ return;
+ }
+
+ if (m_xact_visualizer.is_open() ) {
+ m_xact_visualizer.close ();
+ }
+ m_xact_visualizer.open (filename, std::ios::out);
+ if (! m_xact_visualizer.is_open() ) {
+ cerr << "setXactVisualizer: can't open file " << filename << endl;
+ }
+ else {
+ m_xact_visualizer_ptr = &m_xact_visualizer;
+ }
+ cout << "setXactVisualizer file " << filename << endl;
+}
+
+void Profiler::printTransactionState(bool can_skip){
+ if (!XACT_VISUALIZER) return;
+ int num_processors = RubyConfig::numberOfProcessors() * RubyConfig::numberofSMTThreads();
+
+ if (!g_system_ptr->getXactVisualizer()->existXactActivity() && can_skip)
+ return;
+
+ if (can_skip && ((g_eventQueue_ptr->getTime()/10000) <= m_xact_visualizer_last))
+ return;
+
+ Vector<char> xactStateVector = g_system_ptr->getXactVisualizer()->getTransactionStateVector();
+ for (int i = 0 ; i < num_processors; i++){
+ (* m_xact_visualizer_ptr) << xactStateVector[i] << " ";
+ }
+ (* m_xact_visualizer_ptr) << " " << g_eventQueue_ptr->getTime() << endl;
+ m_xact_visualizer_last = g_eventQueue_ptr->getTime() / 10000;
+}
+*/
+void Profiler::watchpointsFalsePositiveTrigger()
+{
+ m_watchpointsFalsePositiveTrigger++;
+}
+
+void Profiler::watchpointsTrueTrigger()
+{
+ m_watchpointsTrueTrigger++;
+}
+
+// For MemoryControl:
+void Profiler::profileMemReq(int bank) {
+ m_memReq++;
+ m_memBankCount[bank]++;
+}
+void Profiler::profileMemBankBusy() { m_memBankBusy++; }
+void Profiler::profileMemBusBusy() { m_memBusBusy++; }
+void Profiler::profileMemReadWriteBusy() { m_memReadWriteBusy++; }
+void Profiler::profileMemDataBusBusy() { m_memDataBusBusy++; }
+void Profiler::profileMemTfawBusy() { m_memTfawBusy++; }
+void Profiler::profileMemRefresh() { m_memRefresh++; }
+void Profiler::profileMemRead() { m_memRead++; }
+void Profiler::profileMemWrite() { m_memWrite++; }
+void Profiler::profileMemWaitCycles(int cycles) { m_memWaitCycles += cycles; }
+void Profiler::profileMemInputQ(int cycles) { m_memInputQ += cycles; }
+void Profiler::profileMemBankQ(int cycles) { m_memBankQ += cycles; }
+void Profiler::profileMemArbWait(int cycles) { m_memArbWait += cycles; }
+void Profiler::profileMemRandBusy() { m_memRandBusy++; }
+void Profiler::profileMemNotOld() { m_memNotOld++; }
+
+
+//----------- ATMTP -------------------//
+
+void Profiler::profileTransactionTCC(NodeID id, Address pc){
+ if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+ physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+ integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+ const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ cout.flags(ios::right);
+ cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ cout << setw(ID_SPACES) << id << " "
+ << " XACT Aborting! Executed TCC "
+ << " PC: " << pc
+ << " *PC: 0x" << hex << myInst << dec
+ << " '" << myInstStr << "'"
+ << endl;
+ }
+ m_transactionUnsupInsts++;
+}
+
+void Profiler::profileTransactionUnsupInst(NodeID id, Address pc){
+ if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+ physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+ integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+ const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ cout.flags(ios::right);
+ cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ cout << setw(ID_SPACES) << id << " "
+ << " XACT Aborting! Executed Unsupported Instruction "
+ << " PC: " << pc
+ << " *PC: 0x" << hex << myInst << dec
+ << " '" << myInstStr << "'"
+ << endl;
+ }
+ m_transactionUnsupInsts++;
+}
+
+void Profiler::profileTransactionSaveInst(NodeID id, Address pc){
+ if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+ physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+ integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+ const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ cout.flags(ios::right);
+ cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ cout << setw(ID_SPACES) << id << " "
+ << " XACT Aborting! Executed Save Instruction "
+ << " PC: " << pc
+ << " *PC: 0x" << hex << myInst << dec
+ << " '" << myInstStr << "'"
+ << endl;
+ }
+ m_transactionSaveRestAborts++;
+}
+
+void Profiler::profileTransactionRestoreInst(NodeID id, Address pc){
+ if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
+ physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
+ integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
+ const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
+
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ cout.flags(ios::right);
+ cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ cout << setw(ID_SPACES) << id << " "
+ << " XACT Aborting! Executed Restore Instruction "
+ << " PC: " << pc
+ << " *PC: 0x" << hex << myInst << dec
+ << " '" << myInstStr << "'"
+ << endl;
+ }
+ m_transactionSaveRestAborts++;
+}
+
+void Profiler::profileTimerInterrupt(NodeID id,
+ uinteger_t tick, uinteger_t tick_cmpr,
+ uinteger_t stick, uinteger_t stick_cmpr,
+ int trap_level,
+ uinteger_t pc, uinteger_t npc,
+ uinteger_t pstate, int pil){
+ if (PROFILE_EXCEPTIONS) {
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+ cout.flags(ios::right);
+ cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ cout << setw(ID_SPACES) << id << " ";
+ cout << hex << "Timer--(Tick=0x" << tick << ", TckCmp=0x" << tick_cmpr
+ << ", STick=0x" << stick << ", STickCmp=0x" << stick_cmpr
+ << ")--(PC=" << pc << ", " << npc
+ << dec << ")--(TL=" << trap_level << ", pil=" << pil
+ << hex << ", pstate=0x" << pstate
+ << dec << ")" << endl;
+ }
+}
diff --git a/src/mem/ruby/profiler/Profiler.hh b/src/mem/ruby/profiler/Profiler.hh
new file mode 100644
index 000000000..2961a81d1
--- /dev/null
+++ b/src/mem/ruby/profiler/Profiler.hh
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file has been modified by Kevin Moore and Dan Nussbaum of the
+ Scalable Systems Research Group at Sun Microsystems Laboratories
+ (http://research.sun.com/scalable/) to support the Adaptive
+ Transactional Memory Test Platform (ATMTP).
+
+ Please send email to atmtp-interest@sun.com with feedback, questions, or
+ to request future announcements about ATMTP.
+
+ ----------------------------------------------------------------------
+
+ File modification date: 2008-02-23
+
+ ----------------------------------------------------------------------
+*/
+
+/*
+ * Profiler.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef PROFILER_H
+#define PROFILER_H
+
+#include "Global.hh"
+#include "GenericMachineType.hh"
+#include "RubyConfig.hh"
+#include "Histogram.hh"
+#include "Consumer.hh"
+#include "AccessModeType.hh"
+#include "AccessType.hh"
+#include "NodeID.hh"
+#include "MachineID.hh"
+#include "PrefetchBit.hh"
+#include "Address.hh"
+#include "Set.hh"
+#include "CacheRequestType.hh"
+#include "GenericRequestType.hh"
+//#include "XactProfiler.hh" //gem5:Arka for decomissioning og log_tm
+
+class CacheMsg;
+class CacheProfiler;
+class AddressProfiler;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+
+class Profiler : public Consumer {
+public:
+ // Constructors
+ Profiler();
+
+ // Destructor
+ ~Profiler();
+
+ // Public Methods
+ void wakeup();
+
+ void setPeriodicStatsFile(const string& filename);
+ void setPeriodicStatsInterval(integer_t period);
+
+ void setXactVisualizerFile(char* filename);
+
+ void printStats(ostream& out, bool short_stats=false);
+ void printShortStats(ostream& out) { printStats(out, true); }
+ void printTraceStats(ostream& out) const;
+ void clearStats();
+ void printConfig(ostream& out) const;
+ void printResourceUsage(ostream& out) const;
+
+ AddressProfiler* getAddressProfiler() { return m_address_profiler_ptr; }
+ AddressProfiler* getInstructionProfiler() { return m_inst_profiler_ptr; }
+ //XactProfiler* getXactProfiler() { return m_xact_profiler_ptr;} //gem5:Arka for decomissioning og log_tm
+
+ void addPrimaryStatSample(const CacheMsg& msg, NodeID id);
+ void addSecondaryStatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id);
+ void addSecondaryStatSample(CacheRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id);
+ void addAddressTraceSample(const CacheMsg& msg, NodeID id);
+
+ void profileRequest(const string& requestStr);
+ void profileSharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner);
+
+ void profileMulticastRetry(const Address& addr, int count);
+
+ void profileFilterAction(int action);
+
+ void profileConflictingRequests(const Address& addr);
+ void profileOutstandingRequest(int outstanding) { m_outstanding_requests.add(outstanding); }
+ void profileOutstandingPersistentRequest(int outstanding) { m_outstanding_persistent_requests.add(outstanding); }
+ void profileAverageLatencyEstimate(int latency) { m_average_latency_estimate.add(latency); }
+
+ void countBAUnicast() { m_num_BA_unicasts++; }
+ void countBABroadcast() { m_num_BA_broadcasts++; }
+
+ void recordPrediction(bool wasGood, bool wasPredicted);
+
+ void startTransaction(int cpu);
+ void endTransaction(int cpu);
+ void profilePFWait(Time waitTime);
+
+ void controllerBusy(MachineID machID);
+ void bankBusy();
+ void missLatency(Time t, CacheRequestType type, GenericMachineType respondingMach);
+ void swPrefetchLatency(Time t, CacheRequestType type, GenericMachineType respondingMach);
+ void stopTableUsageSample(int num) { m_stopTableProfile.add(num); }
+ void L1tbeUsageSample(int num) { m_L1tbeProfile.add(num); }
+ void L2tbeUsageSample(int num) { m_L2tbeProfile.add(num); }
+ void sequencerRequests(int num) { m_sequencer_requests.add(num); }
+ void storeBuffer(int size, int blocks) { m_store_buffer_size.add(size); m_store_buffer_blocks.add(blocks);}
+
+ void profileGetXMaskPrediction(const Set& pred_set);
+ void profileGetSMaskPrediction(const Set& pred_set);
+ void profileTrainingMask(const Set& pred_set);
+ void profileTransition(const string& component, NodeID id, NodeID version, Address addr,
+ const string& state, const string& event,
+ const string& next_state, const string& note);
+ void profileMsgDelay(int virtualNetwork, int delayCycles);
+
+ void print(ostream& out) const;
+
+ int64 getTotalInstructionsExecuted() const;
+ int64 getTotalTransactionsExecuted() const;
+
+ //---- begin Transactional Memory CODE
+ #if 0 //gem5:Arka for decomissioning og log_tm
+ void profileTransCycles(int proc, int cycles) { getXactProfiler()->profileTransCycles(proc, cycles);}
+ void profileNonTransCycles(int proc, int cycles) { getXactProfiler()->profileNonTransCycles(proc, cycles);}
+ void profileStallTransCycles(int proc, int cycles) { getXactProfiler()->profileStallTransCycles(proc, cycles); }
+ void profileStallNonTransCycles(int proc, int cycles) { getXactProfiler()->profileStallNonTransCycles(proc, cycles); }
+ void profileAbortingTransCycles(int proc, int cycles) { getXactProfiler()->profileAbortingTransCycles(proc, cycles); }
+ void profileCommitingTransCycles(int proc, int cycles) { getXactProfiler()->profileCommitingTransCycles(proc, cycles); }
+ void profileBarrierCycles(int proc, int cycles) { getXactProfiler()->profileBarrierCycles(proc, cycles);}
+ void profileBackoffTransCycles(int proc, int cycles) { getXactProfiler()->profileBackoffTransCycles(proc, cycles); }
+ void profileGoodTransCycles(int proc, int cycles) {getXactProfiler()->profileGoodTransCycles(proc, cycles); }
+
+ #endif //gem5:Arka TODO clean up the rest of this functions as well
+ void profileTransaction(int size, int logSize, int readS, int writeS, int overflow_readS, int overflow_writeS, int retries, int cycles, bool nacked, int loadMisses, int storeMisses, int instrCount, int xid);
+ void profileBeginTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen);
+ void profileCommitTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen);
+ void profileLoadTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc);
+ void profileLoad(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc);
+ void profileStoreTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc);
+ void profileStore(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc);
+ void profileLoadOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow);
+ void profileStoreOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow);
+ void profileNack(NodeID id, int tid, int xid, int thread, int nacking_thread, NodeID nackedBy, Address addr, Address logicalAddress, Address pc, uint64 seq_ts, uint64 nack_ts, bool possibleCycle);
+ void profileExposedConflict(NodeID id, int xid, int thread, Address addr, Address pc);
+ void profileTransWB();
+ void profileExtraWB();
+ void profileInferredAbort();
+ void profileAbortTransaction(NodeID id, int tid, int xid, int thread, int delay, int abortingThread, int abortingProc, Address addr, Address pc);
+ void profileExceptionStart(bool xact, NodeID proc_no, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc);
+ void profileExceptionDone(bool xact, NodeID proc_no, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc, uinteger_t tpc, uinteger_t tnpc);
+ void profileTimerInterrupt(NodeID id,
+ uinteger_t tick, uinteger_t tick_cmpr,
+ uinteger_t stick, uinteger_t stick_cmpr,
+ int trap_level,
+ uinteger_t pc, uinteger_t npc,
+ uinteger_t pstate, int pil);
+
+ void profileAbortDelayConstants(int handlerStartupDelay, int handlerPerBlockDelay);
+ void profileXactChange(int procs, int cycles);
+ void profileReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread);
+ void profileWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread);
+ void profileRemoteReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread);
+ void profileRemoteWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread);
+
+
+ void profileReadFilterBitsSet(int xid, int bits, bool isCommit);
+ void profileWriteFilterBitsSet(int xid, int bits, bool isCommit);
+
+ void printTransactionState(bool can_skip);
+
+ void watchpointsFalsePositiveTrigger();
+ void watchpointsTrueTrigger();
+
+ void profileTransactionLogOverflow(NodeID id, Address addr, Address pc);
+ void profileTransactionCacheOverflow(NodeID id, Address addr, Address pc);
+ void profileGetCPS(NodeID id, uint32 cps, Address pc);
+ void profileTransactionTCC(NodeID id, Address pc);
+ void profileTransactionUnsupInst(NodeID id, Address pc);
+ void profileTransactionSaveInst(NodeID id, Address pc);
+ void profileTransactionRestoreInst(NodeID id, Address pc);
+
+ //---- end Transactional Memory CODE
+
+ void rubyWatch(int proc);
+ bool watchAddress(Address addr);
+
+ // return Ruby's start time
+ Time getRubyStartTime(){
+ return m_ruby_start;
+ }
+
+ // added for MemoryControl:
+ void profileMemReq(int bank);
+ void profileMemBankBusy();
+ void profileMemBusBusy();
+ void profileMemTfawBusy();
+ void profileMemReadWriteBusy();
+ void profileMemDataBusBusy();
+ void profileMemRefresh();
+ void profileMemRead();
+ void profileMemWrite();
+ void profileMemWaitCycles(int cycles);
+ void profileMemInputQ(int cycles);
+ void profileMemBankQ(int cycles);
+ void profileMemArbWait(int cycles);
+ void profileMemRandBusy();
+ void profileMemNotOld();
+
+private:
+ // Private Methods
+ void addL2StatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id);
+ void addL1DStatSample(const CacheMsg& msg, NodeID id);
+ void addL1IStatSample(const CacheMsg& msg, NodeID id);
+
+ GenericRequestType CacheRequestType_to_GenericRequestType(const CacheRequestType& type);
+
+ // Private copy constructor and assignment operator
+ Profiler(const Profiler& obj);
+ Profiler& operator=(const Profiler& obj);
+
+ // Data Members (m_ prefix)
+ CacheProfiler* m_L1D_cache_profiler_ptr;
+ CacheProfiler* m_L1I_cache_profiler_ptr;
+ CacheProfiler* m_L2_cache_profiler_ptr;
+ AddressProfiler* m_address_profiler_ptr;
+ AddressProfiler* m_inst_profiler_ptr;
+
+// XactProfiler* m_xact_profiler_ptr; // gem5:Arka for decomissioning of log_tm
+
+ Vector<int64> m_instructions_executed_at_start;
+ Vector<int64> m_cycles_executed_at_start;
+
+ ostream* m_periodic_output_file_ptr;
+ integer_t m_stats_period;
+ std::fstream m_xact_visualizer;
+ std::ostream *m_xact_visualizer_ptr;
+
+ Time m_ruby_start;
+ time_t m_real_time_start_time;
+
+ int m_num_BA_unicasts;
+ int m_num_BA_broadcasts;
+
+ Vector<integer_t> m_perProcTotalMisses;
+ Vector<integer_t> m_perProcUserMisses;
+ Vector<integer_t> m_perProcSupervisorMisses;
+ Vector<integer_t> m_perProcStartTransaction;
+ Vector<integer_t> m_perProcEndTransaction;
+ Vector < Vector < integer_t > > m_busyControllerCount;
+ integer_t m_busyBankCount;
+ Histogram m_multicast_retry_histogram;
+
+ Histogram m_L1tbeProfile;
+ Histogram m_L2tbeProfile;
+ Histogram m_stopTableProfile;
+
+ Histogram m_filter_action_histogram;
+ Histogram m_tbeProfile;
+
+ Histogram m_sequencer_requests;
+ Histogram m_store_buffer_size;
+ Histogram m_store_buffer_blocks;
+ Histogram m_read_sharing_histogram;
+ Histogram m_write_sharing_histogram;
+ Histogram m_all_sharing_histogram;
+ int64 m_cache_to_cache;
+ int64 m_memory_to_cache;
+
+ Histogram m_prefetchWaitHistogram;
+
+ Vector<Histogram> m_missLatencyHistograms;
+ Vector<Histogram> m_machLatencyHistograms;
+ Histogram m_L2MissLatencyHistogram;
+ Histogram m_allMissLatencyHistogram;
+
+ Histogram m_allSWPrefetchLatencyHistogram;
+ Histogram m_SWPrefetchL2MissLatencyHistogram;
+ Vector<Histogram> m_SWPrefetchLatencyHistograms;
+ Vector<Histogram> m_SWPrefetchMachLatencyHistograms;
+
+ Histogram m_delayedCyclesHistogram;
+ Histogram m_delayedCyclesNonPFHistogram;
+ Vector<Histogram> m_delayedCyclesVCHistograms;
+
+ int m_predictions;
+ int m_predictionOpportunities;
+ int m_goodPredictions;
+
+ Histogram m_gets_mask_prediction;
+ Histogram m_getx_mask_prediction;
+ Histogram m_explicit_training_mask;
+
+ // For profiling possibly conflicting requests
+ Map<Address, Time>* m_conflicting_map_ptr;
+ Histogram m_conflicting_histogram;
+
+ Histogram m_outstanding_requests;
+ Histogram m_outstanding_persistent_requests;
+
+ Histogram m_average_latency_estimate;
+
+ //---- begin Transactional Memory CODE
+ Map <int, int>* m_procsInXactMap_ptr;
+
+ Histogram m_xactCycles;
+ Histogram m_xactLogs;
+ Histogram m_xactReads;
+ Histogram m_xactWrites;
+ Histogram m_xactOverflowReads;
+ Histogram m_xactOverflowWrites;
+ Histogram m_xactOverflowTotalReads;
+ Histogram m_xactOverflowTotalWrites;
+ Histogram m_xactSizes;
+ Histogram m_xactRetries;
+ Histogram m_abortDelays;
+ Histogram m_xactLoadMisses;
+ Histogram m_xactStoreMisses;
+ Histogram m_xactInstrCount;
+ int m_xactNacked;
+ int m_transactionAborts;
+ int m_transWBs;
+ int m_extraWBs;
+ int m_abortStarupDelay;
+ int m_abortPerBlockDelay;
+ int m_inferredAborts;
+ Map <int, int>* m_nackXIDMap_ptr;
+ // pairs of XIDs involved in NACKs
+ Map<int, Map<int, int> * > * m_nackXIDPairMap_ptr;
+ Map <Address, int>* m_nackPCMap_ptr;
+ Map <int, int>* m_xactExceptionMap_ptr;
+ Map <int, int>* m_abortIDMap_ptr;
+ Map <int, int>* m_commitIDMap_ptr;
+ Map <int, int>* m_xactRetryIDMap_ptr;
+ Map <int, int>* m_xactCyclesIDMap_ptr;
+ Map <int, int>* m_xactReadSetIDMap_ptr;
+ Map <int, int>* m_xactWriteSetIDMap_ptr;
+ Map <int, int>* m_xactLoadMissIDMap_ptr;
+ Map <int, int>* m_xactStoreMissIDMap_ptr;
+ Map <int, integer_t> *m_xactInstrCountIDMap_ptr;
+ Map <Address, int>* m_abortPCMap_ptr;
+ Map <Address, int>* m_abortAddressMap_ptr;
+ Map <Address, int>* m_readSetMatch_ptr;
+ Map <Address, int>* m_readSetNoMatch_ptr;
+ Map <Address, int>* m_writeSetMatch_ptr;
+ Map <Address, int>* m_writeSetNoMatch_ptr;
+ Map <Address, int>* m_remoteReadSetMatch_ptr;
+ Map <Address, int>* m_remoteReadSetNoMatch_ptr;
+ Map <Address, int>* m_remoteWriteSetMatch_ptr;
+ Map <Address, int>* m_remoteWriteSetNoMatch_ptr;
+ long long int m_readSetEmptyChecks;
+ long long int m_readSetMatch;
+ long long int m_readSetNoMatch;
+ long long int m_writeSetEmptyChecks;
+ long long int m_writeSetMatch;
+ long long int m_writeSetNoMatch;
+ Map<int, Histogram> * m_xactReadFilterBitsSetOnCommit;
+ Map<int, Histogram> * m_xactReadFilterBitsSetOnAbort;
+ Map<int, Histogram> * m_xactWriteFilterBitsSetOnCommit;
+ Map<int, Histogram> * m_xactWriteFilterBitsSetOnAbort;
+
+ unsigned int m_watchpointsFalsePositiveTrigger;
+ unsigned int m_watchpointsTrueTrigger;
+
+ int m_transactionUnsupInsts;
+ int m_transactionSaveRestAborts;
+
+ int m_transactionLogOverflows;
+ int m_transactionCacheOverflows;
+
+ //---- end Transactional Memory CODE
+
+ Map<Address, int>* m_watch_address_list_ptr;
+ // counts all initiated cache request including PUTs
+ int m_requests;
+ Map <string, int>* m_requestProfileMap_ptr;
+
+ Time m_xact_visualizer_last;
+
+ // added for MemoryControl:
+ long long int m_memReq;
+ long long int m_memBankBusy;
+ long long int m_memBusBusy;
+ long long int m_memTfawBusy;
+ long long int m_memReadWriteBusy;
+ long long int m_memDataBusBusy;
+ long long int m_memRefresh;
+ long long int m_memRead;
+ long long int m_memWrite;
+ long long int m_memWaitCycles;
+ long long int m_memInputQ;
+ long long int m_memBankQ;
+ long long int m_memArbWait;
+ long long int m_memRandBusy;
+ long long int m_memNotOld;
+ Vector<long long int> m_memBankCount;
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Profiler& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Profiler& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //PROFILER_H
+
+
diff --git a/src/mem/ruby/profiler/StoreTrace.cc b/src/mem/ruby/profiler/StoreTrace.cc
new file mode 100644
index 000000000..c53f590ac
--- /dev/null
+++ b/src/mem/ruby/profiler/StoreTrace.cc
@@ -0,0 +1,158 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "StoreTrace.hh"
+#include "EventQueue.hh"
+
+bool StoreTrace::s_init = false; // True once the class-wide histograms below have been allocated
+int64 StoreTrace::s_total_samples = 0; // Total number of store lifetimes of all lines
+Histogram* StoreTrace::s_store_count_ptr = NULL;
+Histogram* StoreTrace::s_store_first_to_stolen_ptr = NULL;
+Histogram* StoreTrace::s_store_last_to_stolen_ptr = NULL;
+Histogram* StoreTrace::s_store_first_to_last_ptr = NULL;
+
+// Per-line store trace.  Histograms are constructed with -1 (presumably
+// the Histogram binsize convention used throughout Ruby -- confirm).
+StoreTrace::StoreTrace(const Address& addr) :
+ m_store_count(-1), m_store_first_to_stolen(-1), m_store_last_to_stolen(-1), m_store_first_to_last(-1)
+{
+ StoreTrace::initSummary();
+ m_addr = addr;
+ m_total_samples = 0;
+ m_last_writer = -1; // Really -1 isn't valid, so this will trigger the initialization code
+ m_stores_this_interval = 0;
+}
+
+// Nothing to release: the static histograms are class-wide state and are
+// not owned by individual instances.
+StoreTrace::~StoreTrace()
+{
+}
+
+// Dump this line's address, sample count, and per-line histograms.
+void StoreTrace::print(ostream& out) const
+{
+ out << m_addr;
+ out << " total_samples: " << m_total_samples << endl;
+ out << "store_count: " << m_store_count << endl;
+ out << "store_first_to_stolen: " << m_store_first_to_stolen << endl;
+ out << "store_last_to_stolen: " << m_store_last_to_stolen << endl;
+ out << "store_first_to_last: " << m_store_first_to_last << endl;
+}
+
+// Class method
+// Lazily allocate the class-wide summary histograms.  The first call
+// does the allocation; every later call is a no-op.
+void StoreTrace::initSummary()
+{
+  if (s_init) {
+    return;
+  }
+  s_total_samples = 0;
+  s_store_count_ptr = new Histogram(-1);
+  s_store_first_to_stolen_ptr = new Histogram(-1);
+  s_store_last_to_stolen_ptr = new Histogram(-1);
+  s_store_first_to_last_ptr = new Histogram(-1);
+  s_init = true;
+}
+
+// Class method
+// Dump the aggregate (all-lines) sample count and histograms.
+void StoreTrace::printSummary(ostream& out)
+{
+ out << "total_samples: " << s_total_samples << endl;
+ out << "store_count: " << (*s_store_count_ptr) << endl;
+ out << "store_first_to_stolen: " << (*s_store_first_to_stolen_ptr) << endl;
+ out << "store_last_to_stolen: " << (*s_store_last_to_stolen_ptr) << endl;
+ out << "store_first_to_last: " << (*s_store_first_to_last_ptr) << endl;
+}
+
+// Class method
+// Reset the aggregate statistics.  initSummary() is called first so the
+// histograms are guaranteed to exist before they are cleared.
+void StoreTrace::clearSummary()
+{
+ StoreTrace::initSummary();
+ s_total_samples = 0;
+ s_store_count_ptr->clear();
+ s_store_first_to_stolen_ptr->clear();
+ s_store_last_to_stolen_ptr->clear();
+ s_store_first_to_last_ptr->clear();
+}
+
+// Record one store by 'node' to this line.  A line has at most one
+// writer per interval; downgrade() resets m_last_writer to -1 before a
+// different node may write.
+void StoreTrace::store(NodeID node)
+{
+  Time current = g_eventQueue_ptr->getTime();
+
+  assert((m_last_writer == -1) || (m_last_writer == node));
+
+  // Check the no-owner invariant *before* recording the new writer.
+  // The original code assigned m_last_writer = node first, which made
+  // this check dead code (the branch could never be taken).
+  if (m_last_writer == -1) {
+    assert(m_stores_this_interval == 0);
+  }
+
+  m_last_writer = node;
+
+  if (m_stores_this_interval == 0) {
+    // A new processor just wrote the line, so reset the stats
+    m_first_store = current;
+  }
+
+  m_last_store = current;
+  m_stores_this_interval++;
+}
+
+// Called when 'node' loses write permission.  If it was the tracked last
+// writer, the current store interval ends: fold its timing into both the
+// per-line and the class-wide histograms, then reset the interval state
+// so the next store starts a fresh interval.
+void StoreTrace::downgrade(NodeID node)
+{
+ if (node == m_last_writer) {
+ Time current = g_eventQueue_ptr->getTime();
+ assert(m_stores_this_interval != 0);
+ assert(m_last_store != 0);
+ assert(m_first_store != 0);
+ assert(m_last_writer != -1);
+
+ // Per line stats
+ m_store_first_to_stolen.add(current - m_first_store);
+ m_store_count.add(m_stores_this_interval);
+ m_store_last_to_stolen.add(current - m_last_store);
+ m_store_first_to_last.add(m_last_store - m_first_store);
+ m_total_samples++;
+
+ // Global stats
+ assert(s_store_first_to_stolen_ptr != NULL);
+ s_store_first_to_stolen_ptr->add(current - m_first_store);
+ s_store_count_ptr->add(m_stores_this_interval);
+ s_store_last_to_stolen_ptr->add(current - m_last_store);
+ s_store_first_to_last_ptr->add(m_last_store - m_first_store);
+ s_total_samples++;
+
+ // Initialize for next go round
+ m_stores_this_interval = 0;
+ m_last_store = 0;
+ m_first_store = 0;
+ m_last_writer = -1;
+ }
+}
+
+// NOTE(review): despite the "less_then_eq" name this compares with '>',
+// i.e. it orders traces by *descending* total sample count -- presumably
+// so heap/sort consumers surface the hottest lines first; confirm callers.
+bool node_less_then_eq(const StoreTrace* n1, const StoreTrace* n2)
+{
+ return (n1->getTotal() > n2->getTotal());
+}
diff --git a/src/mem/ruby/profiler/StoreTrace.hh b/src/mem/ruby/profiler/StoreTrace.hh
new file mode 100644
index 000000000..821345aba
--- /dev/null
+++ b/src/mem/ruby/profiler/StoreTrace.hh
@@ -0,0 +1,109 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef StoreTrace_H
+#define StoreTrace_H
+
+#include "Global.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "Histogram.hh"
+
+// Tracks the store behavior of a single cache line: how many stores a
+// writer performs between acquiring and losing write permission, and the
+// timing of those intervals.  Class-level (s_) members aggregate the same
+// statistics across all lines.
+class StoreTrace {
+public:
+ // Constructors
+ StoreTrace() { }  // NOTE(review): leaves members uninitialized -- presumably for container use; confirm
+ explicit StoreTrace(const Address& addr);
+
+ // Destructor
+ ~StoreTrace();
+
+ // Public Methods
+ void store(NodeID node);
+ void downgrade(NodeID node);
+ int getTotal() const { return m_total_samples; }
+ static void initSummary();
+ static void printSummary(ostream& out);
+ static void clearSummary();
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // StoreTrace(const StoreTrace& obj);
+ // StoreTrace& operator=(const StoreTrace& obj);
+
+ // Class Members (s_ prefix)
+ static bool s_init;  // True once the static histograms have been allocated
+ static int64 s_total_samples; // Total number of store lifetimes of all lines
+ static Histogram* s_store_count_ptr;
+ static Histogram* s_store_first_to_stolen_ptr;
+ static Histogram* s_store_last_to_stolen_ptr;
+ static Histogram* s_store_first_to_last_ptr;
+
+ // Data Members (m_ prefix)
+
+ Address m_addr;
+ NodeID m_last_writer;          // -1 when no writer owns the current interval
+ Time m_first_store;            // time of the first store in the current interval
+ Time m_last_store;             // time of the most recent store in the interval
+ int m_stores_this_interval;
+
+ int64 m_total_samples; // Total number of store lifetimes of this line
+ Histogram m_store_count;
+ Histogram m_store_first_to_stolen;
+ Histogram m_store_last_to_stolen;
+ Histogram m_store_first_to_last;
+};
+
+bool node_less_then_eq(const StoreTrace* n1, const StoreTrace* n2);
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StoreTrace& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// 'extern inline' gives this header-defined operator a single linkage
+// across all translation units that include it.
+extern inline
+ostream& operator<<(ostream& out, const StoreTrace& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //StoreTrace_H
diff --git a/src/mem/ruby/profiler/XactProfiler.cc b/src/mem/ruby/profiler/XactProfiler.cc
new file mode 100644
index 000000000..55fb86472
--- /dev/null
+++ b/src/mem/ruby/profiler/XactProfiler.cc
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "XactProfiler.hh"
+#include "CacheMsg.hh"
+#include "Map.hh"
+#include "Debug.hh"
+#include "MachineType.hh"
+#include "TransactionInterfaceManager.hh"
+#include "Driver.hh"
+#include "interface.hh"
+
+extern std::ostream * debug_cout_ptr;
+
+// Allocate one counter slot per hardware thread (physical processors
+// times SMT threads) for every tracked cycle category, then zero them
+// all via clearStats().
+XactProfiler::XactProfiler()
+{
+ int num_processors = RubyConfig::numberOfProcessors() * RubyConfig::numberofSMTThreads();
+
+ m_xactTransCycles = new long long int[num_processors];
+ m_xactStallTransCycles = new long long int[num_processors];
+ m_xactStallNonTransCycles = new long long int[num_processors];
+ m_xactAbortingCycles = new long long int[num_processors];
+ m_xactCommitingCycles = new long long int[num_processors];
+ m_xactBackoffCycles = new long long int[num_processors];
+ m_BarrierCycles = new long long int[num_processors];
+
+ m_xactGoodTransCycles = new long long int[num_processors];
+ m_xactNonTransCycles = new long long int[num_processors];
+
+ m_xactTimedCycles = new long long int[num_processors];
+ m_xactBeginTimer = new long long int[num_processors];
+
+ clearStats();
+}
+
+// Release every array allocated in the constructor.  The original
+// destructor leaked m_xactCommitingCycles and m_BarrierCycles.
+XactProfiler::~XactProfiler()
+{
+  delete [] m_xactTransCycles;
+  delete [] m_xactStallTransCycles;
+  delete [] m_xactStallNonTransCycles;
+  delete [] m_xactAbortingCycles;
+  delete [] m_xactCommitingCycles;
+  delete [] m_xactBackoffCycles;
+  delete [] m_BarrierCycles;
+
+  delete [] m_xactGoodTransCycles;
+  delete [] m_xactNonTransCycles;
+
+  delete [] m_xactTimedCycles;
+  delete [] m_xactBeginTimer;
+}
+
+// Print the (currently empty) configuration banner for this profiler.
+void XactProfiler::printConfig(ostream& out) const
+{
+  // Same three lines as before, emitted as one chained insertion.
+  out << endl
+      << "XactProfiler Configuration" << endl
+      << "----------------------" << endl;
+}
+
+// Short identification tag used by operator<<.
+void XactProfiler::print(ostream& out) const
+{
+ out << "[XactProfiler]";
+}
+
+// Dump the transactional-memory cycle breakdown.  With short_stats the
+// per-processor lines are suppressed and only the machine-wide totals
+// (accumulated in the loop below) are printed.
+void XactProfiler::printStats(ostream& out, bool short_stats)
+{
+ int num_processors = RubyConfig::numberOfProcessors() * RubyConfig::numberofSMTThreads();
+ out << endl;
+
+ out << "XactProfiler Stats" << endl;
+ out << "--------------" << endl;
+
+ // max_hashFunction is -1 until profileHashValue() records a sample.
+ if (max_hashFunction >= 0){
+ out << "Hash values distribution" << endl;
+ out << "------------------------" << endl;
+ for(int i = 0; i <= max_hashFunction; i++){
+ out << "Hash function " << i << ": " << m_hashProfile[i] << endl;
+ }
+ out << endl;
+ }
+
+ out << "xact_cycle_breakdown" << endl;
+ out << "--------------------" << endl;
+ long long int total_trans_cycles = 0;
+ long long int total_aborting_trans_cycles = 0;
+ long long int total_commiting_trans_cycles = 0;
+ long long int total_backoff_trans_cycles = 0;
+ long long int total_stall_trans_cycles = 0;
+ long long int total_stall_nontrans_cycles = 0;
+ long long int total_barrier_cycles = 0;
+
+ long long int total_good_trans_cycles = 0;
+ long long int total_nontrans_cycles = 0;
+
+ long long int total_timed_cycles = 0;
+
+ for(int i=0; i < num_processors; ++i){
+ if (!short_stats){
+ out << "xact_trans_cycles_processor_" << i << ": " << m_xactTransCycles[i] << endl;
+ out << "xact_aborting_cycles_processor_" << i << ": " << m_xactAbortingCycles[i] << endl;
+ out << "xact_barrier_cycles_processor_" << i << ": " << m_BarrierCycles[i] << endl;
+ out << "xact_backoff_cycles_processor_" << i << ": " << m_xactBackoffCycles[i] << endl;
+ out << "xact_stall_trans_cycles_processor_" << i << ": " << m_xactStallTransCycles[i] << endl;
+ out << "xact_nontrans_cycles_processor_" << i << ": " << m_xactNonTransCycles[i] << endl;
+ out << "xact_stall_nontrans_cycles_processor_" << i << ": " << m_xactStallNonTransCycles[i] << endl;
+ out << "timed_cycles_processor_" << i << ": " << m_xactTimedCycles[i] << endl;
+ }
+
+ // Totals are accumulated even in short_stats mode.
+ total_trans_cycles += m_xactTransCycles[i];
+ total_stall_trans_cycles += m_xactStallTransCycles[i];
+ total_aborting_trans_cycles += m_xactAbortingCycles[i];
+ total_commiting_trans_cycles += m_xactCommitingCycles[i];
+ total_backoff_trans_cycles += m_xactBackoffCycles[i];
+ total_barrier_cycles += m_BarrierCycles[i];
+ total_nontrans_cycles += m_xactNonTransCycles[i];
+ total_good_trans_cycles += m_xactGoodTransCycles[i];
+ total_stall_nontrans_cycles += m_xactStallNonTransCycles[i];
+ total_timed_cycles += m_xactTimedCycles[i];
+
+ }
+ out << endl;
+ out << " XACT CYCLE BREAKDOWN " << endl;
+ out << " XACT_BREAKDOWN_NON_TRANS_CYCLES: " << total_nontrans_cycles << endl;
+ out << " XACT_BREAKDOWN_TRANS_CYCLES: " << total_trans_cycles << endl;
+ out << " XACT_BREAKDOWN_GOOD_TRANS_CYCLES: " << total_good_trans_cycles << endl;
+ out << " XACT_BREAKDOWN_ABORTING_CYCLES: " << total_aborting_trans_cycles << endl;
+ out << " XACT_BREAKDOWN_COMMITING_CYCLES: " << total_commiting_trans_cycles << endl;
+ out << " XACT_BREAKDOWN_BACKOFF_CYCLES: " << total_backoff_trans_cycles << endl;
+ out << " XACT_BREAKDOWN_BARRIER_CYCLES: " << total_barrier_cycles << endl;
+ out << " XACT_BREAKDOWN_STALL_CYCLES: " << total_stall_trans_cycles << endl;
+ out << endl;
+ out << " XACT_TIMED_CYCLES: " << total_timed_cycles << endl;
+ out << endl;
+
+}
+
+// Reset all per-processor cycle counters and the hash-value histograms.
+// SMT threads count as separate processors, matching the constructor's
+// array sizing.
+void XactProfiler::clearStats()
+{
+  int num_processors = RubyConfig::numberOfProcessors() * RubyConfig::numberofSMTThreads();
+  for(int i=0; i < num_processors; ++i){
+    m_xactTransCycles[i] = 0;
+    m_xactStallTransCycles[i] = 0;
+    m_xactGoodTransCycles[i] = 0;
+    m_xactBackoffCycles[i] = 0;
+    m_xactAbortingCycles[i] = 0;
+    m_xactCommitingCycles[i] = 0;
+    m_xactNonTransCycles[i] = 0;
+    m_xactStallNonTransCycles[i] = 0;
+    m_BarrierCycles[i] = 0;
+    m_xactTimedCycles[i] = 0;
+  }
+
+  max_hashFunction = -1;
+  m_hashProfile.setSize(16);
+  // Clear all 16 histograms.  The original loop stopped at i < 15,
+  // leaving the last entry carrying stale samples across a stats reset.
+  for (int i = 0; i < 16; i++) {
+    m_hashProfile[i].clear();
+  }
+}
+
+// --- Per-processor cycle accounting -------------------------------------
+// Each profileX(proc, cycles) call accumulates into that category's
+// counter for 'proc'; the matching getX(proc) reads the running total.
+
+void XactProfiler::profileTransCycles(int proc, int cycles){
+ m_xactTransCycles[proc] += cycles;
+}
+
+long long int XactProfiler::getTransCycles(int proc){
+ return m_xactTransCycles[proc];
+}
+
+void XactProfiler::profileStallTransCycles(int proc, int cycles){
+ m_xactStallTransCycles[proc] += cycles;
+}
+
+long long int XactProfiler::getStallTransCycles(int proc){
+ return m_xactStallTransCycles[proc];
+}
+
+void XactProfiler::profileGoodTransCycles(int proc, int cycles){
+ m_xactGoodTransCycles[proc] += cycles;
+}
+
+long long int XactProfiler::getGoodTransCycles(int proc){
+ return m_xactGoodTransCycles[proc];
+}
+
+void XactProfiler::profileAbortingTransCycles(int proc, int cycles){
+ m_xactAbortingCycles[proc] += cycles;
+}
+
+long long int XactProfiler::getAbortingTransCycles(int proc){
+ return m_xactAbortingCycles[proc];
+}
+
+void XactProfiler::profileCommitingTransCycles(int proc, int cycles){
+ m_xactCommitingCycles[proc] += cycles;
+}
+
+long long int XactProfiler::getCommitingTransCycles(int proc){
+ return m_xactCommitingCycles[proc];
+}
+
+void XactProfiler::profileBackoffTransCycles(int proc, int cycles){
+ m_xactBackoffCycles[proc] += cycles;
+}
+
+long long int XactProfiler::getBackoffTransCycles(int proc){
+ return m_xactBackoffCycles[proc];
+}
+
+// Note: stall-non-trans has a profile method but no getter in this file.
+void XactProfiler::profileStallNonTransCycles(int proc, int cycles){
+ m_xactStallNonTransCycles[proc] += cycles;
+}
+
+void XactProfiler::profileNonTransCycles(int proc, int cycles){
+ m_xactNonTransCycles[proc] += cycles;
+}
+
+long long int XactProfiler::getNonTransCycles(int proc){
+ return m_xactNonTransCycles[proc];
+}
+
+void XactProfiler::profileBarrierCycles(int proc, int cycles){
+ m_BarrierCycles[proc] += cycles;
+}
+
+long long int XactProfiler::getBarrierCycles(int proc){
+ return m_BarrierCycles[proc];
+}
+
+// Start a stopwatch for 'proc'; profileEndTimer folds the cycles elapsed
+// since the matching profileBeginTimer into m_xactTimedCycles.
+void XactProfiler::profileBeginTimer(int proc){
+ m_xactBeginTimer[proc] = (long long int) g_eventQueue_ptr->getTime();
+}
+
+void XactProfiler::profileEndTimer(int proc){
+ m_xactTimedCycles[proc] += (long long int) g_eventQueue_ptr->getTime() - m_xactBeginTimer[proc];
+}
+
+// Record one hash-value sample for the given hash function.
+// NOTE(review): hashFunction indexes m_hashProfile (sized 16 by
+// clearStats) without a bounds check -- confirm callers stay in [0,15].
+void XactProfiler::profileHashValue(int hashFunction, int hashValue){
+ if (hashFunction > max_hashFunction) max_hashFunction = hashFunction;
+
+ m_hashProfile[hashFunction].add(hashValue);
+}
diff --git a/src/mem/ruby/profiler/XactProfiler.hh b/src/mem/ruby/profiler/XactProfiler.hh
new file mode 100644
index 000000000..46584de65
--- /dev/null
+++ b/src/mem/ruby/profiler/XactProfiler.hh
@@ -0,0 +1,125 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef XACTPROFILER_H
+#define XACTPROFILER_H
+
+#include "Global.hh"
+#include "GenericMachineType.hh"
+#include "RubyConfig.hh"
+#include "Histogram.hh"
+#include "Consumer.hh"
+#include "AccessModeType.hh"
+#include "AccessType.hh"
+#include "NodeID.hh"
+#include "MachineID.hh"
+#include "PrefetchBit.hh"
+#include "Address.hh"
+#include "Set.hh"
+#include "CacheRequestType.hh"
+#include "GenericRequestType.hh"
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+
+// Aggregates transactional-memory cycle statistics per hardware thread
+// (processor x SMT thread): transactional vs. non-transactional cycles,
+// stall/abort/commit/backoff/barrier breakdowns, ad-hoc begin/end
+// timers, and per-hash-function value histograms.
+class XactProfiler {
+public:
+ // Constructors
+ XactProfiler();
+
+ // Destructor
+ ~XactProfiler();
+
+ void printStats(ostream& out, bool short_stats=false);
+ void printShortStats(ostream& out) { printStats(out, true); }
+ void clearStats();
+ void printConfig(ostream& out) const;
+
+ void print(ostream& out) const;
+
+ // Accumulate 'cycles' into the named category for processor 'proc'.
+ void profileTransCycles(int proc, int cycles);
+ void profileNonTransCycles(int proc, int cycles);
+ void profileStallTransCycles(int proc, int cycles);
+ void profileStallNonTransCycles(int proc, int cycles);
+ void profileAbortingTransCycles(int proc, int cycles);
+ void profileCommitingTransCycles(int proc, int cycles);
+ void profileBarrierCycles(int proc, int cycles);
+ void profileBackoffTransCycles(int proc, int cycles);
+ void profileGoodTransCycles(int proc, int cycles);
+
+ // Stopwatch: EndTimer accumulates cycles since the matching BeginTimer.
+ void profileBeginTimer(int proc);
+ void profileEndTimer(int proc);
+
+ // Read back the running totals accumulated above.
+ long long int getTransCycles(int proc_no);
+ long long int getGoodTransCycles(int proc_no);
+ long long int getStallTransCycles(int proc_no);
+ long long int getAbortingTransCycles(int proc_no);
+ long long int getCommitingTransCycles(int proc_no);
+ long long int getBackoffTransCycles(int proc_no);
+ long long int getNonTransCycles(int proc_no);
+ long long int getBarrierCycles(int proc_no);
+
+ void profileHashValue(int hashFunction, int hashValue);
+
+private:
+
+ // Heap arrays, one slot per hardware thread; sized in the constructor.
+ long long int * m_xactTransCycles;
+ long long int * m_xactStallTransCycles;
+ long long int * m_xactStallNonTransCycles;
+ long long int * m_xactAbortingCycles;
+ long long int * m_xactCommitingCycles;
+ long long int * m_xactBackoffCycles;
+ long long int * m_BarrierCycles;
+
+ long long int * m_xactGoodTransCycles;
+ long long int * m_xactNonTransCycles;
+
+ long long int * m_xactTimedCycles;
+ long long int * m_xactBeginTimer;
+
+ int max_hashFunction;  // highest hash-function index seen; -1 if none
+ Vector<Histogram> m_hashProfile;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const XactProfiler& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// 'extern inline' gives this header-defined operator a single linkage
+// across all translation units that include it.
+extern inline
+ostream& operator<<(ostream& out, const XactProfiler& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //XACTPROFILER_H
+
+
diff --git a/src/mem/ruby/recorder/CacheRecorder.cc b/src/mem/ruby/recorder/CacheRecorder.cc
new file mode 100644
index 000000000..e858f618e
--- /dev/null
+++ b/src/mem/ruby/recorder/CacheRecorder.cc
@@ -0,0 +1,75 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "CacheRecorder.hh"
+#include "TraceRecord.hh"
+#include "EventQueue.hh"
+#include "PrioHeap.hh"
+#include "gzstream.hh"
+
+// The recorder owns a priority heap of the cache requests it records.
+CacheRecorder::CacheRecorder()
+{
+ m_records_ptr = new PrioHeap<TraceRecord>;
+}
+
+CacheRecorder::~CacheRecorder()
+{
+ delete m_records_ptr;
+}
+
+// Buffer one cache request (requester, data/PC addresses, type, time)
+// for later dumping.
+void CacheRecorder::addRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time)
+{
+ m_records_ptr->insert(TraceRecord(id, data_addr, pc_addr, type, time));
+}
+
+// Drain every buffered record to a gzip-compressed file, in heap order
+// (extractMin first).  Returns the number of records written, or 0 if
+// the file could not be opened.  Note: this empties the heap.
+int CacheRecorder::dumpRecords(string filename)
+{
+ ogzstream out(filename.c_str());
+ if (out.fail()) {
+ cout << "Error: error opening file '" << filename << "'" << endl;
+ return 0;
+ }
+
+ int counter = 0;
+ while (m_records_ptr->size() != 0) {
+ TraceRecord record = m_records_ptr->extractMin();
+ record.output(out);
+ counter++;
+ }
+ return counter;
+}
+
+// Intentionally a no-op; kept to satisfy the ostream operator<< contract.
+void CacheRecorder::print(ostream& out) const
+{
+}
diff --git a/src/mem/ruby/recorder/CacheRecorder.hh b/src/mem/ruby/recorder/CacheRecorder.hh
new file mode 100644
index 000000000..b8b56ff09
--- /dev/null
+++ b/src/mem/ruby/recorder/CacheRecorder.hh
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: Recording cache requests made to a ruby cache at certain
+ * ruby time. Also dump the requests to a gziped file.
+ *
+ */
+
+#ifndef CACHERECORDER_H
+#define CACHERECORDER_H
+
+#include "Global.hh"
+#include "NodeID.hh"
+#include "CacheRequestType.hh"
+
+template <class TYPE> class PrioHeap;
+class Address;
+class TraceRecord;
+
+// Records cache requests (with their ruby timestamps) into a priority
+// heap and dumps them to a gzipped trace file on demand.  Non-copyable.
+class CacheRecorder {
+public:
+ // Constructors
+ CacheRecorder();
+
+ // Destructor
+ ~CacheRecorder();
+
+ // Public Methods
+ void addRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time);
+ int dumpRecords(string filename);  // returns number of records written; empties the heap
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ CacheRecorder(const CacheRecorder& obj);
+ CacheRecorder& operator=(const CacheRecorder& obj);
+
+ // Data Members (m_ prefix)
+ PrioHeap<TraceRecord>* m_records_ptr;  // owned; deleted in the destructor
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const CacheRecorder& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// 'extern inline' gives this header-defined operator a single linkage
+// across all translation units that include it.
+extern inline
+ostream& operator<<(ostream& out, const CacheRecorder& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //CACHERECORDER_H
diff --git a/src/mem/ruby/recorder/TraceRecord.cc b/src/mem/ruby/recorder/TraceRecord.cc
new file mode 100644
index 000000000..3116edf93
--- /dev/null
+++ b/src/mem/ruby/recorder/TraceRecord.cc
@@ -0,0 +1,132 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "TraceRecord.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "AbstractChip.hh"
+#include "CacheMsg.hh"
+
+// Capture one cache request for the trace.
+TraceRecord::TraceRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time)
+{
+ m_node_num = id;
+ m_data_address = data_addr;
+ m_pc_address = pc_addr;
+ m_time = time;
+ m_type = type;
+
+ // Don't differentiate between store misses and atomic requests in
+ // the trace
+ if (m_type == CacheRequestType_ATOMIC) {
+ m_type = CacheRequestType_ST;
+ }
+}
+
+// Public copy constructor and assignment operator
+// (copying is needed because TraceRecords are stored by value in the
+// recorder's heap)
+TraceRecord::TraceRecord(const TraceRecord& obj)
+{
+ *this = obj; // Call assignment operator
+}
+
+TraceRecord& TraceRecord::operator=(const TraceRecord& obj)
+{
+ m_node_num = obj.m_node_num;
+ m_time = obj.m_time;
+ m_data_address = obj.m_data_address;
+ m_pc_address = obj.m_pc_address;
+ m_type = obj.m_type;
+ return *this;
+}
+
+// Replay this record: locate the sequencer for the recorded node and
+// re-issue the request to it.  The sequencer is drained (by advancing
+// the event queue in 100-cycle steps) both before and after the request
+// so each replayed access completes in isolation.
+void TraceRecord::issueRequest() const
+{
+ // Lookup sequencer pointer from system
+ // Note that the chip index also needs to take into account SMT configurations
+ AbstractChip* chip_ptr = g_system_ptr->getChip(m_node_num/RubyConfig::numberOfProcsPerChip()/RubyConfig::numberofSMTThreads());
+ assert(chip_ptr != NULL);
+ Sequencer* sequencer_ptr = chip_ptr->getSequencer((m_node_num/RubyConfig::numberofSMTThreads())%RubyConfig::numberOfProcsPerChip());
+ assert(sequencer_ptr != NULL);
+
+ CacheMsg request(m_data_address, m_data_address, m_type, m_pc_address, AccessModeType_UserMode, 0, PrefetchBit_Yes, 0, Address(0), 0 /* only 1 SMT thread */, 0, false);
+
+ // Clear out the sequencer
+ while (!sequencer_ptr->empty()) {
+ g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 100);
+ }
+
+ sequencer_ptr->makeRequest(request);
+
+ // Clear out the sequencer
+ while (!sequencer_ptr->empty()) {
+ g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 100);
+ }
+}
+
+// Human-readable single-line dump (debugging; not the trace format).
+void TraceRecord::print(ostream& out) const
+{
+ out << "[TraceRecord: Node, " << m_node_num << ", " << m_data_address << ", " << m_pc_address << ", " << m_type << ", Time: " << m_time << "]";
+}
+
+// Machine-readable trace line: "node data_addr pc_addr type".  This is
+// the format input() parses back.
+void TraceRecord::output(ostream& out) const
+{
+ out << m_node_num << " ";
+ m_data_address.output(out);
+ out << " ";
+ m_pc_address.output(out);
+ out << " ";
+ out << m_type;
+ out << endl;
+}
+
+// Parse one record line written by output().  Returns false at end of
+// stream.  NOTE(review): the node and address fields are read before the
+// EOF check, so on a truncated stream they may hold partially-read
+// values even when false is returned.
+bool TraceRecord::input(istream& in)
+{
+ in >> m_node_num;
+ m_data_address.input(in);
+ m_pc_address.input(in);
+ string type;
+ if (!in.eof()) {
+ in >> type;
+ m_type = string_to_CacheRequestType(type);
+
+ // Ignore the rest of the line
+ char c = '\0';
+ while ((!in.eof()) && (c != '\n')) {
+ in.get(c);
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+}
diff --git a/src/mem/ruby/recorder/TraceRecord.hh b/src/mem/ruby/recorder/TraceRecord.hh
new file mode 100644
index 000000000..df526156b
--- /dev/null
+++ b/src/mem/ruby/recorder/TraceRecord.hh
@@ -0,0 +1,101 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: A entry in the cache request record. It is aware of
+ * the ruby time and can issue the request back to the
+ * cache.
+ *
+ */
+
+#ifndef TRACERECORD_H
+#define TRACERECORD_H
+
+#include "Global.hh"
+#include "Address.hh"
+#include "NodeID.hh"
+#include "CacheRequestType.hh"
+class CacheMsg;
+
+// One entry of a cache-request trace.  Knows the issuing node, the data
+// and PC addresses, the request type, and the ruby time it was recorded;
+// can re-issue itself to the cache via issueRequest().
+class TraceRecord {
+public:
+  // Constructors
+  TraceRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time);
+  TraceRecord() { m_node_num = 0; m_time = 0; m_type = CacheRequestType_NULL; }
+
+  // Destructor
+  // ~TraceRecord();
+
+  // Public copy constructor and assignment operator
+  TraceRecord(const TraceRecord& obj);
+  TraceRecord& operator=(const TraceRecord& obj);
+
+  // Public Methods
+
+  // Heap-ordering predicate: compares recording TIMES despite the name
+  // ("then" is a historical typo for "than"; kept for API compatibility).
+  bool node_less_then_eq(const TraceRecord& rec) const { return (this->m_time <= rec.m_time); }
+  // Replay this request into the live cache hierarchy.
+  void issueRequest() const;
+
+  void print(ostream& out) const;
+  // Text (de)serialization; output() and input() are exact inverses.
+  void output(ostream& out) const;
+  bool input(istream& in);
+private:
+  // Private Methods
+
+  // Data Members (m_ prefix)
+  NodeID m_node_num;
+  Time m_time;
+  Address m_data_address;
+  Address m_pc_address;
+  CacheRequestType m_type;
+};
+
+// Free-function adapter so PrioHeap<TraceRecord> can order records by time.
+// NOTE: "inline extern" is the pre-C++11 GEMS idiom for header-defined
+// functions; left as-is for consistency with the rest of the import.
+inline extern bool node_less_then_eq(const TraceRecord& n1, const TraceRecord& n2);
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const TraceRecord& obj);
+
+// ******************* Definitions *******************
+
+inline extern
+bool node_less_then_eq(const TraceRecord& n1, const TraceRecord& n2)
+{
+  return n1.node_less_then_eq(n2);
+}
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const TraceRecord& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+#endif //TRACERECORD_H
diff --git a/src/mem/ruby/recorder/Tracer.cc b/src/mem/ruby/recorder/Tracer.cc
new file mode 100644
index 000000000..2a0acba46
--- /dev/null
+++ b/src/mem/ruby/recorder/Tracer.cc
@@ -0,0 +1,126 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Tracer.hh"
+#include "TraceRecord.hh"
+#include "EventQueue.hh"
+#include "PrioHeap.hh"
+#include "System.hh"
+
+// Tracing starts disabled; call startTrace() to begin recording.
+Tracer::Tracer()
+  : m_enabled(false)
+{
+}
+
+// NOTE(review): does not close m_trace_file if tracing is still enabled;
+// the gzstream destructor presumably flushes/closes it — confirm.
+Tracer::~Tracer()
+{
+}
+
+// Open 'filename' for gzip-compressed trace output and enable tracing.
+// An already-active trace is stopped first.  An empty filename or a
+// failed open leaves tracing disabled.
+void Tracer::startTrace(string filename)
+{
+  if (m_enabled) {
+    stopTrace();
+  }
+
+  if (filename != "") {
+    m_trace_file.open(filename.c_str());
+    if (m_trace_file.fail()) {
+      cout << "Error: error opening file '" << filename << "'" << endl;
+      cout << "Trace not enabled." << endl;
+      return;
+    }
+    cout << "Request trace enabled to output file '" << filename << "'" << endl;
+    m_enabled = true;
+  }
+}
+
+// Close the trace file and disable tracing.  Must only be called while
+// a trace is active (asserts otherwise).
+void Tracer::stopTrace()
+{
+  assert(m_enabled == true);
+  m_trace_file.close();
+  cout << "Request trace file closed." << endl;
+  m_enabled = false;
+}
+
+// Append one request record to the open trace file.  Caller must have
+// enabled tracing via startTrace() first (asserts otherwise).
+void Tracer::traceRequest(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time)
+{
+  assert(m_enabled == true);
+  TraceRecord tr(id, data_addr, pc_addr, type, time);
+  tr.output(m_trace_file);
+}
+
+// Class method
+// Replay a previously recorded (gzip-compressed) trace file into the
+// memory system, clearing statistics once g_trace_warmup_length records
+// have been issued.  Returns the number of records replayed (0 if the
+// file could not be opened).
+int Tracer::playbackTrace(string filename)
+{
+  igzstream in(filename.c_str());
+  if (in.fail()) {
+    cout << "Error: error opening file '" << filename << "'" << endl;
+    return 0;
+  }
+
+  // Wall-clock timing of the playback, reported at the end.
+  time_t start_time = time(NULL);
+
+  TraceRecord record;
+  int counter = 0;
+  // Read in the next TraceRecord
+  bool ok = record.input(in);
+  while (ok) {
+    // Put it in the right cache
+    record.issueRequest();
+    counter++;
+
+    // Read in the next TraceRecord
+    ok = record.input(in);
+
+    // Clear the statistics after warmup
+    if (counter == g_trace_warmup_length) {
+      cout << "Clearing stats after warmup of length " << g_trace_warmup_length << endl;
+      g_system_ptr->clearStats();
+    }
+  }
+
+  // Flush the prefetches through the system
+  g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 1000); // FIXME - should be smarter
+
+  time_t stop_time = time(NULL);
+  double seconds = difftime(stop_time, start_time);
+  double minutes = seconds / 60.0;
+  cout << "playbackTrace: " << minutes << " minutes" << endl;
+
+  return counter;
+}
+
+// Tracer has no printable state; intentionally a no-op (required by the
+// generic operator<< in Tracer.hh).
+void Tracer::print(ostream& out) const
+{
+}
diff --git a/src/mem/ruby/recorder/Tracer.hh b/src/mem/ruby/recorder/Tracer.hh
new file mode 100644
index 000000000..eb05ae12d
--- /dev/null
+++ b/src/mem/ruby/recorder/Tracer.hh
@@ -0,0 +1,94 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: Controller class of the tracer. Can stop/start/playback
+ * the ruby cache requests trace.
+ *
+ */
+
+#ifndef TRACER_H
+#define TRACER_H
+
+#include "Global.hh"
+#include "NodeID.hh"
+#include "CacheRequestType.hh"
+#include "gzstream.hh"
+
+template <class TYPE> class PrioHeap;
+class Address;
+class TraceRecord;
+
+// Controller for the cache-request trace: start/stop recording requests
+// to a gzip-compressed file, and (via the static playbackTrace) replay
+// such a file to warm up the caches.
+class Tracer {
+public:
+  // Constructors
+  Tracer();
+
+  // Destructor
+  ~Tracer();
+
+  // Public Methods
+  void startTrace(string filename);
+  void stopTrace();
+  bool traceEnabled() { return m_enabled; }
+  // Appends one record; only legal while traceEnabled() is true.
+  void traceRequest(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time);
+
+  void print(ostream& out) const;
+
+  // Public Class Methods
+  // Replays 'filename'; returns the number of records issued.
+  static int playbackTrace(string filename);
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  // (declared but not defined: Tracer owns a file stream and must not be copied)
+  Tracer(const Tracer& obj);
+  Tracer& operator=(const Tracer& obj);
+
+  // Data Members (m_ prefix)
+  ogzstream m_trace_file;
+  bool m_enabled;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Tracer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition.  "extern inline" is the pre-C++11 GEMS
+// idiom for header-defined functions.
+extern inline
+ostream& operator<<(ostream& out, const Tracer& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //TRACER_H
diff --git a/src/mem/ruby/simics/commands.cc b/src/mem/ruby/simics/commands.cc
new file mode 100644
index 000000000..e0a4f969e
--- /dev/null
+++ b/src/mem/ruby/simics/commands.cc
@@ -0,0 +1,867 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file has been modified by Kevin Moore and Dan Nussbaum of the
+ Scalable Systems Research Group at Sun Microsystems Laboratories
+ (http://research.sun.com/scalable/) to support the Adaptive
+ Transactional Memory Test Platform (ATMTP).
+
+ Please send email to atmtp-interest@sun.com with feedback, questions, or
+ to request future announcements about ATMTP.
+
+ ----------------------------------------------------------------------
+
+ File modification date: 2008-02-23
+
+ ----------------------------------------------------------------------
+*/
+
+/*
+ * $Id$
+ *
+ */
+
+#include "protocol_name.hh"
+#include "Global.hh"
+#include "System.hh"
+#include "CacheRecorder.hh"
+//#include "Tracer.hh"
+#include "RubyConfig.hh"
+#include "interface.hh"
+#include "Network.hh"
+// #include "TransactionInterfaceManager.hh"
+// #include "TransactionVersionManager.hh"
+// #include "TransactionIsolationManager.hh"
+//#include "XactCommitArbiter.hh" // gem5:Arka for decomissioning of log_tm
+#include "Chip.hh"
+//#include "XactVisualizer.hh" // gem5:Arka for decomissioning of log_tm
+
+extern "C" {
+#include "commands.hh"
+}
+
+#ifdef CONTIGUOUS_ADDRESSES
+#include "ContiguousAddressTranslator.hh"
+
+/* Declared in interface.C */
+extern ContiguousAddressTranslator * g_p_ca_translator;
+
+memory_transaction_t local_memory_transaction_t_shadow;
+
+#endif // #ifdef CONTIGUOUS_ADDRESSES
+
+//////////////////////// extern "C" api ////////////////////////////////
+
+// Simics console command: dump the caches of the chip owning 'cpuNumber'.
+// Dead in the gem5 import — assert(0) guards the un-ported Simics path.
+extern "C"
+void ruby_dump_cache(int cpuNumber)
+{
+  assert(0);
+  g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->dumpCaches(cout);
+}
+
+// Simics console command: dump cache data for the chip owning 'cpuNumber',
+// to stdout when 'tag' is NULL, otherwise to the file named by 'tag'.
+// Dead in the gem5 import — assert(0) guards the un-ported Simics path.
+extern "C"
+void ruby_dump_cache_data(int cpuNumber, char* tag)
+{
+  assert(0);
+  if (tag == NULL) {
+    // No filename, dump to screen
+    g_system_ptr->printConfig(cout);
+    g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->dumpCacheData(cout);
+  } else {
+    // File name, dump to file
+    string filename(tag);
+
+    cout << "Dumping stats to output file '" << filename << "'..." << endl;
+    ofstream m_outputFile;
+    m_outputFile.open(filename.c_str());
+    // BUGFIX: comparing an ofstream to NULL relied on the C++98 implicit
+    // operator void* and is ill-formed under C++11; test the stream state.
+    if (m_outputFile.fail()) {
+      cout << endl << "Error: error opening output file '" << filename << "'" << endl;
+      return;
+    }
+    g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->dumpCacheData(m_outputFile);
+  }
+}
+
+// Simics console command: route periodic statistics output to 'filename'.
+// Dead in the gem5 import (assert(0)).
+extern "C"
+void ruby_set_periodic_stats_file(char* filename)
+{
+  assert(0);
+  g_system_ptr->getProfiler()->setPeriodicStatsFile(filename);
+}
+
+// Simics console command: set how often periodic statistics are dumped.
+// Dead in the gem5 import (assert(0)).
+extern "C"
+void ruby_set_periodic_stats_interval(int interval)
+{
+  assert(0);
+  g_system_ptr->getProfiler()->setPeriodicStatsInterval(interval);
+}
+
+// Simics memory-hierarchy hook for a potential cache miss.  Optionally
+// rewrites the physical address through the contiguous-address translator
+// (on a local copy, so Simics' transaction is never mutated), then would
+// forward to the driver.  Dead in the gem5 import (assert(0)).
+extern "C"
+int mh_memorytracer_possible_cache_miss(memory_transaction_t *mem_trans)
+{
+
+  assert(0);
+  memory_transaction_t *p_mem_trans_shadow = mem_trans;
+
+#ifdef CONTIGUOUS_ADDRESSES
+  if(g_p_ca_translator!=NULL) {
+    memcpy( &local_memory_transaction_t_shadow, mem_trans, sizeof(memory_transaction_t) );
+    p_mem_trans_shadow = &local_memory_transaction_t_shadow;
+    uint64 contiguous_address = g_p_ca_translator->TranslateSimicsToRuby( p_mem_trans_shadow->s.physical_address );
+    p_mem_trans_shadow->s.physical_address = contiguous_address;
+  }
+#endif // #ifdef CONTIGUOUS_ADDRESSES
+
+
+  // Pass this request off to SimicsDriver::makeRequest()
+  // SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
+  // return simics_interface_ptr->makeRequest(p_mem_trans_shadow);
+  return 0;
+}
+
+// Simics memory-hierarchy hook for observing (not stalling) an access.
+// Same address-translation shadowing as mh_memorytracer_possible_cache_miss;
+// the forward to the driver is commented out.  Dead in the gem5 import.
+extern "C"
+void mh_memorytracer_observe_memory(memory_transaction_t *mem_trans)
+{
+
+  assert(0);
+  memory_transaction_t *p_mem_trans_shadow = mem_trans;
+
+
+#ifdef CONTIGUOUS_ADDRESSES
+  if(g_p_ca_translator!=NULL) {
+    memcpy( &local_memory_transaction_t_shadow, mem_trans, sizeof(memory_transaction_t) );
+    p_mem_trans_shadow = &local_memory_transaction_t_shadow;
+    uint64 contiguous_address = g_p_ca_translator->TranslateSimicsToRuby( p_mem_trans_shadow->s.physical_address );
+    p_mem_trans_shadow->s.physical_address = contiguous_address;
+
+  }
+#endif // #ifdef CONTIGUOUS_ADDRESSES
+
+
+  // Pass this request off to SimicsDriver::makeRequest()
+  //SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
+  //simics_interface_ptr->observeMemoryAccess(p_mem_trans_shadow);
+}
+
+
+// Debug helper: write 'parameter' into the SPARC %g3 register of 'cpu',
+// printing all global-register sets before and after.  Entire body is
+// compiled out (#if 0) and guarded by assert(0) in the gem5 import.
+void ruby_set_g3_reg(void *cpu, void *parameter){
+  assert(0);
+#if 0
+  int proc_num = SIM_get_proc_no(cpu);
+  sparc_v9_interface_t * m_v9_interface = (sparc_v9_interface_t *) SIM_get_interface(cpu, SPARC_V9_INTERFACE);
+
+  for(int set=0; set < 4; set++) {
+    for(int i=0; i <8; i++) {
+      int registerNumber = i;
+      uinteger_t value = m_v9_interface->read_global_register((void *)cpu, set, registerNumber);
+      cout << "ruby_set_g3_reg BEFORE: proc =" << proc_num << " GSET = " << set << " GLOBAL_REG = " << i << " VALUE = " << value << endl;
+    }
+  }
+
+  uinteger_t value_ptr = (uinteger_t) parameter;
+  int g3_regnum = SIM_get_register_number(cpu, "g3");
+  SIM_write_register(cpu, g3_regnum, (uinteger_t) value_ptr);
+
+  cout << endl;
+  for(int set=0; set < 4; set++) {
+    for(int i=0; i <8; i++) {
+      int registerNumber = i;
+      uinteger_t value = m_v9_interface->read_global_register((void *)cpu, set, registerNumber);
+      cout << "ruby_set_g3_reg AFTER: proc =" << proc_num << " GSET = " << set << " GLOBAL_REG = " << i << " VALUE = " << value << endl;
+    }
+  }
+#endif
+
+}
+
+// #define XACT_MGR g_system_ptr->getChip(SIMICS_current_processor_number()/RubyConfig::numberOfProcsPerChip()/RubyConfig::numberofSMTThreads())->getTransactionInterfaceManager( (SIMICS_current_processor_number()/RubyConfig::numberofSMTThreads())%RubyConfig::numberOfProcsPerChip())
+
+// Simics magic-instruction dispatcher for the LogTM transactional-memory
+// interface: 'val' encodes the command (begin/commit/abort transactions,
+// escape actions, log management, signature manipulation, debug dumps).
+// The entire dispatch is compiled out (#if 0) and guarded by assert(0)
+// in the gem5 import; it is retained as reference for a future port.
+extern "C"
+void magic_instruction_callback(void* desc, void* cpu, integer_t val)
+{
+  assert(0);
+#if 0
+  // Use magic callbacks to start and end transactions w/o opal
+  if (val > 0x10000) // older magic call numbers. Need to be right-shifted.
+    val = val >> 16;
+  int id = -1;
+  int proc_num = SIMICS_current_processor_number();
+  int sim_proc_num = proc_num / RubyConfig::numberofSMTThreads();
+  int thread_num = proc_num % RubyConfig::numberofSMTThreads();
+  int ruby_cycle = g_eventQueue_ptr->getTime();
+
+  if(proc_num < 0){
+    cout << "ERROR proc_num= " << proc_num << endl;
+  }
+  assert(proc_num >= 0);
+  if(thread_num < 0){
+    cout << "ERROR thread_num= " << thread_num << endl;
+  }
+  assert(thread_num >= 0);
+  if( sim_proc_num < 0){
+    cout << "ERROR sim_proc_num = " << sim_proc_num << endl;
+  }
+  assert(sim_proc_num >= 0);
+
+  // Small fixed command numbers follow; ranges >= 1024 encode an XID.
+  if (val == 3) {
+    g_system_ptr->getProfiler()->startTransaction(sim_proc_num);
+  } else if (val == 4) {
+    ; // magic breakpoint
+  } else if (val == 5) {
+    g_system_ptr->getProfiler()->endTransaction(sim_proc_num);
+  } else if (val == 6){ // Begin Exposed Action
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "Begin exposed action for thread " << thread_num << " of proc " << proc_num << " PC " << SIMICS_get_program_counter(proc_num) << endl;
+    XACT_MGR->beginEscapeAction(thread_num);
+  } else if (val == 7){ // Begin Exposed Action
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "End exposed action for thread " << thread_num << " of proc " << proc_num << " PC " << SIMICS_get_program_counter(proc_num) << endl;
+    XACT_MGR->endEscapeAction(thread_num);
+  } else if (val == 8) {
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "Set log Base Address for thread " << thread_num << " of proc " << proc_num << endl;
+    XACT_MGR->setLogBase(thread_num);
+  } else if (val == 9) {
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "Setting Handler Address for thread " << thread_num << " of proc " << proc_num << endl;
+    XACT_MGR->setHandlerAddress(thread_num);
+  } else if (val == 10) {
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "Release Isolation for thread " << thread_num << " of proc " << proc_num << endl;
+    XACT_MGR->releaseIsolation(thread_num);
+  } else if (val == 11) {
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "Restart transaction for thread " << thread_num << " of proc " << proc_num << endl;
+    XACT_MGR->restartTransaction(thread_num);
+  } else if (val == 12) {
+    // NOTE: this is a functional magic call for the Java VM
+    // It is used by mfacet.py to check whether to use TM macros or JVM locking
+    return;
+  } else if (val == 13) {
+    // NOTE: this is a debug magic call for the Java VM
+    // Indicates BEGIN XACT
+    return;
+  } else if (val == 14) {
+    // NOTE: this is a debug magic call for the Java VM
+    // Indicates COMMIT_XACT
+    return;
+  } else if (val == 15) {
+    cout << "SIMICS SEG FAULT for thread " << thread_num << " of proc " << proc_num << endl;
+    SIM_break_simulation("SIMICS SEG FAULT");
+    return;
+  } else if (val == 16) {
+    // NOTE : this is a debug magic call for the Java VM
+    // Indicates LOCKING object
+    return;
+  } else if (val == 17) {
+    // NOTE : this is a debug magic call for the Java VM
+    // Indicates UNLOCKING object
+    return;
+  } else if (val == 18) {
+    // NOTE: this is a magic call to enable the xact mem macros in the Java VM
+    // The functionality is implemented in gen-scripts/mfacet.py because it can be independent of Ruby
+    return;
+  } else if (val == 19){
+    cout << "RUBY WATCH: " << endl;
+    g_system_ptr->getProfiler()->rubyWatch(SIMICS_current_processor_number());
+  } else if (val == 20) {
+    //XACT_MGR->setJavaPtrs(thread_num);
+  } else if (val == 21){
+    // NOTE : this is a debug magic call used to dump the registers for a processor
+    // Functionality is implemented in gen-scripts/mfacet.py because it can be independent of Ruby
+    return;
+  } else if (val == 23){
+    // register compensating action
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << proc_num << "," << thread_num << " REGISTER COMPENSATING ACTION " << endl;
+    XACT_MGR->registerCompensatingAction(thread_num);
+  } else if (val == 24){
+    // register commit action
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << proc_num << "," << thread_num << " REGISTER COMMIT ACTION " << endl;
+    XACT_MGR->registerCommitAction(thread_num);
+  } else if (val == 27){
+    // xmalloc
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << proc_num << "," << thread_num << " XMALLOC " << endl;
+    XACT_MGR->xmalloc(thread_num);
+  } else if (val == 29){
+    // Begin Barrier
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << proc_num << "," << thread_num << " BEGIN BARRIER " << endl;
+    g_system_ptr->getXactVisualizer()->moveToBarrier(proc_num);
+  } else if (val == 30){
+    // End Barrier
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << proc_num << "," << thread_num << " END BARRIER " << endl;
+    g_system_ptr->getXactVisualizer()->moveToNonXact(proc_num);
+  } else if (val == 28) {
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "Continue execution for thread " << thread_num << " of proc " << proc_num << endl;
+    XACT_MGR->continueExecution(thread_num);
+  } else if (val == 31){
+    // Begin Timer
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << proc_num << "," << thread_num << " BEGIN TIMER " << endl;
+    g_system_ptr->getProfiler()->getXactProfiler()->profileBeginTimer(proc_num);
+  } else if (val == 32){
+    // End Timer
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << proc_num << "," << thread_num << " END TIMER " << endl;
+    g_system_ptr->getProfiler()->getXactProfiler()->profileEndTimer(proc_num);
+  } else if (val == 40) {
+    // register a thread for virtualization
+    if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
+      SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
+      simics_interface_ptr->getHypervisor()->registerThreadWithHypervisor(proc_num);
+    }
+  } else if (val == 41) {
+    // get information about the last summary conflict
+    if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
+      Address addr = XACT_MGR->getXactIsolationManager()->getSummaryConflictAddress();
+      unsigned int conflictAddress = addr.getAddress();
+      unsigned int conflictType = XACT_MGR->getXactIsolationManager()->getSummaryConflictType();
+      SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g2"), conflictAddress);
+      SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g3"), conflictType);
+    }
+  } else if (val == 42) {
+    // resolve summary conflict magic callback
+    if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
+      SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
+      simics_interface_ptr->getHypervisor()->resolveSummarySignatureConflict(proc_num);
+    }
+  } else if (val == 50) {
+    // set summary signature bit
+    int index = SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2"));
+    XACT_MGR->writeBitSummaryWriteSetFilter(thread_num, index, 1);
+  } else if (val == 51) {
+    // unset summary signature bit
+    int index = SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2"));
+    XACT_MGR->writeBitSummaryWriteSetFilter(thread_num, index, 0);
+  } else if (val == 52) {
+    // add address in summary signature
+    Address addr = Address(SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2")));
+    cout << "Add to summary write set filter: " << addr << endl;
+    XACT_MGR->addToSummaryWriteSetFilter(thread_num, addr);
+  } else if (val == 53) {
+    // remove address from summary signature
+    Address addr = Address(SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2")));
+    XACT_MGR->removeFromSummaryWriteSetFilter(thread_num, addr);
+  } else if (val == 54) {
+    // translate address to summary signature index
+    Address addr = Address(SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2")));
+    SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g3"), XACT_MGR->getIndexSummaryFilter(thread_num, addr));
+  } else if (val == 55) {
+    XACT_MGR->setIgnoreWatchpointFlag(thread_num, true);
+  } else if (val == 56) {
+    g_system_ptr->getProfiler()->watchpointsFalsePositiveTrigger();
+  } else if (val == 57) {
+    g_system_ptr->getProfiler()->watchpointsTrueTrigger();
+  } else if (val == 60) {
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2) {
+      cout << "Set restorePC for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
+    }
+    unsigned int pc = SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2"));
+    XACT_MGR->setRestorePC(thread_num, pc);
+  } else if (val == 61) {
+    // get log size
+    SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g2"), XACT_MGR->getXactVersionManager()->getLogSize(thread_num));
+  } else if (val == 62) {
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2){
+      cout << " GET THREAD ID " << thread_num << " of proc " << proc_num << " TID " << XACT_MGR->getTID(thread_num) << endl;
+    }
+    // get thread id
+    SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g2"), XACT_MGR->getTID(thread_num));
+  } else if (val == 100) {
+    dump_registers((void*)cpu);
+  } else if (val >= 1024 && val < 2048) {
+    // begin closed
+    id = val - 1024;
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "Begin CLOSED transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
+    XACT_MGR->beginTransaction(thread_num, id, false);
+    //} else if (val >= min_closed_commit && val < XACT_OPEN_MIN_ID) {
+  } else if (val >= 2048 && val < 3072) {
+    // commit closed
+    id = val - 2048;
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "Commit CLOSED transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
+    XACT_MGR->commitTransaction(thread_num, id, false);
+  } else if (val >= 3072 && val < 4096) {
+    // begin open
+    id = val - 3072;
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "Begin OPEN transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
+    XACT_MGR->beginTransaction(thread_num, id, true);
+  } else if (val >= 4096 && val < 5120) {
+    // commit open
+    id = val - 4096;
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "COMMIT OPEN transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
+    XACT_MGR->commitTransaction(thread_num, id, true);
+  } else if (val >= 5120 && val < 6144){
+
+    cout << " SYSCALL " << val - 5120 << " of proc " << proc_num << " " << thread_num << " time = " << ruby_cycle << endl;
+  } else if (val >= 6144 && val < 7168) {
+    // commit open
+    id = val - 6144;
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
+      cout << "ABORT transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
+    XACT_MGR->abortTransaction(thread_num, id);
+  } else if (val == 8000) {
+    // transaction level
+    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2) {
+      id = val - 8000;
+      cout << "Transaction Level for thread " << thread_num << " of proc " << proc_num << " XID " << id << " : "
+           << XACT_MGR->getTransactionLevel(thread_num)<< endl;
+    }
+    SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "i0"),(unsigned int) XACT_MGR->getTransactionLevel(thread_num));
+  } else if (val==8001) {
+    cout << " " << g_eventQueue_ptr->getTime() << " " << dec << proc_num << " [" << proc_num << "," << thread_num << " ]"
+         << " TID " << XACT_MGR->getTID(0)
+         << " DEBUGMSG " << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i0")) << " "
+         << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i1")) << " "
+         << "(0x" << hex << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i1")) << ") "
+         << dec << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i2")) << " "
+         << "(0x" << hex << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i2")) << ")" << dec
+         << " break = " << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i3")) << endl;
+    if (SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i3")) == 1) {
+      SIM_break_simulation("DEBUGMSG");
+    }
+  } else {
+    WARN_EXPR(val);
+    WARN_EXPR(SIMICS_get_program_counter(proc_num));
+    WARN_MSG("Unexpected magic call");
+  }
+#endif
+}
+
+/* -- Handle command to change the debugging verbosity for Ruby */
+// Dead in the gem5 import (assert(0)).
+extern "C"
+void ruby_change_debug_verbosity(char* new_verbosity_str)
+{
+  assert(0);
+  g_debug_ptr->setVerbosityString(new_verbosity_str);
+}
+
+/* -- Handle command to change the debugging filter for Ruby */
+// Dead in the gem5 import (assert(0)).
+extern "C"
+void ruby_change_debug_filter(char* new_filter_str)
+{
+  assert(0);
+  g_debug_ptr->setFilterString(new_filter_str);
+}
+
+/* -- Handle command to set the debugging output file for Ruby */
+// Builds "<name>-<protocol>.<timestamp>.debug" and redirects the debug
+// stream there.  Dead in the gem5 import (assert(0)).
+extern "C"
+void ruby_set_debug_output_file (const char * new_filename)
+{
+  assert(0);
+  string filename(new_filename);
+
+  filename += "-";
+  filename += CURRENT_PROTOCOL;
+  // get the date and time to label the debugging file
+  const time_t T = time(NULL);
+  tm *localTime = localtime(&T);
+  char buf[100];
+  strftime(buf, 100, ".%b%d.%Y-%H.%M.%S", localTime);
+
+  filename += buf;
+  filename += ".debug";
+
+  cout << "Dumping debugging output to file '" << filename << "'...";
+  g_debug_ptr->setDebugOutputFile (filename.c_str());
+}
+
+// Set the simulated time at which debug output starts; the string is
+// parsed with atoi.  Dead in the gem5 import (assert(0)).
+extern "C"
+void ruby_set_debug_start_time(char* start_time_str)
+{
+  assert(0);
+  int startTime = atoi(start_time_str);
+  g_debug_ptr->setDebugTime(startTime);
+}
+
+/* -- Clear stats */
+// Reset all accumulated ruby statistics.  Dead in the gem5 import (assert(0)).
+extern "C"
+void ruby_clear_stats()
+{
+  assert(0);
+  cout << "Clearing stats...";
+  fflush(stdout);
+  g_system_ptr->clearStats();
+  cout << "Done." << endl;
+}
+
+/* -- Dump stats */
+// Dump full configuration and statistics, to stdout when 'filename' is
+// NULL, otherwise to the named file.  Dead in the gem5 import (assert(0)).
+extern "C"
+// File name, dump to file
+void ruby_dump_stats(char* filename)
+{
+  assert(0);
+  /*g_debug_ptr->closeDebugOutputFile();*/
+  if (filename == NULL) {
+    // No output file, dump to screen
+    cout << "Dumping stats to standard output..." << endl;
+    g_system_ptr->printConfig(cout);
+    g_system_ptr->printStats(cout);
+  } else {
+    cout << "Dumping stats to output file '" << filename << "'..." << endl;
+    ofstream m_outputFile;
+    m_outputFile.open(filename);
+    // BUGFIX: comparing an ofstream to NULL relied on the C++98 implicit
+    // operator void* and is ill-formed under C++11; test the stream state.
+    if(m_outputFile.fail()) {
+      cout << "Error: error opening output file '" << filename << "'" << endl;
+      return;
+    }
+    g_system_ptr->printConfig(m_outputFile);
+    g_system_ptr->printStats(m_outputFile);
+  }
+  cout << "Dumping stats completed." << endl;
+}
+
+/* -- Dump stats */
+extern "C"
+// File name, dump to file
+// Short-form statistics dump (profiler only): abbreviated stats to stdout
+// when filename == NULL, otherwise printShortStats() to the named file.
+// Stubbed for the gem5 port: assert(0) fires before any output.
+void ruby_dump_short_stats(char* filename)
+{
+ assert(0); // dead code in gem5: Simics command interface is not wired up
+ g_debug_ptr->closeDebugOutputFile();
+ if (filename == NULL) {
+ // No output file, dump to screen
+ //cout << "Dumping short stats to standard output..." << endl;
+ //g_system_ptr->printConfig(cout);
+ g_system_ptr->getProfiler()->printStats(cout, true);
+ } else {
+ cout << "Dumping stats to output file '" << filename << "'..." << endl;
+ ofstream m_outputFile;
+ m_outputFile.open(filename);
+ // Test the stream's open state directly; the old `m_outputFile == NULL`
+ // comparison relied on the deprecated implicit void* conversion of
+ // iostreams (ill-formed in C++11 and later).
+ if (!m_outputFile.is_open()) {
+ cout << "Error: error opening output file '" << filename << "'" << endl;
+ return;
+ }
+ g_system_ptr->getProfiler()->printShortStats(m_outputFile);
+ }
+ // Report completion on both paths, matching ruby_dump_stats (previously
+ // this message was only printed when a file name was supplied).
+ cout << "Dumping stats completed." << endl;
+}
+
+extern "C"
+// Reloads cache contents from a trace file and clears stats.  The tracer
+// playback was removed in the gem5 port (see comment below), and the whole
+// command is additionally stubbed via assert(0).
+void ruby_load_caches(char* name)
+{
+ assert(0); // dead code in gem5: Simics command interface is not wired up
+ if (name == NULL) {
+ cout << "Error: ruby_load_caches requires a file name" << endl;
+ return;
+ }
+
+ cout << "Reading cache contents from '" << name << "'...";
+ /* gem5:Binkert for decomissiong of tracer
+ int read = Tracer::playbackTrace(name);
+ cout << "done. (" << read << " cache lines read)" << endl;
+ */
+ cout << "done. (TRACER DISABLED!)" << endl;
+ ruby_clear_stats();
+}
+
+extern "C"
+// Writes the current cache contents to the named file via CacheRecorder.
+// Stubbed for the gem5 port (assert(0) fires first).
+void ruby_save_caches(char* name)
+{
+ assert(0); // dead code in gem5: Simics command interface is not wired up
+ if (name == NULL) {
+ cout << "Error: ruby_save_caches requires a file name" << endl;
+ return;
+ }
+
+ cout << "Writing cache contents to '" << name << "'...";
+ CacheRecorder recorder;
+ g_system_ptr->recordCacheContents(recorder);
+ int written = recorder.dumpRecords(name);
+ cout << "done. (" << written << " cache lines written)" << endl;
+}
+
+extern "C"
+// Would start tracing to the named file; the tracer call is commented out
+// and the command is stubbed with assert(0) in the gem5 port.
+void ruby_set_tracer_output_file (const char * new_filename)
+{
+ assert(0);
+ //g_system_ptr->getTracer()->startTrace(string(new_filename));
+}
+
+/* -- Handle command to set the xact visualizer file for Ruby */
+extern "C"
+// Prints the announcement but the actual profiler hook is commented out,
+// so this currently has no effect beyond the cout message.  Note: unlike
+// the other commands above, this one is NOT guarded by assert(0).
+void ruby_xact_visualizer_file (char * new_filename)
+{
+ cout << "Dumping xact visualizer output to file '" << new_filename << "'...";
+ // g_system_ptr->getProfiler()->setXactVisualizerFile (new_filename);
+}
+
+extern "C"
+// Simics exception-start callback for the transactional-memory (LogTM)
+// support: profiled timer interrupts (traps 0x4a/0x4e), recorded exception
+// starts per SMT thread, and released the commit token if the excepting
+// processor held it.  The whole body is disabled (#if 0) and the function
+// is stubbed with assert(0) in the gem5 port.
+void ctrl_exception_start(void* desc, void* cpu, integer_t val)
+{
+#if 0
+ int proc_no = SIM_get_proc_no((void*) cpu);
+ void* cpu_obj = (void*) cpu;
+ uinteger_t trap_level = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tl"));
+
+ if (!XACT_MEMORY) return;
+ TransactionInterfaceManager *xact_mgr = XACT_MGR;
+
+ // level {10,14} interrupt
+ //
+ if (val == 0x4a || val == 0x4e) {
+ int rn_tick = SIM_get_register_number(cpu_obj, "tick");
+ uinteger_t tick = SIM_read_register(cpu_obj, rn_tick);
+ int rn_tick_cmpr = SIM_get_register_number(cpu_obj, "tick_cmpr");
+ uinteger_t tick_cmpr = SIM_read_register(cpu_obj, rn_tick_cmpr);
+ int rn_stick = SIM_get_register_number(cpu_obj, "stick");
+ uinteger_t stick = SIM_read_register(cpu_obj, rn_stick);
+ int rn_stick_cmpr = SIM_get_register_number(cpu_obj, "stick_cmpr");
+ uinteger_t stick_cmpr = SIM_read_register(cpu_obj, rn_stick_cmpr);
+ int rn_pc = SIM_get_register_number(cpu_obj, "pc");
+ uinteger_t pc = SIM_read_register(cpu_obj, rn_pc);
+ int rn_npc = SIM_get_register_number(cpu_obj, "npc");
+ uinteger_t npc = SIM_read_register(cpu_obj, rn_npc);
+ int rn_pstate = SIM_get_register_number(cpu_obj, "pstate");
+ uinteger_t pstate = SIM_read_register(cpu_obj, rn_pstate);
+ int rn_pil = SIM_get_register_number(cpu_obj, "pil");
+ int pil = SIM_read_register(cpu_obj, rn_pil);
+ g_system_ptr->getProfiler()->profileTimerInterrupt(proc_no,
+ tick, tick_cmpr,
+ stick, stick_cmpr,
+ trap_level,
+ pc, npc,
+ pstate, pil);
+ }
+
+ int smt_thread_num = proc_no % RubyConfig::numberofSMTThreads();
+ // The simulated processor number
+ int sim_proc_no = proc_no / RubyConfig::numberofSMTThreads();
+
+ uinteger_t pc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "pc"));
+ uinteger_t npc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "npc"));
+
+ g_system_ptr->getProfiler()->profileExceptionStart(xact_mgr->getTransactionLevel(smt_thread_num) > 0, sim_proc_no, smt_thread_num, val, trap_level, pc, npc);
+
+ if((val >= 0x80 && val <= 0x9f) || (val >= 0xc0 && val <= 0xdf)){
+ //xact_mgr->setLoggedException(smt_thread_num);
+ }
+ // CORNER CASE - You take an exception while stalling for a commit token
+ if (XACT_LAZY_VM && !XACT_EAGER_CD){
+ if (g_system_ptr->getXactCommitArbiter()->getTokenOwner() == proc_no)
+ g_system_ptr->getXactCommitArbiter()->releaseCommitToken(proc_no);
+ }
+#endif
+ assert(0); // gem5 port: Simics hap callbacks are never installed
+}
+
+extern "C"
+// Simics exception-done callback (LogTM support): read the return tpc/tnpc
+// pair for the current trap level (1..5), profiled the exception end, and
+// on trap 0x122 dispatched the transaction abort to hardware or to the
+// software handler.  Body disabled (#if 0); stubbed with assert(0).
+void ctrl_exception_done(void* desc, void* cpu, integer_t val)
+{
+ assert(0); // gem5 port: Simics hap callbacks are never installed
+#if 0
+ int proc_no = SIM_get_proc_no((void*) cpu);
+ void* cpu_obj = (void*) cpu;
+ uinteger_t trap_level = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tl"));
+ uinteger_t pc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "pc"));
+ uinteger_t npc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "npc"));
+ uinteger_t tpc = 0;
+ uinteger_t tnpc = 0;
+ //get the return PC,NPC pair based on the trap level
+ ASSERT(1 <= trap_level && trap_level <= 5);
+ if(trap_level == 1){
+ tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc1"));
+ tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc1"));
+ }
+ if(trap_level == 2){
+ tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc2"));
+ tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc2"));
+ }
+ if(trap_level == 3){
+ tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc3"));
+ tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc3"));
+ }
+ if(trap_level == 4){
+ tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc4"));
+ tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc4"));
+ }
+ if(trap_level == 5){
+ tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc5"));
+ tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc5"));
+ }
+
+ if (!XACT_MEMORY) return;
+ TransactionInterfaceManager *xact_mgr = XACT_MGR;
+
+ int smt_thread_num = proc_no % RubyConfig::numberofSMTThreads();
+ // The simulated processor number
+ int sim_proc_no = proc_no / RubyConfig::numberofSMTThreads();
+
+ if (proc_no != SIMICS_current_processor_number()){
+ WARN_EXPR(proc_no);
+ WARN_EXPR(SIMICS_current_processor_number());
+ WARN_MSG("Callback for a different processor");
+ }
+
+ g_system_ptr->getProfiler()->profileExceptionDone(xact_mgr->getTransactionLevel(smt_thread_num) > 0, sim_proc_no, smt_thread_num, val, trap_level, pc, npc, tpc, tnpc);
+
+ if((val >= 0x80 && val <= 0x9f) || (val >= 0xc0 && val <= 0xdf)){
+ //xact_mgr->clearLoggedException(smt_thread_num);
+ }
+
+ if ((val == 0x122) && xact_mgr->shouldTrap(smt_thread_num)){
+ // use software handler
+ if (xact_mgr->shouldUseHardwareAbort(smt_thread_num)){
+ xact_mgr->hardwareAbort(smt_thread_num);
+ } else {
+ xact_mgr->trapToHandler(smt_thread_num);
+ }
+ }
+#endif
+}
+
+extern "C"
+// Forwarded privilege-mode changes to the SimicsDriver hypervisor when
+// LogTM-SE virtualization was enabled; disabled (#if 0) and stubbed here.
+void change_mode_callback(void* desc, void* cpu, integer_t old_mode, integer_t new_mode)
+{
+ assert(0); // gem5 port: Simics hap callbacks are never installed
+#if 0
+ if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
+ SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
+ simics_interface_ptr->getHypervisor()->change_mode_callback(desc, cpu, old_mode, new_mode);
+ }
+#endif
+}
+
+// The four dtlb_* callbacks below forwarded MMU tag/data register events to
+// the SimicsDriver hypervisor when LogTM-SE virtualization was enabled.
+// All four bodies are disabled (#if 0) and stubbed with assert(0) in the
+// gem5 port.
+extern "C"
+void dtlb_map_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg){
+ assert(0);
+#if 0
+ if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
+ SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
+ simics_interface_ptr->getHypervisor()->dtlb_map_callback(desc, chmmu, tag_reg, data_reg);
+ }
+#endif
+}
+
+extern "C"
+void dtlb_demap_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg){
+ assert(0);
+#if 0
+ if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
+ SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
+ simics_interface_ptr->getHypervisor()->dtlb_demap_callback(desc, chmmu, tag_reg, data_reg);
+ }
+#endif
+}
+
+extern "C"
+void dtlb_replace_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg){
+ assert(0);
+#if 0
+ if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
+ SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
+ simics_interface_ptr->getHypervisor()->dtlb_replace_callback(desc, chmmu, tag_reg, data_reg);
+ }
+#endif
+}
+
+extern "C"
+void dtlb_overwrite_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg){
+ assert(0);
+#if 0
+ if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
+ SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
+ simics_interface_ptr->getHypervisor()->dtlb_overwrite_callback(desc, chmmu, tag_reg, data_reg);
+ }
+#endif
+}
+
+extern "C"
+// Stub: even the disabled (#if 0) body only computed locals and did nothing
+// with them.
+void core_control_register_write_callback(void* desc, void* cpu, integer_t register_number, integer_t value) {
+ assert(0);
+#if 0
+ int proc_no = SIM_get_proc_no((void*) cpu);
+ void* cpu_obj = (void*) cpu;
+#endif
+}
+
+// Reads the named register from the given cpu via the Simics API; returns
+// 0 in the gem5 port, where the Simics path is disabled (#if 0) and the
+// function is stubbed with assert(0).
+integer_t
+read_reg(void *cpu, const char* reg_name)
+{
+ assert(0);
+#if 0
+ int reg_num = SIM_get_register_number(SIM_current_processor(), reg_name);
+ if (SIM_clear_exception()) {
+ fprintf(stderr, "read_reg: SIM_get_register_number(%s, %s) failed!\n",
+ cpu->name, reg_name);
+ assert(0);
+ }
+ integer_t val = SIM_read_register(cpu, reg_num);
+ if (SIM_clear_exception()) {
+ fprintf(stderr, "read_reg: SIM_read_register(%s, %d) failed!\n",
+ cpu->name, reg_num);
+ assert(0);
+ }
+ return val;
+#endif
+ return 0;
+}
+
+extern "C"
+// Debug helper: printed the SPARC register file plus the disassembled
+// instruction at the current PC.  Disabled (#if 0) and stubbed.
+void dump_registers(void *cpu)
+{
+ assert(0);
+#if 0
+ const char* reg_names[] = {
+ "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
+ "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
+ "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
+ "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
+ "ccr", "pc", "npc"
+ };
+
+ printf("Registers for %s\n", cpu->name);
+ printf("------------------\n");
+
+ for (int i = 0; i < (sizeof(reg_names) / sizeof(char*)); i++) {
+ const char* reg_name = reg_names[i];
+ printf(" %3s: 0x%016llx\n", reg_name, read_reg(cpu, reg_name));
+ if (i % 8 == 7) {
+ printf("\n");
+ }
+ }
+
+ int myID = SIMICS_get_proc_no(cpu);
+ Address myPC = SIMICS_get_program_counter(myID);
+ physical_address_t myPhysPC = SIMICS_translate_address(myID, myPC);
+ integer_t myInst = SIMICS_read_physical_memory(myID, myPhysPC, 4);
+ const char *myInstStr = SIMICS_disassemble_physical(myID, myPhysPC);
+ // NOTE(review): the "*pc:" label prints myInst (the fetched opcode
+ // word), not the PC value itself -- confirm intent before re-enabling.
+ printf("\n *pc: 0x%llx: %s\n", myInst, myInstStr);
+
+ printf("\n\n");
+#endif
+}
diff --git a/src/mem/ruby/simics/commands.hh b/src/mem/ruby/simics/commands.hh
new file mode 100644
index 000000000..e7593c2c3
--- /dev/null
+++ b/src/mem/ruby/simics/commands.hh
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file has been modified by Kevin Moore and Dan Nussbaum of the
+ Scalable Systems Research Group at Sun Microsystems Laboratories
+ (http://research.sun.com/scalable/) to support the Adaptive
+ Transactional Memory Test Platform (ATMTP).
+
+ Please send email to atmtp-interest@sun.com with feedback, questions, or
+ to request future announcements about ATMTP.
+
+ ----------------------------------------------------------------------
+
+ File modification date: 2008-02-23
+
+ ----------------------------------------------------------------------
+*/
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef COMMANDS_H
+#define COMMANDS_H
+
+// NOTE(review): both branches of this #ifdef define the same type, so the
+// conditional is currently redundant -- presumably a placeholder for a real
+// Simics memory_transaction_t on SPARC; confirm before simplifying.
+#ifdef SPARC
+ #define MEMORY_TRANSACTION_TYPE void
+#else
+ #define MEMORY_TRANSACTION_TYPE void
+#endif
+
+// Memory-tracer hooks invoked on each observed memory transaction.
+int mh_memorytracer_possible_cache_miss(MEMORY_TRANSACTION_TYPE *mem_trans);
+void mh_memorytracer_observe_memory(MEMORY_TRANSACTION_TYPE *mem_trans);
+
+void magic_instruction_callback(void* desc, void * cpu, integer_t val);
+
+// Console commands controlling Ruby debug output.
+void ruby_change_debug_verbosity(char* new_verbosity_str);
+void ruby_change_debug_filter(char* new_filter_str);
+void ruby_set_debug_output_file (const char * new_filename);
+void ruby_set_debug_start_time(char* start_time_str);
+
+// Statistics commands.
+void ruby_clear_stats();
+void ruby_dump_stats(char* tag);
+void ruby_dump_short_stats(char* tag);
+
+void ruby_set_periodic_stats_file(char* filename);
+void ruby_set_periodic_stats_interval(int interval);
+
+// Cache checkpointing commands.
+void ruby_load_caches(char* name);
+void ruby_save_caches(char* name);
+
+void ruby_dump_cache(int cpuNumber);
+void ruby_dump_cache_data(int cpuNumber, char *tag);
+
+void ruby_set_tracer_output_file (const char * new_filename);
+void ruby_xact_visualizer_file (char * new_filename);
+
+// Exception / mode-change / TLB callbacks (transactional-memory support).
+void ctrl_exception_start(void* desc, void* cpu, integer_t val);
+void ctrl_exception_done(void* desc, void* cpu, integer_t val);
+
+void change_mode_callback(void* desc, void* cpu, integer_t old_mode, integer_t new_mode);
+void dtlb_map_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg);
+void dtlb_demap_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg);
+void dtlb_replace_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg);
+void dtlb_overwrite_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg);
+
+// Register-inspection debug helpers.
+integer_t read_reg(void *cpu, const char* reg_name);
+void dump_registers(void *cpu);
+
+// Needed so that the ruby module will compile, but functions are
+// implemented in Rock.C.
+//
+void rock_exception_start(void* desc, void* cpu, integer_t val);
+void rock_exception_done(void* desc, void* cpu, integer_t val);
+
+#endif //COMMANDS_H
diff --git a/src/mem/ruby/simics/interface.cc b/src/mem/ruby/simics/interface.cc
new file mode 100644
index 000000000..92c30c23e
--- /dev/null
+++ b/src/mem/ruby/simics/interface.cc
@@ -0,0 +1,935 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: interface.C 1.39 05/01/19 13:12:31-06:00 mikem@maya.cs.wisc.edu $
+ *
+ */
+
+#include "Global.hh"
+#include "System.hh"
+#include "OpalInterface.hh"
+#include "EventQueue.hh"
+#include "mf_api.hh"
+#include "interface.hh"
+#include "Sequencer.hh"
+// #include "TransactionInterfaceManager.hh"
+
+#ifdef CONTIGUOUS_ADDRESSES
+#include "ContiguousAddressTranslator.hh"
+
+/* Also used in init.C, commands.C */
+ContiguousAddressTranslator * g_p_ca_translator = NULL;
+
+#endif // #ifdef CONTIGUOUS_ADDRESSES
+
+//////////////////////// Local helper functions //////////////////////
+
+// Callback when an exception occurs; previously forwarded to the
+// SimicsDriver (commented out), now stubbed with assert(0).
+static void core_exception_callback(void *data, void *cpu,
+ integer_t exc)
+{
+ // SimicsDriver *simics_intf = dynamic_cast<SimicsDriver*>(g_system_ptr->getDriver());
+ // ASSERT( simics_intf );
+ // simics_intf->exceptionCallback(cpu, exc);
+ assert(0);
+}
+
+#ifdef SPARC
+// Callback when asi accesses occur
+// static exception_type_t core_asi_callback(void * cpu, generic_transaction_t *g)
+// {
+// SimicsDriver *simics_intf = dynamic_cast<SimicsDriver*>(g_system_ptr->getDriver());
+// assert( simics_intf );
+// return simics_intf->asiCallback(cpu, g);
+// }
+#endif
+
+// Advances the Ruby event queue by one tick; the re-posting of this
+// callback onto the Simics time queue is commented out, and the function
+// ends in assert(0) for the gem5 port.
+static void runRubyEventQueue(void* obj, void* arg)
+{
+ Time time = g_eventQueue_ptr->getTime() + 1;
+ DEBUG_EXPR(NODE_COMP, HighPrio, time);
+ g_eventQueue_ptr->triggerEvents(time);
+// void* obj_ptr = (void*) SIM_proc_no_2_ptr(0); // Maurice
+// SIM_time_post_cycle(obj_ptr, SIMICS_RUBY_MULTIPLIER, Sim_Sync_Processor, &runRubyEventQueue, NULL); // Maurice
+ assert(0);
+}
+
+//////////////////////// Simics API functions //////////////////////
+
+// Returns the simulated processor count; Simics query commented out, stub
+// asserts and returns 0.
+int SIMICS_number_processors()
+{
+// return SIM_number_processors(); // Maurice
+ assert(0);
+ return 0;
+}
+
+// Would schedule runRubyEventQueue on the Simics time queue; stubbed.
+void SIMICS_wakeup_ruby()
+{
+// void* obj_ptr = (void*) SIM_proc_no_2_ptr(0); // Maurice
+// SIM_time_post_cycle(obj_ptr, SIMICS_RUBY_MULTIPLIER, Sim_Sync_Processor, &runRubyEventQueue, NULL); // Maurice
+ assert(0);
+}
+
+// An analogue to wakeup ruby, this function ends the callbacks ruby normally
+// receives from Simics (it removes ruby from Simics's event queue).  This
+// function should only be called when Opal is installed.  Opal advances
+// ruby's event queue independently of Simics.
+void SIMICS_remove_ruby_callback( void )
+{
+// void* obj_ptr = (void*) SIM_proc_no_2_ptr(0); // Maurice
+// SIM_time_clean( obj_ptr, Sim_Sync_Processor, &runRubyEventQueue, NULL); // Maurice
+ assert(0);
+}
+
+// Install ruby as the timing model (analogous code exists in ruby/ruby.c).
+// Entire Simics attribute-setting body is commented out; stubbed.
+void SIMICS_install_timing_model( void )
+{
+// // void *phys_mem0 = SIM_get_object("phys_mem0"); // Maurice
+// attr_value_t val;
+// // val.kind = Sim_Val_String; // Maurice
+// val.u.string = "ruby0";
+// set_error_t install_error;
+//
+// if(phys_mem0==NULL) {
+// /* Look for "phys_mem" instead */
+// // SIM_clear_exception(); // Maurice
+// // phys_mem0 = SIM_get_object("phys_mem"); // Maurice
+// }
+//
+// if(phys_mem0==NULL) {
+// /* Okay, now panic... can't install ruby without a physical memory object */
+// WARN_MSG( "Cannot Install Ruby... no phys_mem0 or phys_mem object found" );
+// WARN_MSG( "Ruby is NOT installed." );
+// // SIM_clear_exception(); // Maurice
+// return;
+// }
+//
+// // install_error = SIM_set_attribute(phys_mem0, "timing_model", &val); // Maurice
+//
+// // if (install_error == Sim_Set_Ok) { // Maurice
+// WARN_MSG( "successful installation of the ruby timing model" );
+// } else {
+// WARN_MSG( "error installing ruby timing model" );
+// // WARN_MSG( SIM_last_error() ); // Maurice
+// }
+
+ assert(0);
+}
+
+// Removes ruby as the timing model interface.  Body commented out; stubbed.
+void SIMICS_remove_timing_model( void )
+{
+// void *phys_mem0 = SIM_get_object("phys_mem0"); // Maurice
+// attr_value_t val;
+// memset( &val, 0, sizeof(attr_value_t) );
+// // val.kind = Sim_Val_Nil; // Maurice
+//
+// if(phys_mem0==NULL) {
+// /* Look for "phys_mem" instead */
+// // SIM_clear_exception(); // Maurice
+// // phys_mem0 = SIM_get_object("phys_mem"); // Maurice
+// }
+//
+// if(phys_mem0==NULL) {
+// /* Okay, now panic... can't uninstall ruby without a physical memory object */
+// WARN_MSG( "Cannot Uninstall Ruby... no phys_mem0 or phys_mem object found" );
+// WARN_MSG( "Uninstall NOT performed." );
+// // SIM_clear_exception(); // Maurice
+// return;
+// }
+//
+// // SIM_set_attribute(phys_mem0, "timing_model", &val); // Maurice
+ assert(0);
+}
+
+// Installs the (SimicsDriver) function to receive the exception callback.
+// Hap registration commented out; stubbed.
+void SIMICS_install_exception_callback( void )
+{
+ // install exception callback
+ // s_exception_hap_handle =
+// SIM_hap_add_callback("Core_Exception", // Maurice
+ // (obj_hap_func_t)core_exception_callback, NULL );
+ assert(0);
+}
+
+// Removes the exception callback.  Hap deregistration commented out; stubbed.
+void SIMICS_remove_exception_callback( void )
+{
+ // uninstall exception callback
+// SIM_hap_delete_callback_id( "Core_Exception", // Maurice
+ // s_exception_hap_handle );
+ assert(0);
+}
+
+#ifdef SPARC
+// Installs the (SimicsDriver) function to receive the ASI callback.
+// SPARC-V9 interface registration commented out; stubbed.
+void SIMICS_install_asi_callback( void )
+{
+// for(int i = 0; i < SIM_number_processors(); i++) { // Maurice
+ // sparc_v9_interface_t *v9_interface = (sparc_v9_interface_t *)
+// SIM_get_interface(SIM_proc_no_2_ptr(i), SPARC_V9_INTERFACE); // Maurice
+
+ // init asi callbacks, 16bit ASI
+ // for(int j = 0; j < MAX_ADDRESS_SPACE_ID; j++) {
+ // v9_interface->install_user_asi_handler(core_asi_callback, j);
+ // }
+ // }
+ assert(0);
+}
+
+// Removes the ASI callback.  Deregistration commented out; stubbed.
+void SIMICS_remove_asi_callback( void )
+{
+// for(int i = 0; i < SIM_number_processors(); i++) { // Maurice
+// sparc_v9_interface_t *v9_interface = (sparc_v9_interface_t *)
+// SIM_get_interface(SIM_proc_no_2_ptr(i), SPARC_V9_INTERFACE); // Maurice
+
+ // disable asi callback
+ // for(int j = 0; j < MAX_ADDRESS_SPACE_ID; j++) {
+ // v9_interface->remove_user_asi_handler(core_asi_callback, j);
+ // }
+ // }
+ assert(0);
+}
+#endif
+
+// Query simics for the presence of the opal object.
+// returns its interface if found, NULL otherwise.  Lookup commented out;
+// the stub asserts and returns NULL.
+mf_opal_api_t *SIMICS_get_opal_interface( void )
+{
+// void *opal = SIM_get_object("opal0"); // Maurice
+ //if (opal != NULL) {
+// mf_opal_api_t *opal_intf = (mf_opal_api_t *) SIM_get_interface( opal, "mf-opal-api" ); // Maurice
+ // if ( opal_intf != NULL ) {
+ // return opal_intf;
+// } else {
+// WARN_MSG("error: OpalInterface: opal does not implement mf-opal-api interface.\n");
+// return NULL;
+// }
+// }
+// SIM_clear_exception(); // Maurice
+ assert(0);
+ return NULL;
+}
+
+// Current-processor accessors; Simics queries commented out, stubs assert
+// and return NULL / 0 respectively.
+void * SIMICS_current_processor(){
+// return SIM_current_processor(); // Maurice
+ assert(0);
+ return NULL;
+}
+
+int SIMICS_current_processor_number()
+{
+// return (SIM_get_proc_no((processor_t *) SIM_current_processor())); // Maurice
+ assert(0);
+ return 0;
+}
+
+// Returns the retired-instruction count for the given simulated cpu.
+// NOTE(review): num_smt_threads is computed but unused even in the original;
+// the Simics step query is commented out and the stub asserts.
+integer_t SIMICS_get_insn_count(int cpuNumber)
+{
+ // NOTE: we already pass in the logical cpuNumber (ie Simics simulated cpu number)
+ int num_smt_threads = RubyConfig::numberofSMTThreads();
+ integer_t total_insn = 0;
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// total_insn += SIM_step_count((void*) cpu); // Maurice
+ assert(0);
+ return total_insn;
+}
+
+// Returns the cycle count for the given simulated cpu; Simics query
+// commented out, stub asserts and returns 0.
+integer_t SIMICS_get_cycle_count(int cpuNumber)
+{
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// integer_t result = SIM_cycle_count((void*) cpu); // Maurice
+ assert(0);
+ return 0;
+}
+
+// Unstall a processor: the no-argument form cleared the stall (stall for 0
+// cycles), the two-argument form restalled for `cycles`.  Both stubbed.
+void SIMICS_unstall_proc(int cpuNumber)
+{
+// void* proc_ptr = (void *) SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// SIM_stall_cycle(proc_ptr, 0); // Maurice
+ assert(0);
+}
+
+void SIMICS_unstall_proc(int cpuNumber, int cycles)
+{
+// void* proc_ptr = (void *) SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// SIM_stall_cycle(proc_ptr, cycles); // Maurice
+ assert(0);
+}
+
+// Stall a processor for `cycles`; warned if it was already stalled.  Stubbed.
+void SIMICS_stall_proc(int cpuNumber, int cycles)
+{
+// void* proc_ptr = (void*) SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// if (SIM_stalled_until(proc_ptr) != 0){ // Maurice
+// cout << cpuNumber << " Trying to stall. Stall Count currently at " << SIM_stalled_until(proc_ptr) << endl; // Maurice
+// }
+// SIM_stall_cycle(proc_ptr, cycles); // Maurice
+ assert(0);
+}
+
+// Posted a deferred stall request onto the Simics event stack.  Stubbed.
+void SIMICS_post_stall_proc(int cpuNumber, int cycles)
+{
+// void* proc_ptr = (void*) SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// SIM_stacked_post(proc_ptr, ruby_stall_proc, (void *) cycles); // Maurice
+ assert(0);
+}
+
+// Read up to 8 bytes of simulated physical memory (with optional
+// contiguous-address translation).  Entire body commented out; stub
+// asserts and returns 0.
+integer_t SIMICS_read_physical_memory( int procID, physical_address_t address,
+ int len )
+{
+// // SIM_clear_exception(); // Maurice
+// ASSERT( len <= 8 );
+// #ifdef CONTIGUOUS_ADDRESSES
+// if(g_p_ca_translator != NULL) {
+// address = g_p_ca_translator->TranslateRubyToSimics( address );
+// }
+// #endif // #ifdef CONTIGUOUS_ADDRESSES
+// // integer_t result = SIM_read_phys_memory( SIM_proc_no_2_ptr(procID), // Maurice
+// // // address, len );
+// //
+// // // int isexcept = SIM_get_pending_exception(); // Maurice
+// // if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
+// // // sim_exception_t except_code = SIM_clear_exception(); // Maurice
+// // WARN_MSG( "SIMICS_read_physical_memory: raised exception." );
+// // // WARN_MSG( SIM_last_error() ); // Maurice
+// // WARN_MSG( Address(address) );
+// // WARN_MSG( procID );
+// // ASSERT(0);
+// // }
+// // return ( result );
+ assert(0);
+ return 0;
+}
+//
+// /*
+// * Read data into a buffer and assume the buffer is already allocated
+// */
+// Read `len` bytes of simulated physical memory into a caller-allocated
+// buffer, 8 bytes at a time (byte order depended on SPARC vs x86 target).
+// Entire body commented out; stub asserts.
+void SIMICS_read_physical_memory_buffer(int procID, physical_address_t addr,
+ char* buffer, int len ) {
+// // // processor_t* obj = SIM_proc_no_2_ptr(procID); // Maurice
+// //
+// // assert( obj != NULL);
+// // assert( buffer != NULL );
+// //
+// // #ifdef CONTIGUOUS_ADDRESSES
+// // if(g_p_ca_translator != NULL) {
+// // addr = g_p_ca_translator->TranslateRubyToSimics( addr );
+// // }
+// // #endif // #ifdef CONTIGUOUS_ADDRESSES
+// //
+// // int buffer_pos = 0;
+// // physical_address_t start = addr;
+// // do {
+// // int size = (len < 8)? len:8;
+// // // integer_t result = SIM_read_phys_memory( obj, start, size ); // Maurice
+// // // int isexcept = SIM_get_pending_exception(); // Maurice
+// // if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
+// // // sim_exception_t except_code = SIM_clear_exception(); // Maurice
+// // WARN_MSG( "SIMICS_read_physical_memory_buffer: raised exception." );
+// // // WARN_MSG( SIM_last_error() ); // Maurice
+// // WARN_MSG( addr );
+// // WARN_MSG( procID );
+// // ASSERT( 0 );
+// // }
+// //
+// // #ifdef SPARC
+// // // assume big endian (i.e. SPARC V9 target)
+// // for(int i = size-1; i >= 0; i--) {
+// // #else
+// // // assume little endian (i.e. x86 target)
+// // for(int i = 0; i<size; i++) {
+// // #endif
+// // buffer[buffer_pos++] = (char) ((result>>(i<<3))&0xff);
+// // }
+// //
+// // len -= size;
+// // start += size;
+// // } while(len != 0);
+ assert(0);
+}
+//
+// Write up to 8 bytes to simulated physical memory (with optional
+// contiguous-address translation).  Entire body commented out; stub asserts.
+void SIMICS_write_physical_memory( int procID, physical_address_t address,
+ integer_t value, int len )
+ {
+// // ASSERT( len <= 8 );
+// //
+// // // SIM_clear_exception(); // Maurice
+// //
+// // // processor_t* obj = SIM_proc_no_2_ptr(procID); // Maurice
+// //
+// // #ifdef CONTIGUOUS_ADDRESSES
+// // if(g_p_ca_translator != NULL) {
+// // address = g_p_ca_translator->TranslateRubyToSimics( address );
+// // }
+// // #endif // #ifdef CONTIGUOUS_ADDRESSES
+// //
+// // // int isexcept = SIM_get_pending_exception(); // Maurice
+// // if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
+// // // sim_exception_t except_code = SIM_clear_exception(); // Maurice
+// // WARN_MSG( "SIMICS_write_physical_memory 1: raised exception." );
+// // // WARN_MSG( SIM_last_error() ); // Maurice
+// // WARN_MSG( address );
+// // }
+// //
+// // // SIM_write_phys_memory(obj, address, value, len ); // Maurice
+// //
+// // // isexcept = SIM_get_pending_exception(); // Maurice
+// // if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
+// // // sim_exception_t except_code = SIM_clear_exception(); // Maurice
+// // WARN_MSG( "SIMICS_write_physical_memory 2: raised exception." );
+// // // WARN_MSG( SIM_last_error() ); // Maurice
+// // WARN_MSG( address );
+// // }
+ assert(0);
+}
+//
+// /*
+// * write data to simics memory from a buffer (assumes the buffer is valid)
+// */
+// Write `len` bytes from a caller-supplied buffer to simulated physical
+// memory, 8 bytes at a time (byte order depended on SPARC vs x86 target).
+// Entire body commented out; stub asserts.
+void SIMICS_write_physical_memory_buffer(int procID, physical_address_t addr,
+ char* buffer, int len ) {
+// // processor_t* obj = SIM_proc_no_2_ptr(procID); // Maurice
+//
+// assert( obj != NULL);
+// assert( buffer != NULL );
+//
+// #ifdef CONTIGUOUS_ADDRESSES
+// if(g_p_ca_translator != NULL) {
+// addr = g_p_ca_translator->TranslateRubyToSimics( addr );
+// }
+// #endif // #ifdef CONTIGUOUS_ADDRESSES
+//
+// int buffer_pos = 0;
+// physical_address_t start = addr;
+// do {
+// int size = (len < 8)? len:8;
+// // //integer_t result = SIM_read_phys_memory( obj, start, size ); // Maurice
+// integer_t value = 0;
+// #ifdef SPARC
+// // assume big endian (i.e. SPARC V9 target)
+// for(int i = size-1; i >= 0; i--) {
+// #else
+// // assume little endian (i.e. x86 target)
+// for(int i = 0; i<size; i++) {
+// #endif
+// integer_t mask = buffer[buffer_pos++];
+// value |= ((mask)<<(i<<3));
+// }
+//
+//
+// // SIM_write_phys_memory( obj, start, value, size); // Maurice
+// // int isexcept = SIM_get_pending_exception(); // Maurice
+// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
+// // sim_exception_t except_code = SIM_clear_exception(); // Maurice
+// WARN_MSG( "SIMICS_write_physical_memory_buffer: raised exception." );
+// // WARN_MSG( SIM_last_error() ); // Maurice
+// WARN_MSG( addr );
+// }
+//
+// len -= size;
+// start += size;
+// } while(len != 0);
+ assert(0);
+}
+
+// Compares `len` bytes of simulated memory at `addr` against `buffer`.
+// NOTE(review): `char buf[len]` is a variable-length array -- a GCC
+// extension, not standard C++; and the assert(0) sits between the read and
+// the compare, so the memcmp is unreachable.  Confirm intent if reviving.
+bool SIMICS_check_memory_value(int procID, physical_address_t addr,
+ char* buffer, int len) {
+ char buf[len];
+ SIMICS_read_physical_memory_buffer(procID, addr, buf, len);
+ assert(0);
+ return (memcmp(buffer, buf, len) == 0)? true:false;
+}
+
+// Translate a virtual instruction address to physical (returned 0 when the
+// translation raised an exception).  Entire body commented out; stub
+// asserts and returns 0.
+physical_address_t SIMICS_translate_address( int procID, Address address ) {
+// SIM_clear_exception(); // Maurice
+// physical_address_t physical_addr = SIM_logical_to_physical(SIM_proc_no_2_ptr(procID), Sim_DI_Instruction, address.getAddress() ); // Maurice
+// int isexcept = SIM_get_pending_exception(); // Maurice
+// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
+// sim_exception_t except_code = SIM_clear_exception(); // Maurice
+// /*
+// WARN_MSG( "SIMICS_translate_address: raised exception." );
+// WARN_MSG( procID );
+// WARN_MSG( address );
+// // WARN_MSG( SIM_last_error() ); // Maurice
+// */
+// return 0;
+// }
+//
+// #ifdef CONTIGUOUS_ADDRESSES
+// if(g_p_ca_translator != NULL) {
+// physical_addr = g_p_ca_translator->TranslateSimicsToRuby( physical_addr );
+// }
+// #endif // #ifdef CONTIGUOUS_ADDRESSES
+//
+// return physical_addr;
+ assert(0);
+ return 0;
+}
+
+physical_address_t SIMICS_translate_data_address( int procID, Address address ) {
+// SIM_clear_exception(); // Maurice
+// physical_address_t physical_addr = SIM_logical_to_physical(SIM_proc_no_2_ptr(procID), Sim_DI_Data, address.getAddress() ); // Maurice
+// int isexcept = SIM_get_pending_exception(); // Maurice
+// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
+// sim_exception_t except_code = SIM_clear_exception(); // Maurice
+ /*
+ WARN_MSG( "SIMICS_translate_data_address: raised exception." );
+ WARN_MSG( procID );
+ WARN_MSG( address );
+// WARN_MSG( SIM_last_error() ); // Maurice
+ */
+// }
+// return physical_addr;
+ assert(0);
+ return 0;
+}
+
+#ifdef SPARC
+bool SIMICS_is_ldda(const memory_transaction_t *mem_trans) {
+// void *cpu = mem_trans->s.ini_ptr;
+// int proc= SIMICS_get_proc_no(cpu);
+// Address addr = SIMICS_get_program_counter(cpu);
+// physical_address_t phys_addr = SIMICS_translate_address( proc, addr );
+// uint32 instr= SIMICS_read_physical_memory( proc, phys_addr, 4 );
+//
+// // determine if this is a "ldda" instruction (non-exclusive atomic)
+// // ldda bit mask: 1100 0001 1111 1000 == 0xc1f80000
+// // ldda match : 1100 0000 1001 1000 == 0xc0980000
+// if ( (instr & 0xc1f80000) == 0xc0980000 ) {
+// // should exactly be ldda instructions
+// ASSERT(!strncmp(SIMICS_disassemble_physical(proc, phys_addr), "ldda", 4));
+// //cout << "SIMICS_is_ldda END" << endl;
+// return true;
+// }
+// return false;
+ assert(0);
+ return false;
+}
+#endif
+
+const char *SIMICS_disassemble_physical( int procID, physical_address_t pa ) {
+//#ifdef CONTIGUOUS_ADDRESSES
+// if(g_p_ca_translator != NULL) {
+// pa = g_p_ca_translator->TranslateRubyToSimics( pa );
+// }
+//#endif // #ifdef CONTIGUOUS_ADDRESSES
+// return SIM_disassemble( SIM_proc_no_2_ptr(procID), pa , /* physical */ 0)->string; // Maurice
+ assert(0);
+ return "There is no spoon";
+}
+
+Address SIMICS_get_program_counter(void *cpu) {
+ assert(cpu != NULL);
+// return Address(SIM_get_program_counter((processor_t *) cpu)); // Maurice
+ assert(0);
+ return Address(0);
+}
+
+Address SIMICS_get_npc(int procID) {
+// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
+// return Address(SIM_read_register(cpu, SIM_get_register_number(cpu, "npc"))); // Maurice
+ assert(0);
+ return Address(0);
+}
+
+Address SIMICS_get_program_counter(int procID) {
+// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
+// assert(cpu != NULL);
+
+// Address addr = Address(SIM_get_program_counter(cpu)); // Maurice
+ assert(0);
+ return Address(0);
+}
+
+// /* NOTE: SIM_set_program_counter sets NPC to PC+4 */ // Maurice
+void SIMICS_set_program_counter(int procID, Address newPC) {
+// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
+// assert(cpu != NULL);
+
+// SIM_stacked_post(cpu, ruby_set_program_counter, (void*) newPC.getAddress()); // Maurice
+ assert(0);
+}
+
+void SIMICS_set_pc(int procID, Address newPC) {
+ // IMPORTANT: procID is the SIMICS simulated proc number (takes into account SMT)
+// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
+// assert(cpu != NULL);
+//
+// if(OpalInterface::isOpalLoaded() == false){
+// // SIM_set_program_counter(cpu, newPC.getAddress()); // Maurice
+// } else {
+// // explicitly change PC
+// ruby_set_pc( cpu, (void *) newPC.getAddress() );
+// }
+// // int isexcept = SIM_get_pending_exception(); // Maurice
+// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
+// // sim_exception_t except_code = SIM_clear_exception(); // Maurice
+// WARN_MSG( "SIMICS_set_pc: raised exception." );
+// // WARN_MSG( SIM_last_error() ); // Maurice
+// ASSERT(0);
+// }
+ assert(0);
+}
+
+void SIMICS_set_next_program_counter(int procID, Address newNPC) {
+// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
+// assert(cpu != NULL);
+
+// SIM_stacked_post(cpu, ruby_set_npc, (void*) newNPC.getAddress()); // Maurice
+ assert(0);
+}
+
+void SIMICS_set_npc(int procID, Address newNPC) {
+// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
+// assert(cpu != NULL);
+//
+// if(OpalInterface::isOpalLoaded() == false){
+// // SIM_write_register(cpu, SIM_get_register_number(cpu, "npc"), newNPC.getAddress()); // Maurice
+// } else {
+// // explicitly change NPC
+// ruby_set_npc( cpu, (void *) newNPC.getAddress() );
+// }
+//
+// // int isexcept = SIM_get_pending_exception(); // Maurice
+// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
+// // sim_exception_t except_code = SIM_clear_exception(); // Maurice
+// WARN_MSG( "SIMICS_set_npc: raised exception " );
+// // WARN_MSG( SIM_last_error() ); // Maurice
+// ASSERT(0);
+// }
+ assert(0);
+}
+
+void SIMICS_post_continue_execution(int procID){
+// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
+// assert(cpu != NULL);
+//
+// if(OpalInterface::isOpalLoaded() == false){
+// // SIM_stacked_post(cpu, ruby_continue_execution, (void *) NULL); // Maurice
+// } else{
+// ruby_continue_execution( cpu, (void *) NULL );
+// }
+ assert(0);
+}
+
+void SIMICS_post_restart_transaction(int procID){
+// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
+// assert(cpu != NULL);
+//
+// if(OpalInterface::isOpalLoaded() == false){
+// // SIM_stacked_post(cpu, ruby_restart_transaction, (void *) NULL); // Maurice
+// } else{
+// ruby_restart_transaction( cpu, (void *) NULL );
+// }
+ assert(0);
+}
+
+// return -1 when fail
+int SIMICS_get_proc_no(void *cpu) {
+// int proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
+// return proc_no;
+ assert(0);
+ return -1;
+}
+
+void SIMICS_disable_processor( int cpuNumber ) {
+// if(SIM_cpu_enabled(SIMICS_get_proc_ptr(cpuNumber))) { // Maurice
+// SIM_disable_processor(SIMICS_get_proc_ptr(cpuNumber)); // Maurice
+// } else {
+// WARN_MSG(cpuNumber);
+// WARN_MSG( "Tried to disable a 'disabled' processor");
+// ASSERT(0);
+// }
+ assert(0);
+}
+
+void SIMICS_post_disable_processor( int cpuNumber ) {
+// SIM_stacked_post(SIMICS_get_proc_ptr(cpuNumber), ruby_disable_processor, (void*) NULL); // Maurice
+ assert(0);
+}
+
+void SIMICS_enable_processor( int cpuNumber ) {
+// if(!SIM_cpu_enabled(SIMICS_get_proc_ptr(cpuNumber))) { // Maurice
+// SIM_enable_processor(SIMICS_get_proc_ptr(cpuNumber)); // Maurice
+// } else {
+// WARN_MSG(cpuNumber);
+// WARN_MSG( "Tried to enable an 'enabled' processor");
+// }
+ assert(0);
+}
+
+bool SIMICS_processor_enabled( int cpuNumber ) {
+// return SIM_cpu_enabled(SIMICS_get_proc_ptr(cpuNumber)); // Maurice
+ assert(0);
+ return false;
+}
+
+// return NULL when fail
+void* SIMICS_get_proc_ptr(int cpuNumber) {
+// return (void *) SIM_proc_no_2_ptr(cpuNumber); // Maurice
+ assert(0);
+ return NULL;
+}
+
+void SIMICS_print_version(ostream& out) {
+// const char* version = SIM_version(); // Maurice
+// if (version != NULL) {
+// out << "simics_version: " << SIM_version() << endl; // Maurice
+// }
+ out << "Mwa ha ha this is not Simics!!";
+}
+
+// KM -- From Nikhil's SN code
+//these functions should be in interface.C ??
+
+uinteger_t SIMICS_read_control_register(int cpuNumber, int registerNumber)
+{
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// uinteger_t result = SIM_read_register(cpu, registerNumber); // Maurice
+// return result;
+ assert(0);
+ return 0;
+}
+
+uinteger_t SIMICS_read_window_register(int cpuNumber, int window, int registerNumber)
+{
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// uinteger_t result = SIM_read_register(cpu, registerNumber); // Maurice
+// return result;
+ assert(0);
+ return 0;
+}
+
+uinteger_t SIMICS_read_global_register(int cpuNumber, int globals, int registerNumber)
+{
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// uinteger_t result = SIM_read_register(cpu, registerNumber); // Maurice
+// return result;
+ assert(0);
+ return 0;
+}
+
+/**
+ uint64 SIMICS_read_fp_register_x(int cpuNumber, int registerNumber)
+ {
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// return SIM_read_fp_register_x(cpu, registerNumber); // Maurice
+ }
+**/
+
+void SIMICS_write_control_register(int cpuNumber, int registerNumber, uinteger_t value)
+{
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// SIM_write_register(cpu, registerNumber, value); // Maurice
+ assert(0);
+}
+
+void SIMICS_write_window_register(int cpuNumber, int window, int registerNumber, uinteger_t value)
+{
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// SIM_write_register(cpu, registerNumber, value); // Maurice
+ assert(0);
+}
+
+void SIMICS_write_global_register(int cpuNumber, int globals, int registerNumber, uinteger_t value)
+{
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// SIM_write_register(cpu, registerNumber, value); // Maurice
+ assert(0);
+}
+
+/***
+ void SIMICS_write_fp_register_x(int cpuNumber, int registerNumber, uint64 value)
+ {
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// SIM_write_fp_register_x(cpu, registerNumber, value); // Maurice
+ }
+***/
+
+// KM -- Functions using new APIs (update from Nikhil's original)
+
+int SIMICS_get_register_number(int cpuNumber, const char * reg_name){
+// int result = SIM_get_register_number(SIM_proc_no_2_ptr(cpuNumber), reg_name); // Maurice
+// return result;
+ assert(0);
+ return 0;
+}
+
+const char * SIMICS_get_register_name(int cpuNumber, int reg_num){
+// const char * result = SIM_get_register_name(SIM_proc_no_2_ptr(cpuNumber), reg_num); // Maurice
+// return result;
+ assert(0);
+ return "Then we shall fight in the shade";
+}
+
+uinteger_t SIMICS_read_register(int cpuNumber, int registerNumber)
+{
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// uinteger_t result = SIM_read_register(cpu, registerNumber); // Maurice
+// return result;
+ assert(0);
+ return 0;
+}
+
+void SIMICS_write_register(int cpuNumber, int registerNumber, uinteger_t value)
+{
+// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
+// SIM_write_register(cpu, registerNumber, value); // Maurice
+ assert(0);
+}
+
+// This version is called whenever we are about to jump to the SW handler
+void ruby_set_pc(void *cpu, void *parameter){
+// physical_address_t paddr;
+// paddr = (physical_address_t) parameter;
+// // Simics' processor number
+// // int proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
+// int smt_proc_no = proc_no / RubyConfig::numberofSMTThreads();
+// // SIM_set_program_counter(cpu, paddr); // Maurice
+// //cout << "ruby_set_pc setting cpu[ " << proc_no << " ] smt_cpu[ " << smt_proc_no << " ] PC[ " << hex << paddr << " ]" << dec << endl;
+// // physical_address_t newpc = SIM_get_program_counter(cpu); // Maurice
+// // int pc_reg = SIM_get_register_number(cpu, "pc"); // Maurice
+// // int npc_reg = SIM_get_register_number( cpu, "npc"); // Maurice
+// // uinteger_t pc = SIM_read_register(cpu, pc_reg); // Maurice
+// // uinteger_t npc = SIM_read_register(cpu, npc_reg); // Maurice
+// //cout << "NEW PC[ 0x" << hex << newpc << " ]" << " PC REG[ 0x" << pc << " ] NPC REG[ 0x" << npc << " ]" << dec << endl;
+//
+// if(XACT_MEMORY){
+// if( !OpalInterface::isOpalLoaded() ){
+// // using SimicsDriver
+// ASSERT( proc_no == smt_proc_no );
+// SimicsDriver *simics_intf = dynamic_cast<SimicsDriver*>(g_system_ptr->getDriver());
+// ASSERT( simics_intf );
+// simics_intf->notifyTrapStart( proc_no, Address(paddr), 0 /*dummy threadID*/, 0 /* Simics uses 1 thread */ );
+// }
+// else{
+// // notify Opal about changing pc to SW handler
+// //cout << "informing Opal via notifyTrapStart proc = " << proc_no << endl;
+// //g_system_ptr->getSequencer(smt_proc_no)->notifyTrapStart( proc_no, Address(paddr) );
+// }
+//
+// if (XACT_DEBUG && XACT_DEBUG_LEVEL > 1){
+// cout << g_eventQueue_ptr->getTime() << " " << proc_no
+// << " ruby_set_pc PC: " << hex
+// // << SIM_get_program_counter(cpu) << // Maurice
+// // " NPC is: " << hex << SIM_read_register(cpu, 33) << " pc_val: " << paddr << dec << endl; // Maurice
+// }
+// }
+ assert(0);
+}
+
+// This version is called whenever we are about to return from SW handler
+void ruby_set_program_counter(void *cpu, void *parameter){
+// physical_address_t paddr;
+// paddr = (physical_address_t) parameter;
+// // Simics' processor number
+//// int proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
+// // SMT proc number
+// int smt_proc_no = proc_no / RubyConfig::numberofSMTThreads();
+//
+//// // SIM_set_program_counter() also sets the NPC to PC+4. // Maurice
+// // Need to ensure that NPC doesn't change especially for PCs in the branch delay slot
+//// uinteger_t npc_val = SIM_read_register(cpu, SIM_get_register_number(cpu, "npc")); // Maurice
+//// SIM_set_program_counter(cpu, paddr); // Maurice
+//// SIM_write_register(cpu, SIM_get_register_number(cpu, "npc"), npc_val); // Maurice
+//
+// //LUKE - notify Opal of PC change (ie end of all register updates and abort complete)
+// // I moved the register checkpoint restoration to here also, to jointly update the PC and the registers at the same time
+// if(XACT_MEMORY){
+// if( !OpalInterface::isOpalLoaded() ){
+// //using SimicsDriver
+// //we should only be running with 1 thread with Simics
+// ASSERT( proc_no == smt_proc_no );
+// SimicsDriver *simics_intf = dynamic_cast<SimicsDriver*>(g_system_ptr->getDriver());
+// ASSERT( simics_intf );
+// simics_intf->notifyTrapComplete(proc_no, Address( paddr ), 0 /* Simics uses 1 thread */ );
+// }
+// else{
+// //using OpalInterface
+// // g_system_ptr->getSequencer(smt_proc_no)->notifyTrapComplete( proc_no, Address(paddr) );
+// }
+// }
+// if (XACT_DEBUG && XACT_DEBUG_LEVEL > 1){
+// cout << g_eventQueue_ptr->getTime() << " " << proc_no
+// << " ruby_set_program_counter PC: " << hex
+//// << SIM_get_program_counter(cpu) << // Maurice
+//// " NPC is: " << hex << SIM_read_register(cpu, 33) << " pc_val: " << paddr << " npc_val: " << npc_val << dec << endl; // Maurice
+// }
+ assert(0);
+}
+
+void ruby_set_npc(void *cpu, void *parameter){
+// physical_address_t paddr;
+// paddr = (physical_address_t) parameter;
+// // int proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
+// // SMT proc number
+// int smt_proc_no = proc_no / RubyConfig::numberofSMTThreads();
+//
+// // SIM_write_register(cpu, SIM_get_register_number(cpu, "npc"), paddr); // Maurice
+// if (XACT_DEBUG && XACT_DEBUG_LEVEL > 1){
+// cout << g_eventQueue_ptr->getTime() << " " << proc_no
+// << " ruby_set_npc val: " << hex << paddr << " PC: " << hex
+// // << SIM_get_program_counter(cpu) << // Maurice
+// // " NPC is: " << hex << SIM_read_register(cpu, 33) << dec << endl; // Maurice
+// }
+ assert(0);
+}
+
+void ruby_continue_execution(void *cpu, void *parameter){
+// int logical_proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
+// int thread = logical_proc_no % RubyConfig::numberofSMTThreads();
+// int proc_no = logical_proc_no / RubyConfig::numberofSMTThreads();
+// g_system_ptr->getTransactionInterfaceManager(proc_no)->continueExecutionCallback(thread);
+ assert(0);
+}
+
+void ruby_restart_transaction(void *cpu, void *parameter){
+// int logical_proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
+// int thread = logical_proc_no % RubyConfig::numberofSMTThreads();
+// int proc_no = logical_proc_no / RubyConfig::numberofSMTThreads();
+// g_system_ptr->getTransactionInterfaceManager(proc_no)->restartTransactionCallback(thread);
+ assert(0);
+}
+
+void ruby_stall_proc(void *cpu, void *parameter){
+// int logical_proc_no = SIM_get_proc_no((processor_t*)cpu); // Maurice
+// int cycles = (uint64)parameter;
+
+// SIMICS_stall_proc(logical_proc_no, cycles);
+ assert(0);
+}
+
+void ruby_disable_processor(void *cpu, void *parameter){
+// int logical_proc_no = SIM_get_proc_no((processor_t*)cpu); // Maurice
+// SIMICS_disable_processor(logical_proc_no);
+ assert(0);
+}
+
diff --git a/src/mem/ruby/simics/interface.hh b/src/mem/ruby/simics/interface.hh
new file mode 100644
index 000000000..f8d9375d7
--- /dev/null
+++ b/src/mem/ruby/simics/interface.hh
@@ -0,0 +1,152 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: interface.h 1.33 05/01/19 13:12:32-06:00 mikem@maya.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#ifndef INTERFACE_H
+#define INTERFACE_H
+
+#include "Global.hh"
+#include "mf_api.hh"
+#include "Address.hh"
+
+// // Simics includes
+// extern "C" {
+// #include "simics/api.hh"
+// }
+
+typedef void memory_transaction_t;
+
+// simics memory access
+integer_t SIMICS_read_physical_memory(int procID, physical_address_t address,
+ int len );
+void SIMICS_read_physical_memory_buffer(int procID, physical_address_t addr,
+ char* buffer, int len );
+void SIMICS_write_physical_memory( int procID, physical_address_t address,
+ integer_t value, int len );
+void SIMICS_write_physical_memory_buffer(int procID, physical_address_t addr,
+ char* buffer, int len );
+bool SIMICS_check_memory_value(int procID, physical_address_t addr,
+ char* buffer, int len);
+const char *SIMICS_disassemble_physical( int procID, physical_address_t pa );
+
+// simics VM translation, decoding, etc.
+physical_address_t SIMICS_translate_address( int procID, Address address );
+physical_address_t SIMICS_translate_data_address( int procID, Address address );
+#ifdef SPARC
+bool SIMICS_is_ldda(const memory_transaction_t *mem_trans);
+#endif
+
+// simics timing
+void SIMICS_unstall_proc(int cpuNumber);
+void SIMICS_unstall_proc(int cpuNumber, int cycles);
+void SIMICS_stall_proc(int cpuNumber, int cycles);
+void SIMICS_post_stall_proc(int cpuNumber, int cycles);
+void SIMICS_wakeup_ruby();
+
+// simics callbacks
+void SIMICS_remove_ruby_callback( void );
+void SIMICS_install_timing_model( void );
+void SIMICS_remove_timing_model( void );
+void SIMICS_install_exception_callback( void );
+void SIMICS_remove_exception_callback( void );
+#ifdef SPARC
+void SIMICS_install_asi_callback( void );
+void SIMICS_remove_asi_callback( void );
+#endif
+
+// simics PC, IC
+integer_t SIMICS_get_insn_count( int cpuNumber );
+integer_t SIMICS_get_cycle_count(int cpuNumber);
+Address SIMICS_get_program_counter( void *cpu );
+Address SIMICS_get_program_counter( int procID );
+Address SIMICS_get_npc(int procID);
+void SIMICS_set_program_counter( int procID, Address newPC );
+void SIMICS_set_next_program_counter( int procID, Address newPC );
+void SIMICS_set_pc( int procID, Address newPC );
+void SIMICS_set_npc( int procID, Address newNPC );
+
+void SIMICS_post_continue_execution(int procID);
+void SIMICS_post_restart_transaction(int procID);
+
+// simics processor number
+int SIMICS_number_processors( void );
+void * SIMICS_current_processor( void );
+int SIMICS_current_processor_number( void );
+int SIMICS_get_proc_no( void *cpu );
+void* SIMICS_get_proc_ptr( int cpuNumber );
+
+// simics version
+void SIMICS_print_version(ostream& out);
+
+// opal
+mf_opal_api_t *SIMICS_get_opal_interface( void );
+
+// STC related, should not be used anymore!
+void SIMICS_flush_STC(int cpuNumber);
+void SIMICS_invalidate_from_STC(const Address& address, int cpuNumber);
+void SIMICS_downgrade_from_STC(const Address& address, int cpuNumber);
+
+// KM -- from Nikhil's SN code
+uinteger_t SIMICS_read_control_register(int cpuNumber, int registerNumber);
+uinteger_t SIMICS_read_window_register(int cpuNumber, int window, int registerNumber);
+uinteger_t SIMICS_read_global_register(int cpuNumber, int globals, int registerNumber);
+//uint64 SIMICS_read_fp_register_x(int cpuNumber, int registerNumber);
+
+// KM -- new version based on reg names
+int SIMICS_get_register_number(int cpuNumber, const char * reg_name);
+const char * SIMICS_get_register_name(int cpuNumber, int reg_num);
+uinteger_t SIMICS_read_register(int cpuNumber, int registerNumber);
+void SIMICS_write_register(int cpuNumber, int registerNumber, uinteger_t value);
+
+void SIMICS_write_control_register(int cpuNumber, int registerNumber, uinteger_t value);
+void SIMICS_write_window_register(int cpuNumber, int window, int registerNumber, uinteger_t value);
+void SIMICS_write_global_register(int cpuNumber, int globals, int registerNumber, uinteger_t value);
+void SIMICS_write_fp_register_x(int cpuNumber, int registerNumber, uint64 value);
+void SIMICS_enable_processor(int cpuNumber);
+void SIMICS_disable_processor(int cpuNumber);
+void SIMICS_post_disable_processor(int cpuNumber);
+bool SIMICS_processor_enabled(int cpuNumber);
+
+void ruby_abort_transaction(void *cpu, void *parameter);
+void ruby_set_program_counter(void *cpu, void *parameter);
+void ruby_set_pc(void *cpu, void *parameter);
+void ruby_set_npc(void *cpu, void *parameter);
+void ruby_continue_execution(void *cpu, void *parameter);
+void ruby_restart_transaction(void *cpu, void *parameter);
+void ruby_stall_proc(void *cpu, void *parameter);
+void ruby_disable_processor(void *cpu, void *parameter);
+
+#endif //INTERFACE_H
+
diff --git a/src/mem/ruby/simics/simics_api_dummy.c b/src/mem/ruby/simics/simics_api_dummy.c
new file mode 100644
index 000000000..e444b783c
--- /dev/null
+++ b/src/mem/ruby/simics/simics_api_dummy.c
@@ -0,0 +1,105 @@
+#include <assert.h>
+
+extern "C" {
+
+typedef int generic_transaction_t;
+typedef int generic_transaction;
+typedef int la_t;
+typedef int integer_t;
+typedef int uint64;
+typedef int attr_value_t;
+typedef int data_or_instr_t;
+typedef int sim_exception_t;
+typedef int processor_t;
+typedef int conf_object_t;
+typedef int conf_object;
+typedef int physical_address_t;
+typedef int logical_address_t;
+typedef int read_or_write_t;
+typedef int interface_t;
+typedef int set_error_t;
+typedef int ireg_t;
+typedef int pc_step_t;
+typedef int event_handler_t;
+typedef int lang_void;
+typedef int cycles_t;
+typedef int sync_t;
+typedef int FILE;
+typedef int va_list;
+typedef int log_object;
+typedef int hap_handle_t;
+typedef int str_hap_func_t;
+typedef int hap_type_t;
+typedef int cb_func_em_t;
+typedef int sync_t;
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SIM_number_processors() { assert(0); return; };
+void SIM_c_set_mem_op_value_buf(generic_transaction_t *mem_op, char *buf) { assert(0); return; };
+void SIM_c_get_mem_op_value_buf(generic_transaction_t *mem_op, char *buf) { assert(0); return; };
+sim_exception_t SIM_clear_exception(void) { assert(0); return 0; };
+processor_t *SIM_conf_object_to_processor(conf_object_t* obj) { assert(0); return 0; };
+processor_t *SIM_current_processor(void) { assert(0); return 0; };
+const char *SIM_disassemble(processor_t *cpu_ptr, physical_address_t pa, int type) { assert(0); return 0; };
+interface_t *SIM_get_interface(conf_object const *object, const char *interface_name) { assert(0); return 0; };
+conf_object_t *SIM_get_object(const char *name) { assert(0); return 0; };
+sim_exception_t SIM_get_pending_exception(void) { assert(0); return 0; };
+int SIM_get_proc_no(const processor_t *cpu_ptr) { assert(0); return 0; };
+la_t SIM_get_program_counter(processor_t *cpu) { assert(0); return 0; };
+const char *SIM_last_error(void) { assert(0); return 0; };
+physical_address_t SIM_logical_to_physical(conf_object *cpu_ptr, data_or_instr_t data_or_instr, logical_address_t address) { assert(0); return 0; };
+const char * SIM_get_exception_name( processor_t * p, int exc ) { assert(0); return 0;};
+processor_t *SIM_proc_no_2_ptr(int cpu_nr) { assert(0); return 0; };
+conf_object_t *SIM_processor_to_conf_object(processor_t* p) { assert(0); return 0; };
+ireg_t SIM_read_control_register(processor_t *cpu_ptr, int reg) { assert(0); return 0; };
+double SIM_read_fp_register_d(processor_t *cpu_ptr, int reg) { assert(0); return 0; };
+uint64 SIM_read_fp_register_x(processor_t *cpu_ptr, int reg) { assert(0); return 0; };
+ireg_t SIM_read_global_register(processor_t *cpu_ptr, int globals, int reg) { assert(0); return 0; };
+integer_t SIM_read_phys_memory(conf_object *cpu, physical_address_t address, int len) { assert(0); return 0; };
+ireg_t SIM_read_window_register(processor_t *cpu_ptr, int window, int reg) { assert(0); return 0; };
+set_error_t SIM_set_attribute(conf_object_t *object, char const *name, attr_value_t *value) { assert(0); return 0; };
+void SIM_free_attribute(attr_value_t *value) { assert(0); };
+void SIM_stall_cycle(conf_object_t *obj, cycles_t stall) { assert(0); return; };
+cycles_t SIM_stall_count(conf_object_t *obj) { assert(0); return 0; };
+void SIM_stall(conf_object_t *obj, cycles_t stall) { assert(0); return; };
+pc_step_t SIM_step_count(conf_object_t *p) { assert(0); return 0; };
+cycles_t SIM_cycle_count(conf_object_t *p) { assert(0); return 0; };
+cycles_t SIM_stalled_until(conf_object_t *p) { assert(0); return 0; };
+void SIM_time_clean(conf_object_t *obj, sync_t t, event_handler_t handler, lang_void * arg) { assert(0); return; };
+void SIM_time_post_cycle(conf_object_t * obj, cycles_t delta, sync_t sync, event_handler_t handler, lang_void * arg) { assert(0); return; };
+const char *SIM_version(void) { return 0; };
+void SIM_set_program_counter(conf_object_t *cpu, logical_address_t pc){assert(0);};
+void SIM_write_control_register(processor_t *cpu_ptr, int reg, ireg_t value) { assert(0); return; };
+void SIM_write_fp_register_x(processor_t *cpu_ptr, int reg, uint64 value) { assert(0); return; };
+void SIM_write_global_register(processor_t *cpu_ptr, int globals, int reg, ireg_t value) { assert(0); return; };
+void SIM_write_window_register(processor_t *cpu_ptr, int window, int reg, ireg_t value) { assert(0); return; };
+void SIM_write_phys_memory(conf_object *cpu, physical_address_t address, integer_t value, int len) { assert(0); };
+int __sparc_v9_vtvfprintf(FILE *stream, const char *format, va_list va) { assert(0); return 0; };
+int __l32_p32_vtvfprintf(FILE *stream, const char *format, va_list va) { assert(0); return 0; };
+int __l32_p64_vtvfprintf(FILE *stream, const char *format, va_list va) { assert(0); return 0; };
+int __l64_p64_vtvfprintf(FILE *stream, const char *format, va_list va) { assert(0); return 0; };
+void __sparc_v9_vtdebug_log_vararg(int lvl, log_object *dev, char const *str, va_list va) { assert (0); return; };
+hap_handle_t SIM_hap_add_callback(const char *id, str_hap_func_t cb, lang_void *data) { assert(0); return 0; };
+hap_type_t SIM_hap_get_number(const char *id) { assert(0); return 0; };
+void SIM_hap_delete_callback_id(hap_type_t hap, hap_handle_t hdl) { assert (0); return; };
+int SIM_flush(void) { assert(0); return 0; };
+void SIM_write_register(processor_t *cpu_ptr, int registerNumber, integer_t value){ assert(0); };
+integer_t SIM_read_register(processor_t *cpu_ptr, int registerNumber) { assert(0); return 0; };
+int SIM_get_register_number(processor_t *cpu_ptr, const char * register_name){ assert(0); return 0; }
+const char * SIM_get_register_name(processor_t *cpu_ptr, int reg_num){ assert(0); return 0; }
+
+void SIM_break_simulation(const char * msg){ assert(0); }
+void SIM_printf(const char *format, va_list ap){ assert(0); }
+set_error_t ruby_session_set( void *id, conf_object_t *obj,
+ attr_value_t *val, attr_value_t *idx ) { assert (0); return 0; };
+attr_value_t ruby_session_get( void *id, conf_object_t *obj,
+ attr_value_t *idx ) { assert (0); return 0; };
+ void SIM_stacked_post(conf_object_t *obj, event_handler_t, lang_void *arg){};
+ pc_step_t SIM_step_next_occurrence( conf_object_t * obj, event_handler_t, lang_void * arg){ assert(0); return 0;};
+void SIM_enable_processor(conf_object_t *p) { assert(0); };
+void SIM_disable_processor(conf_object_t *p) { assert(0); };
+int SIM_cpu_enabled(conf_object_t *p) { assert(0); };
+
+attr_value_t SIM_get_attribute(conf_object_t *object, const char *name) { assert(0); };
+} // extern "C"
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc b/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
new file mode 100644
index 000000000..3d0e70408
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
@@ -0,0 +1,45 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: See AbstractCacheEntry.h
+ *
+ */
+
+#include "AbstractCacheEntry.hh"
+
+AbstractCacheEntry::AbstractCacheEntry() {
+}
+
+// still need to define destructor for subclasses
+AbstractCacheEntry::~AbstractCacheEntry() {
+}
+
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
new file mode 100644
index 000000000..ff678d342
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
@@ -0,0 +1,81 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: Common base class for a cache entry.
+ *
+ */
+
+#ifndef AbstractCacheEntry_H
+#define AbstractCacheEntry_H
+
+#include "Global.hh"
+#include "Address.hh"
+#include "AccessPermission.hh"
+
+class AbstractCacheEntry {
+public:
+ // Constructors
+ AbstractCacheEntry();
+
+  // Pure virtual destructor prevents direct instantiation
+ virtual ~AbstractCacheEntry() = 0;
+
+ // Public Methods
+
+ // The methods below are those called by ruby runtime, add when it is
+ // absolutely necessary and should all be virtual function.
+
+
+ virtual void print(ostream& out) const = 0;
+
+ // Data Members (m_ prefix)
+ Address m_Address; // Address of this block, required by CacheMemory
+ Time m_LastRef; // Last time this block was referenced, required by CacheMemory
+ AccessPermission m_Permission; // Access permission for this block, required by CacheMemory
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const AbstractCacheEntry& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const AbstractCacheEntry& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //AbstractCacheEntry_H
+
diff --git a/src/mem/ruby/slicc_interface/AbstractChip.cc b/src/mem/ruby/slicc_interface/AbstractChip.cc
new file mode 100644
index 000000000..1f2eda741
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/AbstractChip.cc
@@ -0,0 +1,47 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: See AbstractChip.hh
+ *
+ */
+
+#include "AbstractChip.hh"
+
+AbstractChip::AbstractChip(NodeID id, Network* net_ptr) {
+ m_id = id;
+ m_net_ptr = net_ptr;
+ m_L1Cache_sequencer_vec.setSize(0);
+}
+
+// still need to be defined for subclasses
+AbstractChip::~AbstractChip() {
+}
diff --git a/src/mem/ruby/slicc_interface/AbstractChip.hh b/src/mem/ruby/slicc_interface/AbstractChip.hh
new file mode 100644
index 000000000..9d113a1e1
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/AbstractChip.hh
@@ -0,0 +1,126 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: Common base class for a machine chip.
+ *
+ */
+
+#ifndef ABSTRACT_CHIP_H
+#define ABSTRACT_CHIP_H
+
+#include "Global.hh"
+#include "NodeID.hh"
+#include "RubyConfig.hh"
+#include "L1Cache_Entry.hh"
+#include "Address.hh"
+#include "Vector.hh"
+
+class Network;
+class Sequencer;
+class StoreBuffer;
+class ENTRY;
+class MessageBuffer;
+class CacheRecorder;
+class TransactionInterfaceManager;
+
+template<class ENTRY> class CacheMemory;
+
+class AbstractChip {
+public:
+ // Constructors
+ AbstractChip(NodeID chip_number, Network* net_ptr);
+
+  // Pure virtual destructor prevents direct instantiation
+ virtual ~AbstractChip() = 0;
+
+ // Public Methods
+ NodeID getID() const { return m_id; };
+ Network* getNetwork() const { return m_net_ptr; };
+ Sequencer* getSequencer(int index) const { return m_L1Cache_sequencer_vec[index]; };
+ TransactionInterfaceManager* getTransactionInterfaceManager(int index) const { return m_L1Cache_xact_mgr_vec[index]; };
+ void setTransactionInterfaceManager(TransactionInterfaceManager* manager, int index) { m_L1Cache_xact_mgr_vec[index] = manager; }
+
+ // used when CHECK_COHERENCE is enabled. See System::checkGlobalCoherence()
+ virtual bool isBlockExclusive(const Address& addr) const { return false; }
+ virtual bool isBlockShared(const Address& addr) const { return false; }
+
+ // cache dump functions
+ virtual void recordCacheContents(CacheRecorder& tr) const = 0;
+ virtual void dumpCaches(ostream& out) const = 0;
+ virtual void dumpCacheData(ostream& out) const = 0;
+
+ virtual void printConfig(ostream& out) = 0;
+ virtual void print(ostream& out) const = 0;
+
+  // public data structures
+ Vector < CacheMemory<L1Cache_Entry>* > m_L1Cache_L1DcacheMemory_vec;
+ Vector < CacheMemory<L1Cache_Entry>* > m_L1Cache_L1IcacheMemory_vec;
+ Vector < CacheMemory<L1Cache_Entry>* > m_L1Cache_cacheMemory_vec;
+ Vector < CacheMemory<L1Cache_Entry>* > m_L1Cache_L2cacheMemory_vec;
+ Vector < CacheMemory<L1Cache_Entry>* > m_L2Cache_L2cacheMemory_vec;
+
+ // added so that the prefetcher and sequencer can access the L1 and L2 request queues
+ Vector < MessageBuffer* > m_L1Cache_optionalQueue_vec;
+ Vector < MessageBuffer* >m_L1Cache_mandatoryQueue_vec;
+
+ // TSO storebuffer
+ Vector < StoreBuffer* > m_L1Cache_storeBuffer_vec;
+
+ // TM transaction manager
+ Vector < TransactionInterfaceManager* > m_L1Cache_xact_mgr_vec;
+
+protected:
+
+ // Data Members (m_ prefix)
+ NodeID m_id; // Chip id
+ Network* m_net_ptr; // Points to the Network simulator
+ Vector < Sequencer* > m_L1Cache_sequencer_vec; // All chip should have a sequencer
+
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const AbstractChip& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const AbstractChip& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //ABSTRACT_CHIP_H
+
diff --git a/src/mem/ruby/slicc_interface/AbstractProtocol.hh b/src/mem/ruby/slicc_interface/AbstractProtocol.hh
new file mode 100644
index 000000000..d602f5e54
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/AbstractProtocol.hh
@@ -0,0 +1,60 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: Define all possible protocol parameters and their
+ * default value here. Normally, all parameters should
+ * have default value "false" means the feature of the
+ * protocol is turned off.
+ *
+ */
+
+#ifndef AbstractProtocol_H
+#define AbstractProtocol_H
+
+class AbstractProtocol {
+public:
+ // Constructors
+ AbstractProtocol() {};
+
+  // Pure virtual destructor with no definition anywhere:
+  // neither this class nor any subclass can be instantiated
+ virtual ~AbstractProtocol() = 0;
+
+ // Public Methods
+
+ // Data Members (m_ prefix)
+ static const bool m_CMP = false ;
+ static const bool m_TwoLevelCache = false ;
+};
+
+#endif //AbstractProtocol_H
+
diff --git a/src/mem/ruby/slicc_interface/Message.hh b/src/mem/ruby/slicc_interface/Message.hh
new file mode 100644
index 000000000..1604f1798
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/Message.hh
@@ -0,0 +1,91 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#ifndef MESSAGE_H
+#define MESSAGE_H
+
+#include "Global.hh"
+#include "RefCnt.hh"
+#include "RefCountable.hh"
+#include "EventQueue.hh"
+
+class Message;
+typedef RefCnt<Message> MsgPtr;
+
+class Message : public RefCountable {
+public:
+ // Constructors
+ Message() : RefCountable() { m_time = g_eventQueue_ptr->getTime(); m_LastEnqueueTime = g_eventQueue_ptr->getTime(); m_DelayedCycles = 0;}
+
+ // Destructor
+ virtual ~Message() { }
+
+ // Public Methods
+ virtual Message* clone() const = 0;
+ virtual void destroy() = 0;
+ virtual void print(ostream& out) const = 0;
+
+ void setDelayedCycles(const int& cycles) { m_DelayedCycles = cycles; }
+ const int& getDelayedCycles() const {return m_DelayedCycles;}
+ int& getDelayedCycles() {return m_DelayedCycles;}
+ void setLastEnqueueTime(const Time& time) { m_LastEnqueueTime = time; }
+ const Time& getLastEnqueueTime() const {return m_LastEnqueueTime;}
+ Time& getLastEnqueueTime() {return m_LastEnqueueTime;}
+
+ const Time& getTime() const { return m_time; }
+ void setTime(const Time& new_time) { m_time = new_time; }
+private:
+ // Private Methods
+
+ // Data Members (m_ prefix)
+ Time m_time;
+ Time m_LastEnqueueTime; // my last enqueue time
+ int m_DelayedCycles; // my delayed cycles
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Message& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Message& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //MESSAGE_H
diff --git a/src/mem/ruby/slicc_interface/NetworkMessage.hh b/src/mem/ruby/slicc_interface/NetworkMessage.hh
new file mode 100644
index 000000000..357b89402
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/NetworkMessage.hh
@@ -0,0 +1,115 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NetworkMessage.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef NetworkMessage_H
+#define NetworkMessage_H
+
+#include "Global.hh"
+#include "RefCnt.hh"
+#include "RefCountable.hh"
+#include "Message.hh"
+#include "MessageSizeType.hh"
+#include "NetDest.hh"
+
+class Address;
+
+class NetworkMessage;
+typedef RefCnt<NetworkMessage> NetMsgPtr;
+
+class NetworkMessage : public Message {
+public:
+ // Constructors
+ NetworkMessage()
+ :Message()
+ {
+ m_internal_dest_valid = false;
+ }
+
+ // Destructor
+ virtual ~NetworkMessage() { }
+
+ // Public Methods
+
+ virtual const NetDest& getDestination() const = 0;
+ virtual NetDest& getDestination() = 0;
+ virtual const MessageSizeType& getMessageSize() const = 0;
+ virtual MessageSizeType& getMessageSize() = 0;
+ // virtual const Address& getAddress() const = 0;
+ // virtual Address& getAddress() = 0;
+
+ const NetDest& getInternalDestination() const {
+ if (m_internal_dest_valid == false) {
+ return getDestination();
+ } else {
+ return m_internal_dest;
+ }
+ }
+
+ NetDest& getInternalDestination() {
+ if (m_internal_dest_valid == false) {
+ m_internal_dest = getDestination();
+ m_internal_dest_valid = true;
+ }
+ return m_internal_dest;
+ }
+
+ virtual void print(ostream& out) const = 0;
+
+private:
+ // Private Methods
+
+ // Data Members (m_ prefix)
+ NetDest m_internal_dest;
+ bool m_internal_dest_valid;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const NetworkMessage& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const NetworkMessage& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //NetworkMessage_H
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh b/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh
new file mode 100644
index 000000000..90f7a702b
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh
@@ -0,0 +1,425 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#ifndef COMPONENTMAPPINGFNS_H
+#define COMPONENTMAPPINGFNS_H
+
+#include "Global.hh"
+#include "RubyConfig.hh"
+#include "NodeID.hh"
+#include "MachineID.hh"
+#include "Address.hh"
+#include "Set.hh"
+#include "NetDest.hh"
+#include "GenericMachineType.hh"
+
+#ifdef MACHINETYPE_L1Cache
+#define MACHINETYPE_L1CACHE_ENUM MachineType_L1Cache
+#else
+#define MACHINETYPE_L1CACHE_ENUM MachineType_NUM
+#endif
+
+#ifdef MACHINETYPE_L2Cache
+#define MACHINETYPE_L2CACHE_ENUM MachineType_L2Cache
+#else
+#define MACHINETYPE_L2CACHE_ENUM MachineType_NUM
+#endif
+
+#ifdef MACHINETYPE_L3Cache
+#define MACHINETYPE_L3CACHE_ENUM MachineType_L3Cache
+#else
+#define MACHINETYPE_L3CACHE_ENUM MachineType_NUM
+#endif
+
+#ifdef MACHINETYPE_PersistentArbiter
+#define MACHINETYPE_PERSISTENTARBITER_ENUM MachineType_PersistentArbiter
+#else
+#define MACHINETYPE_PERSISTENTARBITER_ENUM MachineType_NUM
+#endif
+
+#ifdef MACHINETYPE_Collector
+#define MACHINETYPE_COLLECTOR_ENUM MachineType_Collector
+#else
+#define MACHINETYPE_COLLECTOR_ENUM MachineType_NUM
+#endif
+
+
+// used to determine the correct L1 set
+// input parameters are the address and number of set bits for the L1 cache
+// returns a value between 0 and the total number of L1 cache sets
+inline
+int map_address_to_L1CacheSet(const Address& addr, int cache_num_set_bits)
+{
+ return addr.bitSelect(RubyConfig::dataBlockBits(),
+ RubyConfig::dataBlockBits()+cache_num_set_bits-1);
+}
+
+// used to determine the correct L2 set
+// input parameters are the address and number of set bits for the L2 cache
+// returns a value between 0 and the total number of L2 cache sets
+inline
+int map_address_to_L2CacheSet(const Address& addr, int cache_num_set_bits)
+{
+ assert(cache_num_set_bits == L2_CACHE_NUM_SETS_BITS); // ensure the l2 bank mapping functions agree with l2 set bits
+
+ if (MAP_L2BANKS_TO_LOWEST_BITS) {
+ return addr.bitSelect(RubyConfig::dataBlockBits()+RubyConfig::L2CachePerChipBits(),
+ RubyConfig::dataBlockBits()+RubyConfig::L2CachePerChipBits()+cache_num_set_bits-1);
+ } else {
+ return addr.bitSelect(RubyConfig::dataBlockBits(),
+ RubyConfig::dataBlockBits()+cache_num_set_bits-1);
+ }
+}
+
+// input parameter is the base ruby node of the L1 cache
+// returns a value between 0 and total_L2_Caches_within_the_system
+inline
+MachineID map_L1CacheMachId_to_L2Cache(const Address& addr, MachineID L1CacheMachId)
+{
+ int L2bank = 0;
+ MachineID mach = {MACHINETYPE_L2CACHE_ENUM, 0};
+
+ if (RubyConfig::L2CachePerChipBits() > 0) {
+ if (MAP_L2BANKS_TO_LOWEST_BITS) {
+ L2bank = addr.bitSelect(RubyConfig::dataBlockBits(),
+ RubyConfig::dataBlockBits()+RubyConfig::L2CachePerChipBits()-1);
+ } else {
+ L2bank = addr.bitSelect(RubyConfig::dataBlockBits()+L2_CACHE_NUM_SETS_BITS,
+ RubyConfig::dataBlockBits()+L2_CACHE_NUM_SETS_BITS+RubyConfig::L2CachePerChipBits()-1);
+ }
+ }
+
+ assert(L2bank < RubyConfig::numberOfL2CachePerChip());
+ assert(L2bank >= 0);
+
+ mach.num = RubyConfig::L1CacheNumToL2Base(L1CacheMachId.num)*RubyConfig::numberOfL2CachePerChip() // base #
+ + L2bank; // bank #
+ assert(mach.num < RubyConfig::numberOfL2Cache());
+ return mach;
+}
+
+// used to determine the correct L2 bank
+// input parameter is the base ruby node of the L2 cache
+// returns a value between 0 and total_L2_Caches_within_the_system
+inline
+MachineID map_L2ChipId_to_L2Cache(const Address& addr, NodeID L2ChipId)
+{
+ assert(L2ChipId < RubyConfig::numberOfChips());
+
+ int L2bank = 0;
+ MachineID mach = {MACHINETYPE_L2CACHE_ENUM, 0};
+
+ if (RubyConfig::L2CachePerChipBits() > 0) {
+ if (MAP_L2BANKS_TO_LOWEST_BITS) {
+ L2bank = addr.bitSelect(RubyConfig::dataBlockBits(),
+ RubyConfig::dataBlockBits()+RubyConfig::L2CachePerChipBits()-1);
+ } else {
+ L2bank = addr.bitSelect(RubyConfig::dataBlockBits()+L2_CACHE_NUM_SETS_BITS,
+ RubyConfig::dataBlockBits()+L2_CACHE_NUM_SETS_BITS+RubyConfig::L2CachePerChipBits()-1);
+ }
+ }
+
+ assert(L2bank < RubyConfig::numberOfL2CachePerChip());
+ assert(L2bank >= 0);
+
+ mach.num = L2ChipId*RubyConfig::numberOfL2CachePerChip() // base #
+ + L2bank; // bank #
+ assert(mach.num < RubyConfig::numberOfL2Cache());
+ return mach;
+}
+
+// used to determine the home directory
+// returns a value between 0 and total_directories_within_the_system
+inline
+NodeID map_Address_to_DirectoryNode(const Address& addr)
+{
+ NodeID dirNode = 0;
+
+ if (RubyConfig::memoryBits() > 0) {
+ dirNode = addr.bitSelect(RubyConfig::dataBlockBits(),
+ RubyConfig::dataBlockBits()+RubyConfig::memoryBits()-1);
+ }
+
+ // Index indexHighPortion = address.bitSelect(MEMORY_SIZE_BITS-1, PAGE_SIZE_BITS+NUMBER_OF_MEMORY_MODULE_BITS);
+ // Index indexLowPortion = address.bitSelect(DATA_BLOCK_BITS, PAGE_SIZE_BITS-1);
+
+ //Index index = indexLowPortion | (indexHighPortion << (PAGE_SIZE_BITS - DATA_BLOCK_BITS));
+
+/*
+
+ADDRESS_WIDTH MEMORY_SIZE_BITS PAGE_SIZE_BITS DATA_BLOCK_BITS
+ | | | |
+ \ / \ / \ / \ / 0
+ -----------------------------------------------------------------------
+ | unused |xxxxxxxxxxxxxxx| |xxxxxxxxxxxxxxx| |
+ | |xxxxxxxxxxxxxxx| |xxxxxxxxxxxxxxx| |
+ -----------------------------------------------------------------------
+ indexHighPortion indexLowPortion
+ <------->
+ NUMBER_OF_MEMORY_MODULE_BITS
+ */
+
+ assert(dirNode < RubyConfig::numberOfMemories());
+ assert(dirNode >= 0);
+ return dirNode;
+}
+
+// used to determine the home directory
+// returns a value between 0 and total_directories_within_the_system
+inline
+MachineID map_Address_to_Directory(const Address &addr)
+{
+ MachineID mach = {MachineType_Directory, map_Address_to_DirectoryNode(addr)};
+ return mach;
+}
+
+inline
+MachineID map_Address_to_CentralArbiterNode(const Address& addr)
+{
+ MachineType t = MACHINETYPE_PERSISTENTARBITER_ENUM;
+ MachineID mach = {t, map_Address_to_DirectoryNode(addr)};
+
+ assert(mach.num < RubyConfig::numberOfMemories());
+ assert(mach.num >= 0);
+ return mach;
+}
+
+inline
+NetDest getMultiStaticL2BankNetDest(const Address& addr, const Set& sharers) // set of L2RubyNodes
+{
+ NetDest dest;
+
+ for (int i = 0; i < sharers.getSize(); i++) {
+ if (sharers.isElement(i)) {
+ dest.add(map_L2ChipId_to_L2Cache(addr,i));
+ }
+ }
+ return dest;
+}
+
+inline
+NetDest getOtherLocalL1IDs(MachineID L1)
+{
+ int start = (L1.num / RubyConfig::numberOfProcsPerChip()) * RubyConfig::numberOfProcsPerChip();
+ NetDest ret;
+
+ assert(MACHINETYPE_L1CACHE_ENUM != MachineType_NUM);
+
+ for (int i = start; i < (start + RubyConfig::numberOfProcsPerChip()); i++) {
+ if (i != L1.num) {
+ MachineID mach = { MACHINETYPE_L1CACHE_ENUM, i };
+ ret.add( mach );
+ }
+ }
+
+ return ret;
+}
+
+inline
+NetDest getLocalL1IDs(MachineID mach)
+{
+ assert(MACHINETYPE_L1CACHE_ENUM != MachineType_NUM);
+
+ NetDest ret;
+
+ if (mach.type == MACHINETYPE_L1CACHE_ENUM) {
+
+ int start = (mach.num / RubyConfig::numberOfL1CachePerChip()) * RubyConfig::numberOfProcsPerChip();
+
+ for (int i = start; i < (start + RubyConfig::numberOfProcsPerChip()); i++) {
+ MachineID mach = { MACHINETYPE_L1CACHE_ENUM, i };
+ ret.add( mach );
+ }
+ }
+ else if (mach.type == MACHINETYPE_L2CACHE_ENUM) {
+
+ int chip = mach.num/RubyConfig::numberOfL2CachePerChip();
+ int start = ( chip*RubyConfig::numberOfL1CachePerChip());
+ for (int i = start; i < (start + RubyConfig::numberOfL1CachePerChip()); i++) {
+ MachineID mach = { MACHINETYPE_L1CACHE_ENUM, i };
+ ret.add( mach );
+ }
+ }
+
+ return ret;
+}
+
+inline
+NetDest getExternalL1IDs(MachineID L1)
+{
+ NetDest ret;
+
+ assert(MACHINETYPE_L1CACHE_ENUM != MachineType_NUM);
+
+ for (int i = 0; i < RubyConfig::numberOfProcessors(); i++) {
+ // ret.add( (NodeID) i);
+ MachineID mach = { MACHINETYPE_L1CACHE_ENUM, i };
+ ret.add( mach );
+ }
+
+ ret.removeNetDest(getLocalL1IDs(L1));
+
+ return ret;
+}
+
+inline
+bool isLocalProcessor(MachineID thisId, MachineID tarID)
+{
+ int start = (thisId.num / RubyConfig::numberOfProcsPerChip()) * RubyConfig::numberOfProcsPerChip();
+
+ for (int i = start; i < (start + RubyConfig::numberOfProcsPerChip()); i++) {
+ if (i == tarID.num) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+inline
+NetDest getAllPertinentL2Banks(const Address& addr) // set of L2RubyNodes
+{
+ NetDest dest;
+
+ for (int i = 0; i < RubyConfig::numberOfChips(); i++) {
+ dest.add(map_L2ChipId_to_L2Cache(addr,i));
+ }
+ return dest;
+}
+
+inline
+bool isL1OnChip(MachineID L1machID, NodeID L2NodeID)
+{
+ if (L1machID.type == MACHINETYPE_L1CACHE_ENUM) {
+ return (L1machID.num == L2NodeID);
+ } else {
+ return false;
+ }
+}
+
+inline
+bool isL2OnChip(MachineID L2machID, NodeID L2NodeID)
+{
+ if (L2machID.type == MACHINETYPE_L2CACHE_ENUM) {
+ return (L2machID.num == L2NodeID);
+ } else {
+ return false;
+ }
+}
+
+inline
+NodeID closest_clockwise_distance(NodeID this_node, NodeID next_node)
+{
+ if (this_node <= next_node) {
+ return (next_node - this_node);
+ } else {
+ return (next_node - this_node + RubyConfig::numberOfChips());
+ }
+}
+
+inline
+bool closer_clockwise_processor(NodeID this_node, NodeID newer, NodeID older)
+{
+ return (closest_clockwise_distance(this_node, newer) < closest_clockwise_distance(this_node, older));
+}
+
+extern inline NodeID getChipID(MachineID L2machID)
+{
+ return (L2machID.num%RubyConfig::numberOfChips())/RubyConfig::numberOfProcsPerChip();
+}
+
+extern inline NodeID machineIDToNodeID(MachineID machID)
+{
+ // return machID.num%RubyConfig::numberOfChips();
+ return machID.num;
+}
+
+extern inline NodeID machineIDToVersion(MachineID machID)
+{
+ return machID.num/RubyConfig::numberOfChips();
+}
+
+extern inline MachineType machineIDToMachineType(MachineID machID)
+{
+ return machID.type;
+}
+
+extern inline NodeID L1CacheMachIDToProcessorNum(MachineID machID)
+{
+ assert(machID.type == MachineType_L1Cache);
+ return machID.num;
+}
+
+extern inline NodeID L2CacheMachIDToChipID(MachineID machID)
+{
+ assert(machID.type == MACHINETYPE_L2CACHE_ENUM);
+ return machID.num/RubyConfig::numberOfL2CachePerChip();
+}
+
+extern inline MachineID getCollectorDest(MachineID L1MachID)
+{
+ MachineID mach = {MACHINETYPE_COLLECTOR_ENUM, L1MachID.num};
+ return mach;
+}
+
+extern inline MachineID getCollectorL1Cache(MachineID colID)
+{
+ MachineID mach = {MACHINETYPE_L1CACHE_ENUM, colID.num};
+ return mach;
+}
+
+extern inline MachineID getL1MachineID(NodeID L1RubyNode)
+{
+ MachineID mach = {MACHINETYPE_L1CACHE_ENUM, L1RubyNode};
+ return mach;
+}
+
+extern inline GenericMachineType ConvertMachToGenericMach(MachineType machType) {
+ if (machType == MACHINETYPE_L1CACHE_ENUM) {
+ return GenericMachineType_L1Cache;
+ } else if (machType == MACHINETYPE_L2CACHE_ENUM) {
+ return GenericMachineType_L2Cache;
+ } else if (machType == MACHINETYPE_L3CACHE_ENUM) {
+ return GenericMachineType_L3Cache;
+ } else if (machType == MachineType_Directory) {
+ return GenericMachineType_Directory;
+ } else if (machType == MACHINETYPE_COLLECTOR_ENUM) {
+ return GenericMachineType_Collector;
+ } else {
+ ERROR_MSG("cannot convert to a GenericMachineType");
+ return GenericMachineType_NULL;
+ }
+}
+
+
+#endif // COMPONENTMAPPINGFNS_H
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.cc b/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.cc
new file mode 100644
index 000000000..19018c218
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.cc
@@ -0,0 +1,161 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * slicc_util.C
+ *
+ * Description: See slicc_util.h
+ *
+ * $Id$
+ *
+ */
+
+#include "Global.hh"
+#include "System.hh"
+#include "Profiler.hh"
+#include "AddressProfiler.hh"
+#include "Protocol.hh"
+#include "RubySlicc_Profiler_interface.hh"
+#include "RubySlicc_ComponentMapping.hh"
+// #include "TransactionInterfaceManager.hh"
+
+void profile_request(int cache_state, Directory_State directory_state, GenericRequestType request_type)
+{
+ string requestStr = L1Cache_State_to_string(L1Cache_State(cache_state))+":"+
+ Directory_State_to_string(directory_state)+":"+
+ GenericRequestType_to_string(request_type);
+ g_system_ptr->getProfiler()->profileRequest(requestStr);
+}
+
+void profile_request(const string& L1CacheState, const string& L2CacheState, const string& directoryState, const string& requestType)
+{
+ string requestStr = L1CacheState+":"+L2CacheState+":"+directoryState+":"+requestType;
+ g_system_ptr->getProfiler()->profileRequest(requestStr);
+}
+
+void profile_outstanding_request(int outstanding)
+{
+ g_system_ptr->getProfiler()->profileOutstandingRequest(outstanding);
+}
+
+void profile_outstanding_persistent_request(int outstanding)
+{
+ g_system_ptr->getProfiler()->profileOutstandingPersistentRequest(outstanding);
+}
+
+void profile_average_latency_estimate(int latency)
+{
+ g_system_ptr->getProfiler()->profileAverageLatencyEstimate(latency);
+}
+
+void profile_sharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner)
+{
+ g_system_ptr->getProfiler()->profileSharing(addr, type, requestor, sharers, owner);
+}
+
+void profile_miss(const CacheMsg& msg, NodeID id)
+{
+ // CMP profile address after L1 misses, not L2
+ ASSERT (!Protocol::m_CMP);
+ g_system_ptr->getProfiler()->addAddressTraceSample(msg, id);
+
+ g_system_ptr->getProfiler()->profileConflictingRequests(msg.getAddress());
+
+ g_system_ptr->getProfiler()->addSecondaryStatSample(msg.getType(),
+ msg.getAccessMode(), msg.getSize(), msg.getPrefetch(), id);
+}
+
+void profile_L1Cache_miss(const CacheMsg& msg, NodeID id)
+{
+ // only called by protocols assuming non-zero cycle hits
+ ASSERT (REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH);
+
+ g_system_ptr->getProfiler()->addPrimaryStatSample(msg, id);
+}
+
+void profileMsgDelay(int virtualNetwork, int delayCycles)
+{
+ g_system_ptr->getProfiler()->profileMsgDelay(virtualNetwork, delayCycles);
+}
+
+void profile_L2Cache_miss(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID nodeID)
+{
+ g_system_ptr->getProfiler()->addSecondaryStatSample(requestType, type, msgSize, pfBit, nodeID);
+}
+
+void profile_token_retry(const Address& addr, AccessType type, int count)
+{
+ g_system_ptr->getProfiler()->getAddressProfiler()->profileRetry(addr, type, count);
+}
+
+void profile_filter_action(int action)
+{
+ g_system_ptr->getProfiler()->profileFilterAction(action);
+}
+
+void profile_persistent_prediction(const Address& addr, AccessType type)
+{
+ g_system_ptr->getProfiler()->getAddressProfiler()->profilePersistentPrediction(addr, type);
+}
+
+void profile_multicast_retry(const Address& addr, int count)
+{
+ g_system_ptr->getProfiler()->profileMulticastRetry(addr, count);
+}
+
+void profileGetX(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor)
+{
+ g_system_ptr->getProfiler()->getAddressProfiler()->profileGetX(datablock, PC, owner, sharers, requestor);
+}
+
+void profileGetS(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor)
+{
+ g_system_ptr->getProfiler()->getAddressProfiler()->profileGetS(datablock, PC, owner, sharers, requestor);
+}
+
+void profileOverflow(const Address & addr, MachineID mach)
+{
+ if(mach.type == MACHINETYPE_L1CACHE_ENUM){
+ // for L1 overflows
+ int proc_num = L1CacheMachIDToProcessorNum(mach);
+ int chip_num = proc_num/RubyConfig::numberOfProcsPerChip();
+ assert(0);
+ // g_system_ptr->getChip(chip_num)->m_L1Cache_xact_mgr_vec[proc_num]->profileOverflow(addr, true);
+ }
+ else if(mach.type == MACHINETYPE_L2CACHE_ENUM){
+ // for L2 overflows
+ int chip_num = L2CacheMachIDToChipID(mach);
+ for(int p=0; p < RubyConfig::numberOfProcessors(); ++p){
+ assert(0);
+ // g_system_ptr->getChip(chip_num)->m_L1Cache_xact_mgr_vec[p]->profileOverflow(addr, false);
+ }
+ }
+}
+
+
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh b/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh
new file mode 100644
index 000000000..fa83c84c1
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh
@@ -0,0 +1,73 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * slicc_util.h
+ *
+ * Description: These are the functions that are exported to slicc from ruby.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef RUBYSLICC_PROFILER_INTERFACE_H
+#define RUBYSLICC_PROFILER_INTERFACE_H
+
+#include "Global.hh"
+#include "Profiler.hh"
+#include "Address.hh"
+#include "L1Cache_State.hh"
+#include "AccessType.hh"
+#include "GenericRequestType.hh"
+#include "Directory_State.hh"
+#include "NodeID.hh"
+
+class Set;
+
+void profile_request(int cache_state, Directory_State directory_state, GenericRequestType request_type);
+void profile_outstanding_persistent_request(int outstanding);
+void profile_outstanding_request(int outstanding);
+void profile_sharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner);
+void profile_request(const string& L1CacheStateStr, const string& L2CacheStateStr, const string& directoryStateStr, const string& requestTypeStr);
+void profile_miss(const CacheMsg& msg, NodeID id);
+void profile_L1Cache_miss(const CacheMsg& msg, NodeID id);
+void profile_L2Cache_miss(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID l2cacheID);
+void profile_token_retry(const Address& addr, AccessType type, int count);
+void profile_filter_action(int action);
+void profile_persistent_prediction(const Address& addr, AccessType type);
+void profile_average_latency_estimate(int latency);
+void profileMsgDelay(int virtualNetwork, int delayCycles);
+
+void profile_multicast_retry(const Address& addr, int count);
+void profileGetX(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor);
+void profileGetS(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor);
+
+void profileOverflow(const Address & addr, MachineID mach);
+
+#endif // RUBYSLICC_PROFILER_INTERFACE_H
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
new file mode 100644
index 000000000..a3233af5d
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
@@ -0,0 +1,219 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * slicc_util.h
+ *
+ * Description: These are the functions that are exported to slicc from ruby.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef SLICC_UTIL_H
+#define SLICC_UTIL_H
+
+#include "Global.hh"
+#include "Address.hh"
+#include "NodeID.hh"
+#include "MachineID.hh"
+#include "RubyConfig.hh"
+#include "CacheMsg.hh"
+#include "GenericRequestType.hh"
+#include "CacheRequestType.hh"
+#include "AccessType.hh"
+#include "MachineType.hh"
+#include "Directory_State.hh"
+#include "L1Cache_State.hh"
+#include "MessageSizeType.hh"
+#include "Network.hh"
+#include "PrefetchBit.hh"
+
+#include "RubySlicc_ComponentMapping.hh"
+
+class Set;
+class NetDest;
+
+extern inline int random(int n)
+{
+ return random() % n;
+}
+
+extern inline bool multicast_retry()
+{
+ if (RANDOMIZATION) {
+ return (random() & 0x1);
+ } else {
+ return true;
+ }
+}
+
+extern inline int cache_state_to_int(L1Cache_State state)
+{
+ return state;
+}
+
+extern inline Time get_time()
+{
+ return g_eventQueue_ptr->getTime();
+}
+
+extern inline Time zero_time()
+{
+ return 0;
+}
+
+extern inline NodeID intToID(int nodenum)
+{
+ NodeID id = nodenum;
+ return id;
+}
+
+extern inline int IDToInt(NodeID id)
+{
+ int nodenum = id;
+ return nodenum;
+}
+
+extern inline int addressToInt(Address addr)
+{
+ return (int) addr.getLineAddress();
+}
+
+extern inline int MessageSizeTypeToInt(MessageSizeType size_type)
+{
+ return MessageSizeType_to_int(size_type);
+}
+
+extern inline int numberOfNodes()
+{
+ return RubyConfig::numberOfChips();
+}
+
+extern inline int numberOfL1CachePerChip()
+{
+ return RubyConfig::numberOfL1CachePerChip();
+}
+
+extern inline bool long_enough_ago(Time event)
+{
+ return ((get_time() - event) > 200);
+}
+
+extern inline int getAddThenMod(int addend1, int addend2, int modulus)
+{
+ return (addend1 + addend2) % modulus;
+}
+
+extern inline Time getTimeModInt(Time time, int modulus)
+{
+ return time % modulus;
+}
+
+extern inline Time getTimePlusInt(Time addend1, int addend2)
+{
+ return (Time) addend1 + addend2;
+}
+
+extern inline Time getTimeMinusTime(Time t1, Time t2)
+{
+ ASSERT(t1 >= t2);
+ return t1 - t2;
+}
+
+extern inline Time getPreviousDelayedCycles(Time t1, Time t2)
+{
+ if (RANDOMIZATION) { // when randomizing delayed
+ return 0;
+ } else {
+ return getTimeMinusTime(t1, t2);
+ }
+}
+
+extern inline void WARN_ERROR_TIME(Time time)
+{
+ WARN_EXPR(time);
+}
+
+// Return type for time_to_int is "Time" and not "int" so we get a 64-bit integer
+extern inline Time time_to_int(Time time)
+{
+ return time;
+}
+
+
+extern inline bool getFilteringEnabled()
+{
+ return g_FILTERING_ENABLED;
+}
+
+extern inline int getRetryThreshold()
+{
+ return g_RETRY_THRESHOLD;
+}
+
+extern inline int getFixedTimeoutLatency()
+{
+ return g_FIXED_TIMEOUT_LATENCY;
+}
+
+extern inline int N_tokens()
+{
+ // return N+1 to handle clean writeback
+ return g_PROCS_PER_CHIP + 1;
+ // return 1;
+}
+
+extern inline bool distributedPersistentEnabled()
+{
+ return g_DISTRIBUTED_PERSISTENT_ENABLED;
+}
+
+extern inline bool getDynamicTimeoutEnabled()
+{
+ return g_DYNAMIC_TIMEOUT_ENABLED;
+}
+
+// Appends an offset to an address
+extern inline Address setOffset(Address addr, int offset)
+{
+ Address result = addr;
+ result.setOffset(offset);
+ return result;
+}
+
+// Makes an address into a line address
+extern inline Address makeLineAddress(Address addr)
+{
+ Address result = addr;
+ result.makeLineAddress();
+ return result;
+}
+
+#endif //SLICC_UTIL_H
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_includes.hh b/src/mem/ruby/slicc_interface/RubySlicc_includes.hh
new file mode 100644
index 000000000..8f552db29
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/RubySlicc_includes.hh
@@ -0,0 +1,9 @@
+#ifndef RUBYSLICC_INCLUDES_H
+#define RUBYSLICC_INCLUDES_H
+
+#include "RubySlicc_ComponentMapping.hh"
+#include "RubySlicc_Util.hh"
+#include "RubySlicc_Profiler_interface.hh"
+
+#endif
+
diff --git a/src/mem/ruby/system/AbstractBloomFilter.hh b/src/mem/ruby/system/AbstractBloomFilter.hh
new file mode 100644
index 000000000..3b0c703ae
--- /dev/null
+++ b/src/mem/ruby/system/AbstractBloomFilter.hh
@@ -0,0 +1,72 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * AbstractBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef ABSTRACT_BLOOM_FILTER_H
+#define ABSTRACT_BLOOM_FILTER_H
+
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+
+class AbstractBloomFilter {
+public:
+
+ virtual ~AbstractBloomFilter() {};
+ virtual void clear() = 0;
+ virtual void increment(const Address& addr) = 0;
+ virtual void decrement(const Address& addr) = 0;
+ virtual void merge(AbstractBloomFilter * other_filter) = 0;
+ virtual void set(const Address& addr) = 0;
+ virtual void unset(const Address& addr) = 0;
+
+ virtual bool isSet(const Address& addr) = 0;
+ virtual int getCount(const Address& addr) = 0;
+ virtual int getTotalCount() = 0;
+
+ virtual void print(ostream& out) const = 0;
+
+ virtual int getIndex(const Address& addr) = 0;
+ virtual int readBit(const int index) = 0;
+ virtual void writeBit(const int index, const int value) = 0;
+
+private:
+
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/AbstractMemOrCache.hh b/src/mem/ruby/system/AbstractMemOrCache.hh
new file mode 100644
index 000000000..a96a1328f
--- /dev/null
+++ b/src/mem/ruby/system/AbstractMemOrCache.hh
@@ -0,0 +1,42 @@
+
+/*
+ * AbstractMemOrCache.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef ABSTRACT_MEM_OR_CACHE_H
+#define ABSTRACT_MEM_OR_CACHE_H
+
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+
+class AbstractMemOrCache {
+public:
+
+ virtual ~AbstractMemOrCache() {};
+ virtual void setConsumer(Consumer* consumer_ptr) = 0;
+ virtual Consumer* getConsumer() = 0;
+
+ virtual void enqueue (const MsgPtr& message, int latency ) = 0;
+ virtual void enqueueMemRef (MemoryNode& memRef) = 0;
+ virtual void dequeue () = 0;
+ virtual const Message* peek () = 0;
+ virtual bool isReady () = 0;
+ virtual MemoryNode peekNode () = 0;
+ virtual bool areNSlotsAvailable (int n) = 0;
+ virtual void printConfig (ostream& out) = 0;
+ virtual void print (ostream& out) const = 0;
+ virtual void setDebug (int debugFlag) = 0;
+
+private:
+
+};
+
+
+#endif
+
diff --git a/src/mem/ruby/system/AbstractReplacementPolicy.hh b/src/mem/ruby/system/AbstractReplacementPolicy.hh
new file mode 100644
index 000000000..497226fad
--- /dev/null
+++ b/src/mem/ruby/system/AbstractReplacementPolicy.hh
@@ -0,0 +1,62 @@
+
+#ifndef ABSTRACTREPLACEMENTPOLICY_H
+#define ABSTRACTREPLACEMENTPOLICY_H
+
+#include "Global.hh"
+
+class AbstractReplacementPolicy {
+
+public:
+
+ AbstractReplacementPolicy(Index num_sets, Index assoc);
+ virtual ~AbstractReplacementPolicy();
+
+ /* touch a block. a.k.a. update timestamp */
+ virtual void touch(Index set, Index way, Time time) = 0;
+
+ /* returns the way to replace */
+ virtual Index getVictim(Index set) const = 0;
+
+ /* get the time of the last access */
+ Time getLastAccess(Index set, Index way);
+
+ protected:
+ unsigned int m_num_sets; /** total number of sets */
+ unsigned int m_assoc; /** set associativity */
+ Time **m_last_ref_ptr; /** timestamp of last reference */
+};
+
+inline
+AbstractReplacementPolicy::AbstractReplacementPolicy(Index num_sets, Index assoc)
+{
+ m_num_sets = num_sets;
+ m_assoc = assoc;
+ m_last_ref_ptr = new Time*[m_num_sets];
+ for(unsigned int i = 0; i < m_num_sets; i++){
+ m_last_ref_ptr[i] = new Time[m_assoc];
+ for(unsigned int j = 0; j < m_assoc; j++){
+ m_last_ref_ptr[i][j] = 0;
+ }
+ }
+}
+
+inline
+AbstractReplacementPolicy::~AbstractReplacementPolicy()
+{
+ if(m_last_ref_ptr != NULL){
+ for(unsigned int i = 0; i < m_num_sets; i++){
+ if(m_last_ref_ptr[i] != NULL){
+ delete[] m_last_ref_ptr[i];
+ }
+ }
+ delete[] m_last_ref_ptr;
+ }
+}
+
+inline
+Time AbstractReplacementPolicy::getLastAccess(Index set, Index way)
+{
+ return m_last_ref_ptr[set][way];
+}
+
+#endif // ABSTRACTREPLACEMENTPOLICY_H
diff --git a/src/mem/ruby/system/BlockBloomFilter.cc b/src/mem/ruby/system/BlockBloomFilter.cc
new file mode 100644
index 000000000..dbb0b5458
--- /dev/null
+++ b/src/mem/ruby/system/BlockBloomFilter.cc
@@ -0,0 +1,147 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BlockBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "BlockBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+BlockBloomFilter::BlockBloomFilter(string str)
+{
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_filter.setSize(m_filter_size);
+
+ clear();
+}
+
+BlockBloomFilter::~BlockBloomFilter(){
+}
+
+void BlockBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void BlockBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void BlockBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void BlockBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void BlockBloomFilter::set(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 1;
+}
+
+void BlockBloomFilter::unset(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 0;
+}
+
+bool BlockBloomFilter::isSet(const Address& addr)
+{
+ int i = get_index(addr);
+ return (m_filter[i]);
+}
+
+
+int BlockBloomFilter::getCount(const Address& addr)
+{
+ return m_filter[get_index(addr)];
+}
+
+int BlockBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ if (m_filter[i]) {
+ count++;
+ }
+ }
+ return count;
+}
+
+int BlockBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+void BlockBloomFilter::print(ostream& out) const
+{
+}
+
+int BlockBloomFilter::readBit(const int index) {
+ return m_filter[index];
+}
+
+void BlockBloomFilter::writeBit(const int index, const int value) {
+ m_filter[index] = value;
+}
+
+int BlockBloomFilter::get_index(const Address& addr)
+{
+ // Pull out some bit field ==> B1
+ // Pull out additional bits, not the same as B1 ==> B2
+ // XOR B1 and B2 to get hash index
+ physical_address_t block_bits = addr.bitSelect( RubyConfig::dataBlockBits(), 2*RubyConfig::dataBlockBits() - 1);
+ int offset = 5;
+ physical_address_t other_bits = addr.bitSelect( 2*RubyConfig::dataBlockBits() + offset, 2*RubyConfig::dataBlockBits() + offset + m_filter_size_bits - 1);
+ int index = block_bits ^ other_bits;
+ assert(index < m_filter_size);
+ return index;
+}
+
+
diff --git a/src/mem/ruby/system/BlockBloomFilter.hh b/src/mem/ruby/system/BlockBloomFilter.hh
new file mode 100644
index 000000000..82f457157
--- /dev/null
+++ b/src/mem/ruby/system/BlockBloomFilter.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BlockBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef BLOCK_BLOOM_FILTER_H
+#define BLOCK_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+class BlockBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~BlockBloomFilter();
+ BlockBloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+private:
+
+ int get_index(const Address& addr);
+
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/BulkBloomFilter.cc b/src/mem/ruby/system/BulkBloomFilter.cc
new file mode 100644
index 000000000..3408dfada
--- /dev/null
+++ b/src/mem/ruby/system/BulkBloomFilter.cc
@@ -0,0 +1,233 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BulkBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "BulkBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+BulkBloomFilter::BulkBloomFilter(string str)
+{
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+ // split the filter bits in half, c0 and c1
+ m_sector_bits = m_filter_size_bits - 1;
+
+ m_temp_filter.setSize(m_filter_size);
+ m_filter.setSize(m_filter_size);
+ clear();
+
+ // clear temp filter
+ for(int i=0; i < m_filter_size; ++i){
+ m_temp_filter[i] = 0;
+ }
+}
+
+BulkBloomFilter::~BulkBloomFilter(){
+
+}
+
+void BulkBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void BulkBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void BulkBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void BulkBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void BulkBloomFilter::set(const Address& addr)
+{
+ // c0 contains the cache index bits
+ int set_bits = m_sector_bits;
+ int block_bits = RubyConfig::dataBlockBits();
+ int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
+ // c1 contains the lower m_sector_bits permuted bits
+ //Address permuted_bits = permute(addr);
+ //int c1 = permuted_bits.bitSelect(0, set_bits-1);
+ int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
+ //ASSERT(c0 < (m_filter_size/2));
+ //ASSERT(c0 + (m_filter_size/2) < m_filter_size);
+ //ASSERT(c1 < (m_filter_size/2));
+ // set v0 bit
+ m_filter[c0 + (m_filter_size/2)] = 1;
+ // set v1 bit
+ m_filter[c1] = 1;
+}
+
+void BulkBloomFilter::unset(const Address& addr)
+{
+ // not used
+}
+
+bool BulkBloomFilter::isSet(const Address& addr)
+{
+ // c0 contains the cache index bits
+ int set_bits = m_sector_bits;
+ int block_bits = RubyConfig::dataBlockBits();
+ int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
+ // c1 contains the lower 10 permuted bits
+ //Address permuted_bits = permute(addr);
+ //int c1 = permuted_bits.bitSelect(0, set_bits-1);
+ int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
+ //ASSERT(c0 < (m_filter_size/2));
+ //ASSERT(c0 + (m_filter_size/2) < m_filter_size);
+ //ASSERT(c1 < (m_filter_size/2));
+ // set v0 bit
+ m_temp_filter[c0 + (m_filter_size/2)] = 1;
+ // set v1 bit
+ m_temp_filter[c1] = 1;
+
+ // perform filter intersection. If any c part is 0, no possibility of address being in signature.
+ // get first c intersection part
+ bool zero = false;
+ for(int i=0; i < m_filter_size/2; ++i){
+ // get intersection of signatures
+ m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
+ zero = zero || m_temp_filter[i];
+ }
+ zero = !zero;
+ if(zero){
+ // one section is zero, no possibility of address in signature
+ // reset bits we just set
+ m_temp_filter[c0 + (m_filter_size/2)] = 0;
+ m_temp_filter[c1] = 0;
+ return false;
+ }
+
+ // check second section
+ zero = false;
+ for(int i=m_filter_size/2; i < m_filter_size; ++i){
+ // get intersection of signatures
+ m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
+ zero = zero || m_temp_filter[i];
+ }
+ zero = !zero;
+ if(zero){
+ // one section is zero, no possibility of address in signature
+ m_temp_filter[c0 + (m_filter_size/2)] = 0;
+ m_temp_filter[c1] = 0;
+ return false;
+ }
+ // one section has at least one bit set
+ m_temp_filter[c0 + (m_filter_size/2)] = 0;
+ m_temp_filter[c1] = 0;
+ return true;
+}
+
+
+int BulkBloomFilter::getCount(const Address& addr)
+{
+ // not used
+ return 0;
+}
+
+int BulkBloomFilter::getTotalCount()
+{
+ int count = 0;
+ for (int i = 0; i < m_filter_size; i++) {
+ if (m_filter[i]) {
+ count++;
+ }
+ }
+ return count;
+}
+
+int BulkBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+int BulkBloomFilter::readBit(const int index) {
+ return 0;
+ // TODO
+}
+
+void BulkBloomFilter::writeBit(const int index, const int value) {
+ // TODO
+}
+
+void BulkBloomFilter::print(ostream& out) const
+{
+}
+
+int BulkBloomFilter::get_index(const Address& addr)
+{
+ return addr.bitSelect( RubyConfig::dataBlockBits(), RubyConfig::dataBlockBits() + m_filter_size_bits - 1);
+}
+
+Address BulkBloomFilter::permute(const Address & addr){
+ // permutes the original address bits according to Table 5
+ int block_offset = RubyConfig::dataBlockBits();
+ physical_address_t part1 = addr.bitSelect( block_offset, block_offset + 6 );
+ physical_address_t part2 = addr.bitSelect( block_offset + 9, block_offset + 9 );
+ physical_address_t part3 = addr.bitSelect( block_offset + 11, block_offset + 11 );
+ physical_address_t part4 = addr.bitSelect( block_offset + 17, block_offset + 17 );
+ physical_address_t part5 = addr.bitSelect( block_offset + 7, block_offset + 8 );
+ physical_address_t part6 = addr.bitSelect( block_offset + 10, block_offset + 10 );
+ physical_address_t part7 = addr.bitSelect( block_offset + 12, block_offset + 12 );
+ physical_address_t part8 = addr.bitSelect( block_offset + 13, block_offset + 13 );
+ physical_address_t part9 = addr.bitSelect( block_offset + 15, block_offset + 16 );
+ physical_address_t part10 = addr.bitSelect( block_offset + 18, block_offset + 20 );
+ physical_address_t part11 = addr.bitSelect( block_offset + 14, block_offset + 14 );
+
+ physical_address_t result = (part1 << 14 ) | (part2 << 13 ) | (part3 << 12 ) | (part4 << 11 ) | (part5 << 9) | (part6 << 8)
+ | (part7 << 7) | (part8 << 6) | (part9 << 4) | (part10 << 1) | (part11);
+ // assume 32 bit addresses (both virtual and physical)
+ // select the remaining high-order 11 bits
+ physical_address_t remaining_bits = (addr.bitSelect( block_offset + 21, 31 )) << 21;
+ result = result | remaining_bits;
+
+ return Address(result);
+}
diff --git a/src/mem/ruby/system/BulkBloomFilter.hh b/src/mem/ruby/system/BulkBloomFilter.hh
new file mode 100644
index 000000000..f05b83a87
--- /dev/null
+++ b/src/mem/ruby/system/BulkBloomFilter.hh
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BulkBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef BULK_BLOOM_FILTER_H
+#define BULK_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
// Bloom filter that hashes line addresses through the "bulk" bit
// permutation (see permute()) before indexing.  Implements the
// AbstractBloomFilter interface.
class BulkBloomFilter : public AbstractBloomFilter {
public:

  ~BulkBloomFilter();
  BulkBloomFilter(string config);

  void clear();
  void increment(const Address& addr);
  void decrement(const Address& addr);
  void merge(AbstractBloomFilter * other_filter);
  void set(const Address& addr);
  void unset(const Address& addr);

  bool isSet(const Address& addr);
  int getCount(const Address& addr);
  int getTotalCount();
  int getIndex(const Address& addr);
  // Raw access to individual filter slots (e.g. for checkpointing)
  int readBit(const int index);
  void writeBit(const int index, const int value);

  void print(ostream& out) const;

private:

  // index of addr within the filter; permute() applies the bulk hash
  int get_index(const Address& addr);
  Address permute(const Address & addr);

  Vector<int> m_filter;       // the filter slots themselves
  Vector<int> m_temp_filter;  // scratch storage -- usage not visible here

  int m_filter_size;          // number of slots in m_filter
  int m_filter_size_bits;     // log2 of m_filter_size (used by get_index)

  int m_sector_bits;

  int m_count_bits;
  int m_count;
};
+
+
+#endif
diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh
new file mode 100644
index 000000000..9344f1463
--- /dev/null
+++ b/src/mem/ruby/system/CacheMemory.hh
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * CacheMemory.h
+ *
+ * Description:
+ *
+ * $Id: CacheMemory.h,v 3.7 2004/06/18 20:15:15 beckmann Exp $
+ *
+ */
+
+#ifndef CACHEMEMORY_H
+#define CACHEMEMORY_H
+
+#include "AbstractChip.hh"
+#include "Global.hh"
+#include "AccessPermission.hh"
+#include "Address.hh"
+#include "CacheRecorder.hh"
+#include "CacheRequestType.hh"
+#include "Vector.hh"
+#include "DataBlock.hh"
+#include "MachineType.hh"
+#include "RubySlicc_ComponentMapping.hh"
+#include "PseudoLRUPolicy.hh"
+#include "LRUPolicy.hh"
+#include <vector>
+
// Set-associative cache of ENTRY objects (tag, permission, data),
// indexed through the Ruby address-mapping helpers.  Replacement is
// delegated to an AbstractReplacementPolicy (pseudo-LRU or LRU),
// selected at construction time.
template<class ENTRY>
class CacheMemory {
public:

  // Constructors
  CacheMemory(AbstractChip* chip_ptr, int numSetBits, int cacheAssoc, const MachineType machType, const string& description);

  // Destructor
  ~CacheMemory();

  // Public Methods
  void printConfig(ostream& out);

  // perform a cache access and see if we hit or not.  Return true on a hit.
  bool tryCacheAccess(const Address& address, CacheRequestType type, DataBlock*& data_ptr);

  // similar to above, but doesn't require full access check
  bool testCacheAccess(const Address& address, CacheRequestType type, DataBlock*& data_ptr);

  // tests to see if an address is present in the cache
  bool isTagPresent(const Address& address) const;

  // Returns true if there is:
  //   a) a tag match on this address, or
  //   b) an unused line in the same cache set
  bool cacheAvail(const Address& address) const;

  // find an unused entry and sets the tag appropriate for the address
  void allocate(const Address& address);

  // Explicitly free up this address
  void deallocate(const Address& address);

  // Returns with the physical address of the conflicting cache line
  Address cacheProbe(const Address& address) const;

  // looks an address up in the cache (asserts the tag is present)
  ENTRY& lookup(const Address& address);
  const ENTRY& lookup(const Address& address) const;

  // Get/Set permission of cache block
  AccessPermission getPermission(const Address& address) const;
  void changePermission(const Address& address, AccessPermission new_perm);

  // Hook for checkpointing the contents of the cache
  void recordCacheContents(CacheRecorder& tr) const;
  void setAsInstructionCache(bool is_icache) { m_is_instruction_cache = is_icache; }

  // Set this address to most recently used
  void setMRU(const Address& address);

  // Read/write raw bytes within the cached copy of a line
  void getMemoryValue(const Address& addr, char* value,
                      unsigned int size_in_bytes );
  void setMemoryValue(const Address& addr, char* value,
                      unsigned int size_in_bytes );

  // Print cache contents
  void print(ostream& out) const;
  void printData(ostream& out) const;

private:
  // Private Methods

  // convert a Address to its location in the cache
  Index addressToCacheSet(const Address& address) const;

  // Given a cache tag: returns the index of the tag in a set.
  // returns -1 if the tag is not found.
  int findTagInSet(Index line, const Address& tag) const;
  int findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const;

  // Private copy constructor and assignment operator (copying forbidden)
  CacheMemory(const CacheMemory& obj);
  CacheMemory& operator=(const CacheMemory& obj);

  // Data Members (m_prefix)
  AbstractChip* m_chip_ptr;
  MachineType m_machType;        // L1 vs L2: selects the set-index mapping
  string m_description;          // "<MachineType>_<description>" for dumps
  bool m_is_instruction_cache;   // affects recordCacheContents() record type

  // The first index is the # of cache lines.
  // The second index is the amount of associativity.
  Vector<Vector<ENTRY> > m_cache;

  AbstractReplacementPolicy *m_replacementPolicy_ptr;

  int m_cache_num_sets;
  int m_cache_num_set_bits;
  int m_cache_assoc;
};
+
+// Output operator declaration
+//ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
template<class ENTRY>
inline
ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj)
{
  // Delegate to print() and flush so dumps appear immediately.
  obj.print(out);
  out << flush;
  return out;
}
+
+
+// ****************************************************************
+
// Construct a cache of 2^numSetBits sets x cacheAssoc ways.  Every
// entry starts with a zero tag and NotPresent permission.  The
// replacement policy is chosen by the global g_REPLACEMENT_POLICY name.
template<class ENTRY>
inline
CacheMemory<ENTRY>::CacheMemory(AbstractChip* chip_ptr, int numSetBits,
                                int cacheAssoc, const MachineType machType, const string& description)

{
  //cout << "CacheMemory constructor numThreads = " << numThreads << endl;
  m_chip_ptr = chip_ptr;
  m_machType = machType;
  // Prefix the description with the machine type so dumps are unique.
  m_description = MachineType_to_string(m_machType)+"_"+description;
  m_cache_num_set_bits = numSetBits;
  m_cache_num_sets = 1 << numSetBits;
  m_cache_assoc = cacheAssoc;
  m_is_instruction_cache = false;

  m_cache.setSize(m_cache_num_sets);
  // NOTE(review): "PSEDUO_LRU" is misspelled, but it must match the
  // string used by the configuration -- do not fix one without the other.
  if(strcmp(g_REPLACEMENT_POLICY, "PSEDUO_LRU") == 0)
    m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
  else if(strcmp(g_REPLACEMENT_POLICY, "LRU") == 0)
    m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
  else
    assert(false);  // unrecognized replacement policy name
  for (int i = 0; i < m_cache_num_sets; i++) {
    m_cache[i].setSize(m_cache_assoc);
    for (int j = 0; j < m_cache_assoc; j++) {
      m_cache[i][j].m_Address.setAddress(0);
      m_cache[i][j].m_Permission = AccessPermission_NotPresent;
    }
  }


  // cout << "Before setting trans address list size" << endl;
  //create a trans address for each SMT thread
//  m_trans_address_list.setSize(numThreads);
//  for(int i=0; i < numThreads; ++i){
//    cout << "Setting list size for list " << i << endl;
//    m_trans_address_list[i].setSize(30);
//  }
  //cout << "CacheMemory constructor finished" << endl;
}
+
+template<class ENTRY>
+inline
+CacheMemory<ENTRY>::~CacheMemory()
+{
+ if(m_replacementPolicy_ptr != NULL)
+ delete m_replacementPolicy_ptr;
+}
+
// Print this cache's geometry (associativity, sets, derived byte
// sizes) to 'out' in the standard Ruby config-dump format.
template<class ENTRY>
inline
void CacheMemory<ENTRY>::printConfig(ostream& out)
{
  out << "Cache config: " << m_description << endl;
  out << " cache_associativity: " << m_cache_assoc << endl;
  out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl;
  const int cache_num_sets = 1 << m_cache_num_set_bits;
  out << " num_cache_sets: " << cache_num_sets << endl;
  out << " cache_set_size_bytes: " << cache_num_sets * RubyConfig::dataBlockBytes() << endl;
  out << " cache_set_size_Kbytes: "
      << double(cache_num_sets * RubyConfig::dataBlockBytes()) / (1<<10) << endl;
  out << " cache_set_size_Mbytes: "
      << double(cache_num_sets * RubyConfig::dataBlockBytes()) / (1<<20) << endl;
  out << " cache_size_bytes: "
      << cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc << endl;
  out << " cache_size_Kbytes: "
      << double(cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc) / (1<<10) << endl;
  out << " cache_size_Mbytes: "
      << double(cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc) / (1<<20) << endl;
}
+
+// PRIVATE METHODS
+
// convert a Address to its location in the cache
// Maps a line address to its set index using the L1 or L2 mapping
// helper selected by m_machType; aborts on any other machine type.
template<class ENTRY>
inline
Index CacheMemory<ENTRY>::addressToCacheSet(const Address& address) const
{
  assert(address == line_address(address));
  Index temp = -1;
  switch (m_machType) {
  case MACHINETYPE_L1CACHE_ENUM:
    temp = map_address_to_L1CacheSet(address, m_cache_num_set_bits);
    break;
  case MACHINETYPE_L2CACHE_ENUM:
    temp = map_address_to_L2CacheSet(address, m_cache_num_set_bits);
    break;
  default:
    ERROR_MSG("Don't recognize m_machType");
  }
  assert(temp < m_cache_num_sets);  // mapping must stay in range
  assert(temp >= 0);
  return temp;
}
+
+// Given a cache index: returns the index of the tag in a set.
+// returns -1 if the tag is not found.
+template<class ENTRY>
+inline
+int CacheMemory<ENTRY>::findTagInSet(Index cacheSet, const Address& tag) const
+{
+ assert(tag == line_address(tag));
+ // search the set for the tags
+ for (int i=0; i < m_cache_assoc; i++) {
+ if ((m_cache[cacheSet][i].m_Address == tag) &&
+ (m_cache[cacheSet][i].m_Permission != AccessPermission_NotPresent)) {
+ return i;
+ }
+ }
+ return -1; // Not found
+}
+
+// Given a cache index: returns the index of the tag in a set.
+// returns -1 if the tag is not found.
+template<class ENTRY>
+inline
+int CacheMemory<ENTRY>::findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const
+{
+ assert(tag == line_address(tag));
+ // search the set for the tags
+ for (int i=0; i < m_cache_assoc; i++) {
+ if (m_cache[cacheSet][i].m_Address == tag)
+ return i;
+ }
+ return -1; // Not found
+}
+
// PUBLIC METHODS

// Perform a cache access.  On a tag match the replacement state is
// touched and data_ptr is pointed at the line's data block; the access
// succeeds when the permission allows it (Read_Write always, Read_Only
// only for loads/ifetches).  On any failure data_ptr ends up NULL.
// NOTE(review): the replacement policy is updated even when the
// permission check subsequently fails -- confirm this is intended.
template<class ENTRY>
inline
bool CacheMemory<ENTRY>::tryCacheAccess(const Address& address,
                                        CacheRequestType type,
                                        DataBlock*& data_ptr)
{
  assert(address == line_address(address));
  DEBUG_EXPR(CACHE_COMP, HighPrio, address);
  Index cacheSet = addressToCacheSet(address);
  int loc = findTagInSet(cacheSet, address);
  if(loc != -1){ // Do we even have a tag match?
    ENTRY& entry = m_cache[cacheSet][loc];
    m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
    data_ptr = &(entry.getDataBlk());

    if(entry.m_Permission == AccessPermission_Read_Write) {
      return true;
    }
    if ((entry.m_Permission == AccessPermission_Read_Only) &&
        (type == CacheRequestType_LD || type == CacheRequestType_IFETCH)) {
      return true;
    }
    // The line must not be accessible; fall through to the miss path,
    // which resets data_ptr to NULL.
  }
  data_ptr = NULL;
  return false;
}
+
// Like tryCacheAccess(), but succeeds for any resident line regardless
// of read/write permission.  Still sets data_ptr and touches the
// replacement state on a tag match.
// NOTE(review): despite the "test" name this mutates LRU state, and
// 'type' is unused -- confirm callers expect both.
template<class ENTRY>
inline
bool CacheMemory<ENTRY>::testCacheAccess(const Address& address,
                                         CacheRequestType type,
                                         DataBlock*& data_ptr)
{
  assert(address == line_address(address));
  DEBUG_EXPR(CACHE_COMP, HighPrio, address);
  Index cacheSet = addressToCacheSet(address);
  int loc = findTagInSet(cacheSet, address);
  if(loc != -1){ // Do we even have a tag match?
    ENTRY& entry = m_cache[cacheSet][loc];
    m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
    data_ptr = &(entry.getDataBlk());

    // findTagInSet() already excludes NotPresent entries, so this
    // condition is always true when loc != -1.
    return (m_cache[cacheSet][loc].m_Permission != AccessPermission_NotPresent);
  }
  data_ptr = NULL;
  return false;
}
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::isTagPresent(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int location = findTagInSet(cacheSet, address);
+
+ if (location == -1) {
+ // We didn't find the tag
+ DEBUG_EXPR(CACHE_COMP, LowPrio, address);
+ DEBUG_MSG(CACHE_COMP, LowPrio, "No tag match");
+ return false;
+ }
+ DEBUG_EXPR(CACHE_COMP, LowPrio, address);
+ DEBUG_MSG(CACHE_COMP, LowPrio, "found");
+ return true;
+}
+
+// Returns true if there is:
+// a) a tag match on this address or there is
+// b) an unused line in the same cache "way"
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::cacheAvail(const Address& address) const
+{
+ assert(address == line_address(address));
+
+ Index cacheSet = addressToCacheSet(address);
+
+ for (int i=0; i < m_cache_assoc; i++) {
+ if (m_cache[cacheSet][i].m_Address == address) {
+ // Already in the cache
+ return true;
+ }
+
+ if (m_cache[cacheSet][i].m_Permission == AccessPermission_NotPresent) {
+ // We found an empty entry
+ return true;
+ }
+ }
+ return false;
+}
+
// Claim the first free way in the address's set: the entry is
// default-constructed, tagged with 'address', marked Invalid (present
// but with no access rights), and promoted to MRU.
// Requires: the tag is not already present and cacheAvail() is true.
template<class ENTRY>
inline
void CacheMemory<ENTRY>::allocate(const Address& address)
{
  assert(address == line_address(address));
  assert(!isTagPresent(address));
  assert(cacheAvail(address));
  DEBUG_EXPR(CACHE_COMP, HighPrio, address);

  // Find the first open slot
  Index cacheSet = addressToCacheSet(address);
  for (int i=0; i < m_cache_assoc; i++) {
    if (m_cache[cacheSet][i].m_Permission == AccessPermission_NotPresent) {
      m_cache[cacheSet][i] = ENTRY(); // Init entry
      m_cache[cacheSet][i].m_Address = address;
      m_cache[cacheSet][i].m_Permission = AccessPermission_Invalid;

      // newly allocated line starts as most recently used
      m_replacementPolicy_ptr->touch(cacheSet, i, g_eventQueue_ptr->getTime());

      return;
    }
  }
  // unreachable if cacheAvail() held above
  ERROR_MSG("Allocate didn't find an available entry");
}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::deallocate(const Address& address)
+{
+ assert(address == line_address(address));
+ assert(isTagPresent(address));
+ DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ lookup(address).m_Permission = AccessPermission_NotPresent;
+}
+
+// Returns with the physical address of the conflicting cache line
+template<class ENTRY>
+inline
+Address CacheMemory<ENTRY>::cacheProbe(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(!cacheAvail(address));
+
+ Index cacheSet = addressToCacheSet(address);
+ return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)].m_Address;
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline
+ENTRY& CacheMemory<ENTRY>::lookup(const Address& address)
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ assert(loc != -1);
+ return m_cache[cacheSet][loc];
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline
+const ENTRY& CacheMemory<ENTRY>::lookup(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ assert(loc != -1);
+ return m_cache[cacheSet][loc];
+}
+
+template<class ENTRY>
+inline
+AccessPermission CacheMemory<ENTRY>::getPermission(const Address& address) const
+{
+ assert(address == line_address(address));
+ return lookup(address).m_Permission;
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::changePermission(const Address& address, AccessPermission new_perm)
+{
+ assert(address == line_address(address));
+ lookup(address).m_Permission = new_perm;
+ assert(getPermission(address) == new_perm);
+}
+
+// Sets the most recently used bit for a cache block
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::setMRU(const Address& address)
+{
+ Index cacheSet;
+
+ cacheSet = addressToCacheSet(address);
+ m_replacementPolicy_ptr->touch(cacheSet,
+ findTagInSet(cacheSet, address),
+ g_eventQueue_ptr->getTime());
+}
+
// Replay every resident line into the CacheRecorder for checkpointing:
// Read_Only lines become IFETCH (icache) or LD records, Read_Write
// lines become ST records; all other permissions are skipped.
template<class ENTRY>
inline
void CacheMemory<ENTRY>::recordCacheContents(CacheRecorder& tr) const
{
  for (int i = 0; i < m_cache_num_sets; i++) {
    for (int j = 0; j < m_cache_assoc; j++) {
      AccessPermission perm = m_cache[i][j].m_Permission;
      CacheRequestType request_type = CacheRequestType_NULL;
      if (perm == AccessPermission_Read_Only) {
        if (m_is_instruction_cache) {
          request_type = CacheRequestType_IFETCH;
        } else {
          request_type = CacheRequestType_LD;
        }
      } else if (perm == AccessPermission_Read_Write) {
        request_type = CacheRequestType_ST;
      }

      if (request_type != CacheRequestType_NULL) {
        // Address(0) is a placeholder for the record's second address
        // argument -- TODO(review): confirm its semantics in addRecord.
        tr.addRecord(m_chip_ptr->getID(), m_cache[i][j].m_Address,
                     Address(0), request_type, m_replacementPolicy_ptr->getLastAccess(i, j));
      }
    }
  }
}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::print(ostream& out) const
+{
+ out << "Cache dump: " << m_description << endl;
+ for (int i = 0; i < m_cache_num_sets; i++) {
+ for (int j = 0; j < m_cache_assoc; j++) {
+ out << " Index: " << i
+ << " way: " << j
+ << " entry: " << m_cache[i][j] << endl;
+ }
+ }
+}
+
// Per-line data dumping is not implemented for this cache model; a
// fixed notice is printed instead.
template<class ENTRY>
inline
void CacheMemory<ENTRY>::printData(ostream& out) const
{
  out << "printData() not supported" << endl;
}
+
+template<class ENTRY>
+void CacheMemory<ENTRY>::getMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes ){
+ ENTRY entry = lookup(line_address(addr));
+ unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
+ for(unsigned int i=0; i<size_in_bytes; ++i){
+ value[i] = entry.m_DataBlk.getByte(i + startByte);
+ }
+}
+
+template<class ENTRY>
+void CacheMemory<ENTRY>::setMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes ){
+ ENTRY& entry = lookup(line_address(addr));
+ unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
+ assert(size_in_bytes > 0);
+ for(unsigned int i=0; i<size_in_bytes; ++i){
+ entry.m_DataBlk.setByte(i + startByte, value[i]);
+ }
+
+ entry = lookup(line_address(addr));
+}
+
+#endif //CACHEMEMORY_H
+
diff --git a/src/mem/ruby/system/DirectoryMemory.cc b/src/mem/ruby/system/DirectoryMemory.cc
new file mode 100644
index 000000000..a1ec38cd2
--- /dev/null
+++ b/src/mem/ruby/system/DirectoryMemory.cc
@@ -0,0 +1,175 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DirectoryMemory.C
+ *
+ * Description: See DirectoryMemory.h
+ *
+ * $Id$
+ *
+ */
+
+#include "System.hh"
+#include "Driver.hh"
+#include "DirectoryMemory.hh"
+#include "RubySlicc_Util.hh"
+#include "RubyConfig.hh"
+#include "Chip.hh"
+#include "interface.hh"
+
// Build the directory bank 'version' on the given chip.  The pointer
// table is sized for the whole memory module; individual
// Directory_Entry objects are allocated lazily in lookup().
DirectoryMemory::DirectoryMemory(Chip* chip_ptr, int version)
{
  m_chip_ptr = chip_ptr;
  m_version = version;
  // THIS DOESN'T SEEM TO WORK -- MRM
  // m_size = RubyConfig::memoryModuleBlocks()/RubyConfig::numberOfDirectory();
  m_size = RubyConfig::memoryModuleBlocks();
  assert(m_size > 0);
  // allocates an array of directory entry pointers & sets them to NULL
  // NOTE(review): with a standard operator new[], allocation failure
  // throws bad_alloc instead of returning NULL, so this check is dead.
  m_entries = new Directory_Entry*[m_size];
  if (m_entries == NULL) {
    ERROR_MSG("Directory Memory: unable to allocate memory.");
  }

  for (int i=0; i < m_size; i++) {
    m_entries[i] = NULL;
  }
}
+
+DirectoryMemory::~DirectoryMemory()
+{
+ // free up all the directory entries
+ for (int i=0; i < m_size; i++) {
+ if (m_entries[i] != NULL) {
+ delete m_entries[i];
+ m_entries[i] = NULL;
+ }
+ }
+
+ // free up the array of directory entries
+ delete[] m_entries;
+}
+
// Static method
// Print the global memory/module geometry (from RubyConfig) in the
// standard Ruby config-dump format.
void DirectoryMemory::printConfig(ostream& out)
{
  out << "Memory config:" << endl;
  out << " memory_bits: " << RubyConfig::memorySizeBits() << endl;
  out << " memory_size_bytes: " << RubyConfig::memorySizeBytes() << endl;
  out << " memory_size_Kbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<10) << endl;
  out << " memory_size_Mbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<20) << endl;
  out << " memory_size_Gbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<30) << endl;

  out << " module_bits: " << RubyConfig::memoryModuleBits() << endl;
  out << " module_size_lines: " << RubyConfig::memoryModuleBlocks() << endl;
  out << " module_size_bytes: " << RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes() << endl;
  out << " module_size_Kbytes: " << double(RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes()) / (1<<10) << endl;
  out << " module_size_Mbytes: " << double(RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes()) / (1<<20) << endl;
}
+
+// Public method
+bool DirectoryMemory::isPresent(PhysAddress address)
+{
+ return (map_Address_to_DirectoryNode(address) == m_chip_ptr->getID()*RubyConfig::numberOfDirectoryPerChip()+m_version);
+}
+
+Directory_Entry& DirectoryMemory::lookup(PhysAddress address)
+{
+ assert(isPresent(address));
+ Index index = address.memoryModuleIndex();
+
+ if (index < 0 || index > m_size) {
+ WARN_EXPR(m_chip_ptr->getID());
+ WARN_EXPR(address.getAddress());
+ WARN_EXPR(index);
+ WARN_EXPR(m_size);
+ ERROR_MSG("Directory Memory Assertion: accessing memory out of range.");
+ }
+ Directory_Entry* entry = m_entries[index];
+
+ // allocate the directory entry on demand.
+ if (entry == NULL) {
+ entry = new Directory_Entry;
+
+ // entry->getProcOwner() = m_chip_ptr->getID(); // FIXME - This should not be hard coded
+ // entry->getDirOwner() = true; // FIXME - This should not be hard-coded
+
+ // load the data from SimICS when first initalizing
+ if (g_SIMICS) {
+ if (DATA_BLOCK) {
+ physical_address_t physAddr = address.getAddress();
+
+ for(int j=0; j < RubyConfig::dataBlockBytes(); j++) {
+ int8 data_byte = (int8) SIMICS_read_physical_memory( m_chip_ptr->getID(),
+ physAddr + j, 1 );
+ //printf("SimICS, byte %d: %lld\n", j, data_byte );
+ entry->getDataBlk().setByte(j, data_byte);
+ }
+ DEBUG_EXPR(NODE_COMP, MedPrio,entry->getDataBlk());
+ }
+ }
+
+ // store entry to the table
+ m_entries[index] = entry;
+ }
+
+ return (*entry);
+}
+
+/*
+void DirectoryMemory::invalidateBlock(PhysAddress address)
+{
+ assert(isPresent(address));
+
+ Index index = address.memoryModuleIndex();
+
+ if (index < 0 || index > m_size) {
+ ERROR_MSG("Directory Memory Assertion: accessing memory out of range.");
+ }
+
+ if(m_entries[index] != NULL){
+ delete m_entries[index];
+ m_entries[index] = NULL;
+ }
+
+}
+*/
+
+void DirectoryMemory::print(ostream& out) const
+{
+ out << "Directory dump: " << endl;
+ for (int i=0; i < m_size; i++) {
+ if (m_entries[i] != NULL) {
+ out << i << ": ";
+ out << *m_entries[i] << endl;
+ }
+ }
+}
+
diff --git a/src/mem/ruby/system/DirectoryMemory.hh b/src/mem/ruby/system/DirectoryMemory.hh
new file mode 100644
index 000000000..7c0831af6
--- /dev/null
+++ b/src/mem/ruby/system/DirectoryMemory.hh
@@ -0,0 +1,91 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DirectoryMemory.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef DIRECTORYMEMORY_H
+#define DIRECTORYMEMORY_H
+
+#include "Global.hh"
+#include "Address.hh"
+#include "Directory_Entry.hh"
+
+class Chip;
+
// Directory state for one memory bank: a lazily-populated table of
// Directory_Entry pointers, one slot per memory-module block.
class DirectoryMemory {
public:
  // Constructors
  DirectoryMemory(Chip* chip_ptr, int version);

  // Destructor
  ~DirectoryMemory();

  // Public Methods
  static void printConfig(ostream& out);
  // true iff this bank (chip id x banks-per-chip + version) owns 'address'
  bool isPresent(PhysAddress address);
  // returns the entry, allocating it on first access
  Directory_Entry& lookup(PhysAddress address);

  void print(ostream& out) const;

private:
  // Private Methods

  // Private copy constructor and assignment operator (copying forbidden)
  DirectoryMemory(const DirectoryMemory& obj);
  DirectoryMemory& operator=(const DirectoryMemory& obj);

  // Data Members (m_ prefix)
  Directory_Entry **m_entries;  // lazily allocated; NULL until first lookup
  Chip* m_chip_ptr;
  int m_size;  // # of memory module blocks for this directory
  int m_version;
};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DirectoryMemory& obj);
+
+// ******************* Definitions *******************
+
// Output operator definition
// Streams the directory dump produced by DirectoryMemory::print().
extern inline
ostream& operator<<(ostream& out, const DirectoryMemory& obj)
{
  obj.print(out);
  out << flush;
  return out;
}
+
+#endif //DIRECTORYMEMORY_H
diff --git a/src/mem/ruby/system/GenericBloomFilter.cc b/src/mem/ruby/system/GenericBloomFilter.cc
new file mode 100644
index 000000000..38dd7f437
--- /dev/null
+++ b/src/mem/ruby/system/GenericBloomFilter.cc
@@ -0,0 +1,154 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * GenericBloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+
+#include "GenericBloomFilter.hh"
+#include "LSB_CountingBloomFilter.hh"
+#include "NonCountingBloomFilter.hh"
+#include "BulkBloomFilter.hh"
+#include "BlockBloomFilter.hh"
+#include "MultiGrainBloomFilter.hh"
+#include "MultiBitSelBloomFilter.hh"
+#include "H3BloomFilter.hh"
+
+GenericBloomFilter::GenericBloomFilter(AbstractChip* chip_ptr, string config)
+{
+ m_chip_ptr = chip_ptr;
+
+
+ // config is "<Type>_<params>": the head (up to the first '_') selects the
+ // concrete filter class, the tail is passed through to its constructor.
+ string tail(config);
+ string head = string_split(tail,'_');
+
+ if (head == "LSB_Counting" ) {
+ m_filter = new LSB_CountingBloomFilter(tail);
+ }
+ else if(head == "NonCounting" ) {
+ m_filter = new NonCountingBloomFilter(tail);
+ }
+ else if(head == "Bulk" ) {
+ m_filter = new BulkBloomFilter(tail);
+ }
+ else if(head == "Block") {
+ m_filter = new BlockBloomFilter(tail);
+ }
+ else if(head == "Multigrain"){
+ m_filter = new MultiGrainBloomFilter(tail);
+ }
+ else if(head == "MultiBitSel"){
+ m_filter = new MultiBitSelBloomFilter(tail);
+ }
+ else if(head == "H3"){
+ m_filter = new H3BloomFilter(tail);
+ }
+ else {
+ assert(0); // unknown filter type in config string
+ }
+}
+
+// Destructor: releases the concrete filter allocated in the constructor.
+GenericBloomFilter::~GenericBloomFilter()
+{
+ delete m_filter;
+}
+
+// Reset the underlying filter's contents.
+void GenericBloomFilter::clear()
+{
+ m_filter->clear();
+}
+
+// Forward to the concrete filter (meaningful for counting filters).
+void GenericBloomFilter::increment(const Address& addr)
+{
+ m_filter->increment(addr);
+}
+
+// Forward to the concrete filter (meaningful for counting filters).
+void GenericBloomFilter::decrement(const Address& addr)
+{
+ m_filter->decrement(addr);
+}
+
+// Merge another generic filter's contents into this one; the exact merge
+// semantics are defined by the concrete filter implementation.
+void GenericBloomFilter::merge(GenericBloomFilter * other_filter)
+{
+ m_filter->merge(other_filter->getFilter());
+}
+
+// Insert an address into the underlying filter.
+void GenericBloomFilter::set(const Address& addr)
+{
+ m_filter->set(addr);
+}
+
+// Remove an address; only valid for filter types that support removal.
+void GenericBloomFilter::unset(const Address& addr)
+{
+ m_filter->unset(addr);
+}
+
+// Membership test: true if the filter (possibly falsely) contains addr.
+bool GenericBloomFilter::isSet(const Address& addr)
+{
+ return m_filter->isSet(addr);
+}
+
+// Count associated with addr, as defined by the concrete filter.
+int GenericBloomFilter::getCount(const Address& addr)
+{
+ return m_filter->getCount(addr);
+}
+
+// Aggregate count over the whole filter.
+int GenericBloomFilter::getTotalCount()
+{
+ return m_filter->getTotalCount();
+}
+
+// Filter index for addr, as defined by the concrete filter.
+int GenericBloomFilter::getIndex(const Address& addr)
+{
+ return m_filter->getIndex(addr);
+}
+
+// Raw bit access, forwarded to the concrete filter.
+int GenericBloomFilter::readBit(const int index) {
+ return m_filter->readBit(index);
+}
+
+// Raw bit access, forwarded to the concrete filter.
+void GenericBloomFilter::writeBit(const int index, const int value) {
+ m_filter->writeBit(index, value);
+}
+
+// Delegate printing to the concrete filter.
+// (The original 'return' of a void expression was dropped for clarity.)
+void GenericBloomFilter::print(ostream& out) const
+{
+ m_filter->print(out);
+}
+
+
diff --git a/src/mem/ruby/system/GenericBloomFilter.hh b/src/mem/ruby/system/GenericBloomFilter.hh
new file mode 100644
index 000000000..91cfdfd6e
--- /dev/null
+++ b/src/mem/ruby/system/GenericBloomFilter.hh
@@ -0,0 +1,96 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * GenericBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef GENERIC_BLOOM_FILTER_H
+#define GENERIC_BLOOM_FILTER_H
+
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+class GenericBloomFilter {
+public:
+
+ // Constructors
+ // chip_ptr: chip this filter belongs to; config: "<Type>_<params>"
+ // string naming the concrete filter implementation to instantiate.
+ GenericBloomFilter(AbstractChip* chip_ptr, string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(GenericBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+ AbstractBloomFilter * getFilter(){
+ return m_filter;
+ }
+
+ bool isSet(const Address& addr);
+
+ int getCount(const Address& addr);
+
+ int getTotalCount();
+
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+ void printConfig(ostream& out) { out << "GenericBloomFilter" << endl; }
+
+ // Destructor
+ ~GenericBloomFilter();
+
+
+private:
+ // Private copy constructor and assignment operator (declared, never
+ // defined): the destructor deletes m_filter, so an implicit copy
+ // would lead to a double delete. Same pattern as DirectoryMemory.
+ GenericBloomFilter(const GenericBloomFilter& obj);
+ GenericBloomFilter& operator=(const GenericBloomFilter& obj);
+
+ AbstractChip* m_chip_ptr; // not owned
+ AbstractBloomFilter* m_filter; // owned; deleted in the destructor
+};
+
+// Output operator definition: stream a GenericBloomFilter via its print().
+extern inline
+ostream& operator<<(ostream& out, const GenericBloomFilter& obj)
+{
+ obj.print(out);
+ out << flush; // flush so interleaved debug output stays ordered
+ return out;
+}
+
+
+#endif
diff --git a/src/mem/ruby/system/H3BloomFilter.cc b/src/mem/ruby/system/H3BloomFilter.cc
new file mode 100644
index 000000000..43a47e873
--- /dev/null
+++ b/src/mem/ruby/system/H3BloomFilter.cc
@@ -0,0 +1,210 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * H3BloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "H3BloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+// Constructor. Config string format: "<filter_size>_<num_hashes>_<mode>"
+// where mode is "Regular" (each hash indexes the whole filter) or
+// "Parallel" (the filter is partitioned, one slice per hash function).
+H3BloomFilter::H3BloomFilter(string str)
+{
+ //TODO: change this ugly init code...
+ // NOTE(review): primes/mults/adds are initialized but the hash actually
+ // used below is hash_H3 (see the commented-out formula in get_index).
+ primes_list[0] = 9323;
+ primes_list[1] = 11279;
+ primes_list[2] = 10247;
+ primes_list[3] = 30637;
+ primes_list[4] = 25717;
+ primes_list[5] = 43711;
+
+ mults_list[0] = 255;
+ mults_list[1] = 29;
+ mults_list[2] = 51;
+ mults_list[3] = 3;
+ mults_list[4] = 77;
+ mults_list[5] = 43;
+
+ adds_list[0] = 841;
+ adds_list[1] = 627;
+ adds_list[2] = 1555;
+ adds_list[3] = 241;
+ adds_list[4] = 7777;
+ adds_list[5] = 65931;
+
+
+
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ // head contains filter size, tail contains bit offset from block number
+ m_filter_size = atoi(head.c_str());
+
+ head = string_split(tail, '_');
+ m_num_hashes = atoi(head.c_str());
+
+ if(tail == "Regular") {
+ isParallel = false;
+ } else if (tail == "Parallel") {
+ isParallel = true;
+ } else {
+ cout << "ERROR: Incorrect config string for MultiHash Bloom! :" << str << endl;
+ assert(0);
+ }
+
+ m_filter_size_bits = log_int(m_filter_size);
+
+ // In parallel mode each hash function owns filter_size/num_hashes bits.
+ m_par_filter_size = m_filter_size/m_num_hashes;
+ m_par_filter_size_bits = log_int(m_par_filter_size);
+
+ m_filter.setSize(m_filter_size);
+ clear();
+}
+
+// Destructor: nothing to release, m_filter is a by-value Vector member.
+H3BloomFilter::~H3BloomFilter(){
+}
+
+// Clear every bit in the filter.
+void H3BloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+// No-op: H3 is a non-counting filter.
+void H3BloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+// No-op: H3 is a non-counting filter.
+void H3BloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+// Bitwise-OR another H3 filter into this one.
+// NOTE: assumes the other filter is an H3BloomFilter of the same size;
+// the downcast is unchecked (static_cast replaces the old C-style cast).
+void H3BloomFilter::merge(AbstractBloomFilter * other_filter){
+ // assumes both filters are the same size!
+ H3BloomFilter * temp = static_cast<H3BloomFilter*>(other_filter);
+ for(int i=0; i < m_filter_size; ++i){
+ m_filter[i] |= (*temp)[i];
+ }
+
+}
+
+// Insert an address: set one bit per hash function.
+void H3BloomFilter::set(const Address& addr)
+{
+ for (int i = 0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ m_filter[idx] = 1;
+
+ //Profile hash value distribution
+ //g_system_ptr->getProfiler()->getXactProfiler()->profileHashValue(i, idx); // gem5:Arka decomissiong of log_tm
+ }
+}
+
+// Removal is unsupported in a plain (non-counting) Bloom filter;
+// reaching here is a programming error.
+void H3BloomFilter::unset(const Address& addr)
+{
+ // endl added so the diagnostic is terminated/flushed before the abort
+ cout << "ERROR: Unset should never be called in a Bloom filter" << endl;
+ assert(0);
+}
+
+// Membership test: addr is (possibly falsely) present only if every hash
+// position is set. Returns early on the first clear bit instead of
+// scanning all m_num_hashes positions as the original did.
+bool H3BloomFilter::isSet(const Address& addr)
+{
+ for (int i=0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ if (!m_filter[idx]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+// Non-counting filter: count is simply 1 if present, else 0.
+int H3BloomFilter::getCount(const Address& addr)
+{
+ return isSet(addr)? 1: 0;
+}
+
+// Not meaningful for a multi-hash filter; always returns 0.
+int H3BloomFilter::getIndex(const Address& addr)
+{
+ return 0;
+}
+
+// Stub: direct bit access is not implemented for H3; always returns 0.
+int H3BloomFilter::readBit(const int index) {
+ return 0;
+}
+
+// Stub: direct bit access is not implemented for H3; silently ignored.
+void H3BloomFilter::writeBit(const int index, const int value) {
+
+}
+
+// Population count: number of set bits across the whole filter.
+int H3BloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ count += m_filter[i];
+ }
+ return count;
+}
+
+// Intentionally empty: H3 has no printable summary.
+void H3BloomFilter::print(ostream& out) const
+{
+}
+
+// Map a line address to the bit index used by hash function i.
+// In parallel mode each hash indexes only its own slice of the filter.
+int H3BloomFilter::get_index(const Address& addr, int i)
+{
+ uint64 x = addr.getLineAddress();
+ //uint64 y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
+ int y = hash_H3(x,i);
+
+ if(isParallel) {
+ return (y % m_par_filter_size) + i*m_par_filter_size;
+ } else {
+ return y % m_filter_size;
+ }
+}
+
+// H3-class hash: for every set bit i of 'value', XOR in the precomputed
+// random word H3[i][index]. 'index' selects which of the 16 hash
+// functions from the static H3 table is used.
+int H3BloomFilter::hash_H3(uint64 value, int index) {
+ uint64 mask = 1;
+ uint64 val = value;
+ int result = 0;
+
+ for(int i = 0; i < 64; i++) {
+ if(val&mask) result ^= H3[i][index];
+ val = val >> 1;
+ }
+ return result;
+ }
+
diff --git a/src/mem/ruby/system/H3BloomFilter.hh b/src/mem/ruby/system/H3BloomFilter.hh
new file mode 100644
index 000000000..9da6cdef5
--- /dev/null
+++ b/src/mem/ruby/system/H3BloomFilter.hh
@@ -0,0 +1,1259 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * H3BloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef H3_BLOOM_FILTER_H
+#define H3_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "Profiler.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+static int H3[64][16] = {
+{
+33268410,
+395488709,
+311024285,
+456111753,
+181495008,
+119997521,
+220697869,
+433891432,
+755927921,
+515226970,
+719448198,
+349842774,
+269183649,
+463275672,
+429800228,
+521598937
+},
+{
+628677802,
+820947732,
+809435975,
+1024657192,
+887631270,
+412050215,
+391365090,
+324227279,
+318338329,
+1038393087,
+489807930,
+387366128,
+518096428,
+324184340,
+429376066,
+447109279
+},
+{
+599747653,
+404960623,
+103933604,
+946416030,
+656460913,
+925957005,
+1047665689,
+163552053,
+88359290,
+841315415,
+899833584,
+1067336680,
+348549994,
+464045876,
+270252128,
+829897652
+},
+{
+215495230,
+966696438,
+82589012,
+750102795,
+909780866,
+920285789,
+769759214,
+331966823,
+939936006,
+439950703,
+883794828,
+1009277508,
+61634610,
+741444350,
+98689608,
+524144422
+},
+{
+93868534,
+196958667,
+774076619,
+327921978,
+122538783,
+879785030,
+690748527,
+3498564,
+83163077,
+1027963025,
+582088444,
+466152216,
+312424878,
+550064499,
+646612667,
+561099434
+},
+{
+1002047931,
+395477707,
+821317480,
+890482112,
+697094476,
+263813044,
+840275189,
+469664185,
+795625845,
+211504898,
+99204277,
+1004491153,
+725930417,
+1064479221,
+893834767,
+839719181
+},
+{
+278507126,
+985111995,
+706462983,
+1042178726,
+123281719,
+963778122,
+500881056,
+726291104,
+134293026,
+568379664,
+317050609,
+533470307,
+1022365922,
+197645211,
+315125721,
+634827678
+},
+{
+219227366,
+553960647,
+870169525,
+322232839,
+508322497,
+648672696,
+249405795,
+883596102,
+476433133,
+541372919,
+646647793,
+1042679515,
+43242483,
+600187508,
+499866821,
+135713210
+},
+{
+52837162,
+96966684,
+401840460,
+1071661176,
+733560065,
+150035417,
+341319946,
+811582750,
+636173904,
+519054065,
+196321433,
+1028294565,
+882204070,
+522965093,
+48884074,
+117810166
+},
+{
+650860353,
+789534698,
+328813544,
+473250022,
+143128306,
+173196006,
+846958825,
+174632187,
+683273509,
+405459497,
+787235556,
+773873501,
+240110267,
+426797736,
+92043842,
+711789240
+},
+{
+586637493,
+5059646,
+398035664,
+6686087,
+498300175,
+948278148,
+681227731,
+592751744,
+572019677,
+558044722,
+589368271,
+695745538,
+1073416749,
+529192035,
+550984939,
+1070620580
+},
+{
+102904663,
+647598516,
+758863940,
+313426443,
+76504114,
+1050747783,
+708436441,
+563815069,
+224107668,
+875925186,
+167675944,
+926209739,
+279737287,
+1040288182,
+768184312,
+371708956
+},
+{
+683968868,
+1027427757,
+180781926,
+742898864,
+624078545,
+645659833,
+577225838,
+987150210,
+723410002,
+224013421,
+993286634,
+33188488,
+247264323,
+888018697,
+38048664,
+189037096
+},
+{
+475612146,
+426739285,
+873726278,
+529192871,
+607715202,
+388486246,
+987001312,
+474493980,
+259747270,
+417465536,
+217062395,
+392858482,
+563810075,
+137852805,
+1051814153,
+72895217
+},
+{
+71277086,
+785496675,
+500608842,
+89633426,
+274085706,
+248467935,
+838061983,
+48106147,
+773662506,
+49545328,
+9071573,
+100739031,
+602018002,
+904371654,
+534132064,
+332211304
+},
+{
+401893602,
+735125342,
+775548339,
+210224843,
+256081130,
+482894412,
+350801633,
+1035713633,
+429458128,
+327281409,
+739927752,
+359327650,
+886942880,
+847691759,
+752417993,
+359445596
+},
+{
+267472014,
+1050659620,
+1068232362,
+1049684368,
+17130239,
+690524969,
+793224378,
+14455158,
+423092885,
+873853424,
+430535778,
+7867877,
+309731959,
+370260786,
+862353083,
+403906850
+},
+{
+993077283,
+218812656,
+389234651,
+393202875,
+413116501,
+263300295,
+470013158,
+592730725,
+441847172,
+732392823,
+407574059,
+875664777,
+271347307,
+792954404,
+554774761,
+1022424300
+},
+{
+675919719,
+637054073,
+784720745,
+149714381,
+813144874,
+502525801,
+635436670,
+1003196587,
+160786091,
+947509775,
+969788637,
+26854073,
+257964369,
+63898568,
+539767732,
+772364518
+},
+{
+943076868,
+1021732472,
+697575075,
+15843624,
+617573396,
+534113303,
+122953324,
+964873912,
+942995378,
+87830944,
+1012914818,
+455484661,
+592160054,
+599844284,
+810394353,
+836812568
+},
+{
+688992674,
+279465370,
+731582262,
+687883235,
+438178468,
+80493001,
+342701501,
+663561405,
+23360106,
+531315007,
+508931618,
+36294623,
+231216223,
+840438413,
+255665680,
+663205938
+},
+{
+857265418,
+552630887,
+8173237,
+792122963,
+210140052,
+823124938,
+667709953,
+751538219,
+991957789,
+462064153,
+19070176,
+726604748,
+714567823,
+151147895,
+1012619677,
+697114353
+},
+{
+467105652,
+683256174,
+702387467,
+28730434,
+549942998,
+48712701,
+960519696,
+1008345587,
+679267717,
+370932249,
+880419471,
+352141567,
+331640403,
+598772468,
+95160685,
+812053015
+},
+{
+1053491323,
+430526562,
+1014938507,
+109685515,
+765949103,
+177288303,
+1034642653,
+485421658,
+71850281,
+981034542,
+61620389,
+601367920,
+504420930,
+220599168,
+583051998,
+158735752
+},
+{
+103033901,
+522494916,
+658494760,
+959206022,
+931348143,
+834510661,
+21542994,
+189699884,
+679327018,
+171983002,
+96774168,
+456133168,
+543103352,
+923945936,
+970074188,
+643658485
+},
+{
+566379913,
+805798263,
+840662512,
+820206124,
+796507494,
+223712542,
+118811519,
+662246595,
+809326534,
+416471323,
+748027186,
+161169753,
+739149488,
+276330378,
+924837051,
+964873733
+},
+{
+585882743,
+135502711,
+3386031,
+625631285,
+1068193307,
+270342640,
+432739484,
+556606453,
+826419155,
+1038540977,
+158000202,
+69109538,
+207087256,
+298111218,
+678046259,
+184611498
+},
+{
+305310710,
+46237988,
+855726974,
+735975153,
+930663798,
+425764232,
+104362407,
+391371443,
+867622101,
+71645091,
+61824734,
+661902640,
+293738633,
+309416189,
+281710675,
+879317360
+},
+{
+398146324,
+398293087,
+689145387,
+1038451703,
+521637478,
+516134620,
+314658937,
+830334981,
+583400300,
+340083705,
+68029852,
+675389876,
+994635780,
+788959180,
+406967042,
+74403607
+},
+{
+69463153,
+744427484,
+191639960,
+590927798,
+969916795,
+546846769,
+728756758,
+889355646,
+520855076,
+136068426,
+776132410,
+189663815,
+252051082,
+533662856,
+362198652,
+1026161384
+},
+{
+584984279,
+1004834381,
+568439705,
+834508761,
+21812513,
+670870173,
+1052043300,
+341868768,
+473755574,
+124339439,
+36193947,
+437997647,
+137419489,
+58705193,
+337793711,
+340738909
+},
+{
+898051466,
+512792906,
+234874060,
+655358775,
+683745319,
+671676404,
+428888546,
+639928192,
+672697722,
+176477579,
+747020991,
+758211282,
+443045009,
+205395173,
+1016944273,
+5584717
+},
+{
+156038300,
+138620174,
+588466825,
+1061494056,
+1013672100,
+1064257198,
+881417791,
+839470738,
+83519030,
+100875683,
+237486447,
+461483733,
+681527127,
+777996147,
+574635362,
+815974538
+},
+{
+184168473,
+519509808,
+62531892,
+51821173,
+43787358,
+385711644,
+141325169,
+36069511,
+584183031,
+571372909,
+671503175,
+226486781,
+194932686,
+1045460970,
+753718579,
+331442433
+},
+{
+73065106,
+1015327221,
+630916840,
+1058053470,
+306737587,
+296343219,
+907194989,
+920172546,
+224516225,
+818625553,
+551143849,
+634570650,
+432966225,
+756438259,
+939564853,
+767999933
+},
+{
+884775648,
+394862257,
+446787794,
+219833788,
+727195727,
+728122304,
+249888353,
+732947974,
+289908868,
+448282580,
+618161877,
+898939716,
+739554163,
+860631799,
+1058977530,
+86916736
+},
+{
+143850006,
+352708694,
+200194048,
+979764914,
+629404175,
+546279766,
+72106714,
+860980514,
+313190585,
+897143111,
+308425797,
+953791785,
+349924906,
+221457005,
+950588925,
+908254505
+},
+{
+950032043,
+829868728,
+68623614,
+714624605,
+69760597,
+297275854,
+355894016,
+985369737,
+882852618,
+864071289,
+958512902,
+950910111,
+991368991,
+829645051,
+434698210,
+771350575
+},
+{
+552695074,
+319195551,
+80297396,
+496413831,
+944046531,
+621525571,
+617653363,
+416729825,
+441842808,
+9847464,
+99420657,
+1033914550,
+812966458,
+937053011,
+673390195,
+934577365
+},
+{
+1034695843,
+190969665,
+332900185,
+51897434,
+523888639,
+883512843,
+146908572,
+506785674,
+565814307,
+692255649,
+314052926,
+826386588,
+430691325,
+866927620,
+413880214,
+936474339
+},
+{
+129380164,
+741739952,
+1013703462,
+494392795,
+957214600,
+1010879043,
+931790677,
+94551922,
+988065869,
+120637871,
+882506912,
+395075379,
+210570485,
+812422692,
+910383687,
+817722285
+},
+{
+51850866,
+283408630,
+1053047202,
+858940389,
+818507731,
+477082181,
+353546901,
+993324368,
+407093779,
+231608253,
+1067319867,
+73159811,
+429792535,
+971320614,
+565699344,
+718823399
+},
+{
+408185106,
+491493570,
+596050720,
+310776444,
+703628192,
+454438809,
+523988035,
+728512200,
+686012353,
+976339656,
+72816924,
+116926720,
+165866591,
+452043792,
+866943072,
+968545481
+},
+{
+443231195,
+905907843,
+1061421320,
+746360489,
+1043120338,
+1069659155,
+463359031,
+688303227,
+186550710,
+155347339,
+1044842421,
+1005904570,
+69332909,
+706951903,
+422513657,
+882038450
+},
+{
+430990623,
+946501980,
+742556791,
+278398643,
+183759217,
+659404315,
+279754382,
+1069347846,
+843746517,
+222777670,
+990835599,
+548741637,
+129220580,
+1392170,
+1032654091,
+894058935
+},
+{
+452042227,
+751640705,
+259481376,
+765824585,
+145991469,
+1013683228,
+1055491225,
+536379588,
+392593350,
+913368594,
+1029429776,
+226857786,
+31505342,
+1054416381,
+32341741,
+687106649
+},
+{
+404750944,
+811417027,
+869530820,
+773491060,
+810901282,
+979340397,
+1036910290,
+461764404,
+834235095,
+765695033,
+604692390,
+452158120,
+928988098,
+442719218,
+1024059719,
+167723114
+},
+{
+974245177,
+1046377300,
+1003424287,
+787349855,
+336314155,
+875074696,
+1018462718,
+890313003,
+367376809,
+86355556,
+1020618772,
+890710345,
+444741481,
+373230261,
+767064947,
+840920177
+},
+{
+719581124,
+431808156,
+138301690,
+668222575,
+497413494,
+740492013,
+485033226,
+125301442,
+831265111,
+879071459,
+341690480,
+152975256,
+850330086,
+717444507,
+694225877,
+785340566
+},
+{
+1032766252,
+140959364,
+737474726,
+1062767538,
+364464647,
+331414723,
+356152634,
+642832379,
+158733632,
+374691640,
+285504811,
+345349905,
+876599880,
+476392727,
+479589210,
+606376325
+},
+{
+174997730,
+778177086,
+319164313,
+163614456,
+10331364,
+599358958,
+8331663,
+237538058,
+159173957,
+174533880,
+65588684,
+878222844,
+424467599,
+901803515,
+187504218,
+776690353
+},
+{
+803856182,
+965850321,
+694948067,
+218315960,
+358416571,
+683713254,
+178069303,
+428076035,
+686176454,
+579553217,
+357306738,
+315018080,
+886852373,
+568563910,
+896839725,
+257416821
+},
+{
+401650013,
+183289141,
+497957228,
+879734476,
+265024455,
+825794561,
+889237440,
+323359863,
+100258491,
+991414783,
+313986632,
+85847250,
+362520248,
+276103512,
+1041630342,
+525981595
+},
+{
+487732740,
+46201705,
+990837834,
+62744493,
+1067364756,
+58015363,
+690846283,
+680262648,
+997278956,
+469357861,
+432164624,
+996763915,
+211907847,
+167824295,
+144928194,
+454839915
+},
+{
+41404232,
+514493300,
+259546924,
+578217256,
+972345130,
+123299213,
+346040332,
+1014668104,
+520910639,
+579955198,
+36627803,
+179072921,
+547684341,
+598950511,
+269497394,
+854352266
+},
+{
+603906768,
+100863318,
+708837659,
+204175569,
+375560904,
+908375384,
+28314106,
+6303733,
+175283124,
+749851198,
+308667367,
+415293931,
+225365403,
+1032188331,
+977112710,
+819705229
+},
+{
+399767123,
+697985692,
+356790426,
+643687584,
+298624218,
+185095167,
+381653926,
+876816342,
+296720023,
+2205879,
+235816616,
+521850105,
+622753786,
+1021421218,
+726349744,
+256504902
+},
+{
+851245024,
+1022500222,
+511909628,
+313809625,
+99776025,
+39710175,
+798739932,
+741832408,
+140631966,
+898295927,
+607660421,
+870669312,
+1051422478,
+789055529,
+669113756,
+681943450
+},
+{
+853872755,
+491465269,
+503341472,
+98019440,
+258267420,
+335602837,
+320687824,
+1053324395,
+24932389,
+955011453,
+934255131,
+435625663,
+501568768,
+238967025,
+549987406,
+248619780
+},
+{
+411151284,
+576471205,
+757985419,
+544137226,
+968135693,
+877548443,
+194586894,
+74882373,
+248353663,
+21207540,
+273789651,
+853653916,
+861267970,
+533253322,
+3739570,
+661358586
+},
+{
+271430986,
+71390029,
+257643671,
+949329860,
+348156406,
+251939238,
+445808698,
+48269799,
+907589462,
+105677619,
+635451508,
+20805932,
+464874661,
+7542147,
+243619464,
+288304568
+},
+{
+368215982,
+530288964,
+770090421,
+660961164,
+614935537,
+630760399,
+931299233,
+794519275,
+779918979,
+401746493,
+561237006,
+1027202224,
+258968003,
+339508073,
+1050610516,
+1064307013
+},
+{
+1039172162,
+448331205,
+928997884,
+49813151,
+198712120,
+992335354,
+671024050,
+879525220,
+745915336,
+1038822580,
+138669665,
+917958819,
+681422342,
+792868818,
+924762727,
+816386174
+},
+{
+515190336,
+313808618,
+441296783,
+1022120897,
+792325033,
+354387581,
+59273006,
+280075434,
+411357221,
+665274694,
+4054464,
+1059046246,
+394261773,
+848616745,
+15446017,
+517723271
+}};
+
+
+// Bloom filter using the H3 family of XOR-based hash functions
+// (random words in the static H3 table above).
+class H3BloomFilter : public AbstractBloomFilter {
+public:
+
+ ~H3BloomFilter();
+ // config: "<filter_size>_<num_hashes>_<Regular|Parallel>"
+ H3BloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ void print(ostream& out) const;
+
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ // Read-only access to a single filter bit (used by merge).
+ int operator[](const int index) const{
+ return this->m_filter[index];
+ }
+
+private:
+
+ // Bit index for 'addr' under hash function 'hashNumber'.
+ int get_index(const Address& addr, int hashNumber);
+
+ int hash_H3(uint64 value, int index);
+
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_num_hashes;
+ int m_filter_size_bits;
+
+ // Parallel mode: per-hash slice size of the filter.
+ int m_par_filter_size;
+ int m_par_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+
+
+
+ int primes_list[6];// = {9323,11279,10247,30637,25717,43711};
+ int mults_list[6]; //= {255,29,51,3,77,43};
+ int adds_list[6]; //= {841,627,1555,241,7777,65931};
+
+ bool isParallel;
+
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/LRUPolicy.hh b/src/mem/ruby/system/LRUPolicy.hh
new file mode 100644
index 000000000..ea621bf4b
--- /dev/null
+++ b/src/mem/ruby/system/LRUPolicy.hh
@@ -0,0 +1,65 @@
+
+#ifndef LRUPOLICY_H
+#define LRUPOLICY_H
+
+#include "AbstractReplacementPolicy.hh"
+
+/* Simple true LRU replacement policy */
+
+class LRUPolicy : public AbstractReplacementPolicy {
+ public:
+
+ LRUPolicy(Index num_sets, Index assoc);
+ ~LRUPolicy();
+
+ // Record a reference to (set, way) at 'time'.
+ void touch(Index set, Index way, Time time);
+ // Return the least-recently-touched way in 'set'.
+ Index getVictim(Index set) const;
+};
+
+inline
+// All state (m_last_ref_ptr) is set up by the base class.
+LRUPolicy::LRUPolicy(Index num_sets, Index assoc)
+ : AbstractReplacementPolicy(num_sets, assoc)
+{
+}
+
+inline
+// Nothing to release beyond what the base class owns.
+LRUPolicy::~LRUPolicy()
+{
+}
+
+inline
+// Record a reference: note the parameter declared as 'way' in the class
+// is named 'index' here.
+void LRUPolicy::touch(Index set, Index index, Time time){
+ assert(index >= 0 && index < m_assoc);
+ assert(set >= 0 && set < m_num_sets);
+
+ m_last_ref_ptr[set][index] = time;
+}
+
+inline
+// Return the way in 'set' with the oldest last-reference time.
+Index LRUPolicy::getVictim(Index set) const {
+ // assert(m_assoc != 0);
+ Time time, smallest_time;
+ Index smallest_index;
+
+ // Way 0 seeds the running minimum, so the scan starts at way 1.
+ // The loop variable is Index (not 'unsigned int') to avoid a
+ // signed/unsigned comparison against m_assoc.
+ smallest_index = 0;
+ smallest_time = m_last_ref_ptr[set][0];
+
+ for (Index i = 1; i < m_assoc; i++) {
+ time = m_last_ref_ptr[set][i];
+ //assert(m_cache[cacheSet][i].m_Permission != AccessPermission_NotPresent);
+
+ if (time < smallest_time){
+ smallest_index = i;
+ smallest_time = time;
+ }
+ }
+
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, cacheSet);
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, smallest_index);
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, m_cache[cacheSet][smallest_index]);
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, *this);
+
+ return smallest_index;
+}
+
+#endif // LRUPOLICY_H
diff --git a/src/mem/ruby/system/LSB_CountingBloomFilter.cc b/src/mem/ruby/system/LSB_CountingBloomFilter.cc
new file mode 100644
index 000000000..ddfa97f5f
--- /dev/null
+++ b/src/mem/ruby/system/LSB_CountingBloomFilter.cc
@@ -0,0 +1,141 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * LSB_CountingBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "LSB_CountingBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+// Parse the "filter_size:max_count" configuration string and size the
+// filter accordingly; all counters start at zero.
+LSB_CountingBloomFilter::LSB_CountingBloomFilter(string str)
+{
+  string tail(str);
+  string head = string_split(tail, ':');   // head = bucket count, tail = counter max
+
+  m_filter_size = atoi(head.c_str());
+  m_filter_size_bits = log_int(m_filter_size);
+
+  m_count = atoi(tail.c_str());
+  m_count_bits = log_int(m_count);
+
+  m_filter.setSize(m_filter_size);
+  clear();
+}
+
+// Nothing to release explicitly: m_filter is a Vector member.
+LSB_CountingBloomFilter::~LSB_CountingBloomFilter(){
+}
+
+// Reset every bucket counter in the filter to zero.
+void LSB_CountingBloomFilter::clear()
+{
+  for (int bucket = 0; bucket < m_filter_size; bucket++) {
+    m_filter[bucket] = 0;
+  }
+}
+
+// Bump the counter of this address's bucket, saturating at m_count.
+// The original code had a stray ';' after the if-condition, which made
+// the saturation guard a no-op and let the counter grow without bound.
+void LSB_CountingBloomFilter::increment(const Address& addr)
+{
+  int i = get_index(addr);
+  if (m_filter[i] < m_count) {
+    m_filter[i] += 1;
+  }
+}
+
+
+// Decrease this address's bucket counter, never going below zero.
+void LSB_CountingBloomFilter::decrement(const Address& addr)
+{
+  int bucket = get_index(addr);
+  if (m_filter[bucket] > 0) {
+    m_filter[bucket] -= 1;
+  }
+}
+
+// Merging another filter's counts into this one is not implemented;
+// this is intentionally a no-op stub.
+void LSB_CountingBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+  // TODO
+}
+
+// Bit-style set() is not implemented for the counting variant;
+// callers use increment()/decrement() instead.
+void LSB_CountingBloomFilter::set(const Address& addr)
+{
+  // TODO
+}
+
+// Bit-style unset() is not implemented for the counting variant;
+// callers use increment()/decrement() instead.
+void LSB_CountingBloomFilter::unset(const Address& addr)
+{
+  // TODO
+}
+
+// An address is considered "set" when its bucket counter is nonzero.
+// The original stub had no return statement at all — undefined behavior
+// for a non-void function, so callers received an arbitrary value.
+bool LSB_CountingBloomFilter::isSet(const Address& addr)
+{
+  return (m_filter[get_index(addr)] > 0);
+}
+
+
+// Return the raw counter value of the bucket this address maps to.
+int LSB_CountingBloomFilter::getCount(const Address& addr)
+{
+  int bucket = get_index(addr);
+  return m_filter[bucket];
+}
+
+// Sum of all bucket counters in the filter.
+int LSB_CountingBloomFilter::getTotalCount()
+{
+  int total = 0;
+  for (int bucket = 0; bucket < m_filter_size; bucket++) {
+    total += m_filter[bucket];
+  }
+  return total;
+}
+
+// Public accessor exposing the bucket index an address maps to.
+int LSB_CountingBloomFilter::getIndex(const Address& addr)
+{
+  int bucket = get_index(addr);
+  return bucket;
+}
+
+// Printing of filter contents is intentionally a no-op.
+void LSB_CountingBloomFilter::print(ostream& out) const
+{
+}
+
+// Bit-level read is not meaningful for a counting filter; always
+// returns 0 (stub).
+int LSB_CountingBloomFilter::readBit(const int index) {
+  return 0;
+  // TODO
+}
+
+// Bit-level write is not meaningful for a counting filter; no-op stub.
+void LSB_CountingBloomFilter::writeBit(const int index, const int value) {
+  // TODO
+}
+
+// Map an address to a bucket by selecting m_filter_size_bits address
+// bits starting just above the data-block offset (hence "LSB" filter).
+int LSB_CountingBloomFilter::get_index(const Address& addr)
+{
+  int low_bit = RubyConfig::dataBlockBits();
+  int high_bit = low_bit + m_filter_size_bits - 1;
+  return addr.bitSelect(low_bit, high_bit);
+}
+
+
diff --git a/src/mem/ruby/system/LSB_CountingBloomFilter.hh b/src/mem/ruby/system/LSB_CountingBloomFilter.hh
new file mode 100644
index 000000000..5b0cdc87c
--- /dev/null
+++ b/src/mem/ruby/system/LSB_CountingBloomFilter.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * LSB_CountingBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef LSB_COUNTING_BLOOM_FILTER_H
+#define LSB_COUNTING_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+// Counting Bloom filter indexed by the low-order address bits just
+// above the data-block offset (see get_index()). Each bucket holds a
+// small saturating counter rather than a single bit.
+class LSB_CountingBloomFilter : public AbstractBloomFilter {
+public:
+
+  ~LSB_CountingBloomFilter();
+  // config is "filter_size:max_count", e.g. "1024:15".
+  LSB_CountingBloomFilter(string config);
+
+  void clear();
+  void increment(const Address& addr);
+  void decrement(const Address& addr);
+  void merge(AbstractBloomFilter * other_filter);
+  void set(const Address& addr);
+  void unset(const Address& addr);
+
+  bool isSet(const Address& addr);
+  int getCount(const Address& addr);
+  int getTotalCount();
+  int getIndex(const Address& addr);
+  int readBit(const int index);
+  void writeBit(const int index, const int value);
+
+  void print(ostream& out) const;
+
+private:
+
+  // Map an address to its bucket index.
+  int get_index(const Address& addr);
+
+  Vector<int> m_filter;    // one counter per bucket
+  int m_filter_size;       // number of buckets (log_int suggests a power of two is expected — TODO confirm)
+  int m_filter_size_bits;  // number of address bits used to index the filter
+
+  int m_count_bits;        // bits needed to hold m_count
+  int m_count;             // saturation ceiling for each counter
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/MachineID.hh b/src/mem/ruby/system/MachineID.hh
new file mode 100644
index 000000000..2f294dc54
--- /dev/null
+++ b/src/mem/ruby/system/MachineID.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MachineID.hh
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef MACHINEID_H
+#define MACHINEID_H
+
+#include "Global.hh"
+#include "util.hh"
+#include "MachineType.hh"
+
+// Identifies one machine (controller) in the system: its type plus an
+// index among the components of that type.
+struct MachineID {
+  MachineType type;
+  int num; // range: 0 ... number of this machine's components in the system - 1
+};
+
+// Render a MachineID as "<type>_<num>".
+extern inline
+string MachineIDToString (MachineID machine) {
+  string name = MachineType_to_string(machine.type);
+  name += "_";
+  name += int_to_string(machine.num);
+  return name;
+}
+
+// Two MachineIDs are equal when both the type and the index match.
+extern inline
+bool operator==(const MachineID & obj1, const MachineID & obj2)
+{
+  bool same_type = (obj1.type == obj2.type);
+  bool same_num = (obj1.num == obj2.num);
+  return same_type && same_num;
+}
+
+// Inequality is the exact negation of operator== above.
+extern inline
+bool operator!=(const MachineID & obj1, const MachineID & obj2)
+{
+  return !(obj1.type == obj2.type && obj1.num == obj2.num);
+}
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MachineID& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Print as "<type>-<num>"; out-of-range types print as "NULL-<num>".
+extern inline
+ostream& operator<<(ostream& out, const MachineID& obj)
+{
+  bool known_type =
+    (obj.type < MachineType_NUM) && (obj.type >= MachineType_FIRST);
+  if (known_type) {
+    out << MachineType_to_string(obj.type);
+  } else {
+    out << "NULL";
+  }
+  out << "-" << obj.num << flush;
+  return out;
+}
+
+
+#endif //MACHINEID_H
diff --git a/src/mem/ruby/system/MemoryControl.cc b/src/mem/ruby/system/MemoryControl.cc
new file mode 100644
index 000000000..e9f8a5ca8
--- /dev/null
+++ b/src/mem/ruby/system/MemoryControl.cc
@@ -0,0 +1,632 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MemoryControl.C
+ *
+ * Description: This module simulates a basic DDR-style memory controller
+ * (and can easily be extended to do FB-DIMM as well).
+ *
+ * This module models a single channel, connected to any number of
+ * DIMMs with any number of ranks of DRAMs each. If you want multiple
+ * address/data channels, you need to instantiate multiple copies of
+ * this module.
+ *
+ * Each memory request is placed in a queue associated with a specific
+ * memory bank. This queue is of finite size; if the queue is full
+ * the request will back up in an (infinite) common queue and will
+ * effectively throttle the whole system. This sort of behavior is
+ * intended to be closer to real system behavior than if we had an
+ * infinite queue on each bank. If you want the latter, just make
+ * the bank queues unreasonably large.
+ *
+ * The head item on a bank queue is issued when all of the
+ * following are true:
+ * the bank is available
+ * the address path to the DIMM is available
+ * the data path to or from the DIMM is available
+ *
+ * Note that we are not concerned about fixed offsets in time. The bank
+ * will not be used at the same moment as the address path, but since
+ * there is no queue in the DIMM or the DRAM it will be used at a constant
+ * number of cycles later, so it is treated as if it is used at the same
+ * time.
+ *
+ * We are assuming closed bank policy; that is, we automatically close
+ * each bank after a single read or write. Adding an option for open
+ * bank policy is for future work.
+ *
+ * We are assuming "posted CAS"; that is, we send the READ or WRITE
+ * immediately after the ACTIVATE. This makes scheduling the address
+ * bus trivial; we always schedule a fixed set of cycles. For DDR-400,
+ * this is a set of two cycles; for some configurations such as
+ * DDR-800 the parameter tRRD forces this to be set to three cycles.
+ *
+ * We assume a four-bit-time transfer on the data wires. This is
+ * the minimum burst length for DDR-2. This would correspond
+ * to (for example) a memory where each DIMM is 72 bits wide
+ * and DIMMs are ganged in pairs to deliver 64 bytes at a shot.
+ * This gives us the same occupancy on the data wires as on the
+ * address wires (for the two-address-cycle case).
+ *
+ * The only non-trivial scheduling problem is the data wires.
+ * A write will use the wires earlier in the operation than a read
+ * will; typically one cycle earlier as seen at the DRAM, but earlier
+ * by a worst-case round-trip wire delay when seen at the memory controller.
+ * So, while reads from one rank can be scheduled back-to-back
+ * every two cycles, and writes (to any rank) scheduled every two cycles,
+ * when a read is followed by a write we need to insert a bubble.
+ * Furthermore, consecutive reads from two different ranks may need
+ * to insert a bubble due to skew between when one DRAM stops driving the
+ * wires and when the other one starts. (These bubbles are parameters.)
+ *
+ * This means that when some number of reads and writes are at the
+ * heads of their queues, reads could starve writes, and/or reads
+ * to the same rank could starve out other requests, since the others
+ * would never see the data bus ready.
+ * For this reason, we have implemented an anti-starvation feature.
+ * A group of requests is marked "old", and a counter is incremented
+ * each cycle as long as any request from that batch has not issued.
+ * if the counter reaches twice the bank busy time, we hold off any
+ * newer requests until all of the "old" requests have issued.
+ *
+ * We also model tFAW. This is an obscure DRAM parameter that says
+ * that no more than four activate requests can happen within a window
+ * of a certain size. For most configurations this does not come into play,
+ * or has very little effect, but it could be used to throttle the power
+ * consumption of the DRAM. In this implementation (unlike in a DRAM
+ * data sheet) TFAW is measured in memory bus cycles; i.e. if TFAW = 16
+ * then no more than four activates may happen within any 16 cycle window.
+ * Refreshes are included in the activates.
+ *
+ *
+ * $Id: $
+ *
+ */
+
+#include "Global.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "Profiler.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "RubySlicc_ComponentMapping.hh"
+#include "NetworkMessage.hh"
+#include "Network.hh"
+
+#include "Consumer.hh"
+
+#include "MemoryControl.hh"
+
+#include <list>
+
+class Consumer;
+
+// Value to reset watchdog timer to.
+// If we're idle for this many memory control cycles,
+// shut down our clock (our rescheduling of ourselves).
+// Refresh shuts down as well.
+// When we restart, we'll be in a different phase
+// with respect to ruby cycles, so this introduces
+// a slight inaccuracy. But it is necessary or the
+// ruby tester never terminates because the event
+// queue is never empty.
+#define IDLECOUNT_MAX_VALUE 1000
+
+// Output operator definition
+
+// Stream insertion delegates to MemoryControl::print() and flushes.
+ostream& operator<<(ostream& out, const MemoryControl& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+
+// ****************************************************************
+
+// CONSTRUCTOR
+
+// Construct the controller for one channel: read all timing/geometry
+// parameters from RubyConfig and allocate per-bank and per-rank state.
+MemoryControl::MemoryControl (AbstractChip* chip_ptr, int version) {
+  m_chip_ptr = chip_ptr;
+  m_version = version;
+  m_msg_counter = 0;
+
+  m_debug = 0;
+  //if (m_version == 0) m_debug = 1;
+
+  // Timing and geometry knobs; presumably in memory-bus cycles where
+  // they are times — confirm against RubyConfig documentation.
+  m_mem_bus_cycle_multiplier = RubyConfig::memBusCycleMultiplier();
+  m_banks_per_rank = RubyConfig::banksPerRank();
+  m_ranks_per_dimm = RubyConfig::ranksPerDimm();
+  m_dimms_per_channel = RubyConfig::dimmsPerChannel();
+  m_bank_bit_0 = RubyConfig::bankBit0();
+  m_rank_bit_0 = RubyConfig::rankBit0();
+  m_dimm_bit_0 = RubyConfig::dimmBit0();
+  m_bank_queue_size = RubyConfig::bankQueueSize();
+  m_bank_busy_time = RubyConfig::bankBusyTime();
+  m_rank_rank_delay = RubyConfig::rankRankDelay();
+  m_read_write_delay = RubyConfig::readWriteDelay();
+  m_basic_bus_busy_time = RubyConfig::basicBusBusyTime();
+  m_mem_ctl_latency = RubyConfig::memCtlLatency();
+  m_refresh_period = RubyConfig::refreshPeriod();
+  m_memRandomArbitrate = RubyConfig::memRandomArbitrate();
+  m_tFaw = RubyConfig::tFaw();
+  m_memFixedDelay = RubyConfig::memFixedDelay();
+
+  assert(m_tFaw <= 62); // must fit in a uint64 shift register
+
+  m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
+  m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
+  // One bank is refreshed at a time, so dividing by the bank count
+  // refreshes every bank once per m_refresh_period.
+  m_refresh_period_system = m_refresh_period / m_total_banks;
+
+  m_bankQueues = new list<MemoryNode> [m_total_banks];
+  assert(m_bankQueues);
+
+  m_bankBusyCounter = new int [m_total_banks];
+  assert(m_bankBusyCounter);
+
+  m_oldRequest = new int [m_total_banks];
+  assert(m_oldRequest);
+
+  for (int i=0; i<m_total_banks; i++) {
+    m_bankBusyCounter[i] = 0;
+    m_oldRequest[i] = 0;
+  }
+
+  m_busBusyCounter_Basic = 0;
+  m_busBusyCounter_Write = 0;
+  m_busBusyCounter_ReadNewRank = 0;
+  m_busBusy_WhichRank = 0;
+
+  m_roundRobin = 0;
+  m_refresh_count = 1;
+  m_need_refresh = 0;
+  m_refresh_bank = 0;
+  m_awakened = 0;
+  m_idleCount = 0;
+  m_ageCounter = 0;
+
+  // Each tfaw shift register keeps a moving bit pattern
+  // which shows when recent activates have occurred.
+  // m_tfaw_count keeps track of how many 1 bits are set
+  // in each shift register. When m_tfaw_count is >= 4,
+  // new activates are not allowed.
+  m_tfaw_shift = new uint64 [m_total_ranks];
+  m_tfaw_count = new int [m_total_ranks];
+  for (int i=0; i<m_total_ranks; i++) {
+    m_tfaw_shift[i] = 0;
+    m_tfaw_count[i] = 0;
+  }
+}
+
+
+// DESTRUCTOR
+
+// Free every array allocated in the constructor. The original
+// destructor leaked m_tfaw_shift and m_tfaw_count, which are new[]'d
+// per rank in the constructor.
+MemoryControl::~MemoryControl () {
+  delete [] m_bankQueues;
+  delete [] m_bankBusyCounter;
+  delete [] m_oldRequest;
+  delete [] m_tfaw_shift;
+  delete [] m_tfaw_count;
+}
+
+
+// PUBLIC METHODS
+
+// enqueue new request from directory
+
+// Enqueue a new request arriving from the directory. 'latency' is the
+// delay added to the current time to form the arrival time.
+// NOTE(review): assumes 'message' always wraps a MemoryMsg — the
+// dynamic_cast result is dereferenced without a NULL check.
+void MemoryControl::enqueue (const MsgPtr& message, int latency) {
+  Time current_time = g_eventQueue_ptr->getTime();
+  Time arrival_time = current_time + latency;
+  const MemoryMsg* memMess = dynamic_cast<const MemoryMsg*>(message.ref());
+  physical_address_t addr = memMess->getAddress().getAddress();
+  MemoryRequestType type = memMess->getType();
+  bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
+  // Anything that is not a read is treated as a dirty writeback.
+  MemoryNode thisReq(arrival_time, message, addr, is_mem_read, !is_mem_read);
+  enqueueMemRef(thisReq);
+}
+
+// Alternate entry point used when we already have a MemoryNode structure built.
+
+// Alternate entry point used when we already have a MemoryNode built.
+// Tags the request with a sequence number, profiles it, and appends it
+// to the (unbounded) input queue; wakes the controller if it was idle.
+void MemoryControl::enqueueMemRef (MemoryNode& memRef) {
+  m_msg_counter++;
+  memRef.m_msg_counter = m_msg_counter;
+  Time arrival_time = memRef.m_time;
+  uint64 at = arrival_time;
+  bool is_mem_read = memRef.m_is_mem_read;
+  bool dirtyWB = memRef.m_is_dirty_wb;
+  physical_address_t addr = memRef.m_addr;
+  int bank = getBank(addr);
+  if (m_debug) {
+    printf("New memory request%7d: 0x%08llx %c arrived at %10lld ", m_msg_counter, addr, is_mem_read? 'R':'W', at);
+    printf("bank =%3x\n", bank);
+  }
+  g_system_ptr->getProfiler()->profileMemReq(bank);
+  m_input_queue.push_back(memRef);
+  if (!m_awakened) {
+    // m_awakened guards against scheduling more than one wakeup chain.
+    g_eventQueue_ptr->scheduleEvent(this, 1);
+    m_awakened = 1;
+  }
+}
+
+
+
+// dequeue, peek, and isReady are used to transfer completed requests
+// back to the directory
+
+// Remove the completed request at the head of the response queue;
+// callers must check isReady() first (asserted here).
+void MemoryControl::dequeue () {
+  assert(isReady());
+  m_response_queue.pop_front();
+}
+
+
+// Return (without removing) the message of the oldest completed request.
+const Message* MemoryControl::peek () {
+  MemoryNode head = peekNode();
+  Message* msg_ptr = head.m_msgptr.ref();
+  assert(msg_ptr != NULL);
+  return msg_ptr;
+}
+
+
+// Return a copy of the oldest completed request without dequeuing it;
+// callers must check isReady() first (asserted here).
+MemoryNode MemoryControl::peekNode () {
+  assert(isReady());
+  MemoryNode req = m_response_queue.front();
+  uint64 returnTime = req.m_time;
+  if (m_debug) {
+    printf("Old memory request%7d: 0x%08llx %c peeked at %10lld\n",
+        req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', returnTime);
+  }
+  return req;
+}
+
+
+// True when a completed request exists whose return time has arrived.
+bool MemoryControl::isReady () {
+  if (m_response_queue.empty()) {
+    return false;
+  }
+  return (m_response_queue.front().m_time <= g_eventQueue_ptr->getTime());
+}
+
+// Register the consumer (typically the directory) that is scheduled
+// when responses become ready; see enqueueToDirectory().
+void MemoryControl::setConsumer (Consumer* consumer_ptr) {
+  m_consumer_ptr = consumer_ptr;
+}
+
+// Per-instance printing is intentionally a no-op; static configuration
+// is reported by printConfig().
+void MemoryControl::print (ostream& out) const {
+}
+
+
+// Print the static configuration of this memory controller.
+// The bank busy time is now printed from the m_bank_busy_time member —
+// the value the model actually uses — instead of the BANK_BUSY_TIME
+// macro, which could silently disagree with the per-instance setting
+// read from RubyConfig in the constructor.
+void MemoryControl::printConfig (ostream& out) {
+  out << "Memory Control " << m_version << ":" << endl;
+  out << "  Ruby cycles per memory cycle: " << m_mem_bus_cycle_multiplier << endl;
+  out << "  Basic read latency: " << m_mem_ctl_latency << endl;
+  if (m_memFixedDelay) {
+    out << "  Fixed Latency mode: Added cycles = " << m_memFixedDelay << endl;
+  } else {
+    out << "  Bank busy time: " << m_bank_busy_time << " memory cycles" << endl;
+    out << "  Memory channel busy time: " << m_basic_bus_busy_time << endl;
+    out << "  Dead cycles between reads to different ranks: " << m_rank_rank_delay << endl;
+    out << "  Dead cycle between a read and a write: " << m_read_write_delay << endl;
+    out << "  tFaw (four-activate) window: " << m_tFaw << endl;
+  }
+  out << "  Banks per rank: " << m_banks_per_rank << endl;
+  out << "  Ranks per DIMM: " << m_ranks_per_dimm << endl;
+  out << "  DIMMs per channel: " << m_dimms_per_channel << endl;
+  out << "  LSB of bank field in address: " << m_bank_bit_0 << endl;
+  out << "  LSB of rank field in address: " << m_rank_bit_0 << endl;
+  out << "  LSB of DIMM field in address: " << m_dimm_bit_0 << endl;
+  out << "  Max size of each bank queue: " << m_bank_queue_size << endl;
+  out << "  Refresh period (within one bank): " << m_refresh_period << endl;
+  out << "  Arbitration randomness: " << m_memRandomArbitrate << endl;
+}
+
+
+// Enable (nonzero) or disable (0) the printf-based debug tracing.
+void MemoryControl::setDebug (int debugFlag) {
+  m_debug = debugFlag;
+}
+
+
+// ****************************************************************
+
+// PRIVATE METHODS
+
+// Queue up a completed request to send back to directory
+
+// Queue up a completed request to send back to the directory after
+// 'latency' memory cycles (converted to ruby cycles here), and schedule
+// the consumer to wake at that time.
+void MemoryControl::enqueueToDirectory (MemoryNode req, int latency) {
+  Time arrival_time = g_eventQueue_ptr->getTime()
+                    + (latency * m_mem_bus_cycle_multiplier);
+  req.m_time = arrival_time;
+  m_response_queue.push_back(req);
+
+  // schedule the wake up
+  g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
+}
+
+
+
+// getBank returns an integer that is unique for each
+// bank across this memory controller.
+
+// Compute a bank id that is unique across this controller by packing
+// the DIMM, rank-within-DIMM and bank-within-rank address fields.
+// Field widths are assumed to be powers of two (mask = count - 1).
+int MemoryControl::getBank (physical_address_t addr) {
+  int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1);
+  int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1);
+  int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1);
+  int banks_per_dimm = m_ranks_per_dimm * m_banks_per_rank;
+  return (dimm * banks_per_dimm) + (rank * m_banks_per_rank) + bank;
+}
+
+// getRank returns an integer that is unique for each rank
+// and independent of individual bank.
+
+// Map a controller-wide bank id (from getBank) back to its rank id,
+// which is unique per rank and independent of the individual bank.
+int MemoryControl::getRank (int bank) {
+  int rank = bank / m_banks_per_rank;
+  assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
+  return rank;
+}
+
+
+// queueReady determines if the head item in a bank queue
+// can be issued this cycle
+
+// queueReady: true if the head request of this bank's queue can issue
+// this cycle (bank idle, bus free, tFAW allows an activate, the
+// anti-starvation batch rule is satisfied, and read/write turnaround
+// is honored). Each early-out bumps the matching profiler counter, so
+// the order of these checks also determines how stalls are attributed.
+bool MemoryControl::queueReady (int bank) {
+  if ((m_bankBusyCounter[bank] > 0) && !m_memFixedDelay) {
+    g_system_ptr->getProfiler()->profileMemBankBusy();
+    //if (m_debug) printf(" bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
+    return false;
+  }
+  if (m_memRandomArbitrate >= 2) {
+    // Randomly refuse some issues to model arbitration jitter.
+    if ((random() % 100) < m_memRandomArbitrate) {
+      g_system_ptr->getProfiler()->profileMemRandBusy();
+      return false;
+    }
+  }
+  if (m_memFixedDelay) return true;
+  if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
+    // Anti-starvation: once the current batch has been waiting longer
+    // than twice the bank busy time, only "old" requests may issue.
+    g_system_ptr->getProfiler()->profileMemNotOld();
+    return false;
+  }
+  if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
+    // Another bank must have issued this same cycle.
+    // For profiling, we count this as an arb wait rather than
+    // a bus wait. This is a little inaccurate since it MIGHT
+    // have also been blocked waiting for a read-write or a
+    // read-read instead, but it's pretty close.
+    g_system_ptr->getProfiler()->profileMemArbWait(1);
+    return false;
+  }
+  if (m_busBusyCounter_Basic > 0) {
+    g_system_ptr->getProfiler()->profileMemBusBusy();
+    return false;
+  }
+  int rank = getRank(bank);
+  if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
+    g_system_ptr->getProfiler()->profileMemTfawBusy();
+    return false;
+  }
+  bool write = !m_bankQueues[bank].front().m_is_mem_read;
+  if (write && (m_busBusyCounter_Write > 0)) {
+    g_system_ptr->getProfiler()->profileMemReadWriteBusy();
+    return false;
+  }
+  if (!write && (rank != m_busBusy_WhichRank)
+      && (m_busBusyCounter_ReadNewRank > 0)) {
+    g_system_ptr->getProfiler()->profileMemDataBusBusy();
+    return false;
+  }
+  return true;
+}
+
+
+// issueRefresh checks to see if this bank has a refresh scheduled
+// and, if so, does the refresh and returns true
+
+// issueRefresh: if this bank is the next one scheduled for refresh and
+// the bank, bus, and tFAW window are all free, perform the refresh
+// (occupying the bank and all bus counters like a normal access) and
+// return true; otherwise return false.
+bool MemoryControl::issueRefresh (int bank) {
+  if (!m_need_refresh || (m_refresh_bank != bank)) return false;
+  if (m_bankBusyCounter[bank] > 0) return false;
+  // Note that m_busBusyCounter will prevent multiple issues during
+  // the same cycle, as well as on different but close cycles:
+  if (m_busBusyCounter_Basic > 0) return false;
+  int rank = getRank(bank);
+  if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) return false;
+
+  // Issue it:
+
+  //if (m_debug) {
+    //uint64 current_time = g_eventQueue_ptr->getTime();
+    //printf(" Refresh bank %3x at %lld\n", bank, current_time);
+  //}
+  g_system_ptr->getProfiler()->profileMemRefresh();
+  m_need_refresh--;
+  // Advance the round-robin refresh pointer to the next bank.
+  m_refresh_bank++;
+  if (m_refresh_bank >= m_total_banks) m_refresh_bank = 0;
+  m_bankBusyCounter[bank] = m_bank_busy_time;
+  m_busBusyCounter_Basic = m_basic_bus_busy_time;
+  m_busBusyCounter_Write = m_basic_bus_busy_time;
+  m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
+  markTfaw(rank);
+  return true;
+}
+
+
+// Mark the activate in the tFaw shift register
+void MemoryControl::markTfaw (int rank) {
+ if (m_tFaw) {
+ m_tfaw_shift[rank] |= (1 << (m_tFaw-1));
+ m_tfaw_count[rank]++;
+ }
+}
+
+
+// Issue a memory request: Activate the bank,
+// reserve the address and data buses, and queue
+// the request for return to the requesting
+// processor after a fixed latency.
+
+// Issue a memory request: activate the bank, reserve the address and
+// data buses, and queue the request for return to the requesting
+// processor after a fixed latency.
+void MemoryControl::issueRequest (int bank) {
+  int rank = getRank(bank);
+  MemoryNode req = m_bankQueues[bank].front();
+  m_bankQueues[bank].pop_front();
+  if (m_debug) {
+    uint64 current_time = g_eventQueue_ptr->getTime();
+    printf(" Mem issue request%7d: 0x%08llx %c at %10lld bank =%3x\n",
+        req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', current_time, bank);
+  }
+  if (req.m_msgptr.ref() != NULL) { // don't enqueue L3 writebacks
+    enqueueToDirectory(req, m_mem_ctl_latency + m_memFixedDelay);
+  }
+  m_oldRequest[bank] = 0;
+  markTfaw(rank);
+  m_bankBusyCounter[bank] = m_bank_busy_time;
+  m_busBusy_WhichRank = rank;
+  if (req.m_is_mem_read) {
+    // A read drives the data wires later than a write would, so a
+    // following write (or a read to a different rank) must wait extra
+    // turnaround cycles beyond the basic bus busy time.
+    g_system_ptr->getProfiler()->profileMemRead();
+    m_busBusyCounter_Basic = m_basic_bus_busy_time;
+    m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
+    m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time + m_rank_rank_delay;
+  } else {
+    g_system_ptr->getProfiler()->profileMemWrite();
+    m_busBusyCounter_Basic = m_basic_bus_busy_time;
+    m_busBusyCounter_Write = m_basic_bus_busy_time;
+    m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
+  }
+}
+
+
+// executeCycle: This function is called once per memory clock cycle
+// to simulate all the periodic hardware.
+
+// executeCycle: simulate one memory clock cycle of all the periodic
+// hardware — count down busy/tFAW timers, latch refresh needs, manage
+// the anti-starvation batch, arbitrate and issue requests, and move at
+// most one request from the input queue into a bank queue.
+void MemoryControl::executeCycle () {
+  // Keep track of time by counting down the busy counters:
+  for (int bank=0; bank < m_total_banks; bank++) {
+    if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--;
+  }
+  if (m_busBusyCounter_Write > 0) m_busBusyCounter_Write--;
+  if (m_busBusyCounter_ReadNewRank > 0) m_busBusyCounter_ReadNewRank--;
+  if (m_busBusyCounter_Basic > 0) m_busBusyCounter_Basic--;
+
+  // Count down the tFAW shift registers:
+  // a 1 falling out of the low end means one activate left the window.
+  for (int rank=0; rank < m_total_ranks; rank++) {
+    if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--;
+    m_tfaw_shift[rank] >>= 1;
+  }
+
+  // After time period expires, latch an indication that we need a refresh.
+  // Disable refresh if in memFixedDelay mode.
+  if (!m_memFixedDelay) m_refresh_count--;
+  if (m_refresh_count == 0) {
+    m_refresh_count = m_refresh_period_system;
+    assert (m_need_refresh < 10); // Are we overrunning our ability to refresh?
+    m_need_refresh++;
+  }
+
+  // If this batch of requests is all done, make a new batch:
+  m_ageCounter++;
+  int anyOld = 0;
+  for (int bank=0; bank < m_total_banks; bank++) {
+    anyOld |= m_oldRequest[bank];
+  }
+  if (!anyOld) {
+    for (int bank=0; bank < m_total_banks; bank++) {
+      if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1;
+    }
+    m_ageCounter = 0;
+  }
+
+  // If randomness desired, re-randomize round-robin position each cycle
+  if (m_memRandomArbitrate) {
+    m_roundRobin = random() % m_total_banks;
+  }
+
+
+  // For each channel, scan round-robin, and pick an old, ready
+  // request and issue it. Treat a refresh request as if it
+  // were at the head of its bank queue. After we issue something,
+  // keep scanning the queues just to gather statistics about
+  // how many are waiting. If in memFixedDelay mode, we can issue
+  // more than one request per cycle.
+
+  int queueHeads = 0;
+  int banksIssued = 0;
+  for (int i = 0; i < m_total_banks; i++) {
+    m_roundRobin++;
+    if (m_roundRobin >= m_total_banks) m_roundRobin = 0;
+    issueRefresh(m_roundRobin);
+    int qs = m_bankQueues[m_roundRobin].size();
+    if (qs > 1) {
+      g_system_ptr->getProfiler()->profileMemBankQ(qs-1);
+    }
+    if (qs > 0) {
+      m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is queued
+      queueHeads++;
+      if (queueReady(m_roundRobin)) {
+        issueRequest(m_roundRobin);
+        banksIssued++;
+        if (m_memFixedDelay) {
+          g_system_ptr->getProfiler()->profileMemWaitCycles(m_memFixedDelay);
+        }
+      }
+    }
+  }
+
+  // memWaitCycles is a redundant catch-all for the specific counters in queueReady
+  g_system_ptr->getProfiler()->profileMemWaitCycles(queueHeads - banksIssued);
+
+  // Check input queue and move anything to bank queues if not full.
+  // Since this is done here at the end of the cycle, there will always
+  // be at least one cycle of latency in the bank queue.
+  // We deliberately move at most one request per cycle (to simulate
+  // typical hardware). Note that if one bank queue fills up, other
+  // requests can get stuck behind it here.
+
+  if (!m_input_queue.empty()) {
+    m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is pending
+    MemoryNode req = m_input_queue.front();
+    int bank = getBank(req.m_addr);
+    if (m_bankQueues[bank].size() < m_bank_queue_size) {
+      m_input_queue.pop_front();
+      m_bankQueues[bank].push_back(req);
+    }
+    g_system_ptr->getProfiler()->profileMemInputQ(m_input_queue.size());
+  }
+}
+
+
+// wakeup: This function is called once per memory controller clock cycle.
+
+// wakeup: called once per memory controller clock cycle. Runs one model
+// cycle, then reschedules itself unless the controller has been idle
+// for IDLECOUNT_MAX_VALUE cycles — the self-clocking shuts off when
+// idle so the ruby tester's event queue can drain (see the comment at
+// the IDLECOUNT_MAX_VALUE definition).
+void MemoryControl::wakeup () {
+
+  // execute everything
+  executeCycle();
+
+  m_idleCount--;
+  if (m_idleCount <= 0) {
+    m_awakened = 0;
+  } else {
+    // Reschedule ourselves so that we run every memory cycle:
+    g_eventQueue_ptr->scheduleEvent(this, m_mem_bus_cycle_multiplier);
+  }
+}
+
+
diff --git a/src/mem/ruby/system/MemoryControl.hh b/src/mem/ruby/system/MemoryControl.hh
new file mode 100644
index 000000000..ee71b8f51
--- /dev/null
+++ b/src/mem/ruby/system/MemoryControl.hh
@@ -0,0 +1,176 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MemoryControl.h
+ *
+ * Description: See MemoryControl.C
+ *
+ * $Id: $
+ *
+ */
+
+#ifndef MEMORY_CONTROL_H
+#define MEMORY_CONTROL_H
+
+#include "Global.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "Profiler.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "Message.hh"
+#include "util.hh"
+#include "MemoryNode.hh"
+// Note that "MemoryMsg" is in the "generated" directory:
+#include "MemoryMsg.hh"
+#include "Consumer.hh"
+#include "AbstractMemOrCache.hh"
+
+#include <list>
+
+// This constant is part of the definition of tFAW; see
+// the comments in header to MemoryControl.C
+#define ACTIVATE_PER_TFAW 4
+
+//////////////////////////////////////////////////////////////////////////////
+
+class Consumer;
+
+class MemoryControl : public Consumer, public AbstractMemOrCache {
+public:
+
+ // Constructors
+ MemoryControl (AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~MemoryControl ();
+
+ // Public Methods
+
+ void wakeup() ;
+
+ void setConsumer (Consumer* consumer_ptr);
+ Consumer* getConsumer () { return m_consumer_ptr; };
+ void setDescription (const string& name) { m_name = name; };
+ string getDescription () { return m_name; };
+
+ // Called from the directory:
+ void enqueue (const MsgPtr& message, int latency );
+ void enqueueMemRef (MemoryNode& memRef);
+ void dequeue ();
+ const Message* peek ();
+ MemoryNode peekNode ();
+ bool isReady();
+ bool areNSlotsAvailable (int n) { return true; }; // infinite queue length
+
+ //// Called from L3 cache:
+ //void writeBack(physical_address_t addr);
+
+ void printConfig (ostream& out);
+ void print (ostream& out) const;
+ void setDebug (int debugFlag);
+
+private:
+
+ void enqueueToDirectory (MemoryNode req, int latency);
+ int getBank (physical_address_t addr);
+ int getRank (int bank);
+ bool queueReady (int bank);
+ void issueRequest (int bank);
+ bool issueRefresh (int bank);
+ void markTfaw (int rank);
+ void executeCycle ();
+
+ // Private copy constructor and assignment operator
+ MemoryControl (const MemoryControl& obj);
+ MemoryControl& operator=(const MemoryControl& obj);
+
+ // data members
+ AbstractChip* m_chip_ptr;
+ Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
+ string m_name;
+ int m_version;
+ int m_msg_counter;
+ int m_awakened;
+
+ int m_mem_bus_cycle_multiplier;
+ int m_banks_per_rank;
+ int m_ranks_per_dimm;
+ int m_dimms_per_channel;
+ int m_bank_bit_0;
+ int m_rank_bit_0;
+ int m_dimm_bit_0;
+ unsigned int m_bank_queue_size;
+ int m_bank_busy_time;
+ int m_rank_rank_delay;
+ int m_read_write_delay;
+ int m_basic_bus_busy_time;
+ int m_mem_ctl_latency;
+ int m_refresh_period;
+ int m_memRandomArbitrate;
+ int m_tFaw;
+ int m_memFixedDelay;
+
+ int m_total_banks;
+ int m_total_ranks;
+ int m_refresh_period_system;
+
+ // queues where memory requests live
+
+ list<MemoryNode> m_response_queue;
+ list<MemoryNode> m_input_queue;
+ list<MemoryNode>* m_bankQueues;
+
+ // Each entry indicates number of address-bus cycles until bank
+ // is reschedulable:
+ int* m_bankBusyCounter;
+ int* m_oldRequest;
+
+ uint64* m_tfaw_shift;
+ int* m_tfaw_count;
+
+ // Each of these indicates number of address-bus cycles until
+ // we can issue a new request of the corresponding type:
+ int m_busBusyCounter_Write;
+ int m_busBusyCounter_ReadNewRank;
+ int m_busBusyCounter_Basic;
+
+ int m_busBusy_WhichRank; // which rank last granted
+ int m_roundRobin; // which bank queue was last granted
+ int m_refresh_count; // cycles until next refresh
+ int m_need_refresh; // set whenever m_refresh_count goes to zero
+ int m_refresh_bank; // which bank to refresh next
+ int m_ageCounter; // age of old requests; to detect starvation
+ int m_idleCount; // watchdog timer for shutting down
+ int m_debug; // turn on printf's
+};
+
+#endif // MEMORY_CONTROL_H
+
diff --git a/src/mem/ruby/system/MemoryNode.cc b/src/mem/ruby/system/MemoryNode.cc
new file mode 100644
index 000000000..5cba14eff
--- /dev/null
+++ b/src/mem/ruby/system/MemoryNode.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * MemoryNode.C
+ *
+ * Description: See MemoryNode.h
+ *
+ * $Id: MemoryNode.C 1.3 04/08/04 14:15:38-05:00 beckmann@c2-141.cs.wisc.edu $
+ *
+ */
+
+#include "MemoryNode.hh"
+
+void MemoryNode::print(ostream& out) const
+{
+ out << "[";
+ out << m_time << ", ";
+ out << m_msg_counter << ", ";
+ out << m_msgptr << "; ";
+ out << "]";
+}
diff --git a/src/mem/ruby/system/MemoryNode.hh b/src/mem/ruby/system/MemoryNode.hh
new file mode 100644
index 000000000..1ed3968bb
--- /dev/null
+++ b/src/mem/ruby/system/MemoryNode.hh
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * MemoryNode.h
+ *
+ * Description:
+ * This structure records everything known about a single
+ * memory request that is queued in the memory controller.
+ * It is created when the memory request first arrives
+ * at a memory controller and is deleted when the underlying
+ * message is enqueued to be sent back to the directory.
+ *
+ * $Id: MemoryNode.h,v 3.3 2003/12/04 15:01:34 xu Exp $
+ *
+ */
+
+#ifndef MEMORYNODE_H
+#define MEMORYNODE_H
+
+#include "Global.hh"
+#include "Message.hh"
+#include "MemoryRequestType.hh"
+
+class MemoryNode {
+
+public:
+ // Constructors
+
+// old one:
+ MemoryNode(const Time& time, int counter, const MsgPtr& msgptr, const physical_address_t addr, const bool is_mem_read) {
+ m_time = time;
+ m_msg_counter = counter;
+ m_msgptr = msgptr;
+ m_addr = addr;
+ m_is_mem_read = is_mem_read;
+ m_is_dirty_wb = !is_mem_read;
+ }
+
+// new one:
+ MemoryNode(const Time& time, const MsgPtr& msgptr, const physical_address_t addr, const bool is_mem_read, const bool is_dirty_wb) {
+ m_time = time;
+ m_msg_counter = 0;
+ m_msgptr = msgptr;
+ m_addr = addr;
+ m_is_mem_read = is_mem_read;
+ m_is_dirty_wb = is_dirty_wb;
+ }
+
+ // Destructor
+ ~MemoryNode() {};
+
+ // Public Methods
+ void print(ostream& out) const;
+
+ // Data Members (m_ prefix) (all public -- this is really more a struct)
+
+ Time m_time;
+ int m_msg_counter;
+ MsgPtr m_msgptr;
+ physical_address_t m_addr;
+ bool m_is_mem_read;
+ bool m_is_dirty_wb;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MemoryNode& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const MemoryNode& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //MEMORYNODE_H
diff --git a/src/mem/ruby/system/MultiBitSelBloomFilter.cc b/src/mem/ruby/system/MultiBitSelBloomFilter.cc
new file mode 100644
index 000000000..a42463d1e
--- /dev/null
+++ b/src/mem/ruby/system/MultiBitSelBloomFilter.cc
@@ -0,0 +1,191 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiBitSelBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "MultiBitSelBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+MultiBitSelBloomFilter::MultiBitSelBloomFilter(string str)
+{
+
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ // head contains filter size, tail contains bit offset from block number
+ m_filter_size = atoi(head.c_str());
+
+ head = string_split(tail, '_');
+ m_num_hashes = atoi(head.c_str());
+
+ head = string_split(tail, '_');
+ m_skip_bits = atoi(head.c_str());
+
+ if(tail == "Regular") {
+ isParallel = false;
+ } else if (tail == "Parallel") {
+ isParallel = true;
+ } else {
+ cout << "ERROR: Incorrect config string for MultiBitSel Bloom! :" << str << endl;
+ assert(0);
+ }
+
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_par_filter_size = m_filter_size/m_num_hashes;
+ m_par_filter_size_bits = log_int(m_par_filter_size);
+
+ m_filter.setSize(m_filter_size);
+ clear();
+}
+
+MultiBitSelBloomFilter::~MultiBitSelBloomFilter(){
+}
+
+void MultiBitSelBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void MultiBitSelBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void MultiBitSelBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void MultiBitSelBloomFilter::merge(AbstractBloomFilter * other_filter){
+ // assumes both filters are the same size!
+ MultiBitSelBloomFilter * temp = (MultiBitSelBloomFilter*) other_filter;
+ for(int i=0; i < m_filter_size; ++i){
+ m_filter[i] |= (*temp)[i];
+ }
+
+}
+
+void MultiBitSelBloomFilter::set(const Address& addr)
+{
+ for (int i = 0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ m_filter[idx] = 1;
+
+ //Profile hash value distribution
+ //g_system_ptr->getProfiler()->getXactProfiler()->profileHashValue(i, idx); //gem5:Arka for decomissioning of log_tm
+ }
+}
+
+void MultiBitSelBloomFilter::unset(const Address& addr)
+{
+ cout << "ERROR: Unset should never be called in a Bloom filter";
+ assert(0);
+}
+
+bool MultiBitSelBloomFilter::isSet(const Address& addr)
+{
+ bool res = true;
+
+ for (int i=0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ res = res && m_filter[idx];
+ }
+ return res;
+}
+
+
+int MultiBitSelBloomFilter::getCount(const Address& addr)
+{
+ return isSet(addr)? 1: 0;
+}
+
+int MultiBitSelBloomFilter::getIndex(const Address& addr)
+{
+ return 0;
+}
+
+int MultiBitSelBloomFilter::readBit(const int index) {
+ return 0;
+}
+
+void MultiBitSelBloomFilter::writeBit(const int index, const int value) {
+
+}
+
+int MultiBitSelBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ count += m_filter[i];
+ }
+ return count;
+}
+
+void MultiBitSelBloomFilter::print(ostream& out) const
+{
+}
+
+int MultiBitSelBloomFilter::get_index(const Address& addr, int i)
+{
+ // m_skip_bits is used to perform BitSelect after skipping some bits. Used to simulate BitSel hashing on larger than cache-line granularities
+ uint64 x = (addr.getLineAddress()) >> m_skip_bits;
+ int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits);
+ //36-bit addresses, 6-bit cache lines
+
+ if(isParallel) {
+ return (y % m_par_filter_size) + i*m_par_filter_size;
+ } else {
+ return y % m_filter_size;
+ }
+}
+
+
+int MultiBitSelBloomFilter::hash_bitsel(uint64 value, int index, int jump, int maxBits, int numBits) {
+ uint64 mask = 1;
+ int result = 0;
+ int bit, i;
+
+ for(i = 0; i < numBits; i++) {
+ bit = (index + jump*i) % maxBits;
+ if (value & (mask << bit)) result += mask << i;
+ }
+ return result;
+}
diff --git a/src/mem/ruby/system/MultiBitSelBloomFilter.hh b/src/mem/ruby/system/MultiBitSelBloomFilter.hh
new file mode 100644
index 000000000..eaf4ff943
--- /dev/null
+++ b/src/mem/ruby/system/MultiBitSelBloomFilter.hh
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiBitSelBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef MULTIBITSEL_BLOOM_FILTER_H
+#define MULTIBITSEL_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "Profiler.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+class MultiBitSelBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~MultiBitSelBloomFilter();
+ MultiBitSelBloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ void print(ostream& out) const;
+
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ int operator[](const int index) const{
+ return this->m_filter[index];
+ }
+
+private:
+
+ int get_index(const Address& addr, int hashNumber);
+
+ int hash_bitsel(uint64 value, int index, int jump, int maxBits, int numBits);
+
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_num_hashes;
+ int m_filter_size_bits;
+ int m_skip_bits;
+
+ int m_par_filter_size;
+ int m_par_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+
+ bool isParallel;
+
+};
+
+#endif
diff --git a/src/mem/ruby/system/MultiGrainBloomFilter.cc b/src/mem/ruby/system/MultiGrainBloomFilter.cc
new file mode 100644
index 000000000..f1e110b12
--- /dev/null
+++ b/src/mem/ruby/system/MultiGrainBloomFilter.cc
@@ -0,0 +1,172 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiGrainBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "MultiGrainBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+MultiGrainBloomFilter::MultiGrainBloomFilter(string str)
+{
+ string tail(str);
+
+ // split into the 2 filter sizes
+ string head = string_split(tail, '_');
+
+ // head contains size of 1st bloom filter, tail contains size of 2nd bloom filter
+
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_page_filter_size = atoi(tail.c_str());
+ m_page_filter_size_bits = log_int(m_page_filter_size);
+
+ m_filter.setSize(m_filter_size);
+ m_page_filter.setSize(m_page_filter_size);
+ clear();
+}
+
+MultiGrainBloomFilter::~MultiGrainBloomFilter(){
+}
+
+void MultiGrainBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+ for(int i=0; i < m_page_filter_size; ++i){
+ m_page_filter[i] = 0;
+ }
+}
+
+void MultiGrainBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void MultiGrainBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void MultiGrainBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void MultiGrainBloomFilter::set(const Address& addr)
+{
+  int i = get_block_index(addr);
+  int j = get_page_index(addr);
+  assert(i < m_filter_size);
+  assert(j < m_page_filter_size);
+  m_filter[i] = 1;
+  m_page_filter[j] = 1; // BUG FIX: was m_page_filter[i] — page filter must be indexed by the page index j
+
+}
+
+void MultiGrainBloomFilter::unset(const Address& addr)
+{
+ // not used
+}
+
+bool MultiGrainBloomFilter::isSet(const Address& addr)
+{
+  int i = get_block_index(addr);
+  int j = get_page_index(addr);
+  assert(i < m_filter_size);
+  assert(j < m_page_filter_size);
+  // we have to have both indices set
+  return (m_filter[i] && m_page_filter[j]); // BUG FIX: was m_page_filter[i] — must test the page-grain bit at j
+}
+
+int MultiGrainBloomFilter::getCount(const Address& addr)
+{
+ // not used
+ return 0;
+}
+
+int MultiGrainBloomFilter::getTotalCount()
+{
+  int count = 0;
+
+  for (int i = 0; i < m_filter_size; i++) {
+    count += m_filter[i];
+  }
+
+  for(int i=0; i < m_page_filter_size; ++i){
+    count += m_page_filter[i]; // BUG FIX: was "count += m_page_filter[i] = 0" — that assignment zeroed the page filter and always added 0
+  }
+
+  return count;
+}
+
+int MultiGrainBloomFilter::getIndex(const Address& addr)
+{
+ return 0;
+ // TODO
+}
+
+int MultiGrainBloomFilter::readBit(const int index) {
+ return 0;
+ // TODO
+}
+
+void MultiGrainBloomFilter::writeBit(const int index, const int value) {
+ // TODO
+}
+
+void MultiGrainBloomFilter::print(ostream& out) const
+{
+}
+
+int MultiGrainBloomFilter::get_block_index(const Address& addr)
+{
+  // grab a chunk of bits after the byte offset
+ return addr.bitSelect( RubyConfig::dataBlockBits(), RubyConfig::dataBlockBits() + m_filter_size_bits - 1);
+}
+
+int MultiGrainBloomFilter::get_page_index(const Address & addr)
+{
+  // grab a chunk of bits after the first chunk
+  // NOTE(review): this range starts at dataBlockBits()+m_filter_size_bits-1, which overlaps
+  // the last bit of get_block_index's range by one bit — confirm whether that is intended
+ return addr.bitSelect( RubyConfig::dataBlockBits() + m_filter_size_bits - 1,
+ RubyConfig::dataBlockBits() + m_filter_size_bits - 1 + m_page_filter_size_bits - 1);
+}
+
+
+
+
diff --git a/src/mem/ruby/system/MultiGrainBloomFilter.hh b/src/mem/ruby/system/MultiGrainBloomFilter.hh
new file mode 100644
index 000000000..692960853
--- /dev/null
+++ b/src/mem/ruby/system/MultiGrainBloomFilter.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiGrainBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef MULTIGRAIN_BLOOM_FILTER_H
+#define MULTIGRAIN_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+class MultiGrainBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~MultiGrainBloomFilter();
+ MultiGrainBloomFilter(string str);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+private:
+
+ int get_block_index(const Address& addr);
+ int get_page_index(const Address & addr);
+
+ // The block filter
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_filter_size_bits;
+ // The page number filter
+ Vector<int> m_page_filter;
+ int m_page_filter_size;
+ int m_page_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/NodeID.hh b/src/mem/ruby/system/NodeID.hh
new file mode 100644
index 000000000..23df8bb46
--- /dev/null
+++ b/src/mem/ruby/system/NodeID.hh
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NodeID.h
+ *
+ * Description:
+ *
+ * $Id: NodeID.h,v 3.3 2003/12/04 15:01:39 xu Exp $
+ *
+ */
+
+#ifndef NODEID_H
+#define NODEID_H
+
+#include "Global.hh"
+#include "util.hh"
+
+typedef int NodeID;
+
+extern inline
+string NodeIDToString (NodeID node) { return int_to_string(node); }
+
+#endif //NODEID_H
diff --git a/src/mem/ruby/system/NodePersistentTable.cc b/src/mem/ruby/system/NodePersistentTable.cc
new file mode 100644
index 000000000..df2076c1e
--- /dev/null
+++ b/src/mem/ruby/system/NodePersistentTable.cc
@@ -0,0 +1,194 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: NodePersistentTable.C 1.3 04/08/16 14:12:33-05:00 beckmann@c2-143.cs.wisc.edu $
+ *
+ */
+
+#include "NodePersistentTable.hh"
+#include "Set.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "AbstractChip.hh"
+#include "util.hh"
+
+// randomize so that handoffs are not locality-aware
+// int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
+int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+
+class NodePersistentTableEntry {
+public:
+ Set m_starving;
+ Set m_marked;
+ Set m_request_to_write;
+};
+
+NodePersistentTable::NodePersistentTable(AbstractChip* chip_ptr, int version)
+{
+ m_chip_ptr = chip_ptr;
+ m_map_ptr = new Map<Address, NodePersistentTableEntry>;
+ m_version = version;
+}
+
+NodePersistentTable::~NodePersistentTable()
+{
+ delete m_map_ptr;
+ m_map_ptr = NULL;
+ m_chip_ptr = NULL;
+}
+
+void NodePersistentTable::persistentRequestLock(const Address& address, NodeID llocker, AccessType type)
+{
+
+ // if (locker == m_chip_ptr->getID() )
+ // cout << "Chip " << m_chip_ptr->getID() << ": " << llocker << " requesting lock for " << address << endl;
+
+ NodeID locker = (NodeID) persistent_randomize[llocker];
+
+ assert(address == line_address(address));
+ if (!m_map_ptr->exist(address)) {
+ // Allocate if not present
+ NodePersistentTableEntry entry;
+ entry.m_starving.add(locker);
+ if (type == AccessType_Write) {
+ entry.m_request_to_write.add(locker);
+ }
+ m_map_ptr->add(address, entry);
+ } else {
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(!(entry.m_starving.isElement(locker))); // Make sure we're not already in the locked set
+
+ entry.m_starving.add(locker);
+ if (type == AccessType_Write) {
+ entry.m_request_to_write.add(locker);
+ }
+ assert(entry.m_marked.isSubset(entry.m_starving));
+ }
+}
+
+void NodePersistentTable::persistentRequestUnlock(const Address& address, NodeID uunlocker)
+{
+ // if (unlocker == m_chip_ptr->getID() )
+ // cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker << " requesting unlock for " << address << endl;
+
+ NodeID unlocker = (NodeID) persistent_randomize[uunlocker];
+
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(entry.m_starving.isElement(unlocker)); // Make sure we're in the locked set
+ assert(entry.m_marked.isSubset(entry.m_starving));
+ entry.m_starving.remove(unlocker);
+ entry.m_marked.remove(unlocker);
+ entry.m_request_to_write.remove(unlocker);
+ assert(entry.m_marked.isSubset(entry.m_starving));
+
+ // Deallocate if empty
+ if (entry.m_starving.isEmpty()) {
+ assert(entry.m_marked.isEmpty());
+ m_map_ptr->erase(address);
+ }
+}
+
+bool NodePersistentTable::okToIssueStarving(const Address& address) const
+{
+  assert(address == line_address(address));
+  if (!m_map_ptr->exist(address)) {
+    return true; // No entry present
+  } else if (m_map_ptr->lookup(address).m_starving.isElement(m_chip_ptr->getID())) {
+    return false; // We can't issue another lockdown until our previous unlock has occurred
+  } else {
+    return (m_map_ptr->lookup(address).m_marked.isEmpty());
+  }
+}
+
+NodeID NodePersistentTable::findSmallest(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ // cout << "Node " << m_chip_ptr->getID() << " returning " << persistent_randomize[entry.m_starving.smallestElement()] << " for findSmallest(" << address << ")" << endl;
+ return (NodeID) persistent_randomize[entry.m_starving.smallestElement()];
+}
+
+AccessType NodePersistentTable::typeOfSmallest(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ if (entry.m_request_to_write.isElement(entry.m_starving.smallestElement())) {
+ return AccessType_Write;
+ } else {
+ return AccessType_Read;
+ }
+}
+
+void NodePersistentTable::markEntries(const Address& address)
+{
+ assert(address == line_address(address));
+ if (m_map_ptr->exist(address)) {
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(entry.m_marked.isEmpty()); // None should be marked
+ entry.m_marked = entry.m_starving; // Mark all the nodes currently in the table
+ }
+}
+
+bool NodePersistentTable::isLocked(const Address& address) const
+{
+ assert(address == line_address(address));
+ // If an entry is present, it must be locked
+ return (m_map_ptr->exist(address));
+}
+
+int NodePersistentTable::countStarvingForAddress(const Address& address) const
+{
+  if (m_map_ptr->exist(address)) {
+    const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+    return (entry.m_starving.count());
+  }
+  else {
+    return 0;
+  }
+}
+
+int NodePersistentTable::countReadStarvingForAddress(const Address& address) const
+{
+  // Read starvers = total starvers minus those requesting write access.
+  if (m_map_ptr->exist(address)) {
+    const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+    return (entry.m_starving.count() - entry.m_request_to_write.count());
+  }
+  else {
+    return 0;
+  }
+}
+
+
diff --git a/src/mem/ruby/system/NodePersistentTable.hh b/src/mem/ruby/system/NodePersistentTable.hh
new file mode 100644
index 000000000..ac25552b8
--- /dev/null
+++ b/src/mem/ruby/system/NodePersistentTable.hh
@@ -0,0 +1,99 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: NodePersistentTable.h 1.3 04/08/16 14:12:33-05:00 beckmann@c2-143.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#ifndef NodePersistentTable_H
+#define NodePersistentTable_H
+
+#include "Global.hh"
+#include "NodeID.hh"
+#include "AccessType.hh"
+
+class AbstractChip;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+class Address;
+class NodePersistentTableEntry;
+
+class NodePersistentTable {
+public:
+ // Constructors
+ NodePersistentTable(AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~NodePersistentTable();
+
+ // Public Methods
+ void persistentRequestLock(const Address& address, NodeID locker, AccessType type);
+ void persistentRequestUnlock(const Address& address, NodeID unlocker);
+ bool okToIssueStarving(const Address& address) const;
+ NodeID findSmallest(const Address& address) const;
+ AccessType typeOfSmallest(const Address& address) const;
+ void markEntries(const Address& address);
+ bool isLocked(const Address& addr) const;
+ int countStarvingForAddress(const Address& addr) const;
+ int countReadStarvingForAddress(const Address& addr) const;
+
+ static void printConfig(ostream& out) {}
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ NodePersistentTable(const NodePersistentTable& obj);
+ NodePersistentTable& operator=(const NodePersistentTable& obj);
+
+ // Data Members (m_prefix)
+ Map<Address, NodePersistentTableEntry>* m_map_ptr;
+ AbstractChip* m_chip_ptr;
+ int m_version;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const NodePersistentTable& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const NodePersistentTable& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //NodePersistentTable_H
diff --git a/src/mem/ruby/system/NonCountingBloomFilter.cc b/src/mem/ruby/system/NonCountingBloomFilter.cc
new file mode 100644
index 000000000..81e4adbcd
--- /dev/null
+++ b/src/mem/ruby/system/NonCountingBloomFilter.cc
@@ -0,0 +1,145 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NonCountingBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "NonCountingBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+NonCountingBloomFilter::NonCountingBloomFilter(string str)
+{
+  string tail(str);
+  string head = string_split(tail, '_');
+
+  // head contains filter size, tail contains bit offset from block number
+  // (removed an unused local that queried RubyConfig::numberofSMTThreads())
+  m_filter_size = atoi(head.c_str());
+  m_offset = atoi(tail.c_str());
+  m_filter_size_bits = log_int(m_filter_size);
+
+
+  m_filter.setSize(m_filter_size);
+  clear();
+}
+
+NonCountingBloomFilter::~NonCountingBloomFilter(){
+}
+
+void NonCountingBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void NonCountingBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void NonCountingBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void NonCountingBloomFilter::merge(AbstractBloomFilter * other_filter){
+  // assumes both filters are the same size!
+  NonCountingBloomFilter * temp = static_cast<NonCountingBloomFilter*>(other_filter);
+  for(int i=0; i < m_filter_size; ++i){
+    m_filter[i] |= (*temp)[i];
+  }
+
+}
+
+void NonCountingBloomFilter::set(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 1;
+}
+
+void NonCountingBloomFilter::unset(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 0;
+}
+
+bool NonCountingBloomFilter::isSet(const Address& addr)
+{
+ int i = get_index(addr);
+ return (m_filter[i]);
+}
+
+
+int NonCountingBloomFilter::getCount(const Address& addr)
+{
+ return m_filter[get_index(addr)];
+}
+
+int NonCountingBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ count += m_filter[i];
+ }
+ return count;
+}
+
+void NonCountingBloomFilter::print(ostream& out) const
+{
+}
+
+int NonCountingBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+int NonCountingBloomFilter::readBit(const int index) {
+ return m_filter[index];
+}
+
+void NonCountingBloomFilter::writeBit(const int index, const int value) {
+ m_filter[index] = value;
+}
+
+int NonCountingBloomFilter::get_index(const Address& addr)
+{
+ return addr.bitSelect( RubyConfig::dataBlockBits() + m_offset,
+ RubyConfig::dataBlockBits() + m_offset + m_filter_size_bits - 1);
+}
+
+
diff --git a/src/mem/ruby/system/NonCountingBloomFilter.hh b/src/mem/ruby/system/NonCountingBloomFilter.hh
new file mode 100644
index 000000000..f2912c08c
--- /dev/null
+++ b/src/mem/ruby/system/NonCountingBloomFilter.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NonCountingBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef NONCOUNTING_BLOOM_FILTER_H
+#define NONCOUNTING_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+class NonCountingBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~NonCountingBloomFilter();
+ NonCountingBloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+ int operator[](const int index) const{
+ return this->m_filter[index];
+ }
+
+private:
+
+ int get_index(const Address& addr);
+
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_offset;
+ int m_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/PerfectCacheMemory.hh b/src/mem/ruby/system/PerfectCacheMemory.hh
new file mode 100644
index 000000000..590b265c4
--- /dev/null
+++ b/src/mem/ruby/system/PerfectCacheMemory.hh
@@ -0,0 +1,239 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PerfectCacheMemory.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef PERFECTCACHEMEMORY_H
+#define PERFECTCACHEMEMORY_H
+
+#include "Global.hh"
+#include "Map.hh"
+#include "AccessPermission.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "interface.hh"
+#include "AbstractChip.hh"
+
+template<class ENTRY>
+class PerfectCacheLineState {
+public:
+ PerfectCacheLineState() { m_permission = AccessPermission_NUM; }
+ AccessPermission m_permission;
+ ENTRY m_entry;
+};
+
+template<class ENTRY>
+class PerfectCacheMemory {
+public:
+
+ // Constructors
+ PerfectCacheMemory(AbstractChip* chip_ptr);
+
+ // Destructor
+ //~PerfectCacheMemory();
+
+ // Public Methods
+
+ static void printConfig(ostream& out);
+
+ // perform a cache access and see if we hit or not. Return true on
+ // a hit.
+ bool tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry);
+
+ // tests to see if an address is present in the cache
+ bool isTagPresent(const Address& address) const;
+
+ // Returns true if there is:
+ // a) a tag match on this address or there is
+ // b) an Invalid line in the same cache "way"
+ bool cacheAvail(const Address& address) const;
+
+ // find an Invalid entry and sets the tag appropriate for the address
+ void allocate(const Address& address);
+
+ void deallocate(const Address& address);
+
+ // Returns with the physical address of the conflicting cache line
+ Address cacheProbe(const Address& newAddress) const;
+
+ // looks an address up in the cache
+ ENTRY& lookup(const Address& address);
+ const ENTRY& lookup(const Address& address) const;
+
+ // Get/Set permission of cache block
+ AccessPermission getPermission(const Address& address) const;
+ void changePermission(const Address& address, AccessPermission new_perm);
+
+ // Print cache contents
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ PerfectCacheMemory(const PerfectCacheMemory& obj);
+ PerfectCacheMemory& operator=(const PerfectCacheMemory& obj);
+
+ // Data Members (m_prefix)
+ Map<Address, PerfectCacheLineState<ENTRY> > m_map;
+ AbstractChip* m_chip_ptr;
+};
+
+// Output operator declaration
+//ostream& operator<<(ostream& out, const PerfectCacheMemory<ENTRY>& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+template<class ENTRY>
+extern inline
+ostream& operator<<(ostream& out, const PerfectCacheMemory<ENTRY>& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+
+// ****************************************************************
+
+template<class ENTRY>
+extern inline
+PerfectCacheMemory<ENTRY>::PerfectCacheMemory(AbstractChip* chip_ptr)
+{
+ m_chip_ptr = chip_ptr;
+}
+
+// STATIC METHODS
+
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::printConfig(ostream& out)
+{
+}
+
+// PUBLIC METHODS
+
+template<class ENTRY>
+extern inline
+bool PerfectCacheMemory<ENTRY>::tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry)
+{
+ ERROR_MSG("not implemented");
+}
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+extern inline
+bool PerfectCacheMemory<ENTRY>::isTagPresent(const Address& address) const
+{
+ return m_map.exist(line_address(address));
+}
+
+template<class ENTRY>
+extern inline
+bool PerfectCacheMemory<ENTRY>::cacheAvail(const Address& address) const
+{
+ return true;
+}
+
+// find an Invalid or already allocated entry and sets the tag
+// appropriate for the address
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::allocate(const Address& address)
+{
+ PerfectCacheLineState<ENTRY> line_state;
+ line_state.m_permission = AccessPermission_Busy;
+ line_state.m_entry = ENTRY();
+ m_map.add(line_address(address), line_state);
+}
+
+// deallocate entry
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::deallocate(const Address& address)
+{
+ m_map.erase(line_address(address));
+}
+
+// Returns with the physical address of the conflicting cache line
+template<class ENTRY>
+extern inline
+Address PerfectCacheMemory<ENTRY>::cacheProbe(const Address& newAddress) const
+{
+ ERROR_MSG("cacheProbe called in perfect cache");
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+extern inline
+ENTRY& PerfectCacheMemory<ENTRY>::lookup(const Address& address)
+{
+ return m_map.lookup(line_address(address)).m_entry;
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+extern inline
+const ENTRY& PerfectCacheMemory<ENTRY>::lookup(const Address& address) const
+{
+ return m_map.lookup(line_address(address)).m_entry;
+}
+
+template<class ENTRY>
+extern inline
+AccessPermission PerfectCacheMemory<ENTRY>::getPermission(const Address& address) const
+{
+ return m_map.lookup(line_address(address)).m_permission;
+}
+
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::changePermission(const Address& address, AccessPermission new_perm)
+{
+  Address line_address = address;
+  line_address.makeLineAddress();
+  PerfectCacheLineState<ENTRY>& line_state = m_map.lookup(line_address);
+  // overwrite the permission (the previous value was read but never used)
+  line_state.m_permission = new_perm;
+}
+
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::print(ostream& out) const
+{
+}
+
+#endif //PERFECTCACHEMEMORY_H
diff --git a/src/mem/ruby/system/PersistentArbiter.cc b/src/mem/ruby/system/PersistentArbiter.cc
new file mode 100644
index 000000000..a0bbf6979
--- /dev/null
+++ b/src/mem/ruby/system/PersistentArbiter.cc
@@ -0,0 +1,165 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "PersistentArbiter.hh"
+#include "Address.hh"
+#include "AbstractChip.hh"
+#include "util.hh"
+
+PersistentArbiter::PersistentArbiter(AbstractChip* chip_ptr)
+{
+ m_chip_ptr = chip_ptr;
+
+ // wastes entries, but who cares
+ m_entries.setSize(RubyConfig::numberOfProcessors());
+
+ for (int i = 0; i < m_entries.size(); i++) {
+ m_entries[i].valid = false;
+ }
+
+ m_busy = false;
+ m_locker = -1;
+
+}
+
+PersistentArbiter::~PersistentArbiter()
+{
+ m_chip_ptr = NULL;
+}
+
+
+void PersistentArbiter::addLocker(NodeID id, Address addr, AccessType type) {
+ //cout << "Arbiter " << getArbiterId() << " adding locker " << id << " " << addr << endl;
+ assert(m_entries[id].valid == false);
+ m_entries[id].valid = true;
+ m_entries[id].address = addr;
+ m_entries[id].type = type;
+ m_entries[id].localId = id;
+
+}
+
+void PersistentArbiter::removeLocker(NodeID id) {
+ //cout << "Arbiter " << getArbiterId() << " removing locker " << id << " " << m_entries[id].address << endl;
+ assert(m_entries[id].valid == true);
+ m_entries[id].valid = false;
+
+ if (!lockersExist()) {
+ m_busy = false;
+ }
+}
+
+bool PersistentArbiter::successorRequestPresent(Address addr, NodeID id) {
+ for (int i = (id + 1); i < m_entries.size(); i++) {
+ if (m_entries[i].address == addr && m_entries[i].valid) {
+ //cout << "m_entries[" << id << ", address " << m_entries[id].address << " is equal to " << addr << endl;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool PersistentArbiter::lockersExist() {
+ for (int i = 0; i < m_entries.size(); i++) {
+ if (m_entries[i].valid == true) {
+ return true;
+ }
+ }
+ //cout << "no lockers found" << endl;
+ return false;
+}
+
+void PersistentArbiter::advanceActiveLock() {
+  assert(lockersExist());
+
+  //cout << "arbiter advancing lock from " << m_locker;
+  m_busy = false;
+
+  if (m_locker < (m_entries.size() - 1)) {
+    for (int i = (m_locker+1); i < m_entries.size(); i++) {
+      if (m_entries[i].valid == true) {
+        m_locker = i;
+        m_busy = true;
+        //cout << " to " << m_locker << endl;
+        return;
+      }
+    }
+  }
+
+  if (!m_busy) {
+    for (int i = 0; i < m_entries.size(); i++) {
+      if (m_entries[i].valid == true) {
+        m_locker = i;
+        m_busy = true;
+        //cout << " to " << m_locker << endl;
+        return;
+      }
+    }
+
+    assert(m_busy);  // unreachable: lockersExist() guarantees a valid entry above
+  }
+}
+
+Address PersistentArbiter::getActiveLockAddress() {
+  assert( m_entries[m_locker].valid == true );
+  return m_entries[m_locker].address;
+}
+
+
+NodeID PersistentArbiter::getArbiterId() {
+ return m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip();
+}
+
+bool PersistentArbiter::isBusy() {
+ return m_busy;
+}
+
+NodeID PersistentArbiter::getActiveLocalId() {
+  assert( m_entries[m_locker].valid == true );
+  return m_entries[m_locker].localId;
+}
+
+void PersistentArbiter::setIssuedAddress(Address addr) {
+ m_issued_address = addr;
+}
+
+bool PersistentArbiter::isIssuedAddress(Address addr) {
+ return (m_issued_address == addr);
+}
+
+void PersistentArbiter::print(ostream& out) const {
+
+ out << "[";
+ for (int i = 0; i < m_entries.size(); i++) {
+ if (m_entries[i].valid == true) {
+ out << "( " << m_entries[i].localId << ", " << m_entries[i].address << ") ";
+ }
+ }
+ out << "]" << endl;
+
+}
diff --git a/src/mem/ruby/system/PersistentArbiter.hh b/src/mem/ruby/system/PersistentArbiter.hh
new file mode 100644
index 000000000..0654e3a9e
--- /dev/null
+++ b/src/mem/ruby/system/PersistentArbiter.hh
@@ -0,0 +1,108 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PersistentArbiter.h
+ *
+ * Description:
+ *
+ * Used for hierarchical distributed persistent request scheme
+ *
+ */
+
+#ifndef PERSISTENTARBITER_H
+#define PERSISTENTARBITER_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "AbstractChip.hh"
+#include "AccessPermission.hh"
+#include "AccessType.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "interface.hh"
+
+// One slot in the arbiter's lock table: the requesting node, the line it
+// wants, and the access type of its request.
+struct ArbiterEntry {
+  bool valid;       // slot in use
+  Address address;  // line being locked
+  AccessType type;  // read or write request
+  NodeID localId;   // requesting node (chip-local id)
+};
+
+// Arbitrates persistent requests for a chip in the hierarchical
+// distributed persistent-request scheme: nodes register lock requests
+// (addLocker/removeLocker) and the arbiter rotates the active lock
+// among valid entries (advanceActiveLock / getActiveLocalId).
+class PersistentArbiter {
+public:
+
+  // Constructors
+  PersistentArbiter(AbstractChip* chip_ptr);
+
+  // Destructor
+  ~PersistentArbiter();
+
+  // Public Methods
+
+  // Register a lock request from node 'id' for line 'addr'.
+  void addLocker(NodeID id, Address addr, AccessType type);
+  // Remove node 'id's request from the table.
+  void removeLocker(NodeID id);
+  // NOTE(review): presumably tests for a later request on 'addr' from a
+  // node other than 'id' -- confirm against the implementation.
+  bool successorRequestPresent(Address addr, NodeID id);
+  // True if any valid entry exists in the table.
+  bool lockersExist();
+  // Hand the active lock to the next valid entry (circular scan).
+  void advanceActiveLock();
+  // Address / holder of the currently-active lock.
+  Address getActiveLockAddress();
+  NodeID getArbiterId();
+  bool isBusy();
+
+  // Track the most recently issued persistent-request address.
+  void setIssuedAddress(Address addr);
+  bool isIssuedAddress(Address addr);
+
+
+  Address getIssuedAddress() { return m_issued_address; }
+
+  static void printConfig(ostream& out) {}
+  void print(ostream& out) const;
+
+  NodeID getActiveLocalId();
+
+private:
+
+  Address m_issued_address;        // last issued persistent-request address
+  AbstractChip* m_chip_ptr;        // owning chip (not owned)
+  int m_locker;                    // index of the entry holding the lock
+  bool m_busy;                     // true while a lock is active
+  Vector<ArbiterEntry> m_entries;  // one slot per potential locker
+};
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const PersistentArbiter& obj)
+{
+  // Delegate to the member printer, then flush so interleaved debug
+  // output stays ordered.
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+
+#endif //PERSISTENTARBITER_H
diff --git a/src/mem/ruby/system/PersistentTable.cc b/src/mem/ruby/system/PersistentTable.cc
new file mode 100644
index 000000000..18c8b5736
--- /dev/null
+++ b/src/mem/ruby/system/PersistentTable.cc
@@ -0,0 +1,195 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "PersistentTable.hh"
+#include "NetDest.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "AbstractChip.hh"
+#include "util.hh"
+
+// randomize so that handoffs are not locality-aware
+// int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
+// int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+
+// Per-address record of starving nodes: who is starving, which of those
+// were marked (snapshotted) by markEntries(), and who wants write access.
+class PersistentTableEntry {
+public:
+  NetDest m_starving;          // nodes currently starving on this line
+  NetDest m_marked;            // subset of m_starving captured by markEntries()
+  NetDest m_request_to_write;  // starving nodes requesting write access
+};
+
+PersistentTable::PersistentTable(AbstractChip* chip_ptr, int version)
+  // Initialize members directly; the table owns its address map.
+  : m_map_ptr(new Map<Address, PersistentTableEntry>),
+    m_chip_ptr(chip_ptr),
+    m_version(version)
+{
+}
+
+PersistentTable::~PersistentTable()
+{
+  // Release the owned map; null both pointers to make any use-after-free
+  // fail fast.
+  delete m_map_ptr;
+  m_map_ptr = NULL;
+  m_chip_ptr = NULL;
+}
+
+void PersistentTable::persistentRequestLock(const Address& address, MachineID locker, AccessType type)
+{
+  // Record that 'locker' is starving for 'address': allocate a fresh
+  // entry on the first request, otherwise add the node to the existing
+  // starving set.  Write requests are additionally noted in
+  // m_request_to_write so typeOfSmallest() can report them.
+
+  // if (locker == m_chip_ptr->getID() )
+  // cout << "Chip " << m_chip_ptr->getID() << ": " << llocker << " requesting lock for " << address << endl;
+
+  // MachineID locker = (MachineID) persistent_randomize[llocker];
+
+  assert(address == line_address(address));
+  if (!m_map_ptr->exist(address)) {
+    // Allocate if not present
+    PersistentTableEntry entry;
+    entry.m_starving.add(locker);
+    if (type == AccessType_Write) {
+      entry.m_request_to_write.add(locker);
+    }
+    m_map_ptr->add(address, entry);
+  } else {
+    PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    assert(!(entry.m_starving.isElement(locker))); // Make sure we're not already in the locked set
+
+    entry.m_starving.add(locker);
+    if (type == AccessType_Write) {
+      entry.m_request_to_write.add(locker);
+    }
+    // Invariant: marked nodes are always a subset of starving nodes.
+    assert(entry.m_marked.isSubset(entry.m_starving));
+  }
+}
+
+void PersistentTable::persistentRequestUnlock(const Address& address, MachineID unlocker)
+{
+  // Remove 'unlocker' from every set for 'address' and deallocate the
+  // entry once no node is starving on the line.
+
+  // if (unlocker == m_chip_ptr->getID() )
+  // cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker << " requesting unlock for " << address << endl;
+
+  // MachineID unlocker = (MachineID) persistent_randomize[uunlocker];
+
+  assert(address == line_address(address));
+  assert(m_map_ptr->exist(address));
+  PersistentTableEntry& entry = m_map_ptr->lookup(address);
+  assert(entry.m_starving.isElement(unlocker)); // Make sure we're in the locked set
+  assert(entry.m_marked.isSubset(entry.m_starving));
+  entry.m_starving.remove(unlocker);
+  entry.m_marked.remove(unlocker);
+  entry.m_request_to_write.remove(unlocker);
+  // Invariant must still hold after removal.
+  assert(entry.m_marked.isSubset(entry.m_starving));
+
+  // Deallocate if empty
+  if (entry.m_starving.isEmpty()) {
+    assert(entry.m_marked.isEmpty());
+    m_map_ptr->erase(address);
+  }
+}
+
+bool PersistentTable::okToIssueStarving(const Address& address) const
+{
+  // A node may issue a new persistent (starving) request for 'address'
+  // unless it already has one outstanding, or marked nodes from a prior
+  // round still remain to be serviced.
+  assert(address == line_address(address));
+  if (!m_map_ptr->exist(address)) {
+    return true; // No entry present
+  } else if (m_map_ptr->lookup(address).m_starving.isElement( (MachineID) {MachineType_L1Cache, m_version})) {
+    return false; // We can't issue another lockdown until our previous unlock has occurred
+  } else {
+    return (m_map_ptr->lookup(address).m_marked.isEmpty());
+  }
+}
+
+MachineID PersistentTable::findSmallest(const Address& address) const
+{
+  // Deterministic arbitration: the lowest-numbered starving L1 wins.
+  assert(address == line_address(address));
+  assert(m_map_ptr->exist(address));
+  const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+  // cout << "Node " << m_chip_ptr->getID() << " returning " << persistent_randomize[entry.m_starving.smallestElement()] << " for findSmallest(" << address << ")" << endl;
+  // return (MachineID) persistent_randomize[entry.m_starving.smallestElement()];
+  return (MachineID) { MachineType_L1Cache, entry.m_starving.smallestElement() };
+}
+
+AccessType PersistentTable::typeOfSmallest(const Address& address) const
+{
+  // Report whether the winning (lowest-numbered starving) node asked for
+  // write access.
+  assert(address == line_address(address));
+  assert(m_map_ptr->exist(address));
+  const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+  MachineID smallest = (MachineID) {MachineType_L1Cache, entry.m_starving.smallestElement()};
+  return entry.m_request_to_write.isElement(smallest) ? AccessType_Write : AccessType_Read;
+}
+
+void PersistentTable::markEntries(const Address& address)
+{
+  // Snapshot the current starving set into the marked set so later
+  // arrivals can be distinguished from this round's contenders.
+  assert(address == line_address(address));
+  if (!m_map_ptr->exist(address)) {
+    return; // nothing tracked at this address
+  }
+  PersistentTableEntry& entry = m_map_ptr->lookup(address);
+  assert(entry.m_marked.isEmpty()); // None should be marked
+  entry.m_marked = entry.m_starving;
+}
+
+bool PersistentTable::isLocked(const Address& address) const
+{
+  // The table only holds entries while some node is starving on the line.
+  assert(address == line_address(address));
+  // If an entry is present, it must be locked
+  return (m_map_ptr->exist(address));
+}
+
+int PersistentTable::countStarvingForAddress(const Address& address) const
+{
+  // Number of nodes currently starving on this line; 0 if untracked.
+  if (!m_map_ptr->exist(address)) {
+    return 0;
+  }
+  return m_map_ptr->lookup(address).m_starving.count();
+}
+
+int PersistentTable::countReadStarvingForAddress(const Address& address) const
+{
+  // Number of nodes starving for read access on this line: total starving
+  // minus those that requested a write.  0 if the line is untracked.
+  // fix: removed the unused local 'count' left over in the original.
+  if (m_map_ptr->exist(address)) {
+    const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    return (entry.m_starving.count() - entry.m_request_to_write.count());
+  }
+  else {
+    return 0;
+  }
+}
+
+
diff --git a/src/mem/ruby/system/PersistentTable.hh b/src/mem/ruby/system/PersistentTable.hh
new file mode 100644
index 000000000..306f66e1d
--- /dev/null
+++ b/src/mem/ruby/system/PersistentTable.hh
@@ -0,0 +1,99 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef PersistentTable_H
+#define PersistentTable_H
+
+#include "Global.hh"
+#include "MachineID.hh"
+#include "AccessType.hh"
+
+class AbstractChip;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+class Address;
+class PersistentTableEntry;
+
+// Tracks, per cache line, which nodes are issuing persistent ("starving")
+// requests, arbitrating deterministically in favor of the smallest node id.
+// Entries are created on the first lock request for a line and destroyed
+// when the last starving node unlocks.
+class PersistentTable {
+public:
+  // Constructors
+  PersistentTable(AbstractChip* chip_ptr, int version);
+
+  // Destructor
+  ~PersistentTable();
+
+  // Public Methods
+  // Register / withdraw a starving request for a line.
+  void persistentRequestLock(const Address& address, MachineID locker, AccessType type);
+  void persistentRequestUnlock(const Address& address, MachineID unlocker);
+  // May this L1 (m_version) issue a new starving request for the line?
+  bool okToIssueStarving(const Address& address) const;
+  // Winner of arbitration (lowest-numbered starving node) and its type.
+  MachineID findSmallest(const Address& address) const;
+  AccessType typeOfSmallest(const Address& address) const;
+  // Snapshot the starving set into the marked set.
+  void markEntries(const Address& address);
+  bool isLocked(const Address& addr) const;
+  int countStarvingForAddress(const Address& addr) const;
+  int countReadStarvingForAddress(const Address& addr) const;
+
+  static void printConfig(ostream& out) {}
+
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  PersistentTable(const PersistentTable& obj);
+  PersistentTable& operator=(const PersistentTable& obj);
+
+  // Data Members (m_prefix)
+  Map<Address, PersistentTableEntry>* m_map_ptr;  // owned; line -> entry
+  AbstractChip* m_chip_ptr;                       // owning chip (not owned)
+  int m_version;                                  // this L1's version id
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const PersistentTable& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const PersistentTable& obj)
+{
+  // Print via the member routine and flush to keep debug output ordered.
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //PersistentTable_H
diff --git a/src/mem/ruby/system/PseudoLRUPolicy.hh b/src/mem/ruby/system/PseudoLRUPolicy.hh
new file mode 100644
index 000000000..9d4d13a95
--- /dev/null
+++ b/src/mem/ruby/system/PseudoLRUPolicy.hh
@@ -0,0 +1,110 @@
+
+#ifndef PSEUDOLRUPOLICY_H
+#define PSEUDOLRUPOLICY_H
+
+#include "AbstractReplacementPolicy.hh"
+
+/**
+ * Implementation of tree-based pseudo-LRU replacement
+ *
+ * Works for any associativity between 1 and 128.
+ *
+ * Also implements associativities that are not a power of 2 by
+ * ignoring paths that lead to a larger index (i.e. truncating the
+ * tree). Note that when this occurs, the algorithm becomes less
+ * fair, as it will favor indicies in the larger (by index) half of
+ * the associative set. This is most unfair when the nearest power of
+ * 2 is one below the associativy, and most fair when it is one above.
+ */
+
+class PseudoLRUPolicy : public AbstractReplacementPolicy {
+ public:
+
+  PseudoLRUPolicy(Index num_sets, Index assoc);
+  ~PseudoLRUPolicy();
+
+  // Mark 'way' most-recently-used in 'set' and record the access time.
+  void touch(Index set, Index way, Time time);
+  // Pseudo-LRU way to evict from 'set' (clamped when assoc is not a
+  // power of 2 -- see getVictim).
+  Index getVictim(Index set) const;
+
+ private:
+  unsigned int m_effective_assoc;    /** nearest (to ceiling) power of 2 */
+  unsigned int m_num_levels;         /** number of levels in the tree */
+  uint64* m_trees;                   /** bit representation of the trees, one for each set */
+};
+
+inline
+PseudoLRUPolicy::PseudoLRUPolicy(Index num_sets, Index assoc)
+  : AbstractReplacementPolicy(num_sets, assoc)
+{
+  // associativity cannot exceed capacity of tree representation
+  assert(num_sets > 0 && assoc > 1 && assoc <= (Index) sizeof(uint64)*4);
+
+  m_trees = NULL;
+  m_num_levels = 0;
+
+  // effective associativity is the ceiling power of 2
+  m_effective_assoc = 1;
+  while(m_effective_assoc < assoc){
+    m_effective_assoc <<= 1;
+  }
+  // number of tree levels = log2(effective associativity)
+  assoc = m_effective_assoc;
+  while(true){
+    assoc /= 2;
+    if(!assoc) break;
+    m_num_levels++;
+  }
+  assert(m_num_levels < sizeof(unsigned int)*4);
+  // fix: removed the unused 'num_tree_nodes' local and its dead pow() call.
+  m_trees = new uint64[m_num_sets];
+  for(unsigned int i=0; i< m_num_sets; i++){
+    m_trees[i] = 0;  // all-zero tree: every way starts equally evictable
+  }
+}
+
+inline
+PseudoLRUPolicy::~PseudoLRUPolicy()
+{
+  // delete[] is a no-op on a null pointer, so no guard is needed.
+  delete[] m_trees;
+}
+
+inline
+void PseudoLRUPolicy::touch(Index set, Index index, Time time){
+  // Mark way 'index' as most-recently-used in 'set': walk the tree from
+  // the root, recording at each node the branch taken toward 'index', so
+  // that getVictim() (which follows the opposite branches) steers away
+  // from this way.
+  assert(index >= 0 && index < m_assoc);
+  assert(set >= 0 && set < m_num_sets);
+
+  int tree_index = 0;
+  int node_val;
+  for(int i=m_num_levels -1; i>=0; i--){
+    node_val = (index >> i)&1;  // branch bit of 'index' at this level
+    if(node_val)
+      m_trees[set] |= node_val << tree_index;
+    else
+      m_trees[set] &= ~(1 << tree_index);
+    // descend: right child if bit set, else left child
+    tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1;
+  }
+  m_last_ref_ptr[set][index] = time;  // record reference time
+}
+
+inline
+Index PseudoLRUPolicy::getVictim(Index set) const {
+  // Walk the tree from the root, at each node following the branch
+  // opposite the recorded MRU bit, accumulating the victim's way index
+  // one bit per level.
+  // assert(m_assoc != 0);
+
+  Index index = 0;
+
+  int tree_index = 0;
+  int node_val;
+  for(unsigned int i=0;i<m_num_levels;i++){
+    node_val = (m_trees[set]>>tree_index)&1;
+    // bit clear -> victim lies in the upper half of this subtree
+    index += node_val?0:(m_effective_assoc >> (i+1));
+    tree_index = node_val? (tree_index*2)+1 : (tree_index*2)+2;
+  }
+  assert(index >= 0 && index < m_effective_assoc);
+
+  /* return either the found index or the max possible index */
+  /* NOTE: this is not a fair replacement when assoc is not a power of 2 */
+  return (index > (m_assoc-1)) ? m_assoc-1:index;
+}
+
+#endif // PSEUDOLRUPOLICY_H
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
new file mode 100644
index 000000000..59441ff81
--- /dev/null
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -0,0 +1,1161 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: Sequencer.C 1.131 2006/11/06 17:41:01-06:00 bobba@gratiano.cs.wisc.edu $
+ *
+ */
+
+#include "Global.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "Protocol.hh"
+#include "Profiler.hh"
+#include "CacheMemory.hh"
+#include "RubyConfig.hh"
+//#include "Tracer.hh"
+#include "AbstractChip.hh"
+#include "Chip.hh"
+#include "Tester.hh"
+#include "SubBlock.hh"
+#include "Protocol.hh"
+#include "Map.hh"
+#include "interface.hh"
+//#include "XactCommitArbiter.hh"
+// #include "TransactionInterfaceManager.hh"
+//#include "TransactionVersionManager.hh"
+//#include "LazyTransactionVersionManager.hh"
+
+//#define XACT_MGR g_system_ptr->getChip(m_chip_ptr->getID())->getTransactionInterfaceManager(m_version)
+
+Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
+  // One sequencer per (chip, version); allocates a read and a write
+  // request table for each SMT thread.
+  m_chip_ptr = chip_ptr;
+  m_version = version;
+
+  m_deadlock_check_scheduled = false;
+  m_outstanding_count = 0;
+
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
+  m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
+
+  for(int p=0; p < smt_threads; ++p){
+    m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>;
+    m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>;
+  }
+
+}
+
+Sequencer::~Sequencer() {
+  // Tear down the per-thread request tables, then the table arrays
+  // themselves.  delete / delete[] are no-ops on null pointers.
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int t=0; t < smt_threads; ++t){
+    delete m_writeRequestTable_ptr[t];
+    delete m_readRequestTable_ptr[t];
+  }
+  delete [] m_writeRequestTable_ptr;
+  delete [] m_readRequestTable_ptr;
+}
+
+void Sequencer::wakeup() {
+  // Periodic deadlock sweep: flag any outstanding request older than
+  // g_DEADLOCK_THRESHOLD, verify the outstanding count, and re-arm the
+  // check while requests remain.
+  // Check for deadlock of any of the requests
+  Time current_time = g_eventQueue_ptr->getTime();
+  bool deadlock = false;
+
+  // Check across all outstanding requests
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  int total_outstanding = 0;
+  for(int p=0; p < smt_threads; ++p){
+    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+    for (int i=0; i<keys.size(); i++) {
+      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+      if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
+        WARN_MSG("Possible Deadlock detected");
+        WARN_EXPR(request);
+        WARN_EXPR(m_chip_ptr->getID());
+        WARN_EXPR(m_version);
+        WARN_EXPR(keys.size());
+        WARN_EXPR(current_time);
+        WARN_EXPR(request.getTime());
+        WARN_EXPR(current_time - request.getTime());
+        WARN_EXPR(*m_readRequestTable_ptr[p]);
+        ERROR_MSG("Aborting");
+        deadlock = true;
+      }
+    }
+
+    keys = m_writeRequestTable_ptr[p]->keys();
+    for (int i=0; i<keys.size(); i++) {
+      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+      if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
+        WARN_MSG("Possible Deadlock detected");
+        WARN_EXPR(request);
+        WARN_EXPR(m_chip_ptr->getID());
+        WARN_EXPR(m_version);
+        WARN_EXPR(current_time);
+        WARN_EXPR(request.getTime());
+        WARN_EXPR(current_time - request.getTime());
+        WARN_EXPR(keys.size());
+        WARN_EXPR(*m_writeRequestTable_ptr[p]);
+        ERROR_MSG("Aborting");
+        deadlock = true;
+      }
+    }
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  } // across all request tables
+  // Sanity: the cached count must match the tables' contents.
+  assert(m_outstanding_count == total_outstanding);
+
+  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
+    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+  } else {
+    m_deadlock_check_scheduled = false;
+  }
+}
+
+//returns the total number of requests
+int Sequencer::getNumberOutstanding(){
+  // Running count maintained by insertRequest()/removeRequest().
+  return m_outstanding_count;
+}
+
+// returns the total number of demand requests
+int Sequencer::getNumberOutstandingDemand(){
+  // Count non-prefetch requests across all threads' tables, excluding
+  // transactional begin/commit pseudo-requests on the read side.
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  int total_demand = 0;
+  for(int p=0; p < smt_threads; ++p){
+    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+      // don't count transactional begin/commit requests
+      if(request.getType() != CacheRequestType_BEGIN_XACT && request.getType() != CacheRequestType_COMMIT_XACT){
+        if(request.getPrefetch() == PrefetchBit_No){
+          total_demand++;
+        }
+      }
+    }
+
+    keys = m_writeRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+      if(request.getPrefetch() == PrefetchBit_No){
+        total_demand++;
+      }
+    }
+  }
+
+  return total_demand;
+}
+
+// returns the total number of outstanding prefetch requests
+int Sequencer::getNumberOutstandingPrefetch(){
+  int total_prefetch = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    // prefetch loads
+    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      if(m_readRequestTable_ptr[p]->lookup(keys[i]).getPrefetch() == PrefetchBit_Yes){
+        total_prefetch++;
+      }
+    }
+    // prefetch stores
+    keys = m_writeRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      if(m_writeRequestTable_ptr[p]->lookup(keys[i]).getPrefetch() == PrefetchBit_Yes){
+        total_prefetch++;
+      }
+    }
+  }
+  return total_prefetch;
+}
+
+bool Sequencer::isPrefetchRequest(const Address & lineaddr){
+  // True iff the outstanding request on 'lineaddr' is a prefetch.
+  // Exactly one matching request is expected; not finding one is fatal.
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    // check load requests
+    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+      if(line_address(request.getAddress()) == lineaddr){
+        return (request.getPrefetch() == PrefetchBit_Yes);
+      }
+    }
+
+    // check store requests
+    keys = m_writeRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+      if(line_address(request.getAddress()) == lineaddr){
+        return (request.getPrefetch() == PrefetchBit_Yes);
+      }
+    }
+  }
+  // we should've found a matching request
+  cout << "isRequestPrefetch() ERROR request NOT FOUND : " << lineaddr << endl;
+  printProgress(cout);
+  assert(0);
+  // fix: unreachable return so this non-void function cannot fall off the
+  // end (undefined behavior) when asserts are compiled out under NDEBUG.
+  return false;
+}
+
+AccessModeType Sequencer::getAccessModeOfRequest(Address addr, int thread){
+  // Access mode of this thread's outstanding request for addr's line.
+  // fix: look up with line_address(addr) to match the exist() checks --
+  // the original passed the raw addr to lookup(), which disagrees with
+  // the existence test whenever addr is not already line-aligned.
+  if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(line_address(addr));
+    return request.getAccessMode();
+  } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(line_address(addr));
+    return request.getAccessMode();
+  } else {
+    printProgress(cout);
+    ERROR_MSG("Request not found in RequestTables");
+  }
+}
+
+Address Sequencer::getLogicalAddressOfRequest(Address addr, int thread){
+  // Logical address of this thread's outstanding request for addr's line.
+  // fix: look up with line_address(addr) to match the exist() checks --
+  // the original passed the raw addr to lookup(), which disagrees with
+  // the existence test whenever addr is not already line-aligned.
+  assert(thread >= 0);
+  if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(line_address(addr));
+    return request.getLogicalAddress();
+  } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(line_address(addr));
+    return request.getLogicalAddress();
+  } else {
+    printProgress(cout);
+    WARN_MSG("Request not found in RequestTables");
+    WARN_MSG(addr);
+    WARN_MSG(thread);
+    ASSERT(0);
+  }
+}
+
+// returns the ThreadID of the request
+int Sequencer::getRequestThreadID(const Address & addr){
+  // Scan every thread's tables for 'addr'; exactly one match is required.
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  int thread = -1;
+  int num_found = 0;
+  for(int p=0; p < smt_threads; ++p){
+    if(m_readRequestTable_ptr[p]->exist(addr)){
+      num_found++;
+      thread = p;
+    }
+    if(m_writeRequestTable_ptr[p]->exist(addr)){
+      num_found++;
+      thread = p;
+    }
+  }
+  if(num_found != 1){
+    cout << "getRequestThreadID ERROR too many matching requests addr = " << addr << endl;
+    printProgress(cout);
+  }
+  ASSERT(num_found == 1);
+  ASSERT(thread != -1);
+
+  return thread;
+}
+
+// given a line address, return the request's physical address
+Address Sequencer::getRequestPhysicalAddress(const Address & lineaddr){
+  // Scan every thread's tables for the line; exactly one match is
+  // required, and its full (un-truncated) physical address is returned.
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  Address physaddr;
+  int num_found = 0;
+  for(int p=0; p < smt_threads; ++p){
+    if(m_readRequestTable_ptr[p]->exist(lineaddr)){
+      num_found++;
+      physaddr = (m_readRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
+    }
+    if(m_writeRequestTable_ptr[p]->exist(lineaddr)){
+      num_found++;
+      physaddr = (m_writeRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
+    }
+  }
+  if(num_found != 1){
+    cout << "getRequestPhysicalAddress ERROR too many matching requests addr = " << lineaddr << endl;
+    printProgress(cout);
+  }
+  ASSERT(num_found == 1);
+
+  return physaddr;
+}
+
+void Sequencer::printProgress(ostream& out) const{
+  // Debug dump: every outstanding read and write request per SMT thread,
+  // plus demand/prefetch totals.
+  int total_demand = 0;
+  out << "Sequencer Stats Version " << m_version << endl;
+  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
+  out << "---------------" << endl;
+  out << "outstanding requests" << endl;
+
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    Vector<Address> rkeys = m_readRequestTable_ptr[p]->keys();
+    int read_size = rkeys.size();
+    out << "proc " << m_chip_ptr->getID() << " thread " << p << " Read Requests = " << read_size << endl;
+    // print the request table
+    for(int i=0; i < read_size; ++i){
+      CacheMsg & request = m_readRequestTable_ptr[p]->lookup(rkeys[i]);
+      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << rkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
+      if( request.getPrefetch() == PrefetchBit_No ){
+        total_demand++;
+      }
+    }
+
+    Vector<Address> wkeys = m_writeRequestTable_ptr[p]->keys();
+    int write_size = wkeys.size();
+    out << "proc " << m_chip_ptr->getID() << " thread " << p << " Write Requests = " << write_size << endl;
+    // print the request table
+    for(int i=0; i < write_size; ++i){
+      CacheMsg & request = m_writeRequestTable_ptr[p]->lookup(wkeys[i]);
+      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
+      if( request.getPrefetch() == PrefetchBit_No ){
+        total_demand++;
+      }
+    }
+
+    out << endl;
+  }
+  out << "Total Number Outstanding: " << m_outstanding_count << endl;
+  out << "Total Number Demand     : " << total_demand << endl;
+  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
+  out << endl;
+  out << endl;
+
+}
+
+void Sequencer::printConfig(ostream& out) {
+  // Report the consistency model and the request-window size.
+  out << (TSO ? "sequencer: Sequencer - TSO" : "sequencer: Sequencer - SC") << endl;
+  out << "  max_outstanding_requests: " << g_SEQUENCER_OUTSTANDING_REQUESTS << endl;
+}
+
+bool Sequencer::empty() const {
+  // True when no requests are outstanding in any thread's tables.
+  return m_outstanding_count == 0;
+}
+
+// Insert the request on the correct request table.  Return true if
+// the entry was already present.
+bool Sequencer::insertRequest(const CacheMsg& request) {
+  // Stores/atomics go to the write table, everything else to the read
+  // table; an existing entry for the same line is overwritten and
+  // reported via the return value.
+  int thread = request.getThreadID();
+  assert(thread >= 0);
+  int total_outstanding = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  }
+  // Sanity: cached count matches the tables before insertion.
+  assert(m_outstanding_count == total_outstanding);
+
+  // See if we should schedule a deadlock check
+  if (m_deadlock_check_scheduled == false) {
+    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+    m_deadlock_check_scheduled = true;
+  }
+
+  if ((request.getType() == CacheRequestType_ST) ||
+      (request.getType() == CacheRequestType_ST_XACT) ||
+      (request.getType() == CacheRequestType_LDX_XACT) ||
+      (request.getType() == CacheRequestType_ATOMIC)) {
+    if (m_writeRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
+      m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+      return true;
+    }
+    m_writeRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
+    m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+    m_outstanding_count++;
+  } else {
+    if (m_readRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
+      m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+      return true;
+    }
+    m_readRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
+    m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+    m_outstanding_count++;
+  }
+
+  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
+
+  // Sanity: cached count matches the tables after insertion.
+  total_outstanding = 0;
+  for(int p=0; p < smt_threads; ++p){
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  }
+
+  assert(m_outstanding_count == total_outstanding);
+  return false;
+}
+
+void Sequencer::removeRequest(const CacheMsg& request) {
+  // Remove a completed request from the table insertRequest() placed it
+  // in, keeping the cached outstanding count consistent.
+  int thread = request.getThreadID();
+  assert(thread >= 0);
+  int total_outstanding = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  }
+  assert(m_outstanding_count == total_outstanding);
+
+  // Same type partitioning as insertRequest: stores/atomics in the write
+  // table, everything else in the read table.
+  if ((request.getType() == CacheRequestType_ST) ||
+      (request.getType() == CacheRequestType_ST_XACT) ||
+      (request.getType() == CacheRequestType_LDX_XACT) ||
+      (request.getType() == CacheRequestType_ATOMIC)) {
+    m_writeRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
+  } else {
+    m_readRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
+  }
+  m_outstanding_count--;
+
+  total_outstanding = 0;
+  for(int p=0; p < smt_threads; ++p){
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  }
+  assert(m_outstanding_count == total_outstanding);
+}
+
+// Write-completion callback chain: the overloads below successively
+// fill in defaults (empty DataBlock, NULL machine type, no prefetch)
+// and select the SMT thread whose write request for this line is
+// oldest, ending in the 4-argument overload that retires the request
+// and invokes hitCallback.
+void Sequencer::writeCallback(const Address& address) {
+  DataBlock data;
+  writeCallback(address, data);
+}
+
+void Sequencer::writeCallback(const Address& address, DataBlock& data) {
+  // process oldest thread first
+  int thread = -1;
+  Time oldest_time = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int t=0; t < smt_threads; ++t){
+    if(m_writeRequestTable_ptr[t]->exist(address)){
+      CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
+      if(thread == -1 || (request.getTime() < oldest_time) ){
+        thread = t;
+        oldest_time = request.getTime();
+      }
+    }
+  }
+  // make sure we found an oldest thread
+  ASSERT(thread != -1);
+
+  // NOTE(review): 'request' is re-fetched here but never used before
+  // delegating; it only re-asserts that the entry exists.
+  CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);
+
+  writeCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
+}
+
+// 5-argument overload: validates the address/thread and drops the
+// (unused here) PrefetchBit before delegating.
+void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
+
+  assert(address == line_address(address));
+  assert(thread >= 0);
+  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+
+  writeCallback(address, data, respondingMach, thread);
+
+}
+
+// Final stage: copy the request out of the table, retire it, check it
+// really was a write-class request, then run the common hit path.
+void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
+  assert(address == line_address(address));
+  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+  CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
+  assert( request.getThreadID() == thread);
+  // Copy taken by value above because removeRequest() deallocates the
+  // table entry the reference would otherwise point into.
+  removeRequest(request);
+
+  assert((request.getType() == CacheRequestType_ST) ||
+         (request.getType() == CacheRequestType_ST_XACT) ||
+         (request.getType() == CacheRequestType_LDX_XACT) ||
+         (request.getType() == CacheRequestType_ATOMIC));
+
+  hitCallback(request, data, respondingMach, thread);
+
+}
+
+// Read-completion callback chain, mirroring the writeCallback
+// overloads: defaults are filled in, the SMT thread with the oldest
+// outstanding read for this line is chosen, and the final overload
+// retires the request and runs the common hit path.
+void Sequencer::readCallback(const Address& address) {
+  DataBlock data;
+  readCallback(address, data);
+}
+
+void Sequencer::readCallback(const Address& address, DataBlock& data) {
+  // process oldest thread first
+  int thread = -1;
+  Time oldest_time = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int t=0; t < smt_threads; ++t){
+    if(m_readRequestTable_ptr[t]->exist(address)){
+      CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
+      if(thread == -1 || (request.getTime() < oldest_time) ){
+        thread = t;
+        oldest_time = request.getTime();
+      }
+    }
+  }
+  // make sure we found an oldest thread
+  ASSERT(thread != -1);
+
+  // NOTE(review): 'request' is unused past this point; the lookup only
+  // re-asserts the entry exists before delegating.
+  CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);
+
+  readCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
+}
+
+// 5-argument overload: validates and drops the unused PrefetchBit.
+void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
+
+  assert(address == line_address(address));
+  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+
+  readCallback(address, data, respondingMach, thread);
+}
+
+// Final stage: copy out the request (removeRequest deallocates the
+// table entry), retire it, and verify it was a read-class request.
+void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
+  assert(address == line_address(address));
+  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+
+  CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
+  assert( request.getThreadID() == thread );
+  removeRequest(request);
+
+  assert((request.getType() == CacheRequestType_LD) ||
+         (request.getType() == CacheRequestType_LD_XACT) ||
+         (request.getType() == CacheRequestType_IFETCH)
+         );
+
+  hitCallback(request, data, respondingMach, thread);
+}
+
+// Common completion path for every satisfied request (fast-path hits
+// and protocol completions alike): touches LRU state, profiles the
+// miss latency, and hands the sub-block to the driver.  For writes in
+// the non-TSO case the driver's modifications are merged back into
+// the cache's DataBlock.
+void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread) {
+  int size = request.getSize();
+  Address request_address = request.getAddress();
+  Address request_logical_address = request.getLogicalAddress();
+  Address request_line_address = line_address(request_address);
+  CacheRequestType type = request.getType();
+  int threadID = request.getThreadID();
+  Time issued_time = request.getTime();
+  // NOTE(review): logical_proc_no is computed but never used below.
+  int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);
+
+  // Set this cache entry to the most recently used
+  // (I-cache for fetches, D-cache otherwise; the unified cache is used
+  // when the protocol is single-level).
+  if (type == CacheRequestType_IFETCH) {
+    if (Protocol::m_TwoLevelCache) {
+      if (m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+        m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->setMRU(request_line_address);
+      }
+    }
+    else {
+      if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+        m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
+      }
+    }
+  } else {
+    if (Protocol::m_TwoLevelCache) {
+      if (m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+        m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->setMRU(request_line_address);
+      }
+    }
+    else {
+      if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+        m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
+      }
+    }
+  }
+
+  assert(g_eventQueue_ptr->getTime() >= issued_time);
+  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
+
+  if (PROTOCOL_DEBUG_TRACE) {
+    g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Done", "",
+                                                   int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
+  }
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
+  if (request.getPrefetch() == PrefetchBit_Yes) {
+    DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
+    g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
+    return; // Ignore the software prefetch, don't callback the driver
+  }
+
+  // Profile the miss latency for all non-zero demand misses
+  if (miss_latency != 0) {
+    g_system_ptr->getProfiler()->missLatency(miss_latency, type, respondingMach);
+
+    #if 0
+    uinteger_t tick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick"));
+    uinteger_t tick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick_cmpr"));
+    uinteger_t stick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick"));
+    uinteger_t stick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick_cmpr"));
+    cout << "END PROC " << m_version << hex << " tick = " << tick << " tick_cmpr = " << tick_cmpr << " stick = " << stick << " stick_cmpr = " << stick_cmpr << " cycle = "<< g_eventQueue_ptr->getTime() << dec << endl;
+    #endif
+
+  }
+
+  bool write =
+    (type == CacheRequestType_ST) ||
+    (type == CacheRequestType_ST_XACT) ||
+    (type == CacheRequestType_LDX_XACT) ||
+    (type == CacheRequestType_ATOMIC);
+
+  // Under TSO, completed writes drain through the store buffer rather
+  // than being handed back to the driver directly.
+  if (TSO && write) {
+    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data);
+  } else {
+
+    // Copy the correct bytes out of the cache line into the subblock
+    SubBlock subblock(request_address, request_logical_address, size);
+    subblock.mergeFrom(data); // copy the correct bytes from DataBlock in the SubBlock
+
+    // Scan the store buffer to see if there are any outstanding stores we need to collect
+    if (TSO) {
+      m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock);
+    }
+
+    // Call into the Driver (Tester or Simics) and let it read and/or modify the sub-block
+    g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);
+
+    // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
+    // (This is only triggered for the non-TSO case)
+    if (write) {
+      assert(!TSO);
+      subblock.mergeTo(data); // copy the correct bytes from SubBlock into the DataBlock
+    }
+  }
+}
+
+// Transactional-memory conflict callbacks.  These mirror the
+// read/write completion chains above: pick the SMT thread with the
+// oldest outstanding request for the line, retire it, and notify the
+// driver via conflictCallback instead of hitCallback.
+void Sequencer::readConflictCallback(const Address& address) {
+  // process oldest thread first
+  int thread = -1;
+  Time oldest_time = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int t=0; t < smt_threads; ++t){
+    if(m_readRequestTable_ptr[t]->exist(address)){
+      CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
+      if(thread == -1 || (request.getTime() < oldest_time) ){
+        thread = t;
+        oldest_time = request.getTime();
+      }
+    }
+  }
+  // make sure we found an oldest thread
+  ASSERT(thread != -1);
+
+  // NOTE(review): 'request' is unused past this point.
+  CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);
+
+  readConflictCallback(address, GenericMachineType_NULL, thread);
+}
+
+// Retire the conflicting read and verify it was a read-class request.
+void Sequencer::readConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) {
+  assert(address == line_address(address));
+  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+
+  CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
+  assert( request.getThreadID() == thread );
+  removeRequest(request);
+
+  assert((request.getType() == CacheRequestType_LD) ||
+         (request.getType() == CacheRequestType_LD_XACT) ||
+         (request.getType() == CacheRequestType_IFETCH)
+         );
+
+  conflictCallback(request, respondingMach, thread);
+}
+
+void Sequencer::writeConflictCallback(const Address& address) {
+  // process oldest thread first
+  int thread = -1;
+  Time oldest_time = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int t=0; t < smt_threads; ++t){
+    if(m_writeRequestTable_ptr[t]->exist(address)){
+      CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
+      if(thread == -1 || (request.getTime() < oldest_time) ){
+        thread = t;
+        oldest_time = request.getTime();
+      }
+    }
+  }
+  // make sure we found an oldest thread
+  ASSERT(thread != -1);
+
+  // NOTE(review): 'request' is unused past this point.
+  CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);
+
+  writeConflictCallback(address, GenericMachineType_NULL, thread);
+}
+
+// Retire the conflicting write and verify it was a write-class request.
+void Sequencer::writeConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) {
+  assert(address == line_address(address));
+  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+  CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
+  assert( request.getThreadID() == thread);
+  removeRequest(request);
+
+  assert((request.getType() == CacheRequestType_ST) ||
+         (request.getType() == CacheRequestType_ST_XACT) ||
+         (request.getType() == CacheRequestType_LDX_XACT) ||
+         (request.getType() == CacheRequestType_ATOMIC));
+
+  conflictCallback(request, respondingMach, thread);
+
+}
+
+// Notify the driver that a transactional request conflicted instead
+// of completing.  Only valid when transactional memory is enabled.
+// Unlike hitCallback, no data is merged: the sub-block is passed
+// empty and no cache state is updated.
+void Sequencer::conflictCallback(const CacheMsg& request, GenericMachineType respondingMach, int thread) {
+  assert(XACT_MEMORY);
+  int size = request.getSize();
+  Address request_address = request.getAddress();
+  Address request_logical_address = request.getLogicalAddress();
+  Address request_line_address = line_address(request_address);
+  CacheRequestType type = request.getType();
+  int threadID = request.getThreadID();
+  Time issued_time = request.getTime();
+  // NOTE(review): logical_proc_no is computed but never used below.
+  int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);
+
+  assert(g_eventQueue_ptr->getTime() >= issued_time);
+  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
+
+  if (PROTOCOL_DEBUG_TRACE) {
+    g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Conflict", "",
+                                                   int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
+  }
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
+  if (request.getPrefetch() == PrefetchBit_Yes) {
+    DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
+    g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
+    return; // Ignore the software prefetch, don't callback the driver
+  }
+
+  bool write =
+    (type == CacheRequestType_ST) ||
+    (type == CacheRequestType_ST_XACT) ||
+    (type == CacheRequestType_LDX_XACT) ||
+    (type == CacheRequestType_ATOMIC);
+
+  // Copy the correct bytes out of the cache line into the subblock
+  SubBlock subblock(request_address, request_logical_address, size);
+
+  // Call into the Driver (Tester or Simics)
+  g_system_ptr->getDriver()->conflictCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);
+
+  // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
+  // (This is only triggered for the non-TSO case)
+  if (write) {
+    assert(!TSO);
+  }
+}
+
+// Forward a debug-print request to the driver (Tester or Simics).
+void Sequencer::printDebug(){
+  //notify driver of debug
+  g_system_ptr->getDriver()->printDebug();
+}
+
+// Returns false if the sequencer cannot accept 'request' right now:
+// too many outstanding requests, an outstanding request of either
+// kind already exists for the same line on any SMT thread, or (under
+// TSO) the store buffer is full.  Returns true otherwise.
+bool Sequencer::isReady(const CacheMsg& request) const {
+
+  if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) {
+    //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl;
+    //printProgress(cout);
+    return false;
+  }
+  // NOTE(review): 'thread' and 'write' are computed but unused; the
+  // per-address check below deliberately spans all threads and both
+  // request kinds.
+  int thread = request.getThreadID();
+
+  // This code allows reads to be performed even when we have a write
+  // request outstanding for the line
+  bool write =
+    (request.getType() == CacheRequestType_ST) ||
+    (request.getType() == CacheRequestType_ST_XACT) ||
+    (request.getType() == CacheRequestType_LDX_XACT) ||
+    (request.getType() == CacheRequestType_ATOMIC);
+
+  // LUKE - disallow more than one request type per address
+  //  INVARIANT: at most one request type per address, per processor
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    if( m_writeRequestTable_ptr[p]->exist(line_address(request.getAddress())) ||
+        m_readRequestTable_ptr[p]->exist(line_address(request.getAddress())) ){
+      //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
+      //printProgress(cout);
+      return false;
+    }
+  }
+
+  if (TSO) {
+    return m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady();
+  }
+  return true;
+}
+
+// Called by Driver (Simics or Tester).
+// Entry point for new requests.  Under TSO, non-prefetch writes are
+// diverted into the store buffer; everything else goes through
+// doRequest (fast-path cache probe, then protocol issue on a miss).
+void Sequencer::makeRequest(const CacheMsg& request) {
+  //assert(isReady(request));
+  bool write = (request.getType() == CacheRequestType_ST) ||
+    (request.getType() == CacheRequestType_ST_XACT) ||
+    (request.getType() == CacheRequestType_LDX_XACT) ||
+    (request.getType() == CacheRequestType_ATOMIC);
+
+  if (TSO && (request.getPrefetch() == PrefetchBit_No) && write) {
+    assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
+    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(request);
+    return;
+  }
+
+  // NOTE(review): the hit/miss result is deliberately discarded here;
+  // doRequest handles both outcomes itself.
+  bool hit = doRequest(request);
+
+}
+
+// Attempt to satisfy 'request' without involving the protocol:
+// first a single-cycle L1 fast-path probe, then (for TSO loads) the
+// store buffer.  On failure the request is issued to the cache
+// controller.  Returns true if the request was satisfied immediately.
+bool Sequencer::doRequest(const CacheMsg& request) {
+  bool hit = false;
+  // Check the fast path
+  DataBlock* data_ptr;
+
+  int thread = request.getThreadID();
+
+  hit = tryCacheAccess(line_address(request.getAddress()),
+                       request.getType(),
+                       request.getProgramCounter(),
+                       request.getAccessMode(),
+                       request.getSize(),
+                       data_ptr);
+
+  // Data fast path is only taken when the single-cycle-dcache knob
+  // allows it; instruction fetches may always use the fast path.
+  if (hit && (request.getType() == CacheRequestType_IFETCH || !REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) ) {
+    DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path hit");
+    hitCallback(request, *data_ptr, GenericMachineType_L1Cache, thread);
+    return true;
+  }
+
+  #if 0
+  uinteger_t tick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick"));
+  uinteger_t tick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick_cmpr"));
+  uinteger_t stick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick"));
+  uinteger_t stick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick_cmpr"));
+  cout << "START PROC " << m_version << hex << " tick = " << tick << " tick_cmpr = " << tick_cmpr << " stick = " << stick << " stick_cmpr = " << stick_cmpr << " cycle = "<< g_eventQueue_ptr->getTime() << dec << endl;;
+  #endif
+
+  if (TSO && (request.getType() == CacheRequestType_LD || request.getType() == CacheRequestType_IFETCH)) {
+
+    // See if we can satisfy the load entirely from the store buffer
+    SubBlock subblock(line_address(request.getAddress()), request.getSize());
+    if (m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->trySubBlock(subblock)) {
+      DataBlock dummy;
+      hitCallback(request, dummy, GenericMachineType_NULL, thread); // Call with an 'empty' datablock, since the data is in the store buffer
+      return true;
+    }
+  }
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path miss");
+  issueRequest(request);
+  return hit;
+}
+
+// Record the request in the outstanding tables and, if it was not
+// already outstanding (insertRequest returned false), enqueue it on
+// the L1 cache controller's mandatory queue after the configured
+// sequencer-to-controller latency.
+void Sequencer::issueRequest(const CacheMsg& request) {
+  bool found = insertRequest(request);
+
+  if (!found) {
+    CacheMsg msg = request;
+    msg.getAddress() = line_address(request.getAddress()); // Make line address
+
+    // Fast Path L1 misses are profiled here - all non-fast path misses are profiled within the generated protocol code
+    if (!REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) {
+      g_system_ptr->getProfiler()->addPrimaryStatSample(msg, m_chip_ptr->getID());
+    }
+
+    if (PROTOCOL_DEBUG_TRACE) {
+      g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip() + m_version), -1, msg.getAddress(),"", "Begin", "", CacheRequestType_to_string(request.getType()));
+    }
+
+#if 0
+    // Commented out by nate binkert because I removed the trace stuff
+    if (g_system_ptr->getTracer()->traceEnabled()) {
+      g_system_ptr->getTracer()->traceRequest((m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), msg.getAddress(), msg.getProgramCounter(),
+                                              msg.getType(), g_eventQueue_ptr->getTime());
+    }
+#endif
+
+    Time latency = 0;  // initialized to a null value
+
+    latency = SEQUENCER_TO_CONTROLLER_LATENCY;
+
+    // Send the message to the cache controller
+    assert(latency > 0);
+    m_chip_ptr->m_L1Cache_mandatoryQueue_vec[m_version]->enqueue(msg, latency);
+
+  }  // !found
+}
+
+// Probe the appropriate L1 (I-cache for fetches, D-cache otherwise,
+// unified cache for single-level protocols) for a fast-path access.
+// On a hit, data_ptr is set to the cached DataBlock.  pc, access_mode
+// and size are accepted for interface symmetry but not consulted here.
+bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
+                               const Address& pc, AccessModeType access_mode,
+                               int size, DataBlock*& data_ptr) {
+  if (type == CacheRequestType_IFETCH) {
+    if (Protocol::m_TwoLevelCache) {
+      return m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+    }
+    else {
+      return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+    }
+  } else {
+    if (Protocol::m_TwoLevelCache) {
+      return m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+    }
+    else {
+      return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+    }
+  }
+}
+
+// Reset the issue timestamp of any outstanding load and/or store for
+// 'addr' on 'thread' to the current time (used to restart latency
+// accounting).  Supervisor-mode requests are reported but still reset.
+// NOTE(review): exist() is checked with line_address(addr) but
+// lookup() uses the raw addr — callers presumably pass a line
+// address already; confirm before relying on sub-line addresses.
+void Sequencer::resetRequestTime(const Address& addr, int thread){
+  assert(thread >= 0);
+  //reset both load and store requests, if they exist
+  if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
+    if( request.m_AccessMode != AccessModeType_UserMode){
+      cout << "resetRequestType ERROR read request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
+      printProgress(cout);
+    }
+    //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
+    request.setTime(g_eventQueue_ptr->getTime());
+  }
+  if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
+    if( request.m_AccessMode != AccessModeType_UserMode){
+      cout << "resetRequestType ERROR write request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
+      printProgress(cout);
+    }
+    //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
+    request.setTime(g_eventQueue_ptr->getTime());
+  }
+}
+
+// removes load request from queue
+// Convenience wrappers: look up the outstanding load/store for
+// (addr, thread) — which must exist and be a line address — and
+// retire it via removeRequest.
+void Sequencer::removeLoadRequest(const Address & addr, int thread){
+  removeRequest(getReadRequest(addr, thread));
+}
+
+void Sequencer::removeStoreRequest(const Address & addr, int thread){
+  removeRequest(getWriteRequest(addr, thread));
+}
+
+// returns the read CacheMsg
+// Precondition (asserted): addr is a line address and an entry exists.
+CacheMsg & Sequencer::getReadRequest( const Address & addr, int thread ){
+  Address temp = addr;
+  assert(thread >= 0);
+  assert(temp == line_address(temp));
+  assert(m_readRequestTable_ptr[thread]->exist(addr));
+  return m_readRequestTable_ptr[thread]->lookup(addr);
+}
+
+// returns the write CacheMsg; same preconditions as getReadRequest.
+CacheMsg & Sequencer::getWriteRequest( const Address & addr, int thread){
+  Address temp = addr;
+  assert(thread >= 0);
+  assert(temp == line_address(temp));
+  assert(m_writeRequestTable_ptr[thread]->exist(addr));
+  return m_writeRequestTable_ptr[thread]->lookup(addr);
+}
+
+// Dump the sequencer's outstanding-request state (counter plus each
+// SMT thread's read and write tables) to 'out' for debugging.
+void Sequencer::print(ostream& out) const {
+  out << "[Sequencer: " << m_chip_ptr->getID()
+      << ", outstanding requests: " << m_outstanding_count;
+
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    out << ", read request table[ " << p << " ]: " << *m_readRequestTable_ptr[p]
+        << ", write request table[ " << p << " ]: " << *m_writeRequestTable_ptr[p];
+  }
+  out << "]";
+}
+
+// this can be called from setState whenever coherence permissions are upgraded
+// when invoked, coherence violations will be checked for the given block
+// (compiled to a no-op unless CHECK_COHERENCE is defined)
+void Sequencer::checkCoherence(const Address& addr) {
+#ifdef CHECK_COHERENCE
+  g_system_ptr->checkGlobalCoherenceInvariant(addr);
+#endif
+}
+
+// Read size_in_bytes bytes at 'addr' into 'value' from the coherent
+// memory hierarchy: under Simics the bytes come from Simics physical
+// memory; otherwise the L1I, L1D, and (CMP) L2 caches are probed in
+// order, falling back to the directory's data block.
+// NOTE(review): the non-Simics path returns true regardless of
+// whether the line was found in a cache or only at the directory
+// ('found' is tracked but unused), and the Simics path returns false
+// — callers apparently should not rely on the return value.
+bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
+                                   unsigned int size_in_bytes ) {
+  if(g_SIMICS){
+    for(unsigned int i=0; i < size_in_bytes; i++) {
+      value[i] = SIMICS_read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version,
+                                              addr.getAddress() + i, 1 );
+    }
+    return false; // Do nothing?
+  } else {
+    bool found = false;
+    const Address lineAddr = line_address(addr);
+    DataBlock data;
+    PhysAddress paddr(addr);
+    DataBlock* dataPtr = &data;
+    Chip* n = dynamic_cast<Chip*>(m_chip_ptr);
+    // LUKE - use variable names instead of macros
+    assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
+    assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
+
+    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
+    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
+
+    if (Protocol::m_TwoLevelCache) {
+      if(Protocol::m_CMP){
+        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
+      }
+      else{
+        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
+      }
+    }
+
+    // Probe caches closest-first; the first hit supplies the bytes.
+    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
+      n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+      n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
+      found = true;
+      //    } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
+//       ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
+//       L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);
+
+//       int offset = addr.getOffset();
+//       for(int i=0; i<size_in_bytes; ++i){
+//         value[i] = tbeEntry.getDataBlk().getByte(offset + i);
+//       }
+
+//       found = true;
+    } else {
+      // Address not found: fall back to the owning directory's copy.
+      //cout << "  " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
+      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
+      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
+      for(unsigned int i=0; i<size_in_bytes; ++i){
+        int offset = addr.getOffset();
+        value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
+      }
+      // Address not found
+      //WARN_MSG("Couldn't find address");
+      //WARN_EXPR(addr);
+      found = false;
+    }
+    return true;
+  }
+}
+
+// Write size_in_bytes bytes from 'value' at 'addr' into the coherent
+// hierarchy: the first cache (L1I, L1D, CMP L2) that holds the line
+// is updated, otherwise the directory's data block is written.  When
+// a cache was updated, the write is read back via getRubyMemoryValue
+// and the first byte is cross-checked.  No-op (returns false) under
+// Simics.
+bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
+                                   unsigned int size_in_bytes) {
+  char test_buffer[64];
+
+  if(g_SIMICS){
+    return false; // Do nothing?
+  } else {
+    // idea here is that coherent cache should find the
+    // latest data, the update it
+    bool found = false;
+    const Address lineAddr = line_address(addr);
+    PhysAddress paddr(addr);
+    DataBlock data;
+    DataBlock* dataPtr = &data;
+    Chip* n = dynamic_cast<Chip*>(m_chip_ptr);
+
+    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
+    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
+    // LUKE - use variable names instead of macros
+    //cout << "number of L2caches per chip = " << RubyConfig::numberOfL2CachePerChip(m_version) << endl;
+    //cout << "L1I cache vec size = " << n->m_L1Cache_L1IcacheMemory_vec.size() << endl;
+    //cout << "L1D cache vec size = " << n->m_L1Cache_L1DcacheMemory_vec.size() << endl;
+    //cout << "L1cache_cachememory size = " << n->m_L1Cache_cacheMemory_vec.size() << endl;
+    //cout << "L1cache_l2cachememory size = " << n->m_L1Cache_L2cacheMemory_vec.size() << endl;
+    //  if (Protocol::m_TwoLevelCache) {
+//       if(Protocol::m_CMP){
+//         cout << "CMP L2 cache vec size = " << n->m_L2Cache_L2cacheMemory_vec.size() << endl;
+//       }
+//       else{
+//        cout << "L2 cache vec size = " << n->m_L1Cache_cacheMemory_vec.size() << endl;
+//       }
+//     }
+
+    assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
+    assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
+    if (Protocol::m_TwoLevelCache) {
+      if(Protocol::m_CMP){
+        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
+      }
+      else{
+        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
+      }
+    }
+
+    // Probe caches closest-first; the first hit takes the update.
+    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
+      n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+      n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
+      found = true;
+      //    } else if (n->TBE_TABLE_MEMBER_VARIABLE->isTagPresent(lineAddr)){
+//       L1Cache_TBE& tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);
+//       DataBlock tmpData;
+//       int offset = addr.getOffset();
+//       for(int i=0; i<size_in_bytes; ++i){
+//         tmpData.setByte(offset + i, value[i]);
+//       }
+//       tbeEntry.setDataBlk(tmpData);
+//       tbeEntry.setDirty(true);
+    } else {
+      // Address not found: write through to the owning directory.
+      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
+      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
+      for(unsigned int i=0; i<size_in_bytes; ++i){
+        int offset = addr.getOffset();
+        n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
+      }
+      found = false;
+    }
+
+    // Read-back sanity check (only the first byte is compared;
+    // test_buffer is 64 bytes, so size_in_bytes is assumed <= 64).
+    if (found){
+      found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
+      assert(found);
+      if(value[0] != test_buffer[0]){
+        WARN_EXPR((int) value[0]);
+        WARN_EXPR((int) test_buffer[0]);
+        ERROR_MSG("setRubyMemoryValue failed to set value.");
+      }
+    }
+
+    return true;
+  }
+}
diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh
new file mode 100644
index 000000000..5dd674655
--- /dev/null
+++ b/src/mem/ruby/system/Sequencer.hh
@@ -0,0 +1,170 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: Sequencer.h 1.70 2006/09/27 14:56:41-05:00 bobba@s1-01.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#ifndef SEQUENCER_H
+#define SEQUENCER_H
+
+#include "Global.hh"
+#include "RubyConfig.hh"
+#include "Consumer.hh"
+#include "CacheRequestType.hh"
+#include "AccessModeType.hh"
+#include "GenericMachineType.hh"
+#include "PrefetchBit.hh"
+#include "Map.hh"
+
+class DataBlock;
+class AbstractChip;
+class CacheMsg;
+class Address;
+class MachineID;
+
+// A Sequencer mediates between a processor (driver/tester) and the Ruby
+// memory system: it records outstanding load/store requests in per-thread
+// request tables, issues them to the cache hierarchy, and fires the
+// read/write callbacks below when the protocol completes (or conflicts)
+// a request.  Inherits Consumer so wakeup() can be scheduled on the
+// event queue for periodic deadlock detection.
+class Sequencer : public Consumer {
+public:
+ // Constructors
+ Sequencer(AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~Sequencer();
+
+ // Public Methods
+ void wakeup(); // Used only for deadlock detection
+
+ static void printConfig(ostream& out);
+
+ // returns total number of outstanding request (includes prefetches)
+ int getNumberOutstanding();
+ // return only total number of outstanding demand requests
+ int getNumberOutstandingDemand();
+ // return only total number of outstanding prefetch requests
+ int getNumberOutstandingPrefetch();
+
+ // remove load/store request from queue
+ void removeLoadRequest(const Address & addr, int thread);
+ void removeStoreRequest(const Address & addr, int thread);
+
+ void printProgress(ostream& out) const;
+
+ // returns a pointer to the request in the request tables
+ CacheMsg & getReadRequest( const Address & addr, int thread );
+ CacheMsg & getWriteRequest( const Address & addr, int thread );
+
+ // called by Ruby when transaction completes
+ void writeConflictCallback(const Address& address);
+ void readConflictCallback(const Address& address);
+ void writeConflictCallback(const Address& address, GenericMachineType respondingMach, int thread);
+ void readConflictCallback(const Address& address, GenericMachineType respondingMach, int thread);
+
+ // Completion callbacks; overloads differ only in how much information
+ // the protocol supplies (data block, responding machine, prefetch bit,
+ // thread id).
+ void writeCallback(const Address& address, DataBlock& data);
+ void readCallback(const Address& address, DataBlock& data);
+ void writeCallback(const Address& address);
+ void readCallback(const Address& address);
+ void writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread);
+ void readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread);
+ void writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread);
+ void readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread);
+
+ // returns the thread ID of the request
+ int getRequestThreadID(const Address & addr);
+ // returns the physical address of the request
+ Address getRequestPhysicalAddress(const Address & lineaddr);
+ // returns whether a request is a prefetch request
+ bool isPrefetchRequest(const Address & lineaddr);
+
+ //notifies driver of debug print
+ void printDebug();
+
+ // called by Tester or Simics
+ void makeRequest(const CacheMsg& request);
+ bool doRequest(const CacheMsg& request);
+ void issueRequest(const CacheMsg& request);
+ bool isReady(const CacheMsg& request) const;
+ bool empty() const;
+ void resetRequestTime(const Address& addr, int thread);
+ Address getLogicalAddressOfRequest(Address address, int thread);
+ AccessModeType getAccessModeOfRequest(Address address, int thread);
+ //uint64 getSequenceNumberOfRequest(Address addr, int thread);
+
+ void print(ostream& out) const;
+ void checkCoherence(const Address& address);
+
+ // Backdoor read/write of memory values through the cache hierarchy
+ // (used by the functional-access path; see Sequencer.cc).
+ bool getRubyMemoryValue(const Address& addr, char* value, unsigned int size_in_bytes);
+ bool setRubyMemoryValue(const Address& addr, char *value, unsigned int size_in_bytes);
+
+ void removeRequest(const CacheMsg& request);
+private:
+ // Private Methods
+ bool tryCacheAccess(const Address& addr, CacheRequestType type, const Address& pc, AccessModeType access_mode, int size, DataBlock*& data_ptr);
+ void conflictCallback(const CacheMsg& request, GenericMachineType respondingMach, int thread);
+ void hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread);
+ bool insertRequest(const CacheMsg& request);
+
+
+ // Private copy constructor and assignment operator
+ // (declared but never defined: forbids copying, pre-C++11 idiom)
+ Sequencer(const Sequencer& obj);
+ Sequencer& operator=(const Sequencer& obj);
+
+ // Data Members (m_ prefix)
+ AbstractChip* m_chip_ptr;
+
+ // indicates what processor on the chip this sequencer is associated with
+ int m_version;
+
+ // One request table per SMT thread
+ // NOTE(review): appears to be indexed [thread]; confirm allocation in
+ // Sequencer.cc (not visible here).
+ Map<Address, CacheMsg>** m_writeRequestTable_ptr;
+ Map<Address, CacheMsg>** m_readRequestTable_ptr;
+ // Global outstanding request count, across all request tables
+ int m_outstanding_count;
+ bool m_deadlock_check_scheduled;
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Sequencer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream insertion for Sequencer: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const Sequencer& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //SEQUENCER_H
+
diff --git a/src/mem/ruby/system/StoreBuffer.cc b/src/mem/ruby/system/StoreBuffer.cc
new file mode 100644
index 000000000..c6880bdd1
--- /dev/null
+++ b/src/mem/ruby/system/StoreBuffer.cc
@@ -0,0 +1,300 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Global.hh"
+#include "RubyConfig.hh"
+#include "StoreBuffer.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "Driver.hh"
+#include "Vector.hh"
+#include "EventQueue.hh"
+#include "AddressProfiler.hh"
+#include "Sequencer.hh"
+#include "SubBlock.hh"
+#include "Profiler.hh"
+
+// *** Begin Helper class ***
+// One in-flight store (or atomic) held in the StoreBuffer's circular
+// queue.  m_time records the insertion time and is compared against
+// g_DEADLOCK_THRESHOLD by StoreBuffer::wakeup().
+struct StoreBufferEntry {
+ StoreBufferEntry() {} // So we can allocate a vector of StoreBufferEntries
+ StoreBufferEntry(const SubBlock& block, CacheRequestType type, const Address& pc, AccessModeType access_mode, int size, int thread) : m_subblock(block) {
+ m_type = type;
+ m_pc = pc;
+ m_access_mode = access_mode;
+ m_size = size;
+ m_thread = thread;
+ m_time = g_eventQueue_ptr->getTime(); // timestamp for deadlock detection
+ }
+
+ void print(ostream& out) const
+ {
+ out << "[StoreBufferEntry: "
+ << "SubBlock: " << m_subblock
+ << ", Type: " << m_type
+ << ", PC: " << m_pc
+ << ", AccessMode: " << m_access_mode
+ << ", Size: " << m_size
+ << ", Thread: " << m_thread
+ << ", Time: " << m_time
+ << "]";
+ }
+
+ SubBlock m_subblock; // address + the bytes being stored
+ CacheRequestType m_type; // ST or ATOMIC (asserted in insertStore)
+ Address m_pc;
+ AccessModeType m_access_mode;
+ int m_size;
+ int m_thread; // SMT thread that issued the store
+ Time m_time; // insertion time
+};
+
+// Stream insertion for StoreBufferEntry: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const StoreBufferEntry& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+// *** End Helper class ***
+
+const int MAX_ENTRIES = 128;
+
+// Advance a circular-queue index by one, wrapping back to 0 at
+// MAX_ENTRIES.  Used for both m_head and m_tail.
+static void inc_index(int& index)
+{
+ index++;
+ if (index >= MAX_ENTRIES) {
+ index = 0;
+ }
+}
+
+// Construct an empty store buffer for one processor (version) of a chip.
+// The backing queue is a fixed-size vector of MAX_ENTRIES slots managed
+// as a circular buffer via m_head/m_tail/m_size.
+StoreBuffer::StoreBuffer(AbstractChip* chip_ptr, int version) :
+ m_store_cache()
+{
+ m_chip_ptr = chip_ptr;
+ m_version = version;
+ m_queue_ptr = new Vector<StoreBufferEntry>(MAX_ENTRIES);
+ m_queue_ptr->setSize(MAX_ENTRIES); // pre-size: slots are reused, never grown
+ m_pending = false;
+ m_seen_atomic = false;
+ m_head = 0;
+ m_tail = 0;
+ m_size = 0;
+ m_deadlock_check_scheduled = false;
+}
+
+// Release the heap-allocated entry queue (only owned resource).
+StoreBuffer::~StoreBuffer()
+{
+ delete m_queue_ptr;
+}
+
+// Used only to check for deadlock
+// Periodic deadlock sweep: walk every queued entry from m_head and abort
+// the simulation (ERROR_MSG) if any entry has been waiting longer than
+// g_DEADLOCK_THRESHOLD.  Re-schedules itself while entries remain.
+void StoreBuffer::wakeup()
+{
+ // Check for deadlock of any of the requests
+ Time current_time = g_eventQueue_ptr->getTime();
+
+ int queue_pointer = m_head;
+ for (int i=0; i<m_size; i++) {
+ if (current_time - (getEntry(queue_pointer).m_time) >= g_DEADLOCK_THRESHOLD) {
+ WARN_EXPR(getEntry(queue_pointer));
+ WARN_EXPR(m_chip_ptr->getID());
+ WARN_EXPR(current_time);
+ ERROR_MSG("Possible Deadlock detected");
+ }
+ inc_index(queue_pointer);
+ }
+
+ if (m_size > 0) { // If there are still outstanding requests, keep checking
+ g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ } else {
+ m_deadlock_check_scheduled = false;
+ }
+}
+
+// Dump the (compile-time) store buffer capacity to the config output.
+void StoreBuffer::printConfig(ostream& out)
+{
+ out << "Store buffer entries: " << MAX_ENTRIES << " (Only valid if TSO is enabled)" << endl;
+}
+
+// Handle an incoming store request, this method is responsible for
+// calling hitCallback as needed
+// Accept a new store or atomic from the processor.  Plain stores get
+// their driver hitCallback immediately (store buffer gives TSO-style
+// early completion); atomics defer the callback until callBack() when
+// the data returns.  Caller must ensure isReady() — asserted below.
+void StoreBuffer::insertStore(const CacheMsg& request)
+{
+ Address addr = request.getAddress();
+ CacheRequestType type = request.getType();
+ Address pc = request.getProgramCounter();
+ AccessModeType access_mode = request.getAccessMode();
+ int size = request.getSize();
+ int threadID = request.getThreadID();
+
+ DEBUG_MSG(STOREBUFFER_COMP, MedPrio, "insertStore");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, g_eventQueue_ptr->getTime());
+ assert((type == CacheRequestType_ST) || (type == CacheRequestType_ATOMIC));
+ assert(isReady()); // capacity available and no atomic pending
+
+ // See if we should schedule a deadlock check
+ if (m_deadlock_check_scheduled == false) {
+ g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ m_deadlock_check_scheduled = true;
+ }
+
+ // Perform the hit-callback for the store
+ SubBlock subblock(addr, size);
+ if(type == CacheRequestType_ST) {
+ g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID(), subblock, type, threadID);
+ assert(subblock.getSize() != 0);
+ } else {
+ // wait to perform the hitCallback until later for Atomics
+ }
+
+ // Perform possible pre-fetch
+ // NOTE(review): when other entries are already queued, the request is
+ // re-issued to the Sequencer with PrefetchBit_Yes — presumably to start
+ // the coherence transaction early while the entry waits; confirm intent.
+ if(!isEmpty()) {
+ CacheMsg new_request = request;
+ new_request.getPrefetch() = PrefetchBit_Yes;
+ m_chip_ptr->getSequencer(m_version)->makeRequest(new_request);
+ }
+
+ // Update the StoreCache
+ m_store_cache.add(subblock);
+
+ // Enqueue the entry
+ StoreBufferEntry entry(subblock, type, pc, access_mode, size, threadID); // FIXME
+ enqueue(entry);
+
+ if(type == CacheRequestType_ATOMIC) {
+ m_seen_atomic = true; // blocks further inserts until the atomic drains
+ }
+
+ processHeadOfQueue();
+}
+
+// Completion callback for the head entry's coherence transaction.
+// Merges the entry's bytes into the returned data block, performs the
+// deferred driver hitCallback for atomics, removes the entry from the
+// store cache, dequeues it, and tries to issue the next head entry.
+void StoreBuffer::callBack(const Address& addr, DataBlock& data)
+{
+ DEBUG_MSG(STOREBUFFER_COMP, MedPrio, "callBack");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, g_eventQueue_ptr->getTime());
+ assert(!isEmpty());
+ assert(m_pending == true);
+ assert(line_address(addr) == addr); // callback is per cache line
+ assert(line_address(m_pending_address) == addr);
+ assert(line_address(peek().m_subblock.getAddress()) == addr);
+ CacheRequestType type = peek().m_type;
+ int threadID = peek().m_thread;
+ assert((type == CacheRequestType_ST) || (type == CacheRequestType_ATOMIC));
+ m_pending = false;
+
+ // If oldest entry was ATOMIC, perform the callback
+ if(type == CacheRequestType_ST) {
+ // We already performed the call back for the store at insert time
+ } else {
+ // We waited to perform the hitCallback until now for Atomics
+ peek().m_subblock.mergeFrom(data); // copy the correct bytes from DataBlock into the SubBlock for the Load part of the atomic Load/Store
+ g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID(), peek().m_subblock, type, threadID);
+ m_seen_atomic = false;
+
+ /// FIXME - record the time spent in the store buffer - split out ST vs ATOMIC
+ }
+ assert(peek().m_subblock.getSize() != 0);
+
+ // Apply the head entry to the datablock
+ peek().m_subblock.mergeTo(data); // For both the Store and Atomic cases
+
+ // Update the StoreCache
+ m_store_cache.remove(peek().m_subblock);
+
+ // Dequeue the entry from the store buffer
+ dequeue();
+
+ // Invariant: the store cache mirrors the queue contents exactly
+ if (isEmpty()) {
+ assert(m_store_cache.isEmpty());
+ }
+
+ // Atomics block later inserts, so one must always drain the buffer
+ if(type == CacheRequestType_ATOMIC) {
+ assert(isEmpty());
+ }
+
+ // See if we can remove any more entries
+ processHeadOfQueue();
+}
+
+// Issue the head entry's request to the Sequencer, one outstanding
+// transaction at a time (m_pending gates re-entry until callBack()).
+void StoreBuffer::processHeadOfQueue()
+{
+ if(!isEmpty() && !m_pending) {
+ StoreBufferEntry& entry = peek();
+ assert(m_pending == false);
+ m_pending = true;
+ m_pending_address = entry.m_subblock.getAddress();
+ CacheMsg request(entry.m_subblock.getAddress(), entry.m_subblock.getAddress(), entry.m_type, entry.m_pc, entry.m_access_mode, entry.m_size, PrefetchBit_No, 0, Address(0), entry.m_thread, 0, false);
+ m_chip_ptr->getSequencer(m_version)->doRequest(request);
+ }
+}
+
+// A new store may be inserted only if there is a free slot and no atomic
+// is currently draining (atomics serialize the buffer).
+bool StoreBuffer::isReady() const
+{
+ return ((m_size < MAX_ENTRIES) && (!m_seen_atomic));
+}
+
+// Queue implementation methods
+
+// Return the oldest queued entry (head of the circular buffer).
+StoreBufferEntry& StoreBuffer::peek()
+{
+ return getEntry(m_head);
+}
+
+// Drop the head entry; the slot is reused on a later enqueue.
+void StoreBuffer::dequeue()
+{
+ assert(m_size > 0);
+ m_size--;
+ inc_index(m_head);
+}
+
+// Copy an entry into the tail slot and advance the tail.
+// NOTE(review): the isReady() assert here is disabled; the only caller,
+// insertStore(), asserts isReady() before enqueueing.
+void StoreBuffer::enqueue(const StoreBufferEntry& entry)
+{
+ // assert(isReady());
+ (*m_queue_ptr)[m_tail] = entry;
+ m_size++;
+ g_system_ptr->getProfiler()->storeBuffer(m_size, m_store_cache.size()); // occupancy stats
+ inc_index(m_tail);
+}
+
+// Direct slot access by circular-buffer index (no bounds/validity check).
+StoreBufferEntry& StoreBuffer::getEntry(int index)
+{
+ return (*m_queue_ptr)[index];
+}
+
+// Minimal identification for debug output; entries are not listed.
+void StoreBuffer::print(ostream& out) const
+{
+ out << "[StoreBuffer]";
+}
+
diff --git a/src/mem/ruby/system/StoreBuffer.hh b/src/mem/ruby/system/StoreBuffer.hh
new file mode 100644
index 000000000..832e4f0bb
--- /dev/null
+++ b/src/mem/ruby/system/StoreBuffer.hh
@@ -0,0 +1,120 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef StoreBuffer_H
+#define StoreBuffer_H
+
+#include "Global.hh"
+#include "Consumer.hh"
+#include "Address.hh"
+#include "AccessModeType.hh"
+#include "CacheRequestType.hh"
+#include "StoreCache.hh"
+
+class CacheMsg;
+class DataBlock;
+class SubBlock;
+class StoreBufferEntry;
+class AbstractChip;
+
+template <class TYPE> class Vector;
+
+// Fixed-capacity (MAX_ENTRIES, see StoreBuffer.cc) circular buffer of
+// in-flight stores for one processor, with a byte-granular StoreCache
+// mirror used to forward buffered store data to subsequent loads.
+// Inherits Consumer so wakeup() can run periodic deadlock checks.
+class StoreBuffer : public Consumer {
+public:
+ // Constructors
+ StoreBuffer(AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~StoreBuffer();
+
+ // Public Methods
+ void wakeup(); // Used only for deadlock detection
+ void callBack(const Address& addr, DataBlock& data);
+ void insertStore(const CacheMsg& request);
+ // Forward buffered store bytes into a load's sub-block
+ void updateSubBlock(SubBlock& sub_block) const { m_store_cache.update(sub_block); }
+ // True iff every byte of sub_block is covered by buffered stores
+ bool trySubBlock(const SubBlock& sub_block) const { assert(isReady()); return m_store_cache.check(sub_block); }
+ void print(ostream& out) const;
+ bool isEmpty() const { return (m_size == 0); }
+ bool isReady() const;
+
+ // Class methods
+ static void printConfig(ostream& out);
+
+private:
+ // Private Methods
+ void processHeadOfQueue();
+
+ StoreBufferEntry& peek();
+ void dequeue();
+ void enqueue(const StoreBufferEntry& entry);
+ StoreBufferEntry& getEntry(int index);
+
+ // Private copy constructor and assignment operator
+ // (declared but never defined: forbids copying, pre-C++11 idiom)
+ StoreBuffer(const StoreBuffer& obj);
+ StoreBuffer& operator=(const StoreBuffer& obj);
+
+ // Data Members (m_ prefix)
+ int m_version; // which processor on the chip this buffer serves
+
+ // Circular queue storage: head/tail indices wrap at MAX_ENTRIES
+ Vector<StoreBufferEntry>* m_queue_ptr;
+ int m_head;
+ int m_tail;
+ int m_size;
+
+ StoreCache m_store_cache; // byte-level mirror of queued store data
+
+ AbstractChip* m_chip_ptr;
+ bool m_pending; // a head-entry transaction is outstanding
+ Address m_pending_address; // line address of that transaction
+ bool m_seen_atomic; // atomic queued: block inserts until drained
+ bool m_deadlock_check_scheduled;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StoreBuffer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream insertion for StoreBuffer: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const StoreBuffer& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //StoreBuffer_H
diff --git a/src/mem/ruby/system/StoreCache.cc b/src/mem/ruby/system/StoreCache.cc
new file mode 100644
index 000000000..bc25c50d6
--- /dev/null
+++ b/src/mem/ruby/system/StoreCache.cc
@@ -0,0 +1,178 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "StoreCache.hh"
+#include "System.hh"
+#include "Driver.hh"
+#include "Vector.hh"
+#include "DataBlock.hh"
+#include "SubBlock.hh"
+#include "Map.hh"
+
+// Helper class
+// Per-cache-line record: a reference count per byte plus one per line,
+// and the most recent data written to each byte.  Counters track how
+// many buffered stores cover each byte so entries can be freed exactly
+// when the last store to the line drains.
+struct StoreCacheEntry {
+ StoreCacheEntry() {
+ m_byte_counters.setSize(RubyConfig::dataBlockBytes());
+ for(int i=0; i<m_byte_counters.size(); i++) {
+ m_byte_counters[i] = 0;
+ }
+ m_line_counter = 0;
+
+ }
+ Address m_addr;
+ DataBlock m_datablock; // latest bytes written to this line
+ Vector<int> m_byte_counters; // outstanding-store count per byte
+ int m_line_counter; // outstanding-store count for the line
+};
+
+// Allocate the line-address -> entry map backing the cache.
+StoreCache::StoreCache()
+{
+ m_internal_cache_ptr = new Map<Address, StoreCacheEntry>;
+}
+
+// Release the internal map (only owned resource).
+StoreCache::~StoreCache()
+{
+ delete m_internal_cache_ptr;
+}
+
+// True when no line has any outstanding buffered store.
+bool StoreCache::isEmpty() const
+{
+ return m_internal_cache_ptr->size() == 0;
+}
+
+// Number of distinct cache lines with buffered stores.
+int StoreCache::size() const { return m_internal_cache_ptr->size(); }
+
+// Record a buffered store: allocate the line entry if needed, then bump
+// the per-byte and per-line reference counts and overwrite the data
+// bytes with the store's values.
+void StoreCache::add(const SubBlock& block)
+{
+ if (m_internal_cache_ptr->exist(line_address(block.getAddress())) == false) {
+ m_internal_cache_ptr->allocate(line_address(block.getAddress()));
+ }
+
+ StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+ // For each byte in entry change the bytes and inc. the counters
+ int starting_offset = block.getAddress().getOffset();
+ int size = block.getSize();
+ for (int index=0; index < size; index++) {
+ // Update counter
+ entry.m_byte_counters[starting_offset+index]++;
+
+ // Record data
+ entry.m_datablock.setByte(starting_offset+index, block.getByte(index));
+
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, block.getAddress());
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, int(block.getByte(index)));
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, starting_offset+index);
+ }
+
+ // Increment the counter
+ entry.m_line_counter++;
+}
+
+// Retire a buffered store (mirror of add): decrement the per-byte and
+// per-line counts; free the line entry when its last store drains.
+// Data bytes are left as-is — only counters determine validity.
+void StoreCache::remove(const SubBlock& block)
+{
+ assert(m_internal_cache_ptr->exist(line_address(block.getAddress())));
+
+ StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+ // Decrement the byte counters
+ int starting_offset = block.getAddress().getOffset();
+ int size = block.getSize();
+ for (int index=0; index < size; index++) {
+ // Update counter
+ entry.m_byte_counters[starting_offset+index]--;
+ }
+
+ // Decrement the line counter
+ entry.m_line_counter--;
+ assert(entry.m_line_counter >= 0);
+
+ // Check to see if we should de-allocate this entry
+ if (entry.m_line_counter == 0) {
+ m_internal_cache_ptr->deallocate(line_address(block.getAddress()));
+ }
+}
+
+// Return true iff EVERY byte of the sub-block is covered by at least one
+// buffered store (so a load could be satisfied entirely from the store
+// cache); false if the line is absent or any byte count is zero.
+bool StoreCache::check(const SubBlock& block) const
+{
+ if (m_internal_cache_ptr->exist(line_address(block.getAddress())) == false) {
+ return false;
+ } else {
+ // Lookup the entry
+ StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+ // See if all the bytes are valid
+ int starting_offset = block.getAddress().getOffset();
+ int size = block.getSize();
+ for (int index=0; index < size; index++) {
+ if (entry.m_byte_counters[starting_offset+index] > 0) {
+ // So far so good
+ } else {
+ // not all the bytes were valid
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// Store-to-load forwarding: copy into the sub-block every byte that has
+// an outstanding buffered store (byte counter > 0).  Bytes without
+// coverage — and lines not present at all — are left untouched.
+void StoreCache::update(SubBlock& block) const
+{
+ if (m_internal_cache_ptr->exist(line_address(block.getAddress()))) {
+ // Lookup the entry
+ StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+ // Copy all appropriate and valid bytes from the store cache to
+ // the SubBlock
+ int starting_offset = block.getAddress().getOffset();
+ int size = block.getSize();
+ for (int index=0; index < size; index++) {
+
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, block.getAddress());
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, int(entry.m_datablock.getByte(starting_offset+index)));
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, starting_offset+index);
+
+ // If this byte is valid, copy the data into the sub-block
+ if (entry.m_byte_counters[starting_offset+index] > 0) {
+ block.setByte(index, entry.m_datablock.getByte(starting_offset+index));
+ }
+ }
+ }
+}
+
+// Minimal identification for debug output; entries are not listed.
+void StoreCache::print(ostream& out) const
+{
+ out << "[StoreCache]";
+}
+
diff --git a/src/mem/ruby/system/StoreCache.hh b/src/mem/ruby/system/StoreCache.hh
new file mode 100644
index 000000000..d92d39888
--- /dev/null
+++ b/src/mem/ruby/system/StoreCache.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef StoreCache_H
+#define StoreCache_H
+
+#include "Global.hh"
+#include "Address.hh"
+
+
+class DataBlock;
+class SubBlock;
+class StoreCacheEntry;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+
+// Byte-granular, reference-counted shadow of the data held in a
+// StoreBuffer, keyed by cache-line address.  add()/remove() bracket each
+// buffered store's lifetime; check()/update() let loads test for and
+// consume forwarded store data.
+class StoreCache {
+public:
+ // Constructors
+ StoreCache();
+
+ // Destructor
+ ~StoreCache();
+
+ // Public Methods
+ void add(const SubBlock& block); // record a buffered store's bytes
+ void remove(const SubBlock& block); // retire a drained store's bytes
+ bool check(const SubBlock& block) const; // all bytes covered?
+ void update(SubBlock& block) const; // forward covered bytes into block
+ bool isEmpty() const;
+ int size() const; // number of lines with buffered stores
+ void print(ostream& out) const;
+
+private:
+ Map<Address, StoreCacheEntry>* m_internal_cache_ptr; // line -> entry
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StoreCache& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream insertion for StoreCache: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const StoreCache& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //StoreCache_H
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
new file mode 100644
index 000000000..6352d8a58
--- /dev/null
+++ b/src/mem/ruby/system/System.cc
@@ -0,0 +1,269 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * System.C
+ *
+ * Description: See System.h
+ *
+ * $Id$
+ *
+ */
+
+
+#include "System.hh"
+#include "Profiler.hh"
+#include "Network.hh"
+#include "Tester.hh"
+#include "SyntheticDriver.hh"
+#include "DeterministicDriver.hh"
+#include "OpalInterface.hh"
+#include "Chip.hh"
+//#include "Tracer.hh"
+#include "Protocol.hh"
+//#include "XactIsolationChecker.hh" // gem5:Arka for decomissioning of log_tm
+//#include "XactCommitArbiter.hh"
+//#include "XactVisualizer.hh"
+#include "M5Driver.hh"
+
+// Construct the simulated system: profiler, interconnect network, one Chip
+// per configured chip, and finally a driver selected by the global
+// g_SYNTHETIC_DRIVER / g_DETERMINISTIC_DRIVER flags (M5Driver otherwise).
+// Order matters: the network must exist before chips, chips before drivers.
+System::System()
+{
+  DEBUG_MSG(SYSTEM_COMP, MedPrio,"initializing");
+
+  m_driver_ptr = NULL;
+  m_profiler_ptr = new Profiler;
+
+  // NETWORK INITIALIZATION
+  // create the network by calling a function that calls new
+  m_network_ptr = Network::createNetwork(RubyConfig::numberOfChips());
+
+  DEBUG_MSG(SYSTEM_COMP, MedPrio,"Constructed network");
+
+  // CHIP INITIALIZATION
+  m_chip_vector.setSize(RubyConfig::numberOfChips());// create the vector of pointers to processors
+  for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
+    // create the chip
+    m_chip_vector[i] = new Chip(i, m_network_ptr);
+    DEBUG_MSG(SYSTEM_COMP, MedPrio,"Constructed a chip");
+  }
+
+  // These must be after the chips are constructed
+
+  // Dead legacy Simics/Opal driver-selection logic, kept for reference only.
+#if 0
+  if (!g_SIMICS) {
+    if (g_SYNTHETIC_DRIVER && !g_DETERMINISTIC_DRIVER) {
+      m_driver_ptr = new SyntheticDriver(this);
+    } else if (!g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
+      m_driver_ptr = new DeterministicDriver(this);
+    } else if (g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
+      ERROR_MSG("SYNTHETIC and DETERMINISTIC DRIVERS are exclusive and cannot be both enabled");
+    } else {
+      // normally make tester object, otherwise make an opal interface object.
+      if (!OpalInterface::isOpalLoaded()) {
+        m_driver_ptr = new Tester(this);
+      } else {
+        m_driver_ptr = new OpalInterface(this);
+      }
+    }
+  } else {
+    // detect if opal is loaded or not
+    if (OpalInterface::isOpalLoaded()) {
+      m_driver_ptr = new OpalInterface(this);
+    } else {
+      assert(0);
+      /* Need to allocate a driver here */
+      // m_driver_ptr = new SimicsDriver(this);
+    }
+  }
+#endif
+
+  // Active driver selection.  Note: both flags set falls through to the
+  // M5 driver branch here (the exclusivity error above is inside #if 0).
+  if (g_SYNTHETIC_DRIVER && !g_DETERMINISTIC_DRIVER) {
+    cerr << "Creating Synthetic Driver" << endl;
+    m_driver_ptr = new SyntheticDriver(this);
+  } else if (!g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
+    cerr << "Creating Deterministic Driver" << endl;
+    m_driver_ptr = new DeterministicDriver(this);
+  } else {
+    cerr << "Creating M5 Driver" << endl;
+    m_driver_ptr = new M5Driver(this);
+  }
+  /* gem5:Binkert for decomissiong of tracer
+     m_tracer_ptr = new Tracer;
+  */
+
+  /* gem5:Arka for decomissiong of log_tm
+  if (XACT_MEMORY) {
+    m_xact_isolation_checker = new XactIsolationChecker;
+    m_xact_commit_arbiter = new XactCommitArbiter;
+    m_xact_visualizer = new XactVisualizer;
+  }
+*/
+  DEBUG_MSG(SYSTEM_COMP, MedPrio,"finished initializing");
+  DEBUG_NEWLINE(SYSTEM_COMP, MedPrio);
+
+}
+
+// Destroy components in reverse construction order: chips first (they hold
+// a pointer to the network), then driver, network, and finally the profiler.
+System::~System()
+{
+  for (int i = 0; i < m_chip_vector.size(); i++) {
+    delete m_chip_vector[i];
+  }
+  delete m_driver_ptr;
+  delete m_network_ptr;
+  delete m_profiler_ptr;
+  /* gem5:Binkert for decomissiong of tracer
+     delete m_tracer_ptr;
+  */
+}
+
+// Dump the static configuration of every major component between banner
+// lines.  Only chip 0's config is printed (chips are assumed homogeneous --
+// NOTE(review): confirm all chips share one configuration).
+void System::printConfig(ostream& out) const
+{
+  out << "\n================ Begin System Configuration Print ================\n\n";
+  RubyConfig::printConfiguration(out);
+  out << endl;
+  getChip(0)->printConfig(out);
+  m_network_ptr->printConfig(out);
+  m_driver_ptr->printConfig(out);
+  m_profiler_ptr->printConfig(out);
+  out << "\n================ End System Configuration Print ================\n\n";
+}
+
+// Dump runtime statistics from every component, prefixed by a wall-clock
+// timestamp so runs can be correlated with external logs.
+void System::printStats(ostream& out)
+{
+  const time_t T = time(NULL);
+  tm *localTime = localtime(&T);
+  char buf[100];
+  // Use sizeof(buf) so the strftime limit tracks the buffer if its size
+  // ever changes (the magic constant 100 was previously duplicated here).
+  strftime(buf, sizeof(buf), "%b/%d/%Y %H:%M:%S", localTime);
+
+  out << "Real time: " << buf << endl;
+
+  m_profiler_ptr->printStats(out);
+  // Per-processor mandatory-queue stats for every chip.
+  for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
+    for(int p=0; p<RubyConfig::numberOfProcsPerChip(); p++) {
+      m_chip_vector[i]->m_L1Cache_mandatoryQueue_vec[p]->printStats(out);
+    }
+  }
+  m_network_ptr->printStats(out);
+  m_driver_ptr->printStats(out);
+  Chip::printStats(out);
+}
+
+// Reset accumulated statistics on every component, then on each per-CPU
+// mandatory queue.  Declared const: only the pointed-to objects mutate.
+void System::clearStats() const
+{
+  m_profiler_ptr->clearStats();
+  m_network_ptr->clearStats();
+  m_driver_ptr->clearStats();
+  Chip::clearStats();
+  const int num_chips = RubyConfig::numberOfChips();
+  const int procs_per_chip = RubyConfig::numberOfProcsPerChip();
+  for (int chip = 0; chip < num_chips; chip++) {
+    for (int proc = 0; proc < procs_per_chip; proc++) {
+      m_chip_vector[chip]->m_L1Cache_mandatoryQueue_vec[proc]->clearStats();
+    }
+  }
+}
+
+// Record the contents of every cache into the CacheRecorder.  Before
+// recording, tag each L1 as instruction vs. data cache (two-level
+// protocols) or as a unified data cache (single-level protocols) so the
+// recorder can distinguish them.
+void System::recordCacheContents(CacheRecorder& tr) const
+{
+  for (int i = 0; i < m_chip_vector.size(); i++) {
+    for (int m_version = 0; m_version < RubyConfig::numberOfProcsPerChip(); m_version++) {
+      if (Protocol::m_TwoLevelCache) {
+        m_chip_vector[i]->m_L1Cache_L1IcacheMemory_vec[m_version]->setAsInstructionCache(true);
+        m_chip_vector[i]->m_L1Cache_L1DcacheMemory_vec[m_version]->setAsInstructionCache(false);
+      } else {
+        m_chip_vector[i]->m_L1Cache_cacheMemory_vec[m_version]->setAsInstructionCache(false);
+      }
+    }
+    m_chip_vector[i]->recordCacheContents(tr);
+  }
+}
+
+// Called when the Opal processor model is loaded after startup: replace the
+// current driver with an OpalInterface so requests come from Opal instead.
+void System::opalLoadNotify()
+{
+  if (!OpalInterface::isOpalLoaded()) {
+    return;
+  }
+  // change the driver pointer to point to an opal driver
+  delete m_driver_ptr;
+  m_driver_ptr = new OpalInterface(this);
+}
+
+#ifdef CHECK_COHERENCE
+// This code will check for cases if the given cache block is exclusive in
+// one node and shared in another-- a coherence violation
+//
+// To use, the SLICC specification must call sequencer.checkCoherence(address)
+// when the controller changes to a state with new permissions. Do this
+// in setState. The SLICC spec must also define methods "isBlockShared"
+// and "isBlockExclusive" that are specific to that protocol
+//
+// Scan every chip's state for 'addr' and report a coherence violation if
+// either (a) two chips hold the block exclusive, or (b) one chip holds it
+// exclusive while any other holds it shared.  Relies on protocol-provided
+// isBlockExclusive()/isBlockShared() (see comment above).
+void System::checkGlobalCoherenceInvariant(const Address& addr ) {
+
+  // -1 is the "no exclusive owner seen yet" sentinel.
+  // NOTE(review): if NodeID is an unsigned type this relies on the
+  // wrapped value comparing consistently against -1 -- confirm NodeID
+  // is signed.
+  NodeID exclusive = -1;
+  bool sharedDetected = false;
+  NodeID lastShared = -1;
+
+  for (int i = 0; i < m_chip_vector.size(); i++) {
+
+    if (m_chip_vector[i]->isBlockExclusive(addr)) {
+      if (exclusive != -1) {
+        // coherence violation
+        WARN_EXPR(exclusive);
+        WARN_EXPR(m_chip_vector[i]->getID());
+        WARN_EXPR(addr);
+        WARN_EXPR(g_eventQueue_ptr->getTime());
+        ERROR_MSG("Coherence Violation Detected -- 2 exclusive chips");
+      }
+      else if (sharedDetected) {
+        WARN_EXPR(lastShared);
+        WARN_EXPR(m_chip_vector[i]->getID());
+        WARN_EXPR(addr);
+        WARN_EXPR(g_eventQueue_ptr->getTime());
+        ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
+      }
+      else {
+        // First exclusive holder seen so far.
+        exclusive = m_chip_vector[i]->getID();
+      }
+    }
+    else if (m_chip_vector[i]->isBlockShared(addr)) {
+      sharedDetected = true;
+      lastShared = m_chip_vector[i]->getID();
+
+      // A sharer after an exclusive holder is equally a violation.
+      if (exclusive != -1) {
+        WARN_EXPR(lastShared);
+        WARN_EXPR(exclusive);
+        WARN_EXPR(addr);
+        WARN_EXPR(g_eventQueue_ptr->getTime());
+        ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
+      }
+    }
+  }
+}
+#endif
+
+
+
+
diff --git a/src/mem/ruby/system/System.hh b/src/mem/ruby/system/System.hh
new file mode 100644
index 000000000..350f74468
--- /dev/null
+++ b/src/mem/ruby/system/System.hh
@@ -0,0 +1,137 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * System.h
+ *
+ * Description: Contains all of the various parts of the system we are
+ * simulating. Performs allocation, deallocation, and setup of all
+ * the major components of the system
+ *
+ * $Id$
+ *
+ */
+
+#ifndef SYSTEM_H
+#define SYSTEM_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "Address.hh"
+#include "RubyConfig.hh"
+#include "MachineType.hh"
+#include "AbstractChip.hh"
+
+class Profiler;
+class Network;
+class Driver;
+class CacheRecorder;
+class Tracer;
+class Sequencer;
+class XactIsolationChecker;
+class XactCommitArbiter;
+class XactVisualizer;
+class TransactionInterfaceManager;
+
+// Top-level container for the simulated memory system: owns the network,
+// the per-chip objects, the profiler, and the driver (see System.cc for
+// construction order).  Non-copyable.
+class System {
+public:
+  // Constructors
+  System();
+
+  // Destructor
+  ~System();
+
+  // Public Methods
+  int getNumProcessors() { return RubyConfig::numberOfProcessors(); }
+  int getNumMemories() { return RubyConfig::numberOfMemories(); }
+  Profiler* getProfiler() { return m_profiler_ptr; }
+  Driver* getDriver() { assert(m_driver_ptr != NULL); return m_driver_ptr; }
+  // NOTE(review): m_tracer_ptr is never assigned in System.cc (tracer is
+  // decommissioned), so getTracer() will always assert if called.
+  Tracer* getTracer() { assert(m_tracer_ptr != NULL); return m_tracer_ptr; }
+  Network* getNetwork() { assert(m_network_ptr != NULL); return m_network_ptr; }
+  // The three Xact accessors below back the (currently decommissioned)
+  // log_tm transactional-memory support.
+  XactIsolationChecker* getXactIsolationChecker() { assert(m_xact_isolation_checker!= NULL); return m_xact_isolation_checker;}
+  XactCommitArbiter* getXactCommitArbiter() { assert(m_xact_commit_arbiter!= NULL); return m_xact_commit_arbiter;}
+  XactVisualizer* getXactVisualizer() { assert(m_xact_visualizer!= NULL); return m_xact_visualizer;}
+
+  AbstractChip* getChip(int chipNumber) const { assert(m_chip_vector[chipNumber] != NULL); return m_chip_vector[chipNumber];}
+  // Map a global processor number to its chip and per-chip sequencer slot.
+  Sequencer* getSequencer(int procNumber) const {
+    assert(procNumber < RubyConfig::numberOfProcessors());
+    return m_chip_vector[procNumber/RubyConfig::numberOfProcsPerChip()]->getSequencer(procNumber%RubyConfig::numberOfProcsPerChip());
+  }
+  TransactionInterfaceManager* getTransactionInterfaceManager(int procNumber) const {
+    return m_chip_vector[procNumber/RubyConfig::numberOfProcsPerChip()]->getTransactionInterfaceManager(procNumber%RubyConfig::numberOfProcsPerChip());
+  }
+  void recordCacheContents(CacheRecorder& tr) const;
+  void printConfig(ostream& out) const;
+  void printStats(ostream& out);
+  void clearStats() const;
+
+  // called to notify the system when opal is loaded
+  void opalLoadNotify();
+
+  void print(ostream& out) const;
+#ifdef CHECK_COHERENCE
+  void checkGlobalCoherenceInvariant(const Address& addr);
+#endif
+
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  System(const System& obj);
+  System& operator=(const System& obj);
+
+  // Data Members (m_ prefix)
+  Network* m_network_ptr;
+  Vector<AbstractChip*> m_chip_vector;
+  Profiler* m_profiler_ptr;
+  Driver* m_driver_ptr;
+  Tracer* m_tracer_ptr;
+  XactIsolationChecker *m_xact_isolation_checker;
+  XactCommitArbiter *m_xact_commit_arbiter;
+  XactVisualizer *m_xact_visualizer;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const System& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream insertion for System.  The print() call is deliberately disabled,
+// so this currently emits nothing and only flushes the stream.
+inline
+ostream& operator<<(ostream& out, const System& obj)
+{
+//  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //SYSTEM_H
+
+
+
diff --git a/src/mem/ruby/system/TBETable.hh b/src/mem/ruby/system/TBETable.hh
new file mode 100644
index 000000000..ad1674dca
--- /dev/null
+++ b/src/mem/ruby/system/TBETable.hh
@@ -0,0 +1,165 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TBETable.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef TBETABLE_H
+#define TBETABLE_H
+
+#include "Global.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "Profiler.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+
+// Table of TBEs (transaction buffer entries) holding per-line miss state,
+// keyed by line Address and bounded by the global NUMBER_OF_TBES.
+// ENTRY must be default-constructible (see allocate()).  Non-copyable.
+template<class ENTRY>
+class TBETable {
+public:
+
+  // Constructors
+  TBETable(AbstractChip* chip_ptr);
+
+  // Destructor
+  //~TBETable();
+
+  // Public Methods
+
+  static void printConfig(ostream& out) { out << "TBEs_per_TBETable: " << NUMBER_OF_TBES << endl; }
+
+  bool isPresent(const Address& address) const;
+  void allocate(const Address& address);
+  void deallocate(const Address& address);
+  // True if at least n free slots remain.
+  bool areNSlotsAvailable(int n) const { return (NUMBER_OF_TBES - m_map.size()) >= n; }
+
+  ENTRY& lookup(const Address& address);
+  const ENTRY& lookup(const Address& address) const;
+
+  // Print cache contents
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  TBETable(const TBETable& obj);
+  TBETable& operator=(const TBETable& obj);
+
+  // Data Members (m_prefix)
+  Map<Address, ENTRY> m_map;
+  AbstractChip* m_chip_ptr;   // owning chip; not used by the visible methods
+};
+
+// Output operator declaration
+//ostream& operator<<(ostream& out, const TBETable<ENTRY>& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream insertion for TBETable: delegates to print() (a no-op below) and
+// flushes.  'extern inline' on a template is archaic GEMS style.
+template<class ENTRY>
+extern inline
+ostream& operator<<(ostream& out, const TBETable<ENTRY>& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+
+// ****************************************************************
+
+// Construct an empty table associated with the given chip.
+template<class ENTRY>
+extern inline
+TBETable<ENTRY>::TBETable(AbstractChip* chip_ptr)
+  : m_chip_ptr(chip_ptr)
+{
+}
+
+// PUBLIC METHODS
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+extern inline
+bool TBETable<ENTRY>::isPresent(const Address& address) const
+{
+  // Only line-aligned addresses are tracked, and occupancy must never
+  // exceed the configured capacity.
+  assert(address == line_address(address));
+  assert(m_map.size() <= NUMBER_OF_TBES);
+  return m_map.exist(address);
+}
+
+// Insert a default-constructed ENTRY for 'address'.  The caller must have
+// checked areNSlotsAvailable(); a full table trips the assert.
+template<class ENTRY>
+extern inline
+void TBETable<ENTRY>::allocate(const Address& address)
+{
+  assert(isPresent(address) == false);
+  assert(m_map.size() < NUMBER_OF_TBES);
+  // Sample current occupancy for the profiler before inserting.
+  g_system_ptr->getProfiler()->L2tbeUsageSample(m_map.size());
+  m_map.add(address, ENTRY());
+}
+
+// Remove the entry for 'address'; it must currently be present.
+template<class ENTRY>
+extern inline
+void TBETable<ENTRY>::deallocate(const Address& address)
+{
+  assert(isPresent(address) == true);
+  assert(m_map.size() > 0);
+  m_map.erase(address);
+}
+
+// looks an address up in the cache
+// Return a mutable reference to the entry for 'address' (must be present).
+template<class ENTRY>
+extern inline
+ENTRY& TBETable<ENTRY>::lookup(const Address& address)
+{
+  assert(isPresent(address) == true);
+  return m_map.lookup(address);
+}
+
+// looks an address up in the cache
+// Const overload: read-only reference to the entry (must be present).
+template<class ENTRY>
+extern inline
+const ENTRY& TBETable<ENTRY>::lookup(const Address& address) const
+{
+  assert(isPresent(address) == true);
+  return m_map.lookup(address);
+}
+
+// No-op: table contents are not dumped; exists to satisfy operator<<.
+template<class ENTRY>
+extern inline
+void TBETable<ENTRY>::print(ostream& out) const
+{
+}
+
+#endif //TBETABLE_H
diff --git a/src/mem/ruby/system/TimerTable.cc b/src/mem/ruby/system/TimerTable.cc
new file mode 100644
index 000000000..a8453d4bb
--- /dev/null
+++ b/src/mem/ruby/system/TimerTable.cc
@@ -0,0 +1,129 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "Global.hh"
+#include "TimerTable.hh"
+#include "EventQueue.hh"
+
+// Build an empty timer table.  A consumer must be registered through
+// setConsumer() before the first set() call.
+TimerTable::TimerTable(Chip* chip_ptr)
+  : m_chip_ptr(chip_ptr),
+    m_next_valid(false),
+    m_next_time(0),
+    m_next_address(Address(0)),
+    m_consumer_ptr(NULL)
+{
+  assert(chip_ptr != NULL);
+}
+
+
+// True if the earliest pending timer's ready time has been reached.
+bool TimerTable::isReady() const
+{
+  if (m_map.size() == 0) {
+    return false;
+  }
+
+  // Lazily recompute the cached earliest entry if it was invalidated.
+  if (!m_next_valid) {
+    updateNext();
+  }
+  assert(m_next_valid);
+  return (g_eventQueue_ptr->getTime() >= m_next_time);
+}
+
+// Address of the earliest ready timer; only legal when isReady() is true.
+const Address& TimerTable::readyAddress() const
+{
+  assert(isReady());
+
+  // isReady() already refreshed the cache, but guard again for safety.
+  if (!m_next_valid) {
+    updateNext();
+  }
+  assert(m_next_valid);
+  return m_next_address;
+}
+
+// Arm a timer for 'address' to fire 'relative_latency' cycles from now and
+// schedule a wakeup of the registered consumer at that absolute time.
+// 'address' must be line-aligned and must not already have a pending timer.
+void TimerTable::set(const Address& address, Time relative_latency)
+{
+  assert(address == line_address(address));
+  assert(relative_latency > 0);
+  assert(m_map.exist(address) == false);
+  Time ready_time = g_eventQueue_ptr->getTime() + relative_latency;
+  m_map.add(address, ready_time);
+  assert(m_consumer_ptr != NULL);
+  g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, ready_time);
+
+  // Don't always recalculate the next ready address: the cached minimum
+  // only becomes stale when the new entry is at least as early.  (The
+  // previous code also cleared m_next_valid unconditionally first, which
+  // made this test dead and forced a full rescan after every set().)
+  // If m_next_valid is already false, m_next_time is stale and the
+  // comparison result is irrelevant -- the flag simply stays false.
+  if (ready_time <= m_next_time) {
+    m_next_valid = false;
+  }
+}
+
+// Cancel the pending timer for 'address' (must exist).
+// NOTE(review): the wakeup event scheduled by set() is not descheduled
+// here -- confirm the consumer tolerates spurious wakeups.
+void TimerTable::unset(const Address& address)
+{
+  assert(address == line_address(address));
+  assert(m_map.exist(address) == true);
+  m_map.remove(address);
+
+  // Don't always recalculate the next ready address
+  if (address == m_next_address) {
+    m_next_valid = false;
+  }
+}
+
+// No-op: timer contents are not dumped; exists to satisfy operator<<.
+void TimerTable::print(ostream& out) const
+{
+}
+
+
+// Recompute the cached (address, time) of the earliest pending timer with
+// a linear scan of all entries.  Called lazily; declared const because only
+// the mutable m_next_* cache members are written.
+void TimerTable::updateNext() const
+{
+  if (m_map.size() == 0) {
+    assert(m_next_valid == false);
+    return;
+  }
+
+  Vector<Address> addresses = m_map.keys();
+  const int count = addresses.size();
+
+  // Seed the minimum with the first entry, then scan the remainder.
+  m_next_address = addresses[0];
+  m_next_time = m_map.lookup(m_next_address);
+  for (int idx = 1; idx < count; idx++) {
+    Address candidate = addresses[idx];
+    Time candidate_time = m_map.lookup(candidate);
+    if (candidate_time < m_next_time) {
+      m_next_address = candidate;
+      m_next_time = candidate_time;
+    }
+  }
+  m_next_valid = true;
+}
diff --git a/src/mem/ruby/system/TimerTable.hh b/src/mem/ruby/system/TimerTable.hh
new file mode 100644
index 000000000..c7f77efb1
--- /dev/null
+++ b/src/mem/ruby/system/TimerTable.hh
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TimerTable.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef TIMERTABLE_H
+#define TIMERTABLE_H
+
+#include "Global.hh"
+#include "Map.hh"
+#include "Address.hh"
+class Consumer;
+class Chip;
+
+// Per-chip table of pending per-line timers.  Caches the earliest pending
+// (address, time) pair lazily (m_next_*); wakeups are delivered to a single
+// registered Consumer.  Non-copyable.
+class TimerTable {
+public:
+
+  // Constructors
+  TimerTable(Chip* chip_ptr);
+
+  // Destructor
+  //~TimerTable();
+
+  // Class Methods
+  static void printConfig(ostream& out) {}
+
+  // Public Methods
+  // Register the single wakeup target; may only be set once (ASSERT).
+  void setConsumer(Consumer* consumer_ptr) { ASSERT(m_consumer_ptr==NULL); m_consumer_ptr = consumer_ptr; }
+  void setDescription(const string& name) { m_name = name; }
+
+  bool isReady() const;
+  const Address& readyAddress() const;
+  bool isSet(const Address& address) const { return m_map.exist(address); }
+  void set(const Address& address, Time relative_latency);
+  void unset(const Address& address);
+  void print(ostream& out) const;
+private:
+  // Private Methods
+  void updateNext() const;
+
+  // Private copy constructor and assignment operator
+  TimerTable(const TimerTable& obj);
+  TimerTable& operator=(const TimerTable& obj);
+
+  // Data Members (m_prefix)
+  Map<Address, Time> m_map;
+  Chip* m_chip_ptr;
+  mutable bool m_next_valid;
+  mutable Time m_next_time; // Only valid if m_next_valid is true
+  mutable Address m_next_address; // Only valid if m_next_valid is true
+  Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
+  string m_name;
+};
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream insertion for TimerTable: delegates to print() (a no-op in
+// TimerTable.cc) and flushes the stream.
+extern inline
+ostream& operator<<(ostream& out, const TimerTable& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+#endif //TIMERTABLE_H
diff --git a/src/mem/ruby/tester/BarrierGenerator.cc b/src/mem/ruby/tester/BarrierGenerator.cc
new file mode 100644
index 000000000..79b9c6d2b
--- /dev/null
+++ b/src/mem/ruby/tester/BarrierGenerator.cc
@@ -0,0 +1,333 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: BarrierGenerator.C 1.3 2005/01/19 13:12:35-06:00 mikem@maya.cs.wisc.edu $
+ *
+ */
+
+#include "BarrierGenerator.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "RubyConfig.hh"
+#include "SubBlock.hh"
+#include "SyntheticDriver.hh"
+#include "Chip.hh"
+
// Constructs one barrier participant for node 'node' and schedules its first
// wakeup at a small random offset (1..200 cycles) to de-synchronize nodes.
BarrierGenerator::BarrierGenerator(NodeID node, SyntheticDriver& driver) :
  m_driver(driver)
{
  m_status = BarrierGeneratorStatus_Thinking;
  m_last_transition = 0;
  m_node = node;
  m_counter = 0;       // barrier episodes completed by this generator
  proc_counter = 0;    // same count, reported to the driver on completion
  m_local_sense = false;  // sense-reversing barrier: flips each episode

  // Think-time statistics accumulators.
  m_total_think = 0;
  m_think_periods = 0;

  g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
}
+
// Destructor: nothing to release; all referenced objects are owned elsewhere.
BarrierGenerator::~BarrierGenerator()
{
}
+
+void BarrierGenerator::wakeup()
+{
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_node);
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
+
+ if (m_status == BarrierGeneratorStatus_Thinking) {
+ m_barrier_done = false;
+ m_local_sense = !m_local_sense;
+ m_status = BarrierGeneratorStatus_Test_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateTest(); // Test
+ } else if (m_status == BarrierGeneratorStatus_Test_Waiting) {
+ m_status = BarrierGeneratorStatus_Test_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateTest(); // Test
+ } else if (m_status == BarrierGeneratorStatus_Release_Waiting) {
+ m_status = BarrierGeneratorStatus_Release_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateRelease(); // Test
+ } else if (m_status == BarrierGeneratorStatus_StoreBarrierCounter_Waiting) {
+ m_status = BarrierGeneratorStatus_StoreBarrierCounter_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateStoreCtr();
+ } else if (m_status == BarrierGeneratorStatus_StoreFlag_Waiting) {
+ m_status = BarrierGeneratorStatus_StoreFlag_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateStoreFlag();
+ } else if (m_status == BarrierGeneratorStatus_Holding) {
+ m_status = BarrierGeneratorStatus_Release_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateRelease(); // Release
+ } else if (m_status == BarrierGeneratorStatus_Before_Swap) {
+ m_status = BarrierGeneratorStatus_Swap_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateSwap();
+ } else if (m_status == BarrierGeneratorStatus_SpinFlag_Ready) {
+ m_status = BarrierGeneratorStatus_SpinFlag_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateLoadFlag();
+ } else {
+ WARN_EXPR(m_status);
+ ERROR_MSG("Invalid status");
+ }
+}
+
// Memory-system callback: completes the request issued by the matching
// *_Pending state, reads/writes the returned sub-block, and schedules the
// next step of the barrier protocol.  Data layout (as used below): the lock
// occupies bit 7 of the byte at 0x40, the barrier counter its low 4 bits,
// and the sense flag lives in the byte at 0x00.
void BarrierGenerator::performCallback(NodeID proc, SubBlock& data)
{
  Address address = data.getAddress();
  assert(proc == m_node);

  DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
  DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
  DEBUG_EXPR(TESTER_COMP, LowPrio, address);
  DEBUG_EXPR(TESTER_COMP, LowPrio, data);

  if (m_status == BarrierGeneratorStatus_Test_Pending) {
    // Result of the test-and-test-and-set "test" load: bit 7 is the lock.
    uint8 dat = data.readByte();
    uint8 lock = dat >> 7;
    if (lock == 1) {
      // Locked - keep spinning
      m_status = BarrierGeneratorStatus_Test_Waiting;
      m_last_transition = g_eventQueue_ptr->getTime();
      g_eventQueue_ptr->scheduleEvent(this, waitTime());
    } else {
      // Unlocked - try the swap
      m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
      m_status = BarrierGeneratorStatus_Before_Swap;
      m_last_transition = g_eventQueue_ptr->getTime();
      g_eventQueue_ptr->scheduleEvent(this, waitTime());
    }
  } else if (m_status == BarrierGeneratorStatus_Swap_Pending) {
    // Result of the atomic swap attempt on the lock byte.
    m_driver.recordSwapLatency(g_eventQueue_ptr->getTime() - m_last_transition);
    uint8 dat = data.readByte();
    uint8 lock = dat >> 7;
    if (lock == 1) {
      // We failed to aquire the lock
      m_status = BarrierGeneratorStatus_Test_Waiting;
      m_last_transition = g_eventQueue_ptr->getTime();
      g_eventQueue_ptr->scheduleEvent(this, waitTime());
    } else {
      // We acquired the lock
      dat = dat | 0x80;
      data.writeByte(dat);
      m_status = BarrierGeneratorStatus_StoreBarrierCounter_Waiting;
      m_last_transition = g_eventQueue_ptr->getTime();
      DEBUG_MSG(TESTER_COMP, HighPrio, "Acquired");
      DEBUG_EXPR(TESTER_COMP, HighPrio, proc);
      DEBUG_EXPR(TESTER_COMP, HighPrio, g_eventQueue_ptr->getTime());
      // g_eventQueue_ptr->scheduleEvent(this, holdTime());

      g_eventQueue_ptr->scheduleEvent(this, 1);

      // initiateLoadCtr();
    }
  } else if (m_status == BarrierGeneratorStatus_StoreBarrierCounter_Pending) {

    // if value == p, reset counter and set local sense flag
    uint8 ctr = data.readByte();
    //uint8 sense = ctr >> 4;
    ctr = ctr & 0x0F;  // low 4 bits hold the arrival count

    ctr++;
    data.writeByte( ctr | 0x80); // store counter and lock

    //cout << m_node << " incremented Barrier_ctr to " << (int)ctr << ", " << data << "\n";

    // NOTE(review): 16 here is the expected number of participants — appears
    // hard-coded rather than derived from the processor count; confirm.
    if (ctr == (uint8) 16) {

      // Last arriver: zero the counter (and lock bit), then flip the shared
      // sense flag to release the spinners.
      data.writeByte( 0x0 );
      m_status = BarrierGeneratorStatus_StoreFlag_Waiting;
      m_barrier_done = true;

      g_eventQueue_ptr->scheduleEvent(this, 1);
    }
    else {

      // Not last: just release the lock and go spin on the flag.
      m_status = BarrierGeneratorStatus_Release_Waiting;
      g_eventQueue_ptr->scheduleEvent(this, 1);
    }
  } else if (m_status == BarrierGeneratorStatus_StoreFlag_Pending) {

    // write flag
    if (m_local_sense) {
      data.writeByte( 0x01 );
    }
    else {
      data.writeByte( 0x00 );
    }

    m_status = BarrierGeneratorStatus_Release_Waiting;
    g_eventQueue_ptr->scheduleEvent(this, 1);

  } else if (m_status == BarrierGeneratorStatus_Release_Pending) {
    m_driver.recordReleaseLatency(g_eventQueue_ptr->getTime() - m_last_transition);
    // We're releasing the lock
    uint8 dat = data.readByte();
    dat = dat & 0x7F;  // clear the lock bit (bit 7)
    data.writeByte(dat);

    if (m_barrier_done) {
      // This node was the last arriver; the episode is finished for it.
      m_counter++;
      proc_counter++;
      if (m_counter < g_tester_length) {
        m_status = BarrierGeneratorStatus_Thinking;
        m_last_transition = g_eventQueue_ptr->getTime();
        g_eventQueue_ptr->scheduleEvent(this, thinkTime());
      } else {

        m_driver.reportDone(proc_counter, m_node);
        m_last_transition = g_eventQueue_ptr->getTime();
      }
    }
    else {
      // Not the last arriver: spin on the sense flag until it matches.
      m_status = BarrierGeneratorStatus_SpinFlag_Ready;
      m_last_transition = g_eventQueue_ptr->getTime();
      g_eventQueue_ptr->scheduleEvent(this, waitTime());
    }
  } else if (m_status == BarrierGeneratorStatus_SpinFlag_Pending) {

    uint8 sense = data.readByte();


    if (sense != m_local_sense) {
      // Flag not flipped yet - keep spinning.
      m_status = BarrierGeneratorStatus_SpinFlag_Ready;
      m_last_transition = g_eventQueue_ptr->getTime();
      g_eventQueue_ptr->scheduleEvent(this, waitTime());
    }
    else {
      // Barrier passed: count the episode and either think or report done.
      m_counter++;
      proc_counter++;
      if (m_counter < g_tester_length) {
        m_status = BarrierGeneratorStatus_Thinking;
        m_last_transition = g_eventQueue_ptr->getTime();
        g_eventQueue_ptr->scheduleEvent(this, thinkTime());
      } else {
        m_driver.reportDone(proc_counter, m_node);
        m_status = BarrierGeneratorStatus_Done;
        m_last_transition = g_eventQueue_ptr->getTime();
      }
    }

  } else {
    WARN_EXPR(m_status);
    ERROR_MSG("Invalid status");
  }
}
+
+int BarrierGenerator::thinkTime()
+{
+ int ret;
+ float ratio = g_think_fudge_factor;
+
+ // return 400;
+
+ if (ratio == 0) {
+ return g_think_time;
+ }
+
+ int r = random();
+ int x = (int) ( (float)g_think_time*ratio*2.0);
+ int mod = r % x;
+
+
+ int rand = ( mod+1 - ((float)g_think_time*ratio) );
+
+ ret = (g_think_time + rand);
+
+ m_total_think += ret;
+ m_think_periods++;
+
+ return ret;
+}
+
// Spin-retry delay, in cycles, taken from the global tester configuration.
int BarrierGenerator::waitTime() const
{
  return g_wait_time;
}
+
+
// Issues the "test" load of the lock byte at 0x40 (the test phase of
// test-and-test-and-set).
void BarrierGenerator::initiateTest()
{
  DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Test");
  sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_LD, Address(1), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
}
+
// Issues the atomic swap on the lock byte at 0x40 to try to acquire the lock.
void BarrierGenerator::initiateSwap()
{
  DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Swap");
  sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_ATOMIC, Address(2), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
}
+
// Issues the store to 0x40 that clears the lock bit, releasing the lock.
void BarrierGenerator::initiateRelease()
{
  DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Release");
  sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
}
+
// Issues a load of the barrier counter byte at 0x40 (currently unused; see
// the commented-out call site in performCallback).
void BarrierGenerator::initiateLoadCtr()
{
  DEBUG_MSG(TESTER_COMP, MedPrio, "initiating load of barrier counter");
  sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_LD, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
}
+
+void BarrierGenerator::initiateStoreCtr()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating load of barrier counter");
+ sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
+}
+
+void BarrierGenerator::initiateStoreFlag()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating load of barrier counter");
+ sequencer()->makeRequest(CacheMsg(Address(0x00), CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
+}
+
+void BarrierGenerator::initiateLoadFlag()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating load of barrier counter");
+ sequencer()->makeRequest(CacheMsg(Address(0x00), CacheRequestType_LD, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
+}
+
+
+Sequencer* BarrierGenerator::sequencer() const
+{
+ return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
+}
+
// Debug print hook (required by the Consumer interface); intentionally empty.
void BarrierGenerator::print(ostream& out) const
{
}
+
diff --git a/src/mem/ruby/tester/BarrierGenerator.hh b/src/mem/ruby/tester/BarrierGenerator.hh
new file mode 100644
index 000000000..1b16755a5
--- /dev/null
+++ b/src/mem/ruby/tester/BarrierGenerator.hh
@@ -0,0 +1,138 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef BARRIERGENERATOR_H
+#define BARRIERGENERATOR_H
+
+#include "Global.hh"
+#include "Consumer.hh"
+#include "NodeID.hh"
+#include "Address.hh"
+
+class Sequencer;
+class SubBlock;
+class SyntheticDriver;
+
+
// States of the barrier-generator state machine.  Each *_Waiting state has a
// scheduled retry pending; each *_Pending state has a memory request in
// flight whose completion arrives via performCallback().
enum BarrierGeneratorStatus {
  BarrierGeneratorStatus_FIRST,
  BarrierGeneratorStatus_Thinking = BarrierGeneratorStatus_FIRST, // between barrier episodes
  BarrierGeneratorStatus_Test_Pending,        // lock-test load in flight
  BarrierGeneratorStatus_Test_Waiting,        // spin delay before re-testing the lock
  BarrierGeneratorStatus_Before_Swap,         // lock observed free; about to swap
  BarrierGeneratorStatus_Swap_Pending,        // atomic lock-acquire swap in flight
  BarrierGeneratorStatus_Holding,             // holding the lock
  BarrierGeneratorStatus_Release_Pending,     // lock-release store in flight
  BarrierGeneratorStatus_Release_Waiting,     // about to issue the lock release
  BarrierGeneratorStatus_StoreFlag_Waiting,   // about to flip the shared sense flag
  BarrierGeneratorStatus_StoreFlag_Pending,   // sense-flag store in flight
  BarrierGeneratorStatus_Done,                // all episodes completed
  BarrierGeneratorStatus_SpinFlag_Ready,      // about to (re)load the sense flag
  BarrierGeneratorStatus_SpinFlag_Pending,    // sense-flag load in flight
  BarrierGeneratorStatus_LoadBarrierCounter_Pending,   // (unused path) counter load in flight
  BarrierGeneratorStatus_StoreBarrierCounter_Pending,  // counter store in flight
  BarrierGeneratorStatus_StoreBarrierCounter_Waiting,  // about to store the counter
  BarrierGeneratorStatus_NUM
};
+
+
+// UNCOMMENT THIS FOR A SINGLE WORK QUEUE
+// static int m_counter;
+
// One synthetic-workload participant in a sense-reversing, lock-protected
// barrier.  Driven by the event queue (wakeup) and by memory-system
// completions (performCallback); reports to the owning SyntheticDriver.
class BarrierGenerator : public Consumer {
public:
  // Constructors
  BarrierGenerator(NodeID node, SyntheticDriver& driver);

  // Destructor
  ~BarrierGenerator();

  // Public Methods
  // Event-queue callback: advances the state machine and issues requests.
  void wakeup();
  // Memory-system callback for the request issued by the current state.
  void performCallback(NodeID proc, SubBlock& data);

  void print(ostream& out) const;
private:
  // Private Methods
  // Random think delay between episodes (also accumulates statistics).
  int thinkTime() ;
  // Fixed spin-retry delay.
  int waitTime() const;
  void initiateTest();
  void initiateSwap();
  void initiateRelease();
  void initiateLoadCtr();
  void initiateStoreCtr();
  void initiateLoadFlag();
  void initiateStoreFlag();
  // Sequencer serving this generator's node.
  Sequencer* sequencer() const;

  // Private copy constructor and assignment operator (declared, not defined,
  // to forbid copying).
  BarrierGenerator(const BarrierGenerator& obj);
  BarrierGenerator& operator=(const BarrierGenerator& obj);

  // Data Members (m_ prefix)
  SyntheticDriver& m_driver;
  NodeID m_node;
  BarrierGeneratorStatus m_status;
  int proc_counter;           // episodes completed, reported to the driver

  int m_counter;              // episodes completed, compared to g_tester_length

  bool m_local_sense;         // this node's current barrier sense
  bool m_barrier_done;        // true if this node was the last arriver

  Time m_last_transition;     // time of the last state transition (for latency stats)
  Address m_address;

  int m_total_think;          // sum of all think delays issued
  int m_think_periods;        // number of think delays issued
};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const BarrierGenerator& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
// Output operator definition: delegates to BarrierGenerator::print and
// flushes so debug output is ordered with respect to other streams.
extern inline
ostream& operator<<(ostream& out, const BarrierGenerator& obj)
{
  obj.print(out);
  out << flush;
  return out;
}
+
#endif //BARRIERGENERATOR_H
+
diff --git a/src/mem/ruby/tester/Check.cc b/src/mem/ruby/tester/Check.cc
new file mode 100644
index 000000000..3e2649709
--- /dev/null
+++ b/src/mem/ruby/tester/Check.cc
@@ -0,0 +1,251 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Check.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "SubBlock.hh"
+#include "Chip.hh"
+
// Constructs a check for 'address': picks a random value, a random
// initiating node, and a random access mode; the store count starts at zero.
Check::Check(const Address& address, const Address& pc)
{
  m_status = TesterStatus_Idle;

  pickValue();
  pickInitiatingNode();
  changeAddress(address);
  m_pc = pc;
  m_access_mode = AccessModeType(random() % AccessModeType_NUM);
  m_store_count = 0;
}
+
// Starts the next step for this check: an action (store) when idle, a check
// (load + compare) when all stores are done, or nothing while a request is
// pending.  Occasionally also fires a prefetch from a random processor.
void Check::initiate()
{
  DEBUG_MSG(TESTER_COMP, MedPrio, "initiating");
  DEBUG_EXPR(TESTER_COMP, MedPrio, *this);

  // current CMP protocol doesn't support prefetches
  if (!Protocol::m_CMP && (random() & 0xf) == 0) { // 1 in 16 chance
    initiatePrefetch(); // Prefetch from random processor
  }

  if(m_status == TesterStatus_Idle) {
    initiateAction();
  } else if(m_status == TesterStatus_Ready) {
    initiateCheck();
  } else {
    // Pending - do nothing
    DEBUG_MSG(TESTER_COMP, MedPrio, "initiating action/check - failed: action/check is pending\n");
  }
}
+
// Issues a prefetch for this check's address through the given sequencer.
// The prefetch type is randomized: mostly loads/ifetches, occasionally a
// store prefetch.  Silently dropped if the sequencer is not ready.
void Check::initiatePrefetch(Sequencer* targetSequencer_ptr)
{
  DEBUG_MSG(TESTER_COMP, MedPrio, "initiating prefetch");

  CacheRequestType type;
  if ((random() & 0x7) != 0) { // 1 in 8 chance
    if ((random() & 0x1) == 0) { // 50% chance
      type = CacheRequestType_LD;
    } else {
      type = CacheRequestType_IFETCH;
    }
  } else {
    // Remaining 1-in-8 case: prefetch for a store.
    type = CacheRequestType_ST;
  }
  assert(targetSequencer_ptr != NULL);
  CacheMsg request(m_address, m_address, type, m_pc, m_access_mode, 0, PrefetchBit_Yes, 0, Address(0), 0 /* only 1 SMT thread */, 0, false);
  if (targetSequencer_ptr->isReady(request)) {
    targetSequencer_ptr->makeRequest(request);
  }
}
+
+void Check::initiatePrefetch()
+{
+ // Any sequencer can issue a prefetch for this address
+ Sequencer* targetSequencer_ptr = g_system_ptr->getChip(random() % RubyConfig::numberOfChips())->getSequencer(random() % RubyConfig::numberOfProcsPerChip());
+ assert(targetSequencer_ptr != NULL);
+ initiatePrefetch(targetSequencer_ptr);
+}
+
// Issues the next one-byte store (or atomic) of this check's action phase.
// The target address is offset by the store count so each byte of the check
// region is written in turn.  Status becomes Action_Pending only if the
// sequencer accepted the request.
void Check::initiateAction()
{
  DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Action");
  assert(m_status == TesterStatus_Idle);

  CacheRequestType type = CacheRequestType_ST;
  if ((random() & 0x1) == 0) { // 50% chance
    type = CacheRequestType_ATOMIC;
  }

  CacheMsg request(Address(m_address.getAddress()+m_store_count), Address(m_address.getAddress()+m_store_count), type, m_pc, m_access_mode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */, 0, false);
  Sequencer* sequencer_ptr = initiatingSequencer();
  if (sequencer_ptr->isReady(request) == false) {
    DEBUG_MSG(TESTER_COMP, MedPrio, "failed to initiate action - sequencer not ready\n");
  } else {
    DEBUG_MSG(TESTER_COMP, MedPrio, "initiating action - successful\n");
    DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
    m_status = TesterStatus_Action_Pending;
    sequencer_ptr->makeRequest(request);
  }
  DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
}
+
+void Check::initiateCheck()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Check");
+ assert(m_status == TesterStatus_Ready);
+
+ CacheRequestType type = CacheRequestType_LD;
+ if ((random() & 0x1) == 0) { // 50% chance
+ type = CacheRequestType_IFETCH;
+ }
+
+ CacheMsg request(m_address, m_address, type, m_pc, m_access_mode, CHECK_SIZE, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */, 0, false);
+ Sequencer* sequencer_ptr = initiatingSequencer();
+ if (sequencer_ptr->isReady(request) == false) {
+ DEBUG_MSG(TESTER_COMP, MedPrio, "failed to initiate check - sequencer not ready\n");
+ } else {
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating check - successful\n");
+ DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
+ m_status = TesterStatus_Check_Pending;
+ sequencer_ptr->makeRequest(request);
+ }
+ DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
+}
+
// Memory-system callback: completes the pending action (store one byte of
// the check pattern) or the pending check (verify every byte of the region
// against the expected pattern, erroring out on mismatch).
void Check::performCallback(NodeID proc, SubBlock& data)
{
  Address address = data.getAddress();
  // assert(getAddress() == address); // This isn't exactly right since we now have multi-byte checks
  assert(getAddress().getLineAddress() == address.getLineAddress());

  DEBUG_MSG(TESTER_COMP, MedPrio, "Callback");
  DEBUG_EXPR(TESTER_COMP, MedPrio, *this);

  if (m_status == TesterStatus_Action_Pending) {
    DEBUG_MSG(TESTER_COMP, MedPrio, "Action callback");
    // Perform store
    data.setByte(0, m_value+m_store_count); // We store one byte at a time
    m_store_count++;

    if (m_store_count == CHECK_SIZE) {
      // All bytes written: the region is ready to be checked.
      m_status = TesterStatus_Ready;
    } else {
      // More bytes to store: back to Idle so initiate() issues the next one.
      m_status = TesterStatus_Idle;
    }
  } else if (m_status == TesterStatus_Check_Pending) {
    DEBUG_MSG(TESTER_COMP, MedPrio, "Check callback");
    // Perform load/check
    for(int byte_number=0; byte_number<CHECK_SIZE; byte_number++) {
      // Byte i must hold m_value+i; only enforced when the protocol carries
      // real data blocks (DATA_BLOCK == true).
      if (uint8(m_value+byte_number) != data.getByte(byte_number) && (DATA_BLOCK == true)) {
        WARN_EXPR(proc);
        WARN_EXPR(address);
        WARN_EXPR(data);
        WARN_EXPR(byte_number);
        WARN_EXPR((int)m_value+byte_number);
        WARN_EXPR((int)data.getByte(byte_number));
        WARN_EXPR(*this);
        WARN_EXPR(g_eventQueue_ptr->getTime());
        ERROR_MSG("Action/check failure");
      }
    }
    DEBUG_MSG(TESTER_COMP, HighPrio, "Action/check success:");
    DEBUG_EXPR(TESTER_COMP, HighPrio, *this);
    DEBUG_EXPR(TESTER_COMP, MedPrio, data);

    // Start a fresh round with a new random value.
    m_status = TesterStatus_Idle;
    pickValue();

  } else {
    WARN_EXPR(*this);
    WARN_EXPR(proc);
    WARN_EXPR(data);
    WARN_EXPR(m_status);
    WARN_EXPR(g_eventQueue_ptr->getTime());
    ERROR_MSG("Unexpected TesterStatus");
  }

  DEBUG_EXPR(TESTER_COMP, MedPrio, proc);
  DEBUG_EXPR(TESTER_COMP, MedPrio, data);
  DEBUG_EXPR(TESTER_COMP, MedPrio, getAddress().getLineAddress());
  DEBUG_MSG(TESTER_COMP, MedPrio, "Callback done");
  DEBUG_EXPR(TESTER_COMP, MedPrio, *this);
}
+
// Retargets this check at a new address and restarts the action phase.
// Only legal when no request is pending (Idle or Ready).
void Check::changeAddress(const Address& address)
{
  assert((m_status == TesterStatus_Idle) || (m_status == TesterStatus_Ready));
  m_status = TesterStatus_Idle;
  m_address = address;
  m_store_count = 0;
}
+
+Sequencer* Check::initiatingSequencer() const
+{
+ return g_system_ptr->getChip(m_initiatingNode/RubyConfig::numberOfProcsPerChip())->getSequencer(m_initiatingNode%RubyConfig::numberOfProcsPerChip());
+}
+
// Picks a fresh random one-byte base value for the check pattern and resets
// the store count for a new action phase.
void Check::pickValue()
{
  assert(m_status == TesterStatus_Idle);
  // Redundant given the assert above, but keeps the invariant explicit when
  // asserts are compiled out.
  m_status = TesterStatus_Idle;
  // DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
  DEBUG_MSG(TESTER_COMP, MedPrio, *this);
  m_value = random() & 0xff; // One byte
  // DEBUG_MSG(TESTER_COMP, MedPrio, m_value);
  DEBUG_MSG(TESTER_COMP, MedPrio, *this);
  m_store_count = 0;
}
+
// Picks a random processor to issue this check's requests and restarts the
// action phase.  Only legal when no request is pending (Idle or Ready).
void Check::pickInitiatingNode()
{
  assert((m_status == TesterStatus_Idle) || (m_status == TesterStatus_Ready));
  m_status = TesterStatus_Idle;
  DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
  m_initiatingNode = (random() % RubyConfig::numberOfProcessors());
  DEBUG_MSG(TESTER_COMP, MedPrio, m_initiatingNode);
  m_store_count = 0;
}
+
// Prints this check's full state on one line (used by the WARN/DEBUG
// expression macros via operator<<).
void Check::print(ostream& out) const
{
  out << "["
      << m_address << ", value: "
      << (int) m_value << ", status: "
      << m_status << ", initiating node: "
      << m_initiatingNode << ", store_count: "
      << m_store_count
      << "]" << flush;
}
diff --git a/src/mem/ruby/tester/Check.hh b/src/mem/ruby/tester/Check.hh
new file mode 100644
index 000000000..31959262d
--- /dev/null
+++ b/src/mem/ruby/tester/Check.hh
@@ -0,0 +1,107 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef CHECK_H
+#define CHECK_H
+
+#include "Global.hh"
+#include "Address.hh"
+#include "NodeID.hh"
+#include "TesterStatus.hh"
+#include "AccessModeType.hh"
+class Sequencer;
+class SubBlock;
+
// Each check covers a CHECK_SIZE-byte region (a power of two so regions can
// be alignment-checked with bitSelect).
const int CHECK_SIZE_BITS = 2;
const int CHECK_SIZE = (1<<CHECK_SIZE_BITS);

// One action/check unit of the random tester: repeatedly stores an
// incrementing one-byte pattern across a CHECK_SIZE region, then loads the
// region back and verifies every byte.
class Check {
public:
  // Constructors
  Check(const Address& address, const Address& pc);

  // Default Destructor
  //~Check();

  // Public Methods

  void initiate(); // Does Action or Check or nether
  // Completion callback for the request issued by initiate().
  void performCallback(NodeID proc, SubBlock& data);
  const Address& getAddress() { return m_address; }
  // Retargets the check at a new address (only when no request is pending).
  void changeAddress(const Address& address);

  void print(ostream& out) const;
private:
  // Private Methods
  // Prefetch through a specific sequencer / a random one.
  void initiatePrefetch(Sequencer* targetSequencer_ptr);
  void initiatePrefetch();
  // Issue the next pattern store / the verifying load.
  void initiateAction();
  void initiateCheck();

  // Sequencer of the node chosen by pickInitiatingNode().
  Sequencer* initiatingSequencer() const;

  // Re-randomize the pattern value / the issuing processor.
  void pickValue();
  void pickInitiatingNode();

  // Using default copy constructor and assignment operator
  // Check(const Check& obj);
  // Check& operator=(const Check& obj);

  // Data Members (m_ prefix)
  TesterStatus m_status;       // Idle / Ready / Action_Pending / Check_Pending
  uint8 m_value;               // base byte of the pattern (byte i holds m_value+i)
  int m_store_count;           // bytes of the region stored so far
  NodeID m_initiatingNode;     // processor issuing this check's requests
  Address m_address;           // start of the CHECK_SIZE region
  Address m_pc;                // fake PC attached to the requests
  AccessModeType m_access_mode;
};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Check& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
// Output operator definition: delegates to Check::print and flushes so debug
// output is ordered with respect to other streams.
extern inline
ostream& operator<<(ostream& out, const Check& obj)
{
  obj.print(out);
  out << flush;
  return out;
}
+
+#endif //CHECK_H
diff --git a/src/mem/ruby/tester/CheckTable.cc b/src/mem/ruby/tester/CheckTable.cc
new file mode 100644
index 000000000..488b58144
--- /dev/null
+++ b/src/mem/ruby/tester/CheckTable.cc
@@ -0,0 +1,128 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "CheckTable.hh"
+#include "Check.hh"
+#include "Map.hh"
+
// Builds the fixed set of checks: one densely packed run of adjacent regions
// (false sharing within lines) plus two strided runs offset by CHECK_SIZE
// (limited false sharing and cache-set conflicts).  Overlapping candidates
// are silently dropped by addCheck().
CheckTable::CheckTable()
{
  m_lookup_map_ptr = new Map<Address, Check*>;
  physical_address_t physical = 0;
  Address address;

  const int size1 = 32;
  const int size2 = 100;

  // The first set is to get some false sharing
  physical = 1000;
  for (int i=0; i<size1; i++) {
    // Setup linear addresses
    address.setAddress(physical);
    addCheck(address);
    physical += CHECK_SIZE;
  }

  // The next two sets are to get some limited false sharing and cache conflicts
  physical = 1000;
  for (int i=0; i<size2; i++) {
    // Setup linear addresses
    address.setAddress(physical);
    addCheck(address);
    physical += 256;
  }

  physical = 1000 + CHECK_SIZE;
  for (int i=0; i<size2; i++) {
    // Setup linear addresses
    address.setAddress(physical);
    addCheck(address);
    physical += 256;
  }
}
+
+CheckTable::~CheckTable()
+{
+ int size = m_check_vector.size();
+ for (int i=0; i<size; i++) {
+ delete m_check_vector[i];
+ }
+ delete m_lookup_map_ptr;
+}
+
// Adds a check for the CHECK_SIZE region starting at 'address'.  The region
// must be CHECK_SIZE-aligned (only enforced when CHECK_SIZE > 1); if any
// byte of the region already belongs to another check, the whole candidate
// is silently dropped so no byte is ever covered twice.
void CheckTable::addCheck(const Address& address)
{
  if (log_int(CHECK_SIZE) != 0) {
    if (address.bitSelect(0,CHECK_SIZE_BITS-1) != 0) {
      ERROR_MSG("Check not aligned");
    }
  }

  for (int i=0; i<CHECK_SIZE; i++) {
    if (m_lookup_map_ptr->exist(Address(address.getAddress()+i))) {
      // A mapping for this byte already existed, discard the entire check
      return;
    }
  }

  // The fake PC (100 + index) makes each check's requests distinguishable.
  Check* check_ptr = new Check(address, Address(100+m_check_vector.size()));
  for (int i=0; i<CHECK_SIZE; i++) {
    // Insert it once per byte
    m_lookup_map_ptr->add(Address(address.getAddress()+i), check_ptr);
  }
  m_check_vector.insertAtBottom(check_ptr);
}
+
+Check* CheckTable::getRandomCheck()
+{
+ return m_check_vector[random() % m_check_vector.size()];
+}
+
+Check* CheckTable::getCheck(const Address& address)
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "Looking for check by address");
+ DEBUG_EXPR(TESTER_COMP, MedPrio, address);
+
+ if (m_lookup_map_ptr->exist(address)) {
+ Check* check = m_lookup_map_ptr->lookup(address);
+ assert(check != NULL);
+ return check;
+ } else {
+ return NULL;
+ }
+}
+
// Debug print hook; intentionally empty.
void CheckTable::print(ostream& out) const
{
}
diff --git a/src/mem/ruby/tester/CheckTable.hh b/src/mem/ruby/tester/CheckTable.hh
new file mode 100644
index 000000000..4a162f5bc
--- /dev/null
+++ b/src/mem/ruby/tester/CheckTable.hh
@@ -0,0 +1,93 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef CHECKTABLE_H
+#define CHECKTABLE_H
+
+#include "Global.hh"
+#include "Vector.hh"
+
+class Address;
+class Check;
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+
+// Table of Check objects used by the random tester.  Owns the checks
+// (deleted in the destructor) and a byte-granular map from every covered
+// Address to its Check for O(1)-ish lookup.
+class CheckTable {
+public:
+ // Constructors
+ CheckTable();
+
+ // Destructor
+ ~CheckTable();
+
+ // Public Methods
+
+ Check* getRandomCheck();
+ Check* getCheck(const Address& address); // returns NULL if address not covered
+
+ // bool isPresent(const Address& address) const;
+ // void removeCheckFromTable(const Address& address);
+ // bool isTableFull() const;
+ // Need a method to select a check or retrieve a check
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+ void addCheck(const Address& address); // discards the check on any overlap
+
+ // Private copy constructor and assignment operator
+ CheckTable(const CheckTable& obj);
+ CheckTable& operator=(const CheckTable& obj);
+
+ // Data Members (m_ prefix)
+ Vector<Check*> m_check_vector; // owning container, one entry per check
+ Map<Address, Check*>* m_lookup_map_ptr; // non-owning, one entry per covered byte
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const CheckTable& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Output operator definition: delegates to CheckTable::print and flushes.
+extern inline
+ostream& operator<<(ostream& out, const CheckTable& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //CHECKTABLE_H
diff --git a/src/mem/ruby/tester/DetermGETXGenerator.cc b/src/mem/ruby/tester/DetermGETXGenerator.cc
new file mode 100644
index 000000000..1caebbdab
--- /dev/null
+++ b/src/mem/ruby/tester/DetermGETXGenerator.cc
@@ -0,0 +1,151 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// This Deterministic Generator generates GETX requests for all nodes in the system
+// The GETX requests are generated one at a time in round-robin fashion 0...1...2...etc.
+
+#include "DetermGETXGenerator.hh"
+#include "DetermGETXGeneratorStatus.hh"
+#include "LockStatus.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "RubyConfig.hh"
+#include "SubBlock.hh"
+#include "DeterministicDriver.hh"
+#include "Chip.hh"
+
+// Constructor: starts this node's generator in the Thinking state and
+// schedules the first wakeup at a random offset in [1, 200] cycles so that
+// the nodes do not all poll the driver on the same cycle.
+DetermGETXGenerator::DetermGETXGenerator(NodeID node, DeterministicDriver& driver) :
+ m_driver(driver)
+{
+ m_status = DetermGETXGeneratorStatus_Thinking;
+ m_last_transition = 0;
+ m_node = node;
+ m_address = Address(9999); // initialize to null value
+ m_counter = 0;
+
+ // don't know exactly when this node needs to request so just guess randomly
+ g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
+}
+
+// Destructor: nothing to release -- the generator owns no resources
+// (the driver is held by reference).
+DetermGETXGenerator::~DetermGETXGenerator()
+{
+}
+
+// Scheduled-event handler.  If the driver says this node's store is ready,
+// transition Thinking -> Store_Pending and issue the GETX; otherwise poll
+// again after thinkTime().  Waking up in any other state is a fatal error.
+void DetermGETXGenerator::wakeup()
+{
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_node);
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
+
+ // determine if this node is next for the GETX round robin request
+ if (m_status == DetermGETXGeneratorStatus_Thinking) {
+ if (m_driver.isStoreReady(m_node)) {
+ pickAddress();
+ m_status = DetermGETXGeneratorStatus_Store_Pending; // Store Pending
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateStore(); // GETX
+ } else { // I'll check again later
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ }
+ } else {
+ WARN_EXPR(m_status);
+ ERROR_MSG("Invalid status");
+ }
+
+}
+
+// Completion callback from the sequencer for the pending store.  Records
+// latency, writes a byte to mark the store, advances the driver's store
+// queue, and either goes back to Thinking for the next iteration or
+// reports done after g_tester_length stores.
+void DetermGETXGenerator::performCallback(NodeID proc, SubBlock& data)
+{
+ Address address = data.getAddress();
+ assert(proc == m_node);
+ assert(address == m_address);
+
+ DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, address);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, data);
+
+ if (m_status == DetermGETXGeneratorStatus_Store_Pending) {
+ m_driver.recordStoreLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ data.writeByte(m_node); // stamp the block with this node's id
+ m_driver.storeCompleted(m_node, data.getAddress()); // advance the store queue
+
+ m_counter++;
+ if (m_counter < g_tester_length) {
+ m_status = DetermGETXGeneratorStatus_Thinking;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ } else {
+ m_driver.reportDone();
+ m_status = DetermGETXGeneratorStatus_Done;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ }
+
+ } else {
+ WARN_EXPR(m_status);
+ ERROR_MSG("Invalid status");
+ }
+}
+
+// Delay (in cycles) between polls while waiting for our turn to store.
+int DetermGETXGenerator::thinkTime() const
+{
+ return g_think_time;
+}
+
+// Delay (in cycles) before waking up again after a completed store.
+int DetermGETXGenerator::waitTime() const
+{
+ return g_wait_time;
+}
+
+// Asks the driver for the next store address assigned to this node.
+// Only legal while Thinking (i.e. no request is outstanding).
+void DetermGETXGenerator::pickAddress()
+{
+ assert(m_status == DetermGETXGeneratorStatus_Thinking);
+
+ m_address = m_driver.getNextStoreAddr(m_node);
+}
+
+// Issues the GETX by handing a store (CacheRequestType_ST) CacheMsg for
+// m_address to this node's sequencer.  The remaining CacheMsg arguments
+// (PC=Address(3), user mode, single SMT thread, ...) are tester
+// placeholders -- confirm against the CacheMsg constructor.
+void DetermGETXGenerator::initiateStore()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Store");
+ sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */, 0, false));
+}
+
+// Maps this node id to its chip (node / procsPerChip) and the in-chip
+// sequencer slot (node % procsPerChip).
+Sequencer* DetermGETXGenerator::sequencer() const
+{
+ return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
+}
+
+// Intentionally empty; required by the operator<< declared in the header.
+void DetermGETXGenerator::print(ostream& out) const
+{
+}
+
diff --git a/src/mem/ruby/tester/DetermGETXGenerator.hh b/src/mem/ruby/tester/DetermGETXGenerator.hh
new file mode 100644
index 000000000..eff1eb6b3
--- /dev/null
+++ b/src/mem/ruby/tester/DetermGETXGenerator.hh
@@ -0,0 +1,104 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+// This Deterministic Generator generates GETX requests for all nodes in the system
+// The GETX requests are generated one at a time in round-robin fashion 0...1...2...etc.
+
+#ifndef DETERMGETXGENERATOR_H
+#define DETERMGETXGENERATOR_H
+
+#include "Global.hh"
+#include "Consumer.hh"
+#include "DetermGETXGeneratorStatus.hh"
+#include "NodeID.hh"
+#include "Address.hh"
+#include "SpecifiedGenerator.hh"
+
+class Sequencer;
+class SubBlock;
+class DeterministicDriver;
+
+// Per-node generator that issues GETX (store) requests in round-robin
+// order under control of a DeterministicDriver.  Driven by the event
+// queue (wakeup) and the sequencer (performCallback).
+class DetermGETXGenerator : public SpecifiedGenerator {
+public:
+ // Constructors
+ DetermGETXGenerator(NodeID node, DeterministicDriver& driver);
+
+ // Destructor
+ ~DetermGETXGenerator();
+
+ // Public Methods
+ void wakeup(); // poll the driver; issue a store when it is our turn
+ void performCallback(NodeID proc, SubBlock& data); // store-complete notification
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+ int thinkTime() const;
+ int waitTime() const;
+ void initiateStore();
+ void pickAddress();
+
+ Sequencer* sequencer() const;
+
+ // copy constructor and assignment operator
+ DetermGETXGenerator(const DetermGETXGenerator& obj);
+ DetermGETXGenerator& operator=(const DetermGETXGenerator& obj);
+
+ // Data Members (m_ prefix)
+ DetermGETXGeneratorStatus m_status; // Thinking / Store_Pending / Done
+ int m_counter; // completed stores, compared against g_tester_length
+ Address m_address; // address of the outstanding store
+ NodeID m_node;
+ DeterministicDriver& m_driver;
+ Time m_last_transition; // time of last state change, for latency stats
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DetermGETXGenerator& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const DetermGETXGenerator& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //DETERMGETXGENERATOR_H
+
diff --git a/src/mem/ruby/tester/DetermInvGenerator.cc b/src/mem/ruby/tester/DetermInvGenerator.cc
new file mode 100644
index 000000000..020c2fe96
--- /dev/null
+++ b/src/mem/ruby/tester/DetermInvGenerator.cc
@@ -0,0 +1,202 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// This Deterministic Generator generates GETS request for all nodes in the system
+// then Invalidates them with a GETX. The GETS and GETX request are generated one
+// at a time in round-robin fashion 0...1...2...etc.
+
+#include "DetermInvGenerator.hh"
+#include "DetermInvGeneratorStatus.hh"
+#include "LockStatus.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "RubyConfig.hh"
+#include "SubBlock.hh"
+#include "DeterministicDriver.hh"
+#include "Chip.hh"
+
+// Constructor: starts in the Thinking state and schedules the first wakeup
+// at a random offset in [1, 200] cycles to desynchronize the nodes.
+DetermInvGenerator::DetermInvGenerator(NodeID node, DeterministicDriver& driver) :
+ m_driver(driver)
+{
+ m_status = DetermInvGeneratorStatus_Thinking;
+ m_last_transition = 0;
+ m_node = node;
+ m_address = Address(9999); // initiate to a NULL value
+ m_counter = 0;
+
+ // don't know exactly when this node needs to request so just guess randomly
+ g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
+}
+
+// Destructor: nothing to release -- the generator owns no resources.
+DetermInvGenerator::~DetermInvGenerator()
+{
+}
+
+// Scheduled-event handler driving the GETS-then-GETX sequence.  From
+// Thinking: issue the GETS when this node's load is ready AND our local
+// counter matches the driver's global store count (keeps transactions in
+// lockstep).  From Load_Complete: issue the invalidating GETX once every
+// node has loaded the line, otherwise keep polling.
+void DetermInvGenerator::wakeup()
+{
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_node);
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
+
+ // determine if this node is next for the load round robin request
+ if (m_status == DetermInvGeneratorStatus_Thinking) {
+ // is a load ready and waiting and are my transactions insync with global transactions
+ if (m_driver.isLoadReady(m_node) && m_counter == m_driver.getStoresCompleted()) {
+ pickLoadAddress();
+ m_status = DetermInvGeneratorStatus_Load_Pending; // Load Pending
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateLoad(); // GETS
+ } else { // I'll check again later
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ }
+ } else if (m_status == DetermInvGeneratorStatus_Load_Complete) {
+ if (m_driver.isStoreReady(m_node, m_address)) { // do a store in this transaction or start the next one
+ // node 0 being load-ready again presumably means all nodes hold the
+ // line in S -- confirm against DeterministicDriver::isLoadReady
+ if (m_driver.isLoadReady((0), m_address)) { // everyone is in S for this address i.e. back to node 0
+ m_status = DetermInvGeneratorStatus_Store_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateStore(); // GETX
+ } else { // I'm next, I just have to wait for all loads to complete
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ }
+ } else { // I'm not next to store, go back to thinking
+ m_status = DetermInvGeneratorStatus_Thinking;
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ }
+ } else {
+ WARN_EXPR(m_status);
+ ERROR_MSG("Invalid status");
+ }
+
+}
+
+// Sequencer completion callback.  Load_Pending: record load latency,
+// advance the driver's load queue, and count the transaction as done now
+// only if this node will not also store it.  Store_Pending: record store
+// latency, stamp the block, advance the store queue, and count the
+// transaction.  Either path finishes after g_tester_length transactions.
+void DetermInvGenerator::performCallback(NodeID proc, SubBlock& data)
+{
+ Address address = data.getAddress();
+ assert(proc == m_node);
+ assert(address == m_address);
+
+ DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, address);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, data);
+
+ if (m_status == DetermInvGeneratorStatus_Load_Pending) {
+ m_driver.recordLoadLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ NodeID firstByte = data.readByte(); // dummy read; value deliberately unused
+
+ m_driver.loadCompleted(m_node, data.getAddress());
+
+ if (!m_driver.isStoreReady(m_node, m_address)) { // if we don't have to store, we are done for this transaction
+ m_counter++;
+ }
+ if (m_counter < g_tester_length) {
+ m_status = DetermInvGeneratorStatus_Load_Complete;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ } else {
+ m_driver.reportDone();
+ m_status = DetermInvGeneratorStatus_Done;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ }
+
+ } else if (m_status == DetermInvGeneratorStatus_Store_Pending) {
+ m_driver.recordStoreLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ data.writeByte(m_node); // stamp the block with this node's id
+ m_driver.storeCompleted(m_node, data.getAddress()); // advance the store queue
+
+ m_counter++;
+ if (m_counter < g_tester_length) {
+ m_status = DetermInvGeneratorStatus_Thinking;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ } else {
+ m_driver.reportDone();
+ m_status = DetermInvGeneratorStatus_Done;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ }
+ } else {
+ WARN_EXPR(m_status);
+ ERROR_MSG("Invalid status");
+ }
+
+ DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, address);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, data);
+
+}
+
+// Delay (in cycles) between polls while waiting for our turn.
+int DetermInvGenerator::thinkTime() const
+{
+ return g_think_time;
+}
+
+// Delay (in cycles) before waking up again after a completed request.
+int DetermInvGenerator::waitTime() const
+{
+ return g_wait_time;
+}
+
+// Hold time configured for this tester (currently unused by the visible
+// call sites in this file -- NOTE(review): verify callers elsewhere).
+int DetermInvGenerator::holdTime() const
+{
+ return g_hold_time;
+}
+
+// Asks the driver for the next load address assigned to this node.
+// Only legal while Thinking (no request outstanding).
+void DetermInvGenerator::pickLoadAddress()
+{
+ assert(m_status == DetermInvGeneratorStatus_Thinking);
+
+ m_address = m_driver.getNextLoadAddr(m_node);
+}
+
+// Issues the GETS by handing a load (CacheRequestType_LD) CacheMsg for
+// m_address to this node's sequencer.
+void DetermInvGenerator::initiateLoad()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Load");
+ sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_LD, Address(1), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */, 0, false));
+}
+
+// Issues the invalidating GETX by handing a store (CacheRequestType_ST)
+// CacheMsg for m_address to this node's sequencer.
+void DetermInvGenerator::initiateStore()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Store");
+ sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */, 0, false));
+}
+
+// Maps this node id to its chip (node / procsPerChip) and the in-chip
+// sequencer slot (node % procsPerChip).
+Sequencer* DetermInvGenerator::sequencer() const
+{
+ return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
+}
+
+// Minimal identification string; used by the operator<< in the header.
+void DetermInvGenerator::print(ostream& out) const
+{
+ out << "[DetermInvGenerator]" << endl;
+}
+
diff --git a/src/mem/ruby/tester/DetermInvGenerator.hh b/src/mem/ruby/tester/DetermInvGenerator.hh
new file mode 100644
index 000000000..a72895f3f
--- /dev/null
+++ b/src/mem/ruby/tester/DetermInvGenerator.hh
@@ -0,0 +1,109 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+// This Deterministic Generator generates GETS request for all nodes in the system
+// then Invalidates them with a GETX. The GETS and GETX request are generated one
+// at a time in round-robin fashion 0...1...2...etc.
+
+#ifndef DETERMINVGENERATOR_H
+#define DETERMINVGENERATOR_H
+
+#include "Global.hh"
+#include "Consumer.hh"
+#include "DetermInvGeneratorStatus.hh"
+#include "NodeID.hh"
+#include "Address.hh"
+#include "SpecifiedGenerator.hh"
+
+class Sequencer;
+class SubBlock;
+class DeterministicDriver;
+
+// Per-node generator that first loads a line (GETS) on every node and then
+// invalidates it with a store (GETX), one request at a time in round-robin
+// order, under control of a DeterministicDriver.
+class DetermInvGenerator : public SpecifiedGenerator {
+public:
+ // Constructors
+ DetermInvGenerator(NodeID node, DeterministicDriver& driver);
+
+ // Destructor
+ ~DetermInvGenerator();
+
+ // Public Methods
+ void wakeup(); // poll the driver; issue GETS or GETX as appropriate
+ void performCallback(NodeID proc, SubBlock& data); // request-complete notification
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+ int thinkTime() const;
+ int waitTime() const;
+ int holdTime() const;
+ void initiateLoad();
+ void initiateStore();
+ void pickLoadAddress();
+ void pickStoreAddress(); // declared but not defined in the .cc -- TODO confirm
+
+ Sequencer* sequencer() const;
+
+ // copy constructor and assignment operator
+ DetermInvGenerator(const DetermInvGenerator& obj);
+ DetermInvGenerator& operator=(const DetermInvGenerator& obj);
+
+ // Data Members (m_ prefix)
+ DetermInvGeneratorStatus m_status; // Thinking / Load_Pending / Load_Complete / Store_Pending / Done
+ int m_counter; // completed transactions, compared against g_tester_length
+ Address m_address; // address of the outstanding request
+ NodeID m_node;
+ DeterministicDriver& m_driver;
+ Time m_last_transition; // time of last state change, for latency stats
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DetermInvGenerator& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const DetermInvGenerator& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //DETERMINVGENERATOR_H
+
diff --git a/src/mem/ruby/tester/DetermSeriesGETSGenerator.cc b/src/mem/ruby/tester/DetermSeriesGETSGenerator.cc
new file mode 100644
index 000000000..815919559
--- /dev/null
+++ b/src/mem/ruby/tester/DetermSeriesGETSGenerator.cc
@@ -0,0 +1,149 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "DetermSeriesGETSGenerator.hh"
+#include "DetermSeriesGETSGeneratorStatus.hh"
+#include "LockStatus.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "RubyConfig.hh"
+#include "SubBlock.hh"
+#include "DeterministicDriver.hh"
+#include "Chip.hh"
+
+// Constructor: starts in the Thinking state and schedules the first wakeup
+// at a random offset in [1, 200] cycles to desynchronize the nodes.
+DetermSeriesGETSGenerator::DetermSeriesGETSGenerator(NodeID node, DeterministicDriver& driver) :
+ m_driver(driver)
+{
+ m_status = DetermSeriesGETSGeneratorStatus_Thinking;
+ m_last_transition = 0;
+ m_node = node;
+ m_address = Address(9999); // initialize to null value
+ m_counter = 0;
+
+ // don't know exactly when this node needs to request so just guess randomly
+ g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
+}
+
+// Destructor: nothing to release -- the generator owns no resources.
+DetermSeriesGETSGenerator::~DetermSeriesGETSGenerator()
+{
+}
+
+// Scheduled-event handler.  If the driver says this node's load is ready,
+// transition Thinking -> Load_Pending and issue the series GETS; otherwise
+// poll again after thinkTime().  Any other state on wakeup is fatal.
+void DetermSeriesGETSGenerator::wakeup()
+{
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_node);
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
+
+ // determine if this node is next for the SeriesGETS round robin request
+ if (m_status == DetermSeriesGETSGeneratorStatus_Thinking) {
+ if (m_driver.isLoadReady(m_node)) {
+ pickAddress();
+ m_status = DetermSeriesGETSGeneratorStatus_Load_Pending; // Load Pending
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateLoad(); // SeriesGETS
+ } else { // I'll check again later
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ }
+ } else {
+ WARN_EXPR(m_status);
+ ERROR_MSG("Invalid status");
+ }
+
+}
+
+// Sequencer completion callback for the pending load.  Records latency,
+// advances the driver's load queue, and keeps issuing until this node has
+// completed g_tester_length * g_NUM_COMPLETIONS_BEFORE_PASS requests.
+void DetermSeriesGETSGenerator::performCallback(NodeID proc, SubBlock& data)
+{
+ Address address = data.getAddress();
+ assert(proc == m_node);
+ assert(address == m_address);
+
+ DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, address);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, data);
+
+ if (m_status == DetermSeriesGETSGeneratorStatus_Load_Pending) {
+ m_driver.recordLoadLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ // NOTE(review): writes a byte on a load/IFETCH callback -- looks
+ // intentional for the prefetcher-tuning series, but confirm.
+ data.writeByte(m_node);
+ m_driver.loadCompleted(m_node, data.getAddress()); // advance the load queue
+
+ m_counter++;
+ // do we still have more requests to complete before the next proc starts?
+ if (m_counter < g_tester_length*g_NUM_COMPLETIONS_BEFORE_PASS) {
+ m_status = DetermSeriesGETSGeneratorStatus_Thinking;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ } else {
+ m_driver.reportDone();
+ m_status = DetermSeriesGETSGeneratorStatus_Done;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ }
+
+ } else {
+ WARN_EXPR(m_status);
+ ERROR_MSG("Invalid status");
+ }
+}
+
+// Delay (in cycles) between polls while waiting for our turn to load.
+int DetermSeriesGETSGenerator::thinkTime() const
+{
+ return g_think_time;
+}
+
+// Delay (in cycles) before waking up again after a completed load.
+int DetermSeriesGETSGenerator::waitTime() const
+{
+ return g_wait_time;
+}
+
+// Asks the driver for the next load address assigned to this node.
+// Only legal while Thinking (no request outstanding).
+void DetermSeriesGETSGenerator::pickAddress()
+{
+ assert(m_status == DetermSeriesGETSGeneratorStatus_Thinking);
+
+ m_address = m_driver.getNextLoadAddr(m_node);
+}
+
+// Issues the series GETS as an instruction fetch (CacheRequestType_IFETCH)
+// through this node's sequencer -- this generator tunes the HW prefetcher.
+void DetermSeriesGETSGenerator::initiateLoad()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Load");
+ sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_IFETCH, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */, 0, false));
+}
+
+// Maps this node id to its chip (node / procsPerChip) and the in-chip
+// sequencer slot (node % procsPerChip).
+Sequencer* DetermSeriesGETSGenerator::sequencer() const
+{
+ return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
+}
+
+// Intentionally empty; required by the operator<< declared in the header.
+void DetermSeriesGETSGenerator::print(ostream& out) const
+{
+}
+
diff --git a/src/mem/ruby/tester/DetermSeriesGETSGenerator.hh b/src/mem/ruby/tester/DetermSeriesGETSGenerator.hh
new file mode 100644
index 000000000..25d4886a0
--- /dev/null
+++ b/src/mem/ruby/tester/DetermSeriesGETSGenerator.hh
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+// This Deterministic Generator generates a series of GETS requests for a given node.
+// Sequentially goes through all nodes in the system
+// This generator is used to tune the HW prefetcher
+// The GETS requests are generated one at a time in round-robin fashion 0...1...2...etc.
+
+#ifndef DETERMSERIESGETSGENERATOR_H
+#define DETERMSERIESGETSGENERATOR_H
+
+#include "Global.hh"
+#include "Consumer.hh"
+#include "DetermSeriesGETSGeneratorStatus.hh"
+#include "NodeID.hh"
+#include "Address.hh"
+#include "SpecifiedGenerator.hh"
+
+class Sequencer;
+class SubBlock;
+class DeterministicDriver;
+
+// Per-node generator that issues a series of GETS (IFETCH) requests,
+// sequentially through all nodes, to exercise/tune the HW prefetcher,
+// under control of a DeterministicDriver.
+class DetermSeriesGETSGenerator : public SpecifiedGenerator {
+public:
+ // Constructors
+ DetermSeriesGETSGenerator(NodeID node, DeterministicDriver& driver);
+
+ // Destructor
+ ~DetermSeriesGETSGenerator();
+
+ // Public Methods
+ void wakeup(); // poll the driver; issue a load when it is our turn
+ void performCallback(NodeID proc, SubBlock& data); // load-complete notification
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+ int thinkTime() const;
+ int waitTime() const;
+ void initiateLoad();
+ void pickAddress();
+
+ Sequencer* sequencer() const;
+
+ // copy constructor and assignment operator
+ DetermSeriesGETSGenerator(const DetermSeriesGETSGenerator& obj);
+ DetermSeriesGETSGenerator& operator=(const DetermSeriesGETSGenerator& obj);
+
+ // Data Members (m_ prefix)
+ DetermSeriesGETSGeneratorStatus m_status; // Thinking / Load_Pending / Done
+ int m_counter; // completed loads, vs. g_tester_length*g_NUM_COMPLETIONS_BEFORE_PASS
+ Address m_address; // address of the outstanding load
+ NodeID m_node;
+ DeterministicDriver& m_driver;
+ Time m_last_transition; // time of last state change, for latency stats
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DetermSeriesGETSGenerator& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const DetermSeriesGETSGenerator& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //DETERMSERIESGETSGENERATOR_H
+
diff --git a/src/mem/ruby/tester/DeterministicDriver.cc b/src/mem/ruby/tester/DeterministicDriver.cc
new file mode 100644
index 000000000..dd0507201
--- /dev/null
+++ b/src/mem/ruby/tester/DeterministicDriver.cc
@@ -0,0 +1,282 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Global.hh"
+#include "System.hh"
+#include "DeterministicDriver.hh"
+#include "EventQueue.hh"
+#include "SpecifiedGenerator.hh"
+#include "DetermGETXGenerator.hh"
+#include "DetermInvGenerator.hh"
+#include "DetermSeriesGETSGenerator.hh"
+#include "SubBlock.hh"
+#include "Chip.hh"
+
+DeterministicDriver::DeterministicDriver(System* sys_ptr)
+{
+ if (g_SIMICS) {
+ ERROR_MSG("g_SIMICS should not be defined.");
+ }
+
+ m_finish_time = 0;
+ m_last_issue = -11; // < -10 so the first request passes the "m_last_issue + 10" throttle in isAddrReady()
+ m_done_counter = 0;
+ m_loads_completed = 0;
+ m_stores_completed = 0;
+
+ m_numCompletionsPerNode = g_NUM_COMPLETIONS_BEFORE_PASS;
+
+ // one last-progress timestamp per processor, used by checkForDeadlock()
+ m_last_progress_vector.setSize(RubyConfig::numberOfProcessors());
+ for (int i=0; i<m_last_progress_vector.size(); i++) {
+ m_last_progress_vector[i] = 0;
+ }
+
+ // -1 == no processor has held this address yet; node 0 goes first
+ m_load_vector.setSize(g_deterministic_addrs);
+ for (int i=0; i<m_load_vector.size(); i++) {
+ m_load_vector[i] = -1; // No processor last held it
+ }
+
+ m_store_vector.setSize(g_deterministic_addrs);
+ for (int i=0; i<m_store_vector.size(); i++) {
+ m_store_vector[i] = -1; // No processor last held it
+ }
+
+ // instantiate one generator per processor, of the configured type
+ m_generator_vector.setSize(RubyConfig::numberOfProcessors());
+
+ SpecifiedGeneratorType generator = string_to_SpecifiedGeneratorType(g_SpecifiedGenerator);
+
+ for (int i=0; i<m_generator_vector.size(); i++) {
+ switch (generator) {
+ case SpecifiedGeneratorType_DetermGETXGenerator:
+ m_generator_vector[i] = new DetermGETXGenerator(i, *this);
+ break;
+ case SpecifiedGeneratorType_DetermSeriesGETSGenerator:
+ m_generator_vector[i] = new DetermSeriesGETSGenerator(i, *this);
+ break;
+ case SpecifiedGeneratorType_DetermInvGenerator:
+ m_generator_vector[i] = new DetermInvGenerator(i, *this);
+ break;
+ default:
+ ERROR_MSG("Unexpected specified generator type");
+ }
+ }
+
+ // add the tester consumer to the global event queue
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+}
+
+DeterministicDriver::~DeterministicDriver()
+{
+ // Tear down the per-processor generators. Iterate over the vector that
+ // actually owns the pointers (m_generator_vector) rather than
+ // m_last_progress_vector: the two are sized identically today, but
+ // bounding the loop by an unrelated vector's size is fragile.
+ for (int i=0; i<m_generator_vector.size(); i++) {
+ delete m_generator_vector[i];
+ }
+}
+
+void DeterministicDriver::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) // request completed for proc
+{
+ DEBUG_EXPR(TESTER_COMP, MedPrio, data);
+
+ m_generator_vector[proc]->performCallback(proc, data); // forward to the node's generator
+
+ // Mark that we made progress
+ m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
+}
+
+bool DeterministicDriver::isStoreReady(NodeID node) // true if node may issue a store to some address now
+{
+ return isAddrReady(node, m_store_vector);
+}
+
+bool DeterministicDriver::isStoreReady(NodeID node, Address addr) // true if node may issue a store to addr now
+{
+ return isAddrReady(node, m_store_vector, addr);
+}
+
+bool DeterministicDriver::isLoadReady(NodeID node) // true if node may issue a load to some address now
+{
+ return isAddrReady(node, m_load_vector);
+}
+
+bool DeterministicDriver::isLoadReady(NodeID node, Address addr) // true if node may issue a load to addr now
+{
+ return isAddrReady(node, m_load_vector, addr);
+}
+
+// searches for any address in the addr_vector
+// searches for any address in the addr_vector
+// Returns true iff some address is next in line for this node AND the node's
+// completion quota and the global issue-rate throttle both allow a request.
+bool DeterministicDriver::isAddrReady(NodeID node, Vector<NodeID> addr_vector)
+{
+ // NOTE: the last two conjuncts are loop-invariant; only the round-robin
+ // "is node next" test actually depends on i.
+ for (int i=0; i<addr_vector.size(); i++) {
+ if (((addr_vector[i]+1)%RubyConfig::numberOfProcessors() == node) &&
+ (m_loads_completed+m_stores_completed >= m_numCompletionsPerNode*node) && // is this node next
+ (g_eventQueue_ptr->getTime() >= m_last_issue + 10)) { // controll rate of requests
+ return true;
+ }
+ }
+ return false;
+}
+
+// test for a particular addr
+// test for a particular addr
+// Same readiness conditions as the any-address overload, restricted to addr.
+bool DeterministicDriver::isAddrReady(NodeID node, Vector<NodeID> addr_vector, Address addr)
+{
+ // map byte address to its per-cache-line slot in addr_vector
+ int addr_number = addr.getAddress()/RubyConfig::dataBlockBytes();
+
+ ASSERT ((addr_number >= 0) && (addr_number < addr_vector.size()));
+
+ if (((addr_vector[addr_number]+1)%RubyConfig::numberOfProcessors() == node) &&
+ (m_loads_completed+m_stores_completed >= m_numCompletionsPerNode*node) && // is this node next
+ (g_eventQueue_ptr->getTime() >= m_last_issue + 10)) { // controll rate of requests
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void DeterministicDriver::loadCompleted(NodeID node, Address addr) // record a finished load and pass addr to the next node
+{
+ m_loads_completed++;
+ setNextAddr(node, addr, m_load_vector);
+}
+
+void DeterministicDriver::storeCompleted(NodeID node, Address addr) // record a finished store and pass addr to the next node
+{
+ m_stores_completed++;
+ setNextAddr(node, addr, m_store_vector);
+}
+
+void DeterministicDriver::setNextAddr(NodeID node, Address addr, Vector<NodeID>& addr_vector)
+{
+ // mark the addr vector that this proc was the last to use the particular address
+ int addr_number = addr.getAddress()/RubyConfig::dataBlockBytes(); // one slot per cache line
+ addr_vector[addr_number] = node;
+}
+
+Address DeterministicDriver::getNextLoadAddr(NodeID node) // next load address this node should acquire
+{
+ return getNextAddr(node, m_load_vector);
+}
+
+Address DeterministicDriver::getNextStoreAddr(NodeID node) // next store address this node should acquire
+{
+ return getNextAddr(node, m_store_vector);
+}
+
+Address DeterministicDriver::getNextAddr(NodeID node, Vector<NodeID> addr_vector)
+{
+
+ // This method deterministically picks the next addr the node should acquirer
+ // The addrs cycle through according to NodeID 0->1->...->lastID->0...
+
+ Address addr;
+
+ // should only be called if we know a addr is ready for the node
+ ASSERT(isAddrReady(node, addr_vector));
+
+ // NOTE(review): there is no break, so if several addresses match, the
+ // highest-numbered one wins. The commented-out reverse loop suggests the
+ // scan order is deliberate — confirm before "simplifying" this loop.
+ for (int addr_number=0; addr_number<addr_vector.size(); addr_number++) {
+ //for (int addr_number=addr_vector.size()-1; addr_number>0; addr_number--) {
+
+ // is this node next in line for the addr
+ if (((addr_vector[addr_number]+1)%RubyConfig::numberOfProcessors()) == node) {
+
+ // One addr per cache line
+ addr.setAddress(addr_number * RubyConfig::dataBlockBytes());
+ }
+ }
+
+ m_last_issue = g_eventQueue_ptr->getTime(); // feeds the rate throttle in isAddrReady()
+
+ return addr;
+}
+
+
+void DeterministicDriver::reportDone() // called by a generator when it finishes; records the time the last one finishes
+{
+ m_done_counter++;
+ if ((m_done_counter == RubyConfig::numberOfProcessors())) {
+ //|| (m_done_counter == g_tester_length)) {
+ m_finish_time = g_eventQueue_ptr->getTime();
+ }
+}
+
+void DeterministicDriver::recordLoadLatency(Time time) // add one load latency sample to the histogram
+{
+ m_load_latency.add(time);
+}
+
+void DeterministicDriver::recordStoreLatency(Time time) // add one store latency sample to the histogram
+{
+ m_store_latency.add(time);
+}
+
+void DeterministicDriver::wakeup() // periodic driver tick; reschedules itself until all processors report done
+{
+ // checkForDeadlock(); // deadlock detection currently disabled
+ if (m_done_counter < RubyConfig::numberOfProcessors()) {
+ g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ }
+}
+
+// Aborts the run if any processor has made no progress for g_DEADLOCK_THRESHOLD cycles.
+void DeterministicDriver::checkForDeadlock()
+{
+ int size = m_last_progress_vector.size();
+ Time current_time = g_eventQueue_ptr->getTime();
+ for (int processor=0; processor<size; processor++) {
+ if ((current_time - m_last_progress_vector[processor]) > g_DEADLOCK_THRESHOLD) {
+ WARN_EXPR(processor);
+ Sequencer* seq_ptr = g_system_ptr->getChip(processor/RubyConfig::numberOfProcsPerChip())->getSequencer(processor%RubyConfig::numberOfProcsPerChip());
+ assert(seq_ptr != NULL);
+ // if (seq_ptr->isRequestPending()) {
+ // WARN_EXPR(seq_ptr->pendingAddress());
+ // }
+ WARN_EXPR(current_time);
+ WARN_EXPR(m_last_progress_vector[processor]);
+ WARN_EXPR(current_time - m_last_progress_vector[processor]);
+ ERROR_MSG("Deadlock detected."); // fatal: terminates the run
+ }
+ }
+}
+
+void DeterministicDriver::printStats(ostream& out) const // dump finish time and latency histograms
+{
+ out << endl;
+ out << "DeterministicDriver Stats" << endl;
+ out << "---------------------" << endl;
+
+ out << "finish_time: " << m_finish_time << endl;
+ out << "load_latency: " << m_load_latency << endl;
+ out << "store_latency: " << m_store_latency << endl;
+}
+
+void DeterministicDriver::print(ostream& out) const // intentionally prints nothing
+{
+}
diff --git a/src/mem/ruby/tester/DeterministicDriver.hh b/src/mem/ruby/tester/DeterministicDriver.hh
new file mode 100644
index 000000000..3d0bae73d
--- /dev/null
+++ b/src/mem/ruby/tester/DeterministicDriver.hh
@@ -0,0 +1,125 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef DETERMINISTICDRIVER_H
+#define DETERMINISTICDRIVER_H
+
+#include "Global.hh"
+#include "Driver.hh"
+#include "Histogram.hh"
+#include "CacheRequestType.hh"
+
+class System;
+class SpecifiedGenerator;
+
+class DeterministicDriver : public Driver, public Consumer {
+public:
+ // Constructors
+ DeterministicDriver(System* sys_ptr);
+
+ // Destructor
+ ~DeterministicDriver();
+
+ // Public Methods
+ bool isStoreReady(NodeID node); // may node issue a store to any address now?
+ bool isLoadReady(NodeID node); // may node issue a load to any address now?
+ bool isStoreReady(NodeID node, Address addr); // may node issue a store to addr now?
+ bool isLoadReady(NodeID node, Address addr); // may node issue a load to addr now?
+ void loadCompleted(NodeID node, Address addr); // generator reports a finished load
+ void storeCompleted(NodeID node, Address addr); // generator reports a finished store
+ Address getNextLoadAddr(NodeID node);
+ Address getNextStoreAddr(NodeID node);
+ int getLoadsCompleted() { return m_loads_completed; }
+ int getStoresCompleted() { return m_stores_completed; }
+
+ void reportDone(); // generator reports it has finished its run
+ void recordLoadLatency(Time time);
+ void recordStoreLatency(Time time);
+
+ void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
+ void wakeup();
+ void printStats(ostream& out) const;
+ void clearStats() {}
+ void printConfig(ostream& out) const {}
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+ void checkForDeadlock();
+
+ Address getNextAddr(NodeID node, Vector<NodeID> addr_vector);
+ bool isAddrReady(NodeID node, Vector<NodeID> addr_vector);
+ bool isAddrReady(NodeID node, Vector<NodeID> addr_vector, Address addr);
+ void setNextAddr(NodeID node, Address addr, Vector<NodeID>& addr_vector);
+
+ // Private copy constructor and assignment operator (unimplemented: copying is forbidden)
+ DeterministicDriver(const DeterministicDriver& obj);
+ DeterministicDriver& operator=(const DeterministicDriver& obj);
+
+ // Data Members (m_ prefix)
+ Vector<Time> m_last_progress_vector; // per-processor time of last completed request
+ Vector<SpecifiedGenerator*> m_generator_vector; // owned; one generator per processor
+ Vector<NodeID> m_load_vector; // Processor last to load the addr
+ Vector<NodeID> m_store_vector; // Processor last to store the addr
+
+ int m_done_counter;
+ int m_loads_completed;
+ int m_stores_completed;
+ // enforces the previous node to have a certain # of completions
+ // before next node starts
+ int m_numCompletionsPerNode;
+
+ Histogram m_load_latency;
+ Histogram m_store_latency;
+ Time m_finish_time; // time at which the last processor reported done
+ Time m_last_issue; // time of last issued request; feeds the rate throttle
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DeterministicDriver& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const DeterministicDriver& obj)
+{
+ obj.print(out); // delegate formatting to the object's print()
+ out << flush;
+ return out;
+}
+
+#endif //DETERMINISTICDRIVER_H
diff --git a/src/mem/ruby/tester/Instruction.cc b/src/mem/ruby/tester/Instruction.cc
new file mode 100644
index 000000000..8528a4094
--- /dev/null
+++ b/src/mem/ruby/tester/Instruction.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * $Id: Instruction.C 1.2 05/08/26 00:54:48-05:00 xu@s0-32.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#include "Instruction.hh"
+
+Instruction::Instruction(){ // default: sentinel opcode, address 0
+ m_opcode = Opcode_NUM_OPCODES;
+ m_address = Address(physical_address_t(0));
+}
+
+Instruction::Instruction(Opcode op, Address addr){
+ m_opcode = op;
+ m_address = addr;
+ // NOTE(review): this asserts the supplied address is 0, which contradicts
+ // taking addr as a parameter — looks like a leftover debug check; confirm
+ // before constructing instructions with nonzero addresses.
+ assert(addr.getAddress() == 0);
+}
+
+void Instruction::init(Opcode op, Address addr){ // (re)initialize opcode and address; no address-0 assert, unlike the two-arg ctor
+ m_opcode = op;
+ m_address = addr;
+ //cout << "Instruction(" << op << ", " << m_address << ")" << endl;
+}
+
+Opcode Instruction::getOpcode(){ // accessor
+ return m_opcode;
+}
+
+Address Instruction::getAddress(){ // accessor
+ return m_address;
+}
diff --git a/src/mem/ruby/tester/Instruction.hh b/src/mem/ruby/tester/Instruction.hh
new file mode 100644
index 000000000..674447056
--- /dev/null
+++ b/src/mem/ruby/tester/Instruction.hh
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * $Id: Instruction.h 1.2 05/05/24 12:15:47-05:00 kmoore@balder.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#ifndef INSTRUCTION_H
+#define INSTRUCTION_H
+
+#include "Address.hh"
+
+
+// Pseudo-instruction opcodes interpreted by the tester.
+enum Opcode {
+ Opcode_BEGIN,
+ Opcode_LD,
+ Opcode_ST,
+ Opcode_INC,
+ Opcode_COMMIT,
+ Opcode_DONE,
+ Opcode_NUM_OPCODES // count sentinel; also the default-constructed value
+};
+
+// A single tester pseudo-instruction: an opcode plus a target address.
+class Instruction {
+ public:
+ Instruction();
+ Instruction(Opcode op, Address addr);
+
+ void init(Opcode op, Address addr); // (re)set both fields
+ Opcode getOpcode();
+ Address getAddress();
+
+ private:
+ Opcode m_opcode;
+ Address m_address;
+
+};
+
+#endif
diff --git a/src/mem/ruby/tester/RaceyDriver.cc b/src/mem/ruby/tester/RaceyDriver.cc
new file mode 100644
index 000000000..4ed26da31
--- /dev/null
+++ b/src/mem/ruby/tester/RaceyDriver.cc
@@ -0,0 +1,139 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Global.hh"
+#include "System.hh"
+#include "RaceyDriver.hh"
+#include "EventQueue.hh"
+#include "RaceyPseudoThread.hh"
+#include "SubBlock.hh"
+
+RaceyDriver::RaceyDriver()
+{
+ if (g_SIMICS) {
+ ERROR_MSG("g_SIMICS should not be defined.");
+ }
+
+ // debug transition? (dead branch left in for quick manual enabling)
+ if(false) {
+ assert(g_debug_ptr);
+ g_debug_ptr->setDebugTime(1);
+ }
+
+ m_finish_time = 0;
+ m_done_counter = 0;
+ m_wakeup_thread0 = false;
+
+ // racey at least need two processors
+ assert(RubyConfig::numberOfProcessors() >= 2);
+
+ // init all racey pseudo threads, one per processor
+ m_racey_pseudo_threads.setSize(RubyConfig::numberOfProcessors());
+ for (int i=0; i<m_racey_pseudo_threads.size(); i++) {
+ m_racey_pseudo_threads[i] = new RaceyPseudoThread(i, *this);
+ }
+
+ // add this driver to the global event queue, for deadlock detection
+ g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+}
+
+RaceyDriver::~RaceyDriver() // frees the pseudo threads allocated in the constructor
+{
+ for (int i=0; i<m_racey_pseudo_threads.size(); i++) {
+ delete m_racey_pseudo_threads[i];
+ }
+}
+
+void RaceyDriver::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) // forward completion to proc's pseudo thread
+{
+ DEBUG_EXPR(TESTER_COMP, MedPrio, data);
+ m_racey_pseudo_threads[proc]->performCallback(proc, data);
+}
+
+integer_t RaceyDriver::getInstructionCount(int procID) const // instructions retired by procID's pseudo thread
+{
+ return m_racey_pseudo_threads[procID]->getInstructionCounter();
+}
+
+int RaceyDriver::runningThreads() // number of pseudo threads not yet joined
+{
+ return RubyConfig::numberOfProcessors() - m_done_counter;
+}
+
+// used to wake up thread 0 whenever other thread finishes
+void RaceyDriver::registerThread0Wakeup()
+{
+ m_wakeup_thread0 = true;
+}
+
+void RaceyDriver::joinThread() // a pseudo thread has finished; record time when all are done
+{
+ m_done_counter++;
+ if (m_done_counter == RubyConfig::numberOfProcessors()) {
+ m_finish_time = g_eventQueue_ptr->getTime();
+ }
+
+ // thread 0 may be waiting (see registerThread0Wakeup); wake it once
+ if(m_wakeup_thread0) {
+ g_eventQueue_ptr->scheduleEvent(m_racey_pseudo_threads[0], 1);
+ m_wakeup_thread0 = false;
+ }
+}
+
+void RaceyDriver::wakeup() // periodic tick: run deadlock checks, then reschedule until all threads join
+{
+ // check for deadlock
+ for(int i = 0 ; i < m_racey_pseudo_threads.size(); i++) {
+ m_racey_pseudo_threads[i]->checkForDeadlock();
+ }
+
+ // schedule next wakeup
+ if (m_done_counter < RubyConfig::numberOfProcessors()) {
+ g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ }
+}
+
+void RaceyDriver::printStats(ostream& out) const // valid only after every pseudo thread has joined
+{
+ assert(m_done_counter == RubyConfig::numberOfProcessors());
+ out << endl;
+ out << "RaceyDriver Stats" << endl;
+ out << "---------------------" << endl;
+
+ out << "execution signature: " << m_racey_pseudo_threads[0]->getSignature() << endl;
+ out << "finish_time: " << m_finish_time << endl;
+}
+
+void RaceyDriver::print(ostream& out) const // intentionally prints nothing
+{
+}
diff --git a/src/mem/ruby/tester/RaceyDriver.hh b/src/mem/ruby/tester/RaceyDriver.hh
new file mode 100644
index 000000000..a1a821b96
--- /dev/null
+++ b/src/mem/ruby/tester/RaceyDriver.hh
@@ -0,0 +1,112 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: The driver interface between racey pseudo threads and ruby
+ * memory timing simulator.
+ *
+ */
+
+#ifndef RACEYDRIVER_H
+#define RACEYDRIVER_H
+
+#include "Global.hh"
+#include "Driver.hh"
+
+class RaceyPseudoThread;
+
+class RaceyDriver : public Driver, public Consumer {
+public:
+ // Constructors
+ RaceyDriver();
+
+ // Destructor
+ ~RaceyDriver();
+
+ // Public Methods
+ int runningThreads(); // pseudo threads not yet joined
+ void registerThread0Wakeup(); // request that thread 0 be woken on the next join
+ void joinThread(); // a pseudo thread reports completion
+ bool Thread0Initialized() {
+ return m_racey_pseudo_threads[0]->getInitializedState();
+ };
+
+ void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
+ void wakeup();
+ void printStats(ostream& out) const;
+ void clearStats() {}
+ void printConfig(ostream& out) const {}
+
+ integer_t getInstructionCount(int procID) const;
+
+ // save/load cpu states
+ void saveCPUStates(int cpu_id, string filename) {
+ m_racey_pseudo_threads[cpu_id]->saveCPUStates(filename);
+ };
+ void loadCPUStates(int cpu_id, string filename) {
+ m_racey_pseudo_threads[cpu_id]->loadCPUStates(filename);
+ };
+
+ // reset IC (instruction counter)
+ void resetIC(int cpu_id) {
+ m_racey_pseudo_threads[cpu_id]->resetIC();
+ }
+
+ void print(ostream& out) const;
+private:
+
+ // Private copy constructor and assignment operator (unimplemented: copying is forbidden)
+ RaceyDriver(const RaceyDriver& obj);
+ RaceyDriver& operator=(const RaceyDriver& obj);
+
+ // Data Members (m_ prefix)
+ Vector<RaceyPseudoThread*> m_racey_pseudo_threads; // owned; one per processor
+ int m_done_counter; // threads joined so far
+ bool m_wakeup_thread0; // set by registerThread0Wakeup, consumed by joinThread
+
+ Time m_finish_time; // time at which the last thread joined
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const RaceyDriver& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const RaceyDriver& obj)
+{
+ obj.print(out); // delegate formatting to the object's print()
+ out << flush;
+ return out;
+}
+
+#endif //RACEYDRIVER_H
diff --git a/src/mem/ruby/tester/RequestGenerator.cc b/src/mem/ruby/tester/RequestGenerator.cc
new file mode 100644
index 000000000..71a183315
--- /dev/null
+++ b/src/mem/ruby/tester/RequestGenerator.cc
@@ -0,0 +1,196 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "RequestGenerator.hh"
+#include "RequestGeneratorStatus.hh"
+#include "LockStatus.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "RubyConfig.hh"
+#include "SubBlock.hh"
+#include "SyntheticDriver.hh"
+#include "Chip.hh"
+
+RequestGenerator::RequestGenerator(NodeID node, SyntheticDriver& driver) :
+ m_driver(driver)
+{
+ m_status = RequestGeneratorStatus_Thinking; // initial lock-acquire state
+ m_last_transition = 0;
+ m_node = node;
+ pickAddress(); // choose the first lock address to contend for
+ m_counter = 0; // lock acquire/release iterations completed
+
+ //g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
+}
+
+RequestGenerator::~RequestGenerator() // nothing owned; nothing to release
+{
+}
+
+// Event-queue callback: advance the test/swap/release state machine by
+// issuing the memory request for the current state.
+void RequestGenerator::wakeup()
+{
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_node);
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
+
+ if (m_status == RequestGeneratorStatus_Thinking) {
+ m_status = RequestGeneratorStatus_Test_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateTest(); // Test: read the lock byte
+ } else if (m_status == RequestGeneratorStatus_Holding) {
+ m_status = RequestGeneratorStatus_Release_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateRelease(); // Release: store unlocked
+ } else if (m_status == RequestGeneratorStatus_Before_Swap) {
+ m_status = RequestGeneratorStatus_Swap_Pending;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ initiateSwap(); // atomic attempt to take the lock
+ } else {
+ WARN_EXPR(m_status);
+ ERROR_MSG("Invalid status");
+ }
+}
+
+// Completion callback for the request issued in wakeup(). Implements a
+// test-then-swap spin lock: Test (LD) -> Swap (ATOMIC) -> Hold -> Release (ST),
+// looping g_tester_length times before reporting done.
+void RequestGenerator::performCallback(NodeID proc, SubBlock& data)
+{
+ Address address = data.getAddress();
+ assert(proc == m_node);
+ assert(address == m_address); // must match the outstanding request
+
+ DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, address);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, data);
+
+ if (m_status == RequestGeneratorStatus_Test_Pending) {
+ // m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ if (data.readByte() == LockStatus_Locked) {
+ // Locked - keep spinning
+ m_status = RequestGeneratorStatus_Thinking;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ } else {
+ // Unlocked - try the swap
+ m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ m_status = RequestGeneratorStatus_Before_Swap;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ }
+ } else if (m_status == RequestGeneratorStatus_Swap_Pending) {
+ m_driver.recordSwapLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ if (data.readByte() == LockStatus_Locked) {
+ // We failed to aquire the lock - go back to spinning
+ m_status = RequestGeneratorStatus_Thinking;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ } else {
+ // We acquired the lock - mark it locked and hold it for holdTime()
+ data.writeByte(LockStatus_Locked);
+ m_status = RequestGeneratorStatus_Holding;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ DEBUG_MSG(TESTER_COMP, HighPrio, "Acquired");
+ DEBUG_EXPR(TESTER_COMP, HighPrio, proc);
+ DEBUG_EXPR(TESTER_COMP, HighPrio, g_eventQueue_ptr->getTime());
+ g_eventQueue_ptr->scheduleEvent(this, holdTime());
+ }
+ } else if (m_status == RequestGeneratorStatus_Release_Pending) {
+ m_driver.recordReleaseLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ // We're releasing the lock
+ data.writeByte(LockStatus_Unlocked);
+
+ m_counter++;
+ if (m_counter < g_tester_length) {
+ // start the next acquire iteration on a fresh address
+ m_status = RequestGeneratorStatus_Thinking;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ pickAddress();
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ } else {
+ m_driver.reportDone();
+ m_status = RequestGeneratorStatus_Done;
+ m_last_transition = g_eventQueue_ptr->getTime();
+ }
+ } else {
+ WARN_EXPR(m_status);
+ ERROR_MSG("Invalid status");
+ }
+}
+
+int RequestGenerator::thinkTime() const // delay between release and next acquire attempt
+{
+ return g_think_time;
+}
+
+int RequestGenerator::waitTime() const // delay between spin iterations
+{
+ return g_wait_time;
+}
+
+int RequestGenerator::holdTime() const // how long the lock is held before release
+{
+ return g_hold_time;
+}
+
+void RequestGenerator::pickAddress() // ask the driver for the next lock address; only legal while Thinking
+{
+ assert(m_status == RequestGeneratorStatus_Thinking);
+ m_address = m_driver.pickAddress(m_node);
+}
+
+void RequestGenerator::initiateTest() // issue the LD that reads the lock byte
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Test");
+ sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_LD, Address(1), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */, 0, false));
+}
+
+void RequestGenerator::initiateSwap() // issue the ATOMIC that attempts to take the lock
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Swap");
+ sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_ATOMIC, Address(2), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */, 0, false));
+}
+
+void RequestGenerator::initiateRelease() // issue the ST that releases the lock
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Release");
+ sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */, 0, false));
+}
+
+Sequencer* RequestGenerator::sequencer() const // map m_node to its chip-local sequencer
+{
+ return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
+}
+
+void RequestGenerator::print(ostream& out) const // minimal identification only
+{
+ out << "[RequestGenerator]" << endl;
+}
+
diff --git a/src/mem/ruby/tester/RequestGenerator.hh b/src/mem/ruby/tester/RequestGenerator.hh
new file mode 100644
index 000000000..3296f7951
--- /dev/null
+++ b/src/mem/ruby/tester/RequestGenerator.hh
@@ -0,0 +1,102 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef REQUESTGENERATOR_H
+#define REQUESTGENERATOR_H
+
+#include "Global.hh"
+#include "Consumer.hh"
+#include "RequestGeneratorStatus.hh"
+#include "NodeID.hh"
+#include "Address.hh"
+
+class Sequencer;
+class SubBlock;
+class SyntheticDriver;
+
+// Per-processor synthetic workload agent: repeatedly picks a lock
+// address from the SyntheticDriver and drives a test / swap / release
+// sequence through the node's Sequencer.  Scheduled on the global
+// event queue via the Consumer interface.
+class RequestGenerator : public Consumer {
+public:
+  // Constructors
+  RequestGenerator(NodeID node, SyntheticDriver& driver);
+
+  // Destructor
+  ~RequestGenerator();
+
+  // Public Methods
+  void wakeup();
+  void performCallback(NodeID proc, SubBlock& data);
+
+  void print(ostream& out) const;
+private:
+  // Private Methods
+  int thinkTime() const;
+  int waitTime() const;
+  int holdTime() const;
+  void initiateTest();
+  void initiateSwap();
+  void initiateRelease();
+  void pickAddress();
+  Sequencer* sequencer() const;
+
+  // Private copy constructor and assignment operator
+  RequestGenerator(const RequestGenerator& obj);
+  RequestGenerator& operator=(const RequestGenerator& obj);
+
+  // Data Members (m_ prefix)
+  SyntheticDriver& m_driver;        // owning driver (lock table, stats)
+  NodeID m_node;                    // processor this generator models
+  RequestGeneratorStatus m_status;  // current phase of the lock protocol
+  int m_counter;
+  Time m_last_transition;           // time of last state change
+  Address m_address;                // lock address currently contended
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const RequestGenerator& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream-insertion helper; delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const RequestGenerator& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //REQUESTGENERATOR_H
+
diff --git a/src/mem/ruby/tester/SpecifiedGenerator.cc b/src/mem/ruby/tester/SpecifiedGenerator.cc
new file mode 100644
index 000000000..e6ee802d4
--- /dev/null
+++ b/src/mem/ruby/tester/SpecifiedGenerator.cc
@@ -0,0 +1,48 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "SpecifiedGenerator.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "SubBlock.hh"
+#include "SyntheticDriver.hh"
+
+// Nothing to initialize; concrete subclasses set up their own state.
+SpecifiedGenerator::SpecifiedGenerator()
+{
+}
+
+// Out-of-line definition required: the destructor is declared pure
+// virtual in the header but must still have a body.
+SpecifiedGenerator::~SpecifiedGenerator()
+{
+}
+
diff --git a/src/mem/ruby/tester/SpecifiedGenerator.hh b/src/mem/ruby/tester/SpecifiedGenerator.hh
new file mode 100644
index 000000000..d22c56f49
--- /dev/null
+++ b/src/mem/ruby/tester/SpecifiedGenerator.hh
@@ -0,0 +1,69 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef SPECIFIEDGENERATOR_H
+#define SPECIFIEDGENERATOR_H
+
+#include "Global.hh"
+#include "Consumer.hh"
+#include "NodeID.hh"
+
+class Sequencer;
+class SubBlock;
+
+// Abstract base for tester request generators driven by the event
+// queue.  All behavior (wakeup, callback, print) is supplied by
+// subclasses; the pure-virtual destructor makes the class abstract.
+class SpecifiedGenerator : public Consumer {
+public:
+  // Constructors
+  SpecifiedGenerator();
+
+  // Destructor
+  virtual ~SpecifiedGenerator() = 0;
+
+  // Public Methods
+  virtual void wakeup() = 0;
+  virtual void performCallback(NodeID proc, SubBlock& data) = 0;
+
+  virtual void print(ostream& out) const = 0;
+protected:
+  // accessible by subclasses
+
+private:
+  // inaccessible by subclasses
+
+};
+
+#endif //SPECIFIEDGENERATOR_H
+
diff --git a/src/mem/ruby/tester/SyntheticDriver.cc b/src/mem/ruby/tester/SyntheticDriver.cc
new file mode 100644
index 000000000..d2028ba07
--- /dev/null
+++ b/src/mem/ruby/tester/SyntheticDriver.cc
@@ -0,0 +1,296 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Global.hh"
+#include "System.hh"
+#include "SyntheticDriver.hh"
+#include "EventQueue.hh"
+//#ifndef XACT_MEM
+#include "RequestGenerator.hh"
+//#endif
+//#include "XactAbortRequestGenerator.hh"
+//#include "XactRequestGenerator.hh"
+#include "SubBlock.hh"
+#include "Chip.hh"
+
+// Build one RequestGenerator per processor, size the lock table and
+// progress-tracking vector, and schedule the driver's first wakeup.
+SyntheticDriver::SyntheticDriver(System* sys_ptr)
+{
+  cout << "SyntheticDriver::SyntheticDriver" << endl;
+  if (g_SIMICS) {
+    ERROR_MSG("g_SIMICS should not be defined.");
+  }
+
+  m_finish_time = 0;
+  m_done_counter = 0;
+
+  // Last time each processor made forward progress (for deadlock detection).
+  m_last_progress_vector.setSize(RubyConfig::numberOfProcessors());
+  for (int i=0; i<m_last_progress_vector.size(); i++) {
+    m_last_progress_vector[i] = 0;
+  }
+
+  m_lock_vector.setSize(g_synthetic_locks);
+  for (int i=0; i<m_lock_vector.size(); i++) {
+    m_lock_vector[i] = -1;  // No processor last held it
+  }
+
+  m_request_generator_vector.setSize(RubyConfig::numberOfProcessors());
+  for (int i=0; i<m_request_generator_vector.size(); i++) {
+    if(XACT_MEMORY){
+      // NOTE(review): the XACT path is commented out, so this slot is
+      // left uninitialized when XACT_MEMORY is set -- verify before use.
+      //m_request_generator_vector[i] = new XactRequestGenerator(i, *this);
+    } else {
+      m_request_generator_vector[i] = new RequestGenerator(i, *this);
+    }
+  }
+
+  // add the tester consumer to the global event queue
+  g_eventQueue_ptr->scheduleEvent(this, 1);
+}
+
+// Free the per-processor request generators.  Iterate over the
+// generator vector itself: the original looped over
+// m_last_progress_vector, which only coincidentally has the same size.
+SyntheticDriver::~SyntheticDriver()
+{
+  for (int i=0; i<m_request_generator_vector.size(); i++) {
+    delete m_request_generator_vector[i];
+  }
+}
+
+// Sequencer completion callback: forward the data to the requesting
+// processor's generator and record forward progress for deadlock checks.
+void SyntheticDriver::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread)
+{
+  DEBUG_EXPR(TESTER_COMP, MedPrio, data);
+  //cout << "  " << proc << " in S.D. hitCallback" << endl;
+  if(XACT_MEMORY){
+    // NOTE(review): XACT path is disabled; callback is dropped when
+    // XACT_MEMORY is set -- confirm this is intended.
+    //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
+    //reqGen->performCallback(proc, data);
+  } else {
+    m_request_generator_vector[proc]->performCallback(proc, data);
+  }
+
+  // Mark that we made progress
+  m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
+}
+
+// Abort callback: in the non-transactional path this is handled the
+// same way as a hit (forward to the generator, record progress).
+void SyntheticDriver::abortCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread)
+{
+  //cout << "SyntheticDriver::abortCallback" << endl;
+  DEBUG_EXPR(TESTER_COMP, MedPrio, data);
+
+  if(XACT_MEMORY){
+    // NOTE(review): XACT abort handling is disabled; callback is
+    // dropped when XACT_MEMORY is set -- confirm this is intended.
+    //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
+    //reqGen->abortTransaction();
+    //reqGen->performCallback(proc, data);
+  } else {
+    m_request_generator_vector[proc]->performCallback(proc, data);
+  }
+
+  // Mark that we made progress
+  m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
+}
+
+// For Transactional Memory
+/*
+// called whenever we send a nack
+void SyntheticDriver::notifySendNack( int proc, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id ){
+ if(XACT_MEMORY){
+ //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
+ //reqGen->notifySendNack(addr, remote_timestamp, remote_id);
+ }
+ else{
+ cout << "notifySendNack NOT USING TM" << endl;
+ ASSERT(0);
+ }
+}
+
+// called whenever we receive a NACK
+// Either for a demand request or log store
+void SyntheticDriver::notifyReceiveNack( int proc, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id ){
+ if(XACT_MEMORY){
+ //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
+ //reqGen->notifyReceiveNack(addr, remote_timestamp, remote_id);
+ }
+ else{
+ cout << "notifyReceiveNack NOT USING TM" << endl;
+ ASSERT(0);
+ }
+}
+
+// called whenever we received ALL the NACKs. Take abort or retry action here
+void SyntheticDriver::notifyReceiveNackFinal(int proc, const Address & addr){
+ if(XACT_MEMORY){
+ //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
+ //reqGen->notifyReceiveNackFinal(addr);
+ }
+ else{
+ cout << "notifyReceiveNackFinal NOT USING TM" << endl;
+ ASSERT(0);
+ }
+}
+
+// called during abort handling
+// void SyntheticDriver::notifyAbortStart( const Address & handlerPC ){
+
+// }
+
+// void SyntheticDriver::notifyAbortComplete( const Address & newPC ){
+
+// }
+*/
+
+// Pick a lock for 'node' to contend on, avoiding (up to 10 retries)
+// the lock this node acquired most recently.  Returns the lock's
+// cache-line-aligned address (one lock per data block).
+Address SyntheticDriver::pickAddress(NodeID node)
+{
+  // This method picks a random lock that we were NOT the last
+  // processor to acquire.  Why?  Without this change 2 and 4
+  // processor runs, the odds of having the lock in your cache in
+  // read/write state is 50% or 25%, respectively.  This effect can
+  // make our 'throughput per processor' results look too strange.
+
+  Address addr;
+  // FIXME - make this a parameter of the workload
+  int lock_number = 0;
+  int counter = 0;
+  while (1) {
+    // Pick a random lock
+    lock_number = random() % m_lock_vector.size();
+
+    // Were we the last to acquire the lock?
+    if (m_lock_vector[lock_number] != node) {
+      break;
+    }
+
+    // Don't keep trying forever, since if there is only one lock, we're always the last to try to obtain the lock
+    counter++;
+    if (counter > 10) {
+      break;
+    }
+  }
+
+  // We're going to acquire it soon, so we can update the last
+  // processor to hold the lock at this time
+  m_lock_vector[lock_number] = node;
+
+  // One lock per cache line
+  addr.setAddress(lock_number * RubyConfig::dataBlockBytes());
+  return addr;
+}
+
+// Called by each generator when it finishes; record the finish time
+// once the last processor reports in.
+void SyntheticDriver::reportDone()
+{
+  m_done_counter++;
+  if (m_done_counter == RubyConfig::numberOfProcessors()) {
+    m_finish_time = g_eventQueue_ptr->getTime();
+  }
+}
+
+// Accumulate the latency of one lock "test" (load) into its histogram.
+void SyntheticDriver::recordTestLatency(Time time)
+{
+  m_test_latency.add(time);
+}
+
+// Accumulate the latency of one lock "swap" (atomic) into its histogram.
+void SyntheticDriver::recordSwapLatency(Time time)
+{
+  m_swap_latency.add(time);
+}
+
+// Accumulate the latency of one lock release (store) into its histogram.
+void SyntheticDriver::recordReleaseLatency(Time time)
+{
+  m_release_latency.add(time);
+}
+
+// Periodic driver wakeup: reschedules itself every deadlock-threshold
+// interval until all processors are done.  NOTE(review): the actual
+// checkForDeadlock() call is commented out, so this currently only
+// keeps the event alive -- confirm that is intended.
+void SyntheticDriver::wakeup()
+{
+  //  checkForDeadlock();
+  if (m_done_counter < RubyConfig::numberOfProcessors()) {
+    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+  }
+}
+
+// Abort the simulation if any processor has made no progress for more
+// than g_DEADLOCK_THRESHOLD cycles, dumping diagnostics first.
+void SyntheticDriver::checkForDeadlock()
+{
+  int size = m_last_progress_vector.size();
+  Time current_time = g_eventQueue_ptr->getTime();
+  for (int processor=0; processor<size; processor++) {
+    if ((current_time - m_last_progress_vector[processor]) > g_DEADLOCK_THRESHOLD) {
+      WARN_EXPR(processor);
+      Sequencer* seq_ptr = g_system_ptr->getChip(processor/RubyConfig::numberOfProcsPerChip())->getSequencer(processor%RubyConfig::numberOfProcsPerChip());
+      assert(seq_ptr != NULL);
+      //     if (seq_ptr->isRequestPending()) {
+      //       WARN_EXPR(seq_ptr->pendingAddress());
+      //      }
+      WARN_EXPR(current_time);
+      WARN_EXPR(m_last_progress_vector[processor]);
+      WARN_EXPR(current_time - m_last_progress_vector[processor]);
+      ERROR_MSG("Deadlock detected.");
+    }
+  }
+}
+
+// Read up to 8 bytes of Ruby-backed physical memory on behalf of
+// processor 'procID' and return them as an integer.  The buffer is
+// zero-initialized so that reads shorter than sizeof(integer_t) do not
+// return uninitialized high-order bytes.
+integer_t SyntheticDriver::readPhysicalMemory(int procID, physical_address_t address,
+                                              int len ){
+  char buffer[8] = {0};
+  ASSERT(len <= 8);
+  Sequencer* seq = g_system_ptr->getChip(procID/RubyConfig::numberOfProcsPerChip())->getSequencer(procID%RubyConfig::numberOfProcsPerChip());
+  assert(seq != NULL);
+  bool found = seq->getRubyMemoryValue(Address(address), buffer, len );
+  ASSERT(found);
+  return *((integer_t *) buffer);
+}
+
+// Write the low 'len' bytes of 'value' into Ruby-backed physical
+// memory on behalf of processor 'procID'.  Asserts on failure.
+void SyntheticDriver::writePhysicalMemory( int procID, physical_address_t address,
+                                           integer_t value, int len ){
+  char buffer[8];
+  ASSERT(len <= 8);
+
+  memcpy(buffer, (const void*) &value, len);
+  DEBUG_EXPR(TESTER_COMP, MedPrio, "");
+  Sequencer* seq = g_system_ptr->getChip(procID/RubyConfig::numberOfProcsPerChip())->getSequencer(procID%RubyConfig::numberOfProcsPerChip());
+  assert(seq != NULL);
+  bool found = seq->setRubyMemoryValue(Address(address), buffer, len );
+  ASSERT(found);
+  //return found;
+}
+
+// Dump the workload's summary statistics: finish time plus the
+// test/swap/release latency histograms.
+void SyntheticDriver::printStats(ostream& out) const
+{
+  out << endl;
+  out << "SyntheticDriver Stats" << endl;
+  out << "---------------------" << endl;
+
+  out << "synthetic_finish_time: " << m_finish_time << endl;
+  out << "test_latency: " << m_test_latency << endl;
+  out << "swap_latency: " << m_swap_latency << endl;
+  out << "release_latency: " << m_release_latency << endl;
+}
+
+// Intentionally empty: the driver has no useful single-line identity;
+// use printStats() for meaningful output.
+void SyntheticDriver::print(ostream& out) const
+{
+}
diff --git a/src/mem/ruby/tester/SyntheticDriver.hh b/src/mem/ruby/tester/SyntheticDriver.hh
new file mode 100644
index 000000000..278891ba2
--- /dev/null
+++ b/src/mem/ruby/tester/SyntheticDriver.hh
@@ -0,0 +1,118 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef SYNTHETICDRIVER_H
+#define SYNTHETICDRIVER_H
+
+#include "Global.hh"
+#include "Driver.hh"
+#include "Histogram.hh"
+#include "CacheRequestType.hh"
+
+class System;
+class RequestGenerator;
+
+// Driver for the synthetic lock-contention workload: owns one
+// RequestGenerator per processor, the shared lock table, and the
+// latency/progress bookkeeping.  Also a Consumer so it can schedule
+// periodic wakeups on the global event queue.
+class SyntheticDriver : public Driver, public Consumer {
+public:
+  // Constructors
+  SyntheticDriver(System* sys_ptr);
+
+  // Destructor
+  ~SyntheticDriver();
+
+  // Public Methods
+  Address pickAddress(NodeID node);
+  void reportDone();
+  void recordTestLatency(Time time);
+  void recordSwapLatency(Time time);
+  void recordReleaseLatency(Time time);
+
+  void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
+  // Unsupported in this driver; abort if ever invoked.
+  // (Fixed: the original read "{assert(0)};", which does not compile --
+  // the statement lacked its semicolon and a stray one followed the body.)
+  void conflictCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) { assert(0); }
+  void abortCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
+  void wakeup();
+  void printStats(ostream& out) const;
+  void clearStats() {}
+  void printConfig(ostream& out) const {}
+
+  integer_t readPhysicalMemory(int procID, physical_address_t address,
+                               int len );
+
+  void writePhysicalMemory( int procID, physical_address_t address,
+                            integer_t value, int len );
+
+  void print(ostream& out) const;
+
+  // For handling NACKs/retries
+  //void notifySendNack( int procID, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id);
+  //void notifyReceiveNack( int procID, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id);
+  //void notifyReceiveNackFinal( int procID, const Address & addr);
+
+private:
+  // Private Methods
+  void checkForDeadlock();
+
+  // Private copy constructor and assignment operator
+  SyntheticDriver(const SyntheticDriver& obj);
+  SyntheticDriver& operator=(const SyntheticDriver& obj);
+
+  // Data Members (m_ prefix)
+  Vector<Time> m_last_progress_vector;              // per-proc last-progress time
+  Vector<RequestGenerator*> m_request_generator_vector;  // one generator per proc
+  Vector<NodeID> m_lock_vector; // Processor last to hold the lock
+  int m_done_counter;
+
+  Histogram m_test_latency;
+  Histogram m_swap_latency;
+  Histogram m_release_latency;
+  Time m_finish_time;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const SyntheticDriver& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream-insertion helper; delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const SyntheticDriver& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //SYNTHETICDRIVER_H
diff --git a/src/mem/ruby/tester/Tester.cc b/src/mem/ruby/tester/Tester.cc
new file mode 100644
index 000000000..0e6f12cdc
--- /dev/null
+++ b/src/mem/ruby/tester/Tester.cc
@@ -0,0 +1,116 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Global.hh"
+#include "System.hh"
+#include "Tester.hh"
+#include "EventQueue.hh"
+#include "SubBlock.hh"
+#include "Check.hh"
+#include "Chip.hh"
+
+// Initialize the random tester: reset the global callback counter,
+// schedule the first wakeup, and zero the per-processor progress times.
+Tester::Tester(System* sys_ptr)
+{
+  if (g_SIMICS) {
+    ERROR_MSG("g_SIMICS should not be defined.");
+  }
+
+  g_callback_counter = 0;
+
+  // add the tester consumer to the global event queue
+  g_eventQueue_ptr->scheduleEvent(this, 1);
+
+  m_last_progress_vector.setSize(RubyConfig::numberOfProcessors());
+  for (int i=0; i<m_last_progress_vector.size(); i++) {
+    m_last_progress_vector[i] = 0;
+  }
+}
+
+// No owned resources; members clean themselves up.
+Tester::~Tester()
+{
+}
+
+// Completion callback from the memory system: record progress, bump the
+// global callback count, and hand the data to the Check registered for
+// this address so it can validate the load/store result.
+void Tester::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread)
+{
+  // Mark that we made progress
+  m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
+  g_callback_counter++;
+
+  // This tells us our store has 'completed' or for a load gives us
+  // back the data to make the check
+  DEBUG_EXPR(TESTER_COMP, MedPrio, proc);
+  DEBUG_EXPR(TESTER_COMP, MedPrio, data);
+  Check* check_ptr = m_checkTable.getCheck(data.getAddress());
+  assert(check_ptr != NULL);
+  check_ptr->performCallback(proc, data);
+
+}
+
+// Main tester loop: each wakeup initiates one random check, runs the
+// deadlock detector, and reschedules itself until g_tester_length
+// callbacks have been observed.
+void Tester::wakeup()
+{
+  if (g_callback_counter < g_tester_length) {
+    // Try to perform an action or check
+    Check* check_ptr = m_checkTable.getRandomCheck();
+    assert(check_ptr != NULL);
+    check_ptr->initiate();
+
+    checkForDeadlock();
+
+    g_eventQueue_ptr->scheduleEvent(this, 2);
+  }
+}
+
+// Abort the simulation if any processor has made no progress for more
+// than g_DEADLOCK_THRESHOLD cycles, dumping its Sequencer state first.
+void Tester::checkForDeadlock()
+{
+  int size = m_last_progress_vector.size();
+  Time current_time = g_eventQueue_ptr->getTime();
+  for (int processor=0; processor<size; processor++) {
+    if ((current_time - m_last_progress_vector[processor]) > g_DEADLOCK_THRESHOLD) {
+      WARN_EXPR(current_time);
+      WARN_EXPR(m_last_progress_vector[processor]);
+      WARN_EXPR(current_time - m_last_progress_vector[processor]);
+      WARN_EXPR(processor);
+      Sequencer* seq_ptr = g_system_ptr->getChip(processor/RubyConfig::numberOfProcsPerChip())->getSequencer(processor%RubyConfig::numberOfProcsPerChip());
+      assert(seq_ptr != NULL);
+      WARN_EXPR(*seq_ptr);
+      ERROR_MSG("Deadlock detected.");
+    }
+  }
+}
+
+// Minimal identification for debug output; no per-instance state is printed.
+void Tester::print(ostream& out) const
+{
+  out << "[Tester]" << endl;
+}
+
diff --git a/src/mem/ruby/tester/Tester.hh b/src/mem/ruby/tester/Tester.hh
new file mode 100644
index 000000000..35563a3b4
--- /dev/null
+++ b/src/mem/ruby/tester/Tester.hh
@@ -0,0 +1,93 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef TESTER_H
+#define TESTER_H
+
+#include "Global.hh"
+#include "Driver.hh"
+#include "CheckTable.hh"
+#include "CacheRequestType.hh"
+
+class System;
+
+// Randomized correctness tester: drives loads/stores through a
+// CheckTable of self-validating Checks and watches for deadlock.
+// Also a Consumer so it can schedule its own periodic wakeups.
+class Tester : public Driver, public Consumer {
+public:
+  // Constructors
+  Tester(System* sys_ptr);
+
+  // Destructor
+  ~Tester();
+
+  // Public Methods
+
+  void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
+  void wakeup();
+  void printStats(ostream& out) const {}
+  void clearStats() {}
+  void printConfig(ostream& out) const {}
+
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  void checkForDeadlock();
+
+  // Private copy constructor and assignment operator
+  Tester(const Tester& obj);
+  Tester& operator=(const Tester& obj);
+
+  // Data Members (m_ prefix)
+
+  CheckTable m_checkTable;              // address -> self-validating Check
+  Vector<Time> m_last_progress_vector;  // per-proc last-progress time
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Tester& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream-insertion helper; delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const Tester& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //TESTER_H
diff --git a/src/mem/ruby/tester/XactAbortRequestGenerator.cc b/src/mem/ruby/tester/XactAbortRequestGenerator.cc
new file mode 100644
index 000000000..e562aa760
--- /dev/null
+++ b/src/mem/ruby/tester/XactAbortRequestGenerator.cc
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * $Id$
+ *
+ */
+
+#ifdef XACT_MEM
+
+#include "XactAbortRequestGenerator.hh"
+#include "LockStatus.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "RubyConfig.hh"
+#include "SubBlock.hh"
+#include "SyntheticDriver.hh"
+#include "Chip.hh"
+#include "Instruction.hh"
+#include "TransactionManager.hh"
+
+//uint8 XactAbortRequestGenerator::testArray[MAX_ADDRESS];
+//uint8 XactAbortRequestGenerator::dataArray[MAX_ADDRESS];
+Vector<uint8> XactAbortRequestGenerator::testArray;
+
+// Constructor: sizes the shared (static) testArray to model all of memory,
+// builds the first random transaction, and schedules the initial wakeup at a
+// random offset so generators do not start in lock-step.
+XactAbortRequestGenerator::XactAbortRequestGenerator(NodeID node, SyntheticDriver& driver) :
+ RequestGenerator(node, driver), m_driver(driver)
+{
+ //DEBUG_EXPR(TESTER_COMP, MedPrio, "#### -- Creating XactAbortRequestGenerator\n");
+ cout << "#### -- Creating XactAbortRequestGenerator " << node << endl;
+
+ testArray.setSize(g_MEMORY_SIZE_BYTES);
+ assert(testArray.size() == g_MEMORY_SIZE_BYTES);
+ // Create instructions
+ m_instructions = new Instruction[XACT_LENGTH];
+ newTransaction();
+
+ m_xact_status = XactAbortRequestGeneratorStatus_Ready;
+ m_last_transition = 0;
+ m_node = node;
+ //pickAddress();
+ m_counter = 0;
+ // Initial register value; reset to 5 on every BEGIN / abort as well.
+ m_register = 5;
+ m_pc = 0;
+
+ //for(int i=0; i<XACT_SIZE; ++i){
+ //testArray[i] = 64;
+ //}
+
+ //testArray = new uint8[XACT_SIZE];
+ //dataArray = new uint8[XACT_SIZE];
+ g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
+}
+
+// Builds a fixed-shape transaction: BEGIN, then (XACT_LENGTH - 18) random
+// LD/INC instructions over addresses [0, XACT_SIZE), then 16 STs to
+// addresses [0, 16), and a final COMMIT.
+void XactAbortRequestGenerator::newTransaction(){
+ int num_stores = 16;
+ int num_loads = XACT_LENGTH - num_stores - 2;
+
+ for(int i=0; i<XACT_LENGTH; ++i){
+ if (i == 0){
+ m_instructions[i].init(Opcode_BEGIN, Address(1));
+ } else if (i == XACT_LENGTH - 1){
+ m_instructions[i].init(Opcode_COMMIT, Address(1));
+ } else if (i < num_loads) {
+ physical_address_t address = i % XACT_SIZE;
+ ASSERT(address < XACT_SIZE);
+
+ // 50/50 choice between a load and a register increment.
+ int selectOpcode = random() % 2;
+ Opcode op;
+ switch(selectOpcode){
+ case 0:
+ op = Opcode_LD;
+ break;
+ case 1:
+ op = Opcode_INC;
+ break;
+ };
+ m_instructions[i].init(op, Address(address));
+ } else {
+ // Store slots: addresses 0 .. num_stores-1 in order.
+ physical_address_t address = i - num_loads;
+ ASSERT(address < XACT_SIZE);
+ Opcode op = Opcode_ST;
+ m_instructions[i].init(op, Address(address));
+ }
+ }
+}
+
+// Destructor: releases the instruction buffer allocated in the constructor.
+XactAbortRequestGenerator::~XactAbortRequestGenerator()
+{
+ // m_instructions was allocated with new[]; it must be freed with
+ // delete[] -- plain delete on an array is undefined behavior.
+ delete [] m_instructions;
+}
+
+// Event-queue callback: records the transition time and executes the
+// instruction at the current pc.  Only legal from Ready or Aborted state.
+void XactAbortRequestGenerator::wakeup()
+{
+ assert(m_xact_status == XactAbortRequestGeneratorStatus_Ready || m_xact_status == XactAbortRequestGeneratorStatus_Aborted);
+ m_xact_status = XactAbortRequestGeneratorStatus_Blocked;
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_node);
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_xact_status);
+
+ m_last_transition = g_eventQueue_ptr->getTime();
+ execute();
+}
+
+// Dispatches the instruction at m_pc to the matching initiate* helper.
+// m_pc is advanced by the completion paths (performCallback / initiateInc),
+// not here.
+void XactAbortRequestGenerator::execute(){
+ assert(m_pc >= 0 && m_pc < XACT_LENGTH);
+ Instruction current = m_instructions[m_pc];
+ //cout << " " << m_node << " executing pc: " << m_pc;
+ switch (current.getOpcode()){
+ case Opcode_BEGIN:
+ //cout << " -- begin.";
+ initiateBeginTransaction();
+ break;
+ case Opcode_LD:
+ //cout << " -- load: " << current.getAddress();
+ initiateLoad(current.getAddress());
+ break;
+ case Opcode_INC:
+ //cout << " -- inc.";
+ initiateInc(current.getAddress());
+ break;
+ case Opcode_ST:
+ //cout << " -- store: " << current.getAddress();
+ initiateStore(current.getAddress());
+ break;
+ case Opcode_COMMIT:
+ //cout << " -- commit.";
+ initiateCommit();
+ break;
+ default:
+ WARN_EXPR(current.getOpcode());
+ ERROR_MSG("Invalid opcode");
+ };
+ //cout << endl;
+}
+
+// Completion callback from the sequencer.  If the transaction was aborted
+// the pc/register are reset and the whole transaction retries; otherwise the
+// just-completed instruction's side effects are applied (load into register,
+// store into Ruby memory) and the next step is scheduled.
+void XactAbortRequestGenerator::performCallback(NodeID proc, SubBlock& data)
+{
+ assert(m_xact_status == XactAbortRequestGeneratorStatus_Waiting ||
+ m_xact_status == XactAbortRequestGeneratorStatus_Aborted);
+ assert(proc == m_node);
+
+ Address address = data.getAddress();
+
+ DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, m_xact_status);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, address);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, data);
+
+ // NOTE(review): m_last_transition is refreshed here, before the
+ // record*Latency calls below subtract it from the current time -- those
+ // recorded latencies are therefore ~0.  Confirm whether the refresh was
+ // meant to happen after recording.
+ m_last_transition = g_eventQueue_ptr->getTime();
+
+ //cout << " " << m_node << " in performCallback, pc:" << m_pc
+ // << ", addr:" << address << endl;
+ if(m_xact_status == XactAbortRequestGeneratorStatus_Aborted){
+ cout << " " << m_node << " aborted, resetting pc." << endl;
+ m_pc = 0;
+ m_register = 5;
+ m_xact_status = XactAbortRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ } else {
+ m_xact_status = XactAbortRequestGeneratorStatus_Blocked;
+
+ bool found;
+ uint8 value;
+ switch (m_instructions[m_pc].getOpcode()){
+ case Opcode_BEGIN:
+ m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ m_register = 5;
+ m_xact_status = XactAbortRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ m_pc++;
+ break;
+ case Opcode_LD:
+ m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ // Loads consume only the first byte of the sub-block.
+ m_register = data.getByte(0);
+ //cout << " " << m_node << " " << g_eventQueue_ptr->getTime() << " Callback--LD: " << (int) m_register << endl;
+ m_xact_status = XactAbortRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ m_pc++;
+ break;
+ //case Opcode_INC: // We shouldn't get a callback for this!
+ //m_driver.recordSwapLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+
+ // break;
+ case Opcode_ST:
+ m_driver.recordReleaseLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ //data.setByte(address.getOffset(), m_register);
+ data.setByte(0, m_register);
+ //cout << " " << m_node << " " << g_eventQueue_ptr->getTime() << " Callback--ST: " << (int) m_register << endl;
+
+ // Write the register through to Ruby's backing store, then read it
+ // back to verify the write landed.
+ //dataArray[address.getAddress()] = m_register;
+ found = sequencer()->setRubyMemoryValue(address, (char *) (&m_register), 1);
+ assert(found);
+ found = sequencer()->getRubyMemoryValue(address, (char *) (&value), 1);
+ assert(found);
+ assert(value == m_register);
+
+ m_xact_status = XactAbortRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ m_pc++;
+ break;
+ case Opcode_COMMIT:
+ m_counter++;
+ cout << " " << m_node << " callback--commit, counter is " << m_counter << " length is: " << g_tester_length << endl;
+ // Check for correctness
+ checkCorrectness();
+
+ m_driver.recordReleaseLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+
+ // Start another transaction until g_tester_length commits have run.
+ if (m_counter < g_tester_length) {
+ m_last_transition = g_eventQueue_ptr->getTime();
+ //pickAddress(); // Necessary?
+
+ // Create new random transaction
+ newTransaction();
+ m_pc = 0;
+
+ m_xact_status = XactAbortRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ } else {
+ cout << "Ending" << endl;
+ m_driver.reportDone();
+ m_xact_status = XactAbortRequestGeneratorStatus_Done;
+ }
+ break;
+ default:
+ ERROR_MSG("Invalid Opcode");
+ };
+ }
+}
+
+// Delay (in cycles) scheduled after a completed store/commit.
+int XactAbortRequestGenerator::thinkTime() const
+{
+ return g_think_time;
+}
+
+// Delay scheduled after a completed begin/load or an abort retry.
+int XactAbortRequestGenerator::waitTime() const
+{
+ return g_wait_time;
+}
+
+// Delay scheduled after a register increment (no memory request involved).
+int XactAbortRequestGenerator::holdTime() const
+{
+ return g_hold_time;
+}
+
+// Currently a no-op; address selection is done in newTransaction().
+void XactAbortRequestGenerator::pickAddress()
+{
+ //m_address = m_driver.pickAddress(m_node);
+}
+
+// Issues a BEGIN_XACT request and opens a transaction in the manager.
+void XactAbortRequestGenerator::initiateBeginTransaction()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Begin Transaction");
+ cout << "### -- initiating Begin " << m_node << endl;
+ m_xact_status = XactAbortRequestGeneratorStatus_Waiting;
+ sequencer()->makeRequest(CacheMsg(Address(physical_address_t(0)), Address(physical_address_t(0)), CacheRequestType_BEGIN_XACT, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0), transactionManager()->inExposedAction(0), 0));
+ transactionManager()->beginTransaction();
+}
+
+// Issues a transactional store; completion arrives via performCallback.
+void XactAbortRequestGenerator::initiateStore(Address addr)
+{
+ //DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Store");
+ //cout << "### -- initiating Store " << m_node << endl;
+ m_xact_status = XactAbortRequestGeneratorStatus_Waiting;
+ ASSERT(transactionManager()->inTransaction(0));
+ sequencer()->makeRequest(CacheMsg(addr, addr, CacheRequestType_ST_XACT, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0), transactionManager()->inExposedAction(0), 0));
+}
+
+// Issues a COMMIT_XACT request and closes the transaction in the manager.
+void XactAbortRequestGenerator::initiateCommit()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Commit ");
+ cout << "### -- initiating Commit " << m_node << endl;
+
+ m_xact_status = XactAbortRequestGeneratorStatus_Waiting;
+ sequencer()->makeRequest(CacheMsg(Address(physical_address_t(0)), Address(physical_address_t(0)), CacheRequestType_COMMIT_XACT, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0), transactionManager()->inExposedAction(0), 0));
+ transactionManager()->commitTransaction();
+}
+
+// Issues a transactional load; completion arrives via performCallback.
+void XactAbortRequestGenerator::initiateLoad(Address addr)
+{
+ //DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Load ");
+ //cout << "### -- initiating Load " << m_node << endl;
+ m_xact_status = XactAbortRequestGeneratorStatus_Waiting;
+ ASSERT(transactionManager()->inTransaction(0));
+ sequencer()->makeRequest(CacheMsg(addr, addr, CacheRequestType_LD_XACT, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0), transactionManager()->inExposedAction(0), 0));
+}
+
+// Register increment: purely local (no memory request), so it advances the
+// pc itself and reschedules directly.
+void XactAbortRequestGenerator::initiateInc(Address addr)
+{
+ //DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Load ");
+ //cout << "### -- initiating Inc " << m_node << endl;
+ m_register++;
+ m_xact_status = XactAbortRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, holdTime());
+ m_pc++;
+}
+
+// Validates a committed transaction: phase 1 replays the instruction stream
+// on the private testArray; phase 2 compares every LD/ST target against
+// Ruby's memory image and asserts on any mismatch.
+void XactAbortRequestGenerator::checkCorrectness(){
+ // Execute the transaction on the test array
+ int testPC = 0;
+ bool done = false;
+ // The register is live across instructions (LD loads it, INC bumps it,
+ // ST writes it back), so keep one value for the whole replay instead of
+ // a fresh -- uninitialized -- variable on every iteration.
+ uint8 reg_val = 0;
+ for(int i=0; i<XACT_LENGTH && !done; ++i){
+ Opcode op = m_instructions[i].getOpcode();
+ Address addr = m_instructions[i].getAddress();
+ ASSERT(addr.getAddress() < testArray.size());
+ switch(op){
+ case Opcode_BEGIN:
+ reg_val = 0;
+ break; // do nothing
+ case Opcode_LD:
+ reg_val = testArray[addr.getAddress()];
+ //cout << m_node << " LD: " << addr << ", " << (int) reg_val << endl;
+ break;
+ case Opcode_INC:
+ reg_val++;
+ //cout << m_node << " INC: " << (int) reg_val << endl;
+ break;
+ case Opcode_ST:
+ testArray[addr.getAddress()] = reg_val;
+ //cout << m_node << " ST: " << addr << ", " << (int) reg_val << endl;
+ break;
+ case Opcode_COMMIT:
+ done = true;
+ break;
+ default:
+ ERROR_MSG("Invalid Opcode.");
+ };
+ }
+
+ bool success = true;
+ uint8 ruby_value;
+ // Reset the loop guard: it was left true by the replay loop above, which
+ // made the verification loop below a no-op (the sibling
+ // XactRequestGenerator::checkCorrectness already resets it).
+ done = false;
+ bool found = false;
+ for(int i=0; i<XACT_LENGTH && !done; ++i){
+ Opcode op = m_instructions[i].getOpcode();
+ Address addr = m_instructions[i].getAddress();
+
+ switch(op){
+ case Opcode_BEGIN:
+ case Opcode_INC:
+ break; // do nothing
+ case Opcode_LD:
+ case Opcode_ST:
+ found = sequencer()->getRubyMemoryValue(m_instructions[i].getAddress(), (char *) &ruby_value, 1);
+ assert(found);
+
+ // Compare against the entry for this *address*; 'i' is an
+ // instruction index, not a memory address.
+ if (ruby_value != testArray[addr.getAddress()]){
+ success = false;
+ WARN_MSG("DATA MISMATCH!");
+ WARN_EXPR((int) ruby_value);
+ WARN_EXPR((int) testArray[addr.getAddress()]);
+ WARN_EXPR(i);
+ assert(success);
+ }
+ break;
+ case Opcode_COMMIT:
+ done = true;
+ break;
+ default:
+ ERROR_MSG("Invalid Opcode.");
+ };
+ }
+ cout << m_node << " CORRECT!" << endl;
+}
+
+// Returns this node's Sequencer, resolved through its owning chip.
+Sequencer* XactAbortRequestGenerator::sequencer() const
+{
+ return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
+}
+
+// Returns this node's TransactionManager, resolved through its owning chip.
+TransactionManager* XactAbortRequestGenerator::transactionManager() const
+{
+ return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getTransactionManager(m_node%RubyConfig::numberOfProcsPerChip());
+}
+
+// Intentionally empty; generator state is reported via cout tracing.
+void XactAbortRequestGenerator::print(ostream& out) const
+{
+}
+
+// Abort notification: only flags the state; the pc/register reset happens
+// in performCallback when the next callback observes the Aborted state.
+void XactAbortRequestGenerator::abortTransaction(){
+ cout << " " << m_node << " *** ABORT! ***" << endl;
+ //m_pc = 0;
+ //m_register = 5;
+ m_xact_status = XactAbortRequestGeneratorStatus_Aborted;
+}
+
+#endif //XACT_MEM
diff --git a/src/mem/ruby/tester/XactAbortRequestGenerator.hh b/src/mem/ruby/tester/XactAbortRequestGenerator.hh
new file mode 100644
index 000000000..90ec1bf1b
--- /dev/null
+++ b/src/mem/ruby/tester/XactAbortRequestGenerator.hh
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef XACTABORTREQUESTGENERATOR_H
+#define XACTABORTREQUESTGENERATOR_H
+
+#ifdef XACT_MEM
+
+#include "RequestGenerator.hh"
+#include "global.hh"
+#include "Consumer.hh"
+#include "NodeID.hh"
+#include "Address.hh"
+
+class Sequencer;
+class SubBlock;
+class SyntheticDriver;
+class Instruction;
+class TransactionManager;
+
+#define MAX_ADDRESS 16777216
+
+// Generator state machine: Waiting (request outstanding), Ready (next step
+// scheduled), Blocked (processing a step), Aborted (transaction aborted,
+// retry pending), Done (test length reached).
+enum XactAbortRequestGeneratorStatus {
+ XactAbortRequestGeneratorStatus_Waiting,
+ XactAbortRequestGeneratorStatus_Ready,
+ XactAbortRequestGeneratorStatus_Blocked,
+ XactAbortRequestGeneratorStatus_Aborted,
+ XactAbortRequestGeneratorStatus_Done
+};
+
+// Request generator that runs randomized transactions (BEGIN/LD/INC/ST/
+// COMMIT) against the Ruby memory system, retries on abort, and checks the
+// committed results against a private model of memory (testArray).
+class XactAbortRequestGenerator : public RequestGenerator {
+public:
+ // Constructors
+ XactAbortRequestGenerator(NodeID node, SyntheticDriver& driver);
+
+ // Destructor
+ ~XactAbortRequestGenerator();
+
+ // Public Methods
+ // Event-queue callback: executes the next instruction.
+ void wakeup();
+ // Completion callback from the sequencer for an outstanding request.
+ void performCallback(NodeID proc, SubBlock& data);
+ // Marks the current transaction aborted; retry is driven by performCallback.
+ void abortTransaction();
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+ int thinkTime() const;
+ int waitTime() const;
+ int holdTime() const;
+ void initiateBeginTransaction();
+ void initiateStore(Address a);
+ void initiateCommit();
+ void initiateInc(Address a);
+ void initiateLoad(Address a);
+ void pickAddress();
+ Sequencer* sequencer() const;
+ TransactionManager* transactionManager() const;
+ void execute();
+
+ void checkCorrectness();
+
+ // Private copy constructor and assignment operator
+ XactAbortRequestGenerator(const XactAbortRequestGenerator& obj);
+ XactAbortRequestGenerator& operator=(const XactAbortRequestGenerator& obj);
+
+ // Builds the next random instruction stream into m_instructions.
+ void newTransaction();
+
+ // Data Members (m_ prefix)
+ SyntheticDriver& m_driver;
+ NodeID m_node;
+ XactAbortRequestGeneratorStatus m_xact_status;
+ // Number of committed transactions so far (test ends at g_tester_length).
+ int m_counter;
+ Time m_last_transition;
+ Address m_address;
+
+ // Instruction stream of length XACT_LENGTH, rebuilt per transaction.
+ Instruction *m_instructions;
+ int m_pc;
+ // Emulated architectural register moved by LD/INC/ST.
+ uint8 m_register;
+ // Shared (per-process) model of memory used by checkCorrectness.
+ static Vector<uint8> testArray;
+ //static uint8 dataArray[];
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const XactAbortRequestGenerator& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Streams generator state via print and flushes the stream.
+extern inline
+ostream& operator<<(ostream& out, const XactAbortRequestGenerator& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //XACT_MEM
+
+#endif //XACTABORTREQUESTGENERATOR_H
+
diff --git a/src/mem/ruby/tester/XactRequestGenerator.cc b/src/mem/ruby/tester/XactRequestGenerator.cc
new file mode 100644
index 000000000..c7870bb25
--- /dev/null
+++ b/src/mem/ruby/tester/XactRequestGenerator.cc
@@ -0,0 +1,637 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * $Id: XactRequestGenerator.C 1.7 05/09/22 16:49:19-05:00 xu@s0-29.cs.wisc.edu $
+ *
+ */
+
+#include "XactRequestGenerator.hh"
+#include "LockStatus.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "RubyConfig.hh"
+#include "SubBlock.hh"
+#include "SyntheticDriver.hh"
+#include "Chip.hh"
+#include "Instruction.hh"
+
+// Constructor: allocates the per-generator memory model and instruction
+// buffer, builds the first random transaction, and schedules the initial
+// wakeup at a random offset so generators do not start in lock-step.
+XactRequestGenerator::XactRequestGenerator(NodeID node, SyntheticDriver& driver) :
+ RequestGenerator(node, driver), m_driver(driver)
+{
+ // Initializer list order fixed to base-then-member: members are
+ // initialized in declaration order after the base class regardless of
+ // listing order, so the old m_driver-first spelling was misleading (and
+ // triggered -Wreorder); this also matches XactAbortRequestGenerator.
+ DEBUG_EXPR(TESTER_COMP, MedPrio, "#### -- Creating XactRequestGenerator\n");
+ cout << "#### -- Creating XactRequestGenerator " << node << endl;
+
+ assert(XACT_SIZE > 0);
+ // Private model of memory used by checkCorrectness (stray ';;' removed).
+ testArray = new uint8[g_MEMORY_SIZE_BYTES];
+ // Create instructions
+ m_instructions = new Instruction[XACT_LENGTH];
+ newTransaction(true);
+
+ m_status = XactRequestGeneratorStatus_Ready;
+ m_last_transition = 0;
+ m_node = node;
+ //pickAddress();
+ m_counter = 0;
+ // Initial register value; reset to 5 on every BEGIN / abort as well.
+ m_register = 5;
+ m_pc = 0;
+
+ m_abortPending = false;
+ g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
+}
+
+// Builds a new random instruction stream of random length m_size.  A coin
+// flip picks transactional mode (BEGIN ... COMMIT, possibly nested) or
+// plain mode (ops terminated by DONE).  'depth' tracks open BEGINs and the
+// selection logic guarantees every BEGIN can still be matched by a COMMIT
+// within the remaining slots; 'prev_ldst' forces at least one LD/ST/INC
+// between nesting changes.  'init' is true only for the constructor call,
+// where the not-in-transaction sanity check must be skipped.
+void XactRequestGenerator::newTransaction(bool init){
+ // important: reset abort flag
+ m_abortPending = false;
+
+ int depth = 0;
+ bool prev_ldst = false;
+ m_size = (random() % (XACT_LENGTH-2)) + 2;
+ cout << "XactRequestGenerator::newTransaction m_size=" << m_size << endl;
+ ASSERT(m_size >= 2);
+ if (!init)
+ ASSERT(!transactionManager()->inTransaction(0));
+ m_transaction = (random() % 2);
+
+ if(m_transaction){
+ cout << " " << m_node << " new transaction " << endl;
+ } else {
+ cout << " " << m_node << " new NON-transaction " << endl;
+ }
+
+ cout << "***INSTR STREAM ";
+ for(int i=0; i<m_size; ++i){
+ if (i == 0 && m_transaction){ // new xact must start with begin
+ m_instructions[i].init(Opcode_BEGIN, Address(1));
+ depth++;
+ cout << "begin ";
+ prev_ldst = false;
+ } else if (i == m_size - 1){
+ if(m_transaction) { // new xact must end with commit
+ m_instructions[i].init(Opcode_COMMIT, Address(1));
+ depth--;
+ cout << "commit ";
+ } else {
+ m_instructions[i].init(Opcode_DONE, Address(1));
+ cout << "done ";
+ }
+ } else {
+ // selectAction: 0 = commit, 1 = memory/register op, 2 = begin.
+ int selectAction;
+ if (!m_transaction) { // non-xact: must choose op
+ selectAction = 1;
+ } else { // xact
+ if (depth == m_size - i) { // must choose commit
+ selectAction = 0;
+ } else if (prev_ldst) { // only choose xact if intervenient ld/st
+ if (m_size - i < depth + 3) { // can choose op or
+ // commit (can't choose
+ // begin)
+ selectAction = (random() % 2);
+ } else if (depth == 0) { // can choose begin or op (can't
+ // choose commit)
+ selectAction = (random() % 2);
+ if (selectAction == 0) selectAction = 2;
+ } else { // can choose begin, op, or commit
+ selectAction = (random() % 3);
+ }
+ } else {
+ selectAction = 1;
+ }
+ }
+
+ physical_address_t address;
+ int selectOpcode;
+ switch (selectAction) {
+ case 0: // commit
+ m_instructions[i].init(Opcode_COMMIT, Address(1));
+ depth--;
+ cout << "commit ";
+ prev_ldst = false;
+ break;
+ case 1:
+ // Memory/register op at a random address within the xact range.
+ address = (random() % XACT_SIZE);
+ //cout << "address: " << address << endl;
+ //cout << "XACT_SIZE: " << XACT_SIZE << endl;
+
+ //assert(address < XACT_SIZE);
+ //physical_address_t address = 0;
+ selectOpcode = random() % 3;
+ Opcode op;
+ switch(selectOpcode){
+ case 0:
+ op = Opcode_LD;
+ cout << "ld ";
+ break;
+ case 1:
+ op = Opcode_ST;
+ cout << "st ";
+ break;
+ case 2:
+ op = Opcode_INC;
+ cout << "inc ";
+ break;
+ default:
+ assert(false);
+ };
+ assert(op < Opcode_NUM_OPCODES);
+ m_instructions[i].init(op, Address(address));
+ prev_ldst = true;
+ break;
+ case 2:
+ m_instructions[i].init(Opcode_BEGIN, Address(1));
+ depth++;
+ cout << "begin ";
+ prev_ldst = false;
+ break;
+ default:
+ assert(false);
+ };
+ }
+ }
+ cout << endl;
+ if(m_transaction){
+ ASSERT(m_instructions[0].getOpcode() == Opcode_BEGIN);
+ }
+}
+
+// Destructor: releases the memory model and the instruction buffer.
+XactRequestGenerator::~XactRequestGenerator()
+{
+ if(testArray){
+ delete [] testArray;
+ testArray = NULL;
+ }
+ // m_instructions was allocated with new[]; it must be freed with
+ // delete[] -- plain delete on an array is undefined behavior.
+ delete [] m_instructions;
+}
+
+// Event-queue callback: records the transition time and executes the
+// instruction at the current pc.  Only legal from Ready or Aborted state.
+void XactRequestGenerator::wakeup()
+{
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_node);
+ DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
+
+ assert(m_status == XactRequestGeneratorStatus_Ready || m_status == XactRequestGeneratorStatus_Aborted);
+ m_status = XactRequestGeneratorStatus_Blocked;
+
+ m_last_transition = g_eventQueue_ptr->getTime();
+ execute();
+}
+
+// Dispatches the instruction at m_pc to the matching initiate* helper.
+// m_pc is advanced by the completion paths, not here.
+void XactRequestGenerator::execute(){
+ cout << "XactRequestGenerator::execute m_node=" << m_node << " m_pc=" << m_pc << " m_size=" << m_size << endl;
+ assert(m_pc >= 0);
+ assert(m_pc < m_size);
+ assert(m_pc < XACT_LENGTH);
+
+ Instruction current = m_instructions[m_pc];
+ switch (current.getOpcode()){
+ case Opcode_BEGIN:
+ cout << " -- begin.";
+ initiateBeginTransaction();
+ break;
+ case Opcode_LD:
+ cout << " -- load: " << current.getAddress();
+ initiateLoad(current.getAddress());
+ break;
+ case Opcode_INC:
+ cout << " -- inc.";
+ initiateInc(current.getAddress());
+ break;
+ case Opcode_ST:
+ cout << " -- store: " << current.getAddress();
+ initiateStore(current.getAddress());
+ break;
+ case Opcode_COMMIT:
+ cout << " -- commit.";
+ initiateCommit();
+ break;
+ case Opcode_DONE:
+ cout << " -- done.";
+ initiateDone();
+ break;
+ default:
+ WARN_EXPR(current.getOpcode());
+ ERROR_MSG("Invalid opcode");
+ };
+ cout << endl;
+}
+
+// Completion callback from the sequencer.  On abort, the pc is restored
+// from pc_stack at the post-abort nesting depth and the step retries after
+// ABORT_RETRY_TIME.  Otherwise the completed instruction's side effects are
+// applied; COMMIT distinguishes the outermost level (counts toward
+// g_tester_length, verifies results, builds a new stream) from inner
+// commits (just advance the pc).
+void XactRequestGenerator::performCallback(NodeID proc, SubBlock& data)
+{
+ cout << "XactRequestGenerator::performCallback m_node=" << m_node << endl;
+ assert(m_status == XactRequestGeneratorStatus_Waiting ||
+ m_status == XactRequestGeneratorStatus_Aborted);
+ assert(proc == m_node);
+
+ Address address = data.getAddress();
+ //assert(address == m_address);
+
+ DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, address);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, data);
+
+ // NOTE(review): m_last_transition is refreshed here, before the
+ // record*Latency calls below subtract it from the current time -- those
+ // recorded latencies are therefore ~0.  Confirm whether the refresh was
+ // meant to happen after recording.
+ m_last_transition = g_eventQueue_ptr->getTime();
+
+ cout << " " << m_node << " in performCallback, pc:" << m_pc
+ << ", addr:" << address << endl;
+
+
+ int depth;
+ if(m_status == XactRequestGeneratorStatus_Aborted){
+ // Resume at the pc of the BEGIN for the aborted nesting level.
+ depth = transactionManager()->postAbortIndex(0);
+ m_pc = pc_stack[depth];
+ cout << "XactRequestGenerator::performCallback m_node=" << m_node << " setting m_pc=" << m_pc << endl;
+ printPcStack(depth);
+ m_register = 5;
+ m_status = XactRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, ABORT_RETRY_TIME);
+ } else {
+ m_status = XactRequestGeneratorStatus_Blocked;
+ bool found, outermost;
+ uint8 value;
+ switch (m_instructions[m_pc].getOpcode()){
+ case Opcode_BEGIN:
+ m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ m_register = 5;
+ m_status = XactRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ // Remember this BEGIN's pc so an abort at this depth can restart here.
+ depth = transactionManager()->getDepth(0);
+ if (!transactionManager()->isSubsuming(0)) {
+ pc_stack[depth - 1] = m_pc;
+ cout << "XactRequestGenerator::performCallback m_node=" << m_node << " SETTING PC_STACK" << endl;
+ printPcStack(depth);
+ }
+ m_pc++;
+ break;
+ case Opcode_LD:
+ m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ // Loads consume only the first byte of the sub-block.
+ m_register = data.getByte(0);
+ //cout << " " << m_node << " " << g_eventQueue_ptr->getTime() << " Callback--LD: " << (int) m_register << endl;
+ m_status = XactRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ m_pc++;
+ break;
+ //case Opcode_INC: // We shouldn't get a callback for this!
+ //m_driver.recordSwapLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+
+ // break;
+ case Opcode_ST:
+ m_driver.recordReleaseLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+ //data.setByte(address.getOffset(), m_register);
+ data.setByte(0, m_register);
+ //cout << " " << m_node << " " << g_eventQueue_ptr->getTime() << " Callback--ST: " << (int) m_register << endl;
+
+ // Write the register through to Ruby's backing store, then read it
+ // back to verify the write landed.
+ //dataArray[address.getAddress()] = m_register;
+ found = sequencer()->setRubyMemoryValue(address, (char *) (&m_register), 1);
+ assert(found);
+ found = sequencer()->getRubyMemoryValue(address, (char *) (&value), 1);
+ assert(found);
+ assert(value == m_register);
+
+ m_status = XactRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ m_pc++;
+ break;
+ case Opcode_COMMIT:
+ outermost = transactionManager()->getDepth(0) == 1;
+ if (outermost) { // about to commit outermost
+ m_counter++;
+ }
+
+ cout << " " << m_node << " callback--commit, counter is " << m_counter << " length is: " << g_tester_length << endl;
+ // Check for correctness
+
+ checkCorrectness();
+ transactionManager()->commitTransaction();
+
+ m_driver.recordReleaseLatency(g_eventQueue_ptr->getTime() - m_last_transition);
+
+ if (outermost) {
+ if (m_counter < g_tester_length) {
+ m_last_transition = g_eventQueue_ptr->getTime();
+ //pickAddress(); // Necessary?
+
+ // Create new random transaction
+ cout << "CREATING NEW RANDOM XACT SET" << endl;
+ newTransaction(false);
+ m_pc = 0;
+
+ m_status = XactRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ } else {
+ cout << " " << m_node << " Done." << endl;
+ m_driver.reportDone();
+ m_status = XactRequestGeneratorStatus_Done;
+ }
+ } else {
+ m_status = XactRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ m_pc++;
+ }
+ break;
+ default:
+ ERROR_MSG("Invalid Opcode");
+ };
+ }
+}
+
+// Delay (in cycles) scheduled after a completed store/commit.
+int XactRequestGenerator::thinkTime() const
+{
+ return g_think_time;
+}
+
+// Delay scheduled after a completed begin/load.
+int XactRequestGenerator::waitTime() const
+{
+ return g_wait_time;
+}
+
+// Delay scheduled after a register increment (no memory request involved).
+int XactRequestGenerator::holdTime() const
+{
+ return g_hold_time;
+}
+
+// Currently a no-op; address selection is done in newTransaction().
+void XactRequestGenerator::pickAddress()
+{
+ //m_address = m_driver.pickAddress(m_node);
+}
+
+// Issues a BEGIN_XACT request and opens a transaction in the manager.
+void XactRequestGenerator::initiateBeginTransaction()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Begin Transaction");
+ cout << "### -- initiating Begin " << m_node << endl;
+ m_status = XactRequestGeneratorStatus_Waiting;
+ sequencer()->makeRequest(CacheMsg(Address(physical_address_t(0)), Address(physical_address_t(0)), CacheRequestType_BEGIN_XACT, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0), transactionManager()->inExposedAction(0), 0));
+ transactionManager()->beginTransaction();
+}
+
+// Issues a store -- transactional (ST_XACT) or plain (ST) depending on the
+// mode chosen for the current stream.
+void XactRequestGenerator::initiateStore(Address addr)
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Store");
+ DEBUG_MSG(TESTER_COMP, MedPrio, addr);
+ cout << "### -- initiating Store " << m_node << endl;
+ m_status = XactRequestGeneratorStatus_Waiting;
+ if(m_transaction){
+ sequencer()->makeRequest(CacheMsg(addr, addr, CacheRequestType_ST_XACT, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0), transactionManager()->inExposedAction(0), 0));
+ } else {
+ sequencer()->makeRequest(CacheMsg(addr, addr, CacheRequestType_ST, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0),transactionManager()->inExposedAction(0), 0));
+ }
+}
+
+// Issues a COMMIT_XACT request.  Note: unlike the Abort variant, the
+// manager's commitTransaction() is called from performCallback, not here.
+void XactRequestGenerator::initiateCommit()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Commit ");
+ cout << "### -- initiating Commit " << m_node << endl;
+
+ m_status = XactRequestGeneratorStatus_Waiting;
+ sequencer()->makeRequest(CacheMsg(Address(physical_address_t(0)), Address(physical_address_t(0)), CacheRequestType_COMMIT_XACT, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0), transactionManager()->inExposedAction(0), 0));
+}
+
+// Issues a load -- transactional (LD_XACT) or plain (LD) depending on the
+// mode chosen for the current stream.
+void XactRequestGenerator::initiateLoad(Address addr)
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Load ");
+ DEBUG_MSG(TESTER_COMP, MedPrio, addr);
+
+ m_status = XactRequestGeneratorStatus_Waiting;
+ if(m_transaction){
+ cout << "### -- initiating Load XACT" << m_node << endl;
+ sequencer()->makeRequest(CacheMsg(addr, addr, CacheRequestType_LD_XACT, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0), transactionManager()->inExposedAction(0), 0 ));
+ } else {
+ cout << "### -- initiating Load " << m_node << endl;
+ sequencer()->makeRequest(CacheMsg(addr, addr, CacheRequestType_LD, Address(m_pc), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false, Address(0), transactionManager()->getTransactionLevel(0), 0, 0 /* only 1 SMT thread */, transactionManager()->getTimestamp(0), transactionManager()->inExposedAction(0), 0));
+ }
+}
+
+// Register increment: purely local (no memory request), so it advances the
+// pc itself and reschedules directly.
+void XactRequestGenerator::initiateInc(Address addr)
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Inc ");
+ DEBUG_MSG(TESTER_COMP, MedPrio, addr);
+ cout << "### -- initiating Inc " << m_node << endl;
+ m_register++;
+ m_status = XactRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, holdTime());
+ m_pc++;
+}
+
+// Terminates a non-transactional stream: builds the next random stream and
+// reschedules.  Note: unlike COMMIT, this path does not bump m_counter, so
+// only transactional streams count toward g_tester_length.
+void XactRequestGenerator::initiateDone()
+{
+ DEBUG_MSG(TESTER_COMP, MedPrio, "### -- initiating Done ");
+ cout << "### -- initiating Done " << m_node << endl;
+ newTransaction(false);
+ m_pc = 0;
+ //m_register = 5;
+
+ m_status = XactRequestGeneratorStatus_Ready;
+ g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+
+}
+
+// Validates a committed stream: phase 1 replays the instructions on the
+// private testArray; phase 2 compares every LD/ST target against Ruby's
+// memory image and asserts on any mismatch.
+void XactRequestGenerator::checkCorrectness(){
+ // Execute the transaction on the test array
+ int testPC = 0;
+ bool done = false;
+ // The register is live across instructions (LD loads it, INC bumps it,
+ // ST writes it back), so keep one value for the whole replay instead of
+ // a fresh -- uninitialized -- variable on every iteration.
+ uint8 reg_val = 0;
+ for(int i=0; i<XACT_LENGTH && !done; ++i){
+ Opcode op = m_instructions[i].getOpcode();
+ Address addr = m_instructions[i].getAddress();
+
+ switch(op){
+ case Opcode_BEGIN:
+ reg_val = 0;
+ break; // do nothing
+ case Opcode_LD:
+ //cout << "\tcheckCorrectness " << m_node << " LD: " << addr << " address = " << hex << addr.getAddress() << endl;
+ ASSERT(addr.getAddress() < g_MEMORY_SIZE_BYTES);
+ reg_val = testArray[addr.getAddress()];
+ //cout << m_node << " LD: " << addr << ", " << (int) reg_val << endl;
+ break;
+ case Opcode_INC:
+ reg_val++;
+ //cout << m_node << " INC: " << (int) reg_val << endl;
+ break;
+ case Opcode_ST:
+ //cout << "\tcheckCorrectness " << m_node << " ST: " << addr << " address = " << hex << addr.getAddress() << endl;
+ ASSERT(addr.getAddress() < g_MEMORY_SIZE_BYTES);
+ testArray[addr.getAddress()] = reg_val;
+ //cout << m_node << " ST: " << addr << ", " << (int) reg_val << endl;
+ break;
+ case Opcode_DONE:
+ case Opcode_COMMIT:
+ done = true;
+ break;
+ default:
+ ERROR_MSG("Invalid Opcode.");
+ };
+ }
+
+ bool success = true;
+ uint8 ruby_value;
+ done = false;
+ bool found = false;
+ for(int i=0; i<XACT_LENGTH && !done; ++i){
+ Opcode op = m_instructions[i].getOpcode();
+ Address addr = m_instructions[i].getAddress();
+
+ switch(op){
+ case Opcode_BEGIN:
+ case Opcode_INC:
+ break; // do nothing
+ case Opcode_LD:
+ case Opcode_ST:
+ found = sequencer()->getRubyMemoryValue(m_instructions[i].getAddress(), (char *) &ruby_value, 1);
+ assert(found);
+
+ // Compare against the entry for this *address*; 'i' is an
+ // instruction index, not a memory address.
+ if (ruby_value != testArray[addr.getAddress()]){
+ success = false;
+ WARN_MSG("DATA MISMATCH!");
+ WARN_EXPR((int) ruby_value);
+ WARN_EXPR((int) testArray[addr.getAddress()]);
+ WARN_EXPR(i);
+ assert(success);
+ }
+
+ break;
+ case Opcode_COMMIT:
+ done = true;
+ break;
+ default:
+ ERROR_MSG("Invalid Opcode.");
+ };
+ }
+ cout << m_node << " CORRECT!" << endl;
+}
+
+Sequencer* XactRequestGenerator::sequencer() const
+{
+ return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
+}
+
+TransactionManager* XactRequestGenerator::transactionManager() const
+{
+ return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getTransactionManager(m_node%RubyConfig::numberOfProcsPerChip());
+}
+
+void XactRequestGenerator::print(ostream& out) const
+{
+}
+
+void XactRequestGenerator::abortTransaction(){
+ cout << " " << m_node << " *** ABORT! ***" << endl;
+ m_status = XactRequestGeneratorStatus_Aborted;
+}
+
+void XactRequestGenerator::printPcStack(int depth) {
+ cout << "XactRequestGenerator::printPcStack m_node=" << m_node << " [";
+ for (int i = 0; i < depth; i++) {
+ cout << pc_stack[i] << ", ";
+ }
+ cout << "]" << endl;
+}
+
+void XactRequestGenerator::notifySendNack( const Address & physicalAddr, uint64 remote_timestamp, const MachineID & remote_id) {
+ Address addr = physicalAddr;
+ addr.makeLineAddress();
+ TransactionManager * xact_mgr = transactionManager();
+ bool isOlder = xact_mgr->isOlder(remote_timestamp, remote_id, addr );
+ if(isOlder){
+ bool inReadSet = xact_mgr->isInReadSetFilter(physicalAddr, 0);
+ bool inWriteSet = xact_mgr->isInWriteSetFilter(physicalAddr, 0);
+ // addr should be in perfect or Bloom filter
+ ASSERT( inReadSet || inWriteSet );
+ cout << "notifySendNack addr = " << addr << " setting POSSIBLE CYCLE " << " my_ts = " << xact_mgr->getTimestamp(0) << " remote_ts = " << remote_timestamp << " proc = " << m_node << endl;
+ xact_mgr->setPossibleCycle(addr, L1CacheMachIDToProcessorNum(remote_id), 0 /*remote thread*/ , remote_timestamp, 0 /*our thread*/);
+ }
+ // otherwise don't set the proc possible cycle flag
+}
+
+void XactRequestGenerator::notifyReceiveNack( const Address & physicalAddr, uint64 remote_timestamp, const MachineID & remote_id ) {
+ Address addr = physicalAddr;
+ addr.makeLineAddress();
+ // check whether the possible cycle is set, and whether remote_timestamp is older.
+ // we only have 1 SMT thread
+ TransactionManager * xact_mgr = transactionManager();
+ int local_timestamp = xact_mgr->getTimestamp(0);
+ bool possible_cycle = xact_mgr->possibleCycle(0);
+ // calculate isOlder() only if possible_cycle is set. This is because isOlder assumes proc is in a transaction
+ bool isOlder = false;
+ if(possible_cycle){
+ isOlder = xact_mgr->isOlder(remote_timestamp, remote_id, addr );
+ }
+
+ if(isOlder && possible_cycle){
+ // set our pendingAbort flag
+ cout << "notifyReceiveNack Setting Abort Pending Flag addr= " << addr << " ID = " << m_node << " possible_cycle = " << possible_cycle << " time = " << g_eventQueue_ptr->getTime() << endl;
+ m_abortPending = true;
+ assert(possible_cycle);
+ assert(isOlder);
+ }
+
+ // profile this nack
+ xact_mgr->profileNack(addr, IDToInt(L1CacheMachIDToProcessorNum(remote_id)), remote_timestamp, 0, 0);
+}
+
+void XactRequestGenerator::notifyReceiveNackFinal(const Address & physicalAddr) {
+ Address addr = physicalAddr;
+ addr.makeLineAddress();
+ TransactionManager * xact_mgr = transactionManager();
+ int local_timestamp = xact_mgr->getTimestamp(0);
+ bool possible_cycle = xact_mgr->possibleCycle(0);
+
+ // we should still have an active request
+ if(m_abortPending){
+ cout << "notifyReceiveNackFinal ABORTING addr= " << addr << " ID = " << m_node << " possible_cycle = " << possible_cycle << " time = " << g_eventQueue_ptr->getTime() << endl;
+ assert(possible_cycle);
+
+ // we abort
+ // Step 1: remove the aborting request from sequencer, and mark it as "Trap" in our request table if needed
+ // Note: by marking request as "Trap" we can simulate HW abort delay
+ switch (m_instructions[m_pc].getOpcode()){
+ case Opcode_LD:
+ sequencer()->readCallbackAbort(addr, 0);
+ break;
+ case Opcode_ST:
+ sequencer()->writeCallbackAbort(addr, 0);
+ break;
+ default:
+ cout << "Invalid Opcode = " << m_instructions[m_pc].getOpcode() << endl;
+ ASSERT(0);
+ };
+ // Step 2: call the abort handler explicitly. If using software handler + trap aborts we wait until
+ // Simics transfers control to us again
+ // Note: it is impossible for this request to be a prefetch, so don't need that check
+ xact_mgr->abortTransaction(0, 0, -1 /*dummy remote ID*/, addr);
+ //reset the abort flag
+ m_abortPending = false;
+ }
+ else{
+ // retry the request
+ // figure out whether to retry transactional load or store
+ switch (m_instructions[m_pc].getOpcode()){
+ case Opcode_LD:
+ cout << "RETRYING LOAD " << addr << " of proc = " << m_node << " transactionLevel = " << xact_mgr->getTransactionLevel(0) << " time = " << g_eventQueue_ptr->getTime() << endl;
+ sequencer()->issueInterProcLoadRetryRequest(addr, 0);
+ break;
+ case Opcode_ST:
+ cout << "RETRYING STORE " << addr << " of proc = " << m_node << " transactionLevel = " << xact_mgr->getTransactionLevel(0) << " time = " << g_eventQueue_ptr->getTime() << endl;
+ sequencer()->issueInterProcStoreRetryRequest(addr, 0);
+ break;
+ default:
+ cout << "Invalid Opcode = " << m_instructions[m_pc].getOpcode() << endl;
+ ASSERT(0);
+ };
+ }
+}
diff --git a/src/mem/ruby/tester/XactRequestGenerator.hh b/src/mem/ruby/tester/XactRequestGenerator.hh
new file mode 100644
index 000000000..826a257ce
--- /dev/null
+++ b/src/mem/ruby/tester/XactRequestGenerator.hh
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * $Id: XactRequestGenerator.h 1.4 05/07/07 10:35:32-05:00 kmoore@s0-30.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#ifndef XACTREQUESTGENERATOR_H
+#define XACTREQUESTGENERATOR_H
+
+#include "global.hh"
+#include "RequestGenerator.hh"
+#include "Consumer.hh"
+#include "NodeID.hh"
+#include "Address.hh"
+#include "TransactionManager.hh"
+
+class Sequencer;
+class SubBlock;
+class SyntheticDriver;
+class Instruction;
+class TransactionManager;
+
+#define MAX_ADDRESS 16777216
+const int TESTER_MAX_DEPTH = 16;
+
+enum XactRequestGeneratorStatus {
+ XactRequestGeneratorStatus_Waiting,
+ XactRequestGeneratorStatus_Ready,
+ XactRequestGeneratorStatus_Blocked,
+ XactRequestGeneratorStatus_Aborted,
+ XactRequestGeneratorStatus_Done
+};
+
+class XactRequestGenerator : public RequestGenerator {
+public:
+ // Constructors
+ XactRequestGenerator(NodeID node, SyntheticDriver& driver);
+
+ // Destructor
+ ~XactRequestGenerator();
+
+ // Public Methods
+ void wakeup();
+ void performCallback(NodeID proc, SubBlock& data);
+ void abortTransaction();
+
+ void print(ostream& out) const;
+
+ // For dealing with NACKs/retries
+ void notifySendNack(const Address & addr, uint64 remote_timestamp, const MachineID & remote_id);
+ void notifyReceiveNack(const Address & addr, uint64 remote_timestamp, const MachineID & remote_id);
+ void notifyReceiveNackFinal(const Address & addr);
+private:
+ // Private Methods
+ int thinkTime() const;
+ int waitTime() const;
+ int holdTime() const;
+ void initiateBeginTransaction();
+ void initiateStore(Address a);
+ void initiateCommit();
+ void initiateInc(Address a);
+ void initiateLoad(Address a);
+ void initiateDone();
+ void pickAddress();
+ Sequencer* sequencer() const;
+ TransactionManager* transactionManager() const;
+ void execute();
+ void scheduleEvent(int time);
+ void checkCorrectness();
+
+ // Private copy constructor and assignment operator
+ XactRequestGenerator(const XactRequestGenerator& obj);
+ XactRequestGenerator& operator=(const XactRequestGenerator& obj);
+
+ void newTransaction(bool init);
+ void printPcStack(int depth);
+
+ // Data Members (m_ prefix)
+ SyntheticDriver& m_driver;
+ NodeID m_node;
+ XactRequestGeneratorStatus m_status;
+ int m_counter;
+ int m_size;
+ Time m_last_transition;
+ Address m_address;
+
+ Instruction *m_instructions;
+ int m_pc;
+ int pc_stack[TESTER_MAX_DEPTH];
+ bool m_transaction;
+ uint8 m_register;
+ uint8 * testArray;
+ //static uint8 dataArray[];
+ bool m_eventPending;
+
+ // for pending aborts
+ bool m_abortPending;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const XactRequestGenerator& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const XactRequestGenerator& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //XACTREQUESTGENERATOR_H
+
diff --git a/src/mem/ruby/tester/main.cc b/src/mem/ruby/tester/main.cc
new file mode 100644
index 000000000..35b927f5e
--- /dev/null
+++ b/src/mem/ruby/tester/main.cc
@@ -0,0 +1,51 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "main.hh"
+#include "EventQueue.hh"
+#include "RubyConfig.hh"
+#include "test_framework.hh"
+
+// *******************
+// *** tester main ***
+// *******************
+
+int main(int argc, char *argv[])
+{
+ if (g_SIMICS) {
+ ERROR_MSG("g_SIMICS should not be defined.");
+ }
+
+ tester_main(argc, argv);
+}
diff --git a/src/mem/ruby/tester/main.hh b/src/mem/ruby/tester/main.hh
new file mode 100644
index 000000000..05e3a0e8d
--- /dev/null
+++ b/src/mem/ruby/tester/main.hh
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef MAIN_H
+#define MAIN_H
+
+#include "Global.hh"
+
+#endif //MAIN_H
diff --git a/src/mem/ruby/tester/test_framework.cc b/src/mem/ruby/tester/test_framework.cc
new file mode 100644
index 000000000..0f180409e
--- /dev/null
+++ b/src/mem/ruby/tester/test_framework.cc
@@ -0,0 +1,431 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "protocol_name.hh"
+#include "test_framework.hh"
+#include "System.hh"
+#include "OpalInterface.hh"
+#include "init.hh"
+#include "Tester.hh"
+#include "EventQueue.hh"
+#include "getopt.hh"
+#include "Network.hh"
+#include "CacheRecorder.hh"
+#include "Tracer.hh"
+
+using namespace std;
+#include <string>
+#include <map>
+
+// Maurice
+// extern "C" {
+// #include "simics/api.hh"
+// };
+
+#include "confio.hh"
+#include "initvar.hh"
+
+// A generated file containing the default tester parameters in string form
+// The defaults are stored in the variables
+// global_default_param and global_default_tester_param
+#include "default_param.hh"
+#include "tester_param.hh"
+
+static void parseOptions(int argc, char **argv);
+static void usageInstructions();
+static void checkArg(char ch);
+static void tester_record_cache();
+static void tester_playback_trace();
+static void tester_initialize(int argc, char **argv);
+static void tester_destroy();
+
+static string trace_filename;
+char * my_default_param;
+initvar_t * my_initvar;
+
+void tester_main(int argc, char **argv)
+{
+ tester_initialize(argc, argv);
+
+ if (trace_filename != "") {
+ // playback a trace (for multicast-mask prediction)
+ tester_playback_trace();
+ } else {
+ // test code to create a trace
+ if (!(g_SYNTHETIC_DRIVER || g_DETERMINISTIC_DRIVER) && trace_filename == "") {
+ g_system_ptr->getTracer()->startTrace("ruby.trace.gz");
+ g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 10000);
+ g_system_ptr->getTracer()->stopTrace();
+ }
+
+ g_eventQueue_ptr->triggerAllEvents();
+
+ // This call is placed here to make sure the cache dump code doesn't fall victim to code rot
+ if (!(g_SYNTHETIC_DRIVER || g_DETERMINISTIC_DRIVER)) {
+ tester_record_cache();
+ }
+ }
+ tester_destroy();
+}
+
+static void tester_allocate( void )
+{
+ init_simulator();
+}
+
+static void tester_generate_values( void )
+{
+}
+
+void tester_initialize(int argc, char **argv)
+{
+ int param_len = strlen( global_default_param ) + strlen( global_default_tester_param ) + 1;
+ char *default_param = (char *) malloc( sizeof(char) * param_len );
+ my_default_param = default_param;
+ strcpy( default_param, global_default_param );
+ strcat( default_param, global_default_tester_param );
+
+ // when the initvar object is created, it reads the configuration default
+ // -for the tester, the configuration defaults in config/tester.defaults
+
+ /** note: default_param is included twice in the tester:
+ * -once in init.C
+ * -again in this file
+ */
+ initvar_t *ruby_initvar = new initvar_t( "ruby", "../../../ruby/",
+ default_param,
+ &tester_allocate,
+ &tester_generate_values,
+ NULL,
+ NULL );
+ my_initvar = ruby_initvar;
+ ruby_initvar->checkInitialization();
+ parseOptions(argc, argv);
+
+ ruby_initvar->allocate();
+
+ g_system_ptr->printConfig(cout);
+ cout << "Testing clear stats...";
+ g_system_ptr->clearStats();
+ cout << "Done." << endl;
+ //free( default_param );
+ //delete ruby_initvar;
+}
+
+void tester_destroy()
+{
+ g_system_ptr->printStats(cout);
+ g_debug_ptr->closeDebugOutputFile();
+
+ free(my_default_param);
+ delete my_initvar;
+ // Clean up
+ destroy_simulator();
+ cerr << "Success: " << CURRENT_PROTOCOL << endl;
+}
+
+void tester_install_opal(mf_opal_api_t* opal_api, mf_ruby_api_t* ruby_api)
+{
+ // initialize our api interface
+ OpalInterface::installInterface(ruby_api);
+
+ // update the OpalInterface object to point to opal's interface
+ ((OpalInterface *) g_system_ptr->getDriver())->setOpalInterface(opal_api);
+}
+
+void tester_record_cache()
+{
+ cout << "Testing recording of cache contents" << endl;
+ CacheRecorder recorder;
+ g_system_ptr->recordCacheContents(recorder);
+ int written = recorder.dumpRecords("ruby.caches.gz");
+ int read = Tracer::playbackTrace("ruby.caches.gz");
+ assert(read == written);
+ cout << "Testing recording of cache contents completed" << endl;
+}
+
+void tester_playback_trace()
+{
+ assert(trace_filename != "");
+ cout << "Reading trace from file '" << trace_filename << "'..." << endl;
+ int read = Tracer::playbackTrace(trace_filename);
+ cout << "(" << read << " requests read)" << endl;
+ if (read == 0) {
+ ERROR_MSG("Zero items read from tracefile.");
+ }
+}
+
+// ************************************************************************
+// *** Functions for parsing the command line parameters for the tester ***
+// ************************************************************************
+
+static struct option const long_options[] =
+{
+ {"help", no_argument, NULL, 'h'},
+ {"processors", required_argument, NULL, 'p'},
+ {"length", required_argument, NULL, 'l'},
+ {"random", required_argument, NULL, 'r'},
+ {"trace_input", required_argument, NULL, 'z'},
+ {"component", required_argument, NULL, 'c'},
+ {"verbosity", required_argument, NULL, 'v'},
+ {"debug_output_file", required_argument, NULL, 'o'},
+ {"start", required_argument, NULL, 's'},
+ {"bandwidth", required_argument, NULL, 'b'},
+ {"threshold", required_argument, NULL, 't'},
+ {"think_time", required_argument, NULL, 'k'},
+ {"locks", required_argument, NULL, 'q'},
+ {"network", required_argument, NULL, 'n'},
+ {"procs_per_chip", required_argument, NULL, 'a'},
+ {"l2_caches", required_argument, NULL, 'e'},
+ {"memories", required_argument, NULL, 'm'},
+ {NULL, 0, NULL, 0}
+};
+
+static void parseOptions(int argc, char **argv)
+{
+ cout << "Parsing command line arguments:" << endl;
+
+ // construct the short arguments string
+ int counter = 0;
+ string short_options;
+ while (long_options[counter].name != NULL) {
+ short_options += char(long_options[counter].val);
+ if (long_options[counter].has_arg == required_argument) {
+ short_options += char(':');
+ }
+ counter++;
+ }
+
+ char c;
+ /* Parse command line options. */
+ bool error;
+ while ((c = getopt_long (argc, argv, short_options.c_str(), long_options, (int *) 0)) != EOF) {
+ switch (c) {
+ case 0:
+ break;
+
+ case 'c':
+ checkArg(c);
+ cout << " component filter string = " << optarg << endl;
+ error = Debug::checkFilterString( optarg );
+ if (error) {
+ usageInstructions();
+ }
+ DEBUG_FILTER_STRING = strdup( optarg );
+ break;
+
+ case 'h':
+ usageInstructions();
+ break;
+
+ case 'v':
+ checkArg(c);
+ cout << " verbosity string = " << optarg << endl;
+ error = Debug::checkVerbosityString(optarg);
+ if (error) {
+ usageInstructions();
+ }
+ DEBUG_VERBOSITY_STRING = strdup( optarg );
+ break;
+
+ case 'r': {
+ checkArg(c);
+ if (string(optarg) == "random") {
+ g_RANDOM_SEED = time(NULL);
+ } else {
+ g_RANDOM_SEED = atoi(optarg);
+ if (g_RANDOM_SEED == 0) {
+ usageInstructions();
+ }
+ }
+ break;
+ }
+
+ case 'l': {
+ checkArg(c);
+ g_tester_length = atoi(optarg);
+ cout << " length of run = " << g_tester_length << endl;
+ if (g_tester_length == 0) {
+ usageInstructions();
+ }
+ break;
+ }
+
+ case 'q': {
+ checkArg(c);
+ g_synthetic_locks = atoi(optarg);
+ cout << " locks in synthetic workload = " << g_synthetic_locks << endl;
+ if (g_synthetic_locks == 0) {
+ usageInstructions();
+ }
+ break;
+ }
+
+ case 'p': {
+ checkArg(c);
+ g_NUM_PROCESSORS = atoi(optarg);
+ break;
+ }
+
+ case 'a': {
+ checkArg(c);
+ g_PROCS_PER_CHIP = atoi(optarg);
+ cout << " g_PROCS_PER_CHIP: " << g_PROCS_PER_CHIP << endl;
+ break;
+ }
+
+ case 'e': {
+ checkArg(c);
+ g_NUM_L2_BANKS = atoi(optarg);
+ cout << " g_NUM_L2_BANKS: " << g_NUM_L2_BANKS << endl;
+ break;
+ }
+
+ case 'm': {
+ checkArg(c);
+ g_NUM_MEMORIES = atoi(optarg);
+ cout << " g_NUM_MEMORIES: " << g_NUM_MEMORIES << endl;
+ break;
+ }
+
+ case 's': {
+ checkArg(c);
+ long long start_time = atoll(optarg);
+ cout << " debug start cycle = " << start_time << endl;
+ if (start_time == 0) {
+ usageInstructions();
+ }
+ DEBUG_START_TIME = start_time;
+ break;
+ }
+
+ case 'b': {
+ checkArg(c);
+ int bandwidth = atoi(optarg);
+ cout << " bandwidth per link (MB/sec) = " << bandwidth << endl;
+ g_endpoint_bandwidth = bandwidth;
+ if (bandwidth == 0) {
+ usageInstructions();
+ }
+ break;
+ }
+
+ case 't': {
+ checkArg(c);
+ g_bash_bandwidth_adaptive_threshold = atof(optarg);
+ if ((g_bash_bandwidth_adaptive_threshold > 1.1) || (g_bash_bandwidth_adaptive_threshold < -0.1)) {
+ cerr << "Error: Bandwidth adaptive threshold must be between 0.0 and 1.0" << endl;
+ usageInstructions();
+ }
+
+ break;
+ }
+
+ case 'k': {
+ checkArg(c);
+ g_think_time = atoi(optarg);
+ break;
+ }
+
+ case 'o':
+ checkArg(c);
+ cout << " output file = " << optarg << endl;
+ DEBUG_OUTPUT_FILENAME = strdup( optarg );
+ break;
+
+ case 'z':
+ checkArg(c);
+ trace_filename = string(optarg);
+ cout << " tracefile = " << trace_filename << endl;
+ break;
+
+ case 'n':
+ checkArg(c);
+ cout << " topology = " << string(optarg) << endl;
+ g_NETWORK_TOPOLOGY = strdup(optarg);
+ break;
+
+ default:
+ cerr << "parameter '" << c << "' unknown" << endl;
+ usageInstructions();
+ }
+ }
+
+ if ((trace_filename != "") || (g_tester_length != 0)) {
+ if ((trace_filename != "") && (g_tester_length != 0)) {
+ cerr << "Error: both a run length (-l) and a trace file (-z) have been specified." << endl;
+ usageInstructions();
+ }
+ } else {
+ cerr << "Error: either run length (-l) must be > 0 or a trace file (-z) must be specified." << endl;
+ usageInstructions();
+ }
+}
+
+static void usageInstructions()
+{
+ cerr << endl;
+ cerr << "Options:" << endl;
+
+ // print options
+ int counter = 0;
+ while (long_options[counter].name != NULL) {
+ cerr << " -" << char(long_options[counter].val);
+ if (long_options[counter].has_arg == required_argument) {
+ cerr << " <arg>";
+ }
+ cerr << " --" << long_options[counter].name;
+ if (long_options[counter].has_arg == required_argument) {
+ cerr << " <arg>";
+ }
+ cerr << endl;
+ counter++;
+ }
+
+ cerr << "Option --processors (-p) is required." << endl;
+ cerr << "Either option --length (-l) or --trace_input (-z) must be specified." << endl;
+ cerr << endl;
+ g_debug_ptr->usageInstructions();
+ cerr << endl;
+
+ exit(1);
+}
+
+static void checkArg(char ch)
+{
+ if (optarg == NULL) {
+ cerr << "Error: parameter '" << ch << "' missing required argument" << endl;
+ usageInstructions();
+ }
+}
diff --git a/src/mem/ruby/tester/test_framework.hh b/src/mem/ruby/tester/test_framework.hh
new file mode 100644
index 000000000..7464cc274
--- /dev/null
+++ b/src/mem/ruby/tester/test_framework.hh
@@ -0,0 +1,46 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef TESTFRAMEWORK_H
+#define TESTFRAMEWORK_H
+
+#include "Global.hh"
+#include "mf_api.hh"
+
+void tester_main(int argc, char **argv);
+void tester_install_opal( mf_opal_api_t *opal_api, mf_ruby_api_t *ruby_api );
+
+#endif //TESTFRAMEWORK_H
diff --git a/src/mem/slicc/README b/src/mem/slicc/README
new file mode 100644
index 000000000..fb7f52dac
--- /dev/null
+++ b/src/mem/slicc/README
@@ -0,0 +1,114 @@
+Overview
+========
+This is SLICC, a domain specific language to specify cache coherence protocol
+we have developed in Multifacet group.
+
+It is developed by Milo Martin <milo@cs.wisc.edu>
+This document is prepared by Min Xu <mxu@cae.wisc.edu> while I am learning the
+system. With minor correctness updates by Brad Beckmann <beckmann@cs.wisc.edu>
+
+It can be used to generate C++ code that works with RUBY cache simulator as
+well as generate HTML and other document to describe the target protocol.
+
+Some user document is available in doc directory.
+
+Tech details
+============
+SLICC takes a text input with syntax similar to the C++ language and uses the lexer
+and parser in the parser directory to construct an Abstract Syntax Tree (AST)
+internally. After this first pass is done, the AST is traversed to fill
+several internal tables, such as the symbol table, type table, etc. Finally the code
+is generated by traversing the tree once again.
+
+Note, by Milo's good coding habit, almost all C++ class define their private
+copy/assignment constructor. This prevents accidentally copying/assigning an
+object by its address.
+
+The AST basically looks like a hierarchical representation of the text input.
+At the highest level, it has the "Machine", each Machine has several "states"
+and "events" and "actions" and "transitions".
+
+Since the language is domain specific, many assumptions about the target system are
+hardcoded in SLICC. For example, ruby would expect the generated code for each
+system node, has the following components:
+ processor(sequencer, not generated?)
+ cache
+ directory (with memory block value, only when compiled with tester)
+ network interface (NI)
+
+Directory generator/ contains routines to generate HTML/MIF format output.
+fileio.[Ch] has a routine to conditionally write a file only when the original
+content of the file is different from what is going to be written; this avoids
+re-making those files after regenerating the protocol. html_gen.[Ch] contains the
+symbol name munge and index page generation. mif_gen.[Ch] contains the entire
+MIF output generation routine, mainly a table buildup.
+
+Directory symbol/ contains classes to represent symbols in the slicc input
+file. Base class is "Symbol". Derived subclasses are "Action Event Func State
+StateMachine Transition Type Var". "Symbol" has knowledge about its locations
+in the source file and short name, long name. "SymbolTable" is a list of
+symbols and g_sym_table is the global SymbolTable of the slicc system.
+One can query a SymbolTable by symbol's id. Also SymbolTable is responsible for
+keeping track of Symbol's declaration in correct scope. The current
+implementation uses a stack which dynamically determines the scope of symbol
+lookups. Global scope is at bottom of the stack (vector[0]). SymbolTable is
+also the main place to write out the generated C++/HTML/MIF files.
+SymbolTable::writeNodeFiles() is one of the place to look for hardcoded C++
+code for node.[Ch]. And Type.[Ch] is the place where generating enumeration and
+Message/NetworkMessage declaration and implementation. Func.[Ch] is used to
+generate function of the class Chip. StateMachine.[Ch] wrap the whole thing
+up by putting States, Actions, Events together. It actually has a two dimension
+table like the one represented in the HTML output. Actions are indexed with
+the initial state and observed event. After the table is built, the
+StateMachine class can write out Transitions/Controller/wakeup_logic into C++
+outputs. Finally, in the symbol directory, Var.[Ch] seems to be incomplete?
+
+Demystify all those "predefined" external types, like "Address". Where are
+they defined? They are in ../protocol/RubySlicc-*.sm and
+../protocol/RubySlicc_interfaces.slicc is include in the slicc invocation
+command in ../ruby/Makefile.
+
+Another myth: "trigger" method is hardcoded in ast/InPortDeclAST.C and
+ast/FuncCallExprAST.C. The function is similar to inlined function in the
+output generated code, so you cannot find any occurrence of the string "trigger" in
+the generated code. "trigger" also increment a counter that is checked every
+time a transition is done. In one ruby cycle, only TRANSITIONS_PER_RUBY_CYCLE
+number of transitions can be done. ast/FuncCallExprAST.C also contains some
+code for function "error" and "assert" and "DEBUG_EXPR", all in the same
+manner. Ruby always issues transitions from the first port while there is any.
+Stalled transition in Ruby does not consume a sub-cycle. This models the
+hardware that probe all port in parallel, pick one transition from the highest
+priority queue if the transition was not stalled by any resource constraint.
+
+Another note: scheduleEvent() call of ruby make sure a consumer is woken up at
+specified cycle, and only once per cycle.
+
+Action z_stall, where is it? It is hardcoded in symbols/StateMachine.C. In
+function StateMachine::printCSwitch(), z_stall cause the generated code return
+TransitionResult_ProtocolStall. Also the HTML output for z_stall has to be
+consequently hardcoded. I am not sure that's really a good idea or not. :-)
+
+Question: How come there is no "for" loop statement in slicc?
+Answer: Been there, done that. That is easy to add, first of all. But unbounded
+loops make slicc eventually un-synthesizable. We want to avoid that. If you want
+to loop through a bounded array to do something, implement the action in an
+external interface in RubySlicc_Util.h. Inside, you just pass the vector as
+parameter to the external interface to achieve the same effects.
+
+Another drawback of using a loop statement like for is that we cannot determine
+how much buffer space to allocate before the transition. With a vector, it is
+easy to understand that we can always allocate the worst-case number of hardware
+resources.
+
+Question: Wait! It seems statement check_allocate does nothing!
+Answer: No, it does call areNSoltsAvailable() function of the object before any
+statement is executed in one action. It does *NOT* generate code in its
+original place in the code, instead, it scan the body of the action code and
+determine how many slots need to be allocated beforehand. So the
+transaction is either all done or nothing done. I had tried to make all actions return
+boolean values and the false return cause a transition to abort with
+ResourceStall. But it is later on deemed to be too flexible in its semantics.
+We should never introduce control flow inside the transitions, so that each
+transition is either "all" or "nothing". Just that simple. BTW, if you call
+check_allocate twice, areNSoltsAvailable(2) is generated, three times generates
+areNSoltsAvailable(3), etc.
diff --git a/src/mem/slicc/ast/AST.cc b/src/mem/slicc/ast/AST.cc
new file mode 100644
index 000000000..9342cd2e8
--- /dev/null
+++ b/src/mem/slicc/ast/AST.cc
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * AST.C
+ *
+ * Description: See AST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "AST.hh"
diff --git a/src/mem/slicc/ast/AST.hh b/src/mem/slicc/ast/AST.hh
new file mode 100644
index 000000000..b20bbebe2
--- /dev/null
+++ b/src/mem/slicc/ast/AST.hh
@@ -0,0 +1,94 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * AST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef AST_H
+#define AST_H
+
+#include "slicc_global.hh"
+#include "Vector.hh"
+#include "Map.hh"
+#include "Location.hh"
+#include "SymbolTable.hh"
+
+class AST {
+public:
+ // Constructors
+ AST(Map<string, string> pairs) { m_pairs = pairs; };
+ AST() {};
+
+ // Destructor
+ virtual ~AST() {};
+
+ // Public Methods
+ virtual void print(ostream& out) const = 0;
+ void error(string err_msg) const { m_location.error(err_msg); };
+ string embedError(string err_msg) const { return m_location.embedError(err_msg); };
+ void warning(string err_msg) const { m_location.warning(err_msg); };
+
+ const Location& getLocation() const { return m_location; };
+
+ const Map<string, string>& getPairs() const { return m_pairs; };
+ Map<string, string>& getPairs() { return m_pairs; };
+
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // AST(const AST& obj);
+ // AST& operator=(const AST& obj);
+
+ // Data Members (m_ prefix)
+ Location m_location;
+ Map<string, string> m_pairs;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const AST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const AST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //AST_H
diff --git a/src/mem/slicc/ast/ASTs.hh b/src/mem/slicc/ast/ASTs.hh
new file mode 100644
index 000000000..77b055a80
--- /dev/null
+++ b/src/mem/slicc/ast/ASTs.hh
@@ -0,0 +1,90 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#ifndef ASTs_H
+#define ASTs_H
+
+#include "slicc_global.hh"
+#include "main.hh"
+#include "StateMachine.hh"
+#include "AST.hh"
+
+#include "MachineAST.hh"
+
+#include "TypeAST.hh"
+#include "FormalParamAST.hh"
+
+#include "DeclListAST.hh"
+#include "DeclAST.hh"
+#include "ActionDeclAST.hh"
+#include "InPortDeclAST.hh"
+#include "OutPortDeclAST.hh"
+#include "TransitionDeclAST.hh"
+#include "EnumDeclAST.hh"
+#include "TypeDeclAST.hh"
+#include "ObjDeclAST.hh"
+#include "FuncDeclAST.hh"
+
+#include "TypeFieldAST.hh"
+#include "TypeFieldMethodAST.hh"
+#include "TypeFieldMemberAST.hh"
+#include "TypeFieldEnumAST.hh"
+
+#include "PairAST.hh"
+#include "PairListAST.hh"
+
+#include "ExprAST.hh"
+#include "VarExprAST.hh"
+#include "EnumExprAST.hh"
+#include "LiteralExprAST.hh"
+#include "MemberExprAST.hh"
+#include "InfixOperatorExprAST.hh"
+#include "FuncCallExprAST.hh"
+#include "MethodCallExprAST.hh"
+
+#include "ChipComponentAccessAST.hh"
+
+#include "StatementListAST.hh"
+#include "StatementAST.hh"
+#include "ExprStatementAST.hh"
+#include "AssignStatementAST.hh"
+#include "EnqueueStatementAST.hh"
+#include "IfStatementAST.hh"
+#include "PeekStatementAST.hh"
+#include "CopyHeadStatementAST.hh"
+#include "CheckAllocateStatementAST.hh"
+#include "CheckStopSlotsStatementAST.hh"
+#include "ReturnStatementAST.hh"
+
+#endif //ASTs_H
diff --git a/src/mem/slicc/ast/ActionDeclAST.cc b/src/mem/slicc/ast/ActionDeclAST.cc
new file mode 100644
index 000000000..6514b9afd
--- /dev/null
+++ b/src/mem/slicc/ast/ActionDeclAST.cc
@@ -0,0 +1,96 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ActionDeclAST.C
+ *
+ * Description: See ActionDeclAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "ActionDeclAST.hh"
+#include "Action.hh"
+
+ActionDeclAST::ActionDeclAST(string* ident_ptr,
+ PairListAST* pairs_ptr,
+ StatementListAST* statement_list_ptr)
+ : DeclAST(pairs_ptr)
+{
+ m_ident_ptr = ident_ptr;
+ m_statement_list_ptr = statement_list_ptr;
+}
+
+ActionDeclAST::~ActionDeclAST()
+{
+ delete m_ident_ptr;
+ delete m_statement_list_ptr;
+}
+
+void ActionDeclAST::generate()
+{
+ Map<Var*, string> resource_list;
+ if (m_statement_list_ptr != NULL) {
+ string code;
+
+ // Add new local vars
+ g_sym_table.pushFrame();
+
+ Type* type_ptr = g_sym_table.getType("Address");
+
+ if (type_ptr == NULL) {
+ error("Type 'Address' not declared.");
+ }
+
+ g_sym_table.newSym(new Var("address", getLocation(), type_ptr, "addr", getPairs()));
+
+ // Don't allows returns in actions
+ m_statement_list_ptr->generate(code, NULL);
+
+ getPairs().add("c_code", code);
+
+ m_statement_list_ptr->findResources(resource_list);
+
+ g_sym_table.popFrame();
+ }
+
+ StateMachine* machine_ptr = g_sym_table.getStateMachine();
+ if (machine_ptr == NULL) {
+ error("Action declaration not part of a machine.");
+ } else {
+ machine_ptr->addAction(new Action(*m_ident_ptr, resource_list, getLocation(), getPairs()));
+ }
+
+}
+
+void ActionDeclAST::print(ostream& out) const
+{
+ out << "[ActionDecl: " << *m_ident_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/ActionDeclAST.hh b/src/mem/slicc/ast/ActionDeclAST.hh
new file mode 100644
index 000000000..cc020a95f
--- /dev/null
+++ b/src/mem/slicc/ast/ActionDeclAST.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ActionDeclAST.h
+ *
+ * Description:
+ *
+ * $Id: ActionDeclAST.h,v 3.2 2003/07/10 18:08:06 milo Exp $
+ *
+ */
+
+#ifndef ActionDeclAST_H
+#define ActionDeclAST_H
+
+#include "slicc_global.hh"
+#include "DeclAST.hh"
+#include "StatementListAST.hh"
+
+class ActionDeclAST : public DeclAST {
+public:
+ // Constructors
+ ActionDeclAST(string* ident_ptr,
+ PairListAST* pairs_ptr,
+ StatementListAST* statement_list_ptr);
+
+ // Destructor
+ ~ActionDeclAST();
+
+ // Public Methods
+ void generate();
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ ActionDeclAST(const ActionDeclAST& obj);
+ ActionDeclAST& operator=(const ActionDeclAST& obj);
+
+ // Data Members (m_ prefix)
+ string* m_ident_ptr;
+ StatementListAST* m_statement_list_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const ActionDeclAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const ActionDeclAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //ActionDeclAST_H
diff --git a/src/mem/slicc/ast/AssignStatementAST.cc b/src/mem/slicc/ast/AssignStatementAST.cc
new file mode 100644
index 000000000..2d72e583e
--- /dev/null
+++ b/src/mem/slicc/ast/AssignStatementAST.cc
@@ -0,0 +1,76 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * AssignStatementAST.C
+ *
+ * Description: See AssignStatementAST.h
+ *
+ * $Id: AssignStatementAST.C,v 3.2 2003/08/01 18:38:19 beckmann Exp $
+ *
+ */
+
+#include "AssignStatementAST.hh"
+
+AssignStatementAST::AssignStatementAST(ExprAST* lvalue_ptr, ExprAST* rvalue_ptr)
+ : StatementAST()
+{
+ m_lvalue_ptr = lvalue_ptr;
+ m_rvalue_ptr = rvalue_ptr;
+}
+
+AssignStatementAST::~AssignStatementAST()
+{
+ delete m_lvalue_ptr;
+ delete m_rvalue_ptr;
+}
+
+void AssignStatementAST::generate(string& code, Type* return_type_ptr) const
+{
+ code += indent_str();
+ Type* lvalue_type_ptr = m_lvalue_ptr->generate(code);
+ code += " = ";
+ Type* rvalue_type_ptr = m_rvalue_ptr->generate(code);
+ code += ";\n";
+
+ if (lvalue_type_ptr != rvalue_type_ptr) {
+ // FIXME - beckmann
+ // the following if statement is a hack to allow NetDest objects to be assigned to Sets
+ // this allows for the previous NetworkMessage Destiantion 'Set class' to migrate to the
+ // new NetworkMessage Destiantion 'NetDest class'
+ if (lvalue_type_ptr->toString() != "NetDest" && rvalue_type_ptr->toString() != "Set") {
+ error("Assignment type mismatch '" + lvalue_type_ptr->toString() + "' and '" + rvalue_type_ptr->toString() + "'");
+ }
+ }
+}
+
+void AssignStatementAST::print(ostream& out) const
+{
+ out << "[AssignStatementAST: " << *m_lvalue_ptr << " := " << *m_rvalue_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/AssignStatementAST.hh b/src/mem/slicc/ast/AssignStatementAST.hh
new file mode 100644
index 000000000..c249c8a75
--- /dev/null
+++ b/src/mem/slicc/ast/AssignStatementAST.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * AssignStatementAST.h
+ *
+ * Description:
+ *
+ * $Id: AssignStatementAST.h,v 3.2 2001/12/12 01:00:09 milo Exp $
+ *
+ */
+
+#ifndef ASSIGNSTATEMENTAST_H
+#define ASSIGNSTATEMENTAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+#include "ExprAST.hh"
+
+
+
+class AssignStatementAST : public StatementAST {
+public:
+ // Constructors
+ AssignStatementAST(ExprAST* lvalue_ptr, ExprAST* rvalue_ptr);
+
+ // Destructor
+ ~AssignStatementAST();
+
+ // Public Methods
+ void generate(string& code, Type* return_type_ptr) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ AssignStatementAST(const AssignStatementAST& obj);
+ AssignStatementAST& operator=(const AssignStatementAST& obj);
+
+ // Data Members (m_ prefix)
+ ExprAST* m_lvalue_ptr;
+ ExprAST* m_rvalue_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const AssignStatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const AssignStatementAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //ASSIGNSTATEMENTAST_H
diff --git a/src/mem/slicc/ast/CheckAllocateStatementAST.cc b/src/mem/slicc/ast/CheckAllocateStatementAST.cc
new file mode 100644
index 000000000..25fd4d2e7
--- /dev/null
+++ b/src/mem/slicc/ast/CheckAllocateStatementAST.cc
@@ -0,0 +1,72 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "CheckAllocateStatementAST.hh"
+#include "SymbolTable.hh"
+#include "VarExprAST.hh"
+#include "util.hh"
+
+CheckAllocateStatementAST::CheckAllocateStatementAST(VarExprAST* variable)
+ : StatementAST()
+{
+ m_variable = variable;
+}
+
+CheckAllocateStatementAST::~CheckAllocateStatementAST()
+{
+ delete m_variable;
+}
+
+void CheckAllocateStatementAST::generate(string& code, Type* return_type_ptr) const
+{
+ // FIXME - check the type of the variable
+
+ // Make sure the variable is valid
+ m_variable->getVar();
+}
+
+void CheckAllocateStatementAST::findResources(Map<Var*, string>& resource_list) const
+{
+ Var* var_ptr = m_variable->getVar();
+ int res_count = 0;
+ if (resource_list.exist(var_ptr)) {
+ res_count = atoi((resource_list.lookup(var_ptr)).c_str());
+ }
+ resource_list.add(var_ptr, int_to_string(res_count+1));
+}
+
+void CheckAllocateStatementAST::print(ostream& out) const
+{
+ out << "[CheckAllocateStatementAst: " << *m_variable << "]";
+}
diff --git a/src/mem/slicc/ast/CheckAllocateStatementAST.hh b/src/mem/slicc/ast/CheckAllocateStatementAST.hh
new file mode 100644
index 000000000..8df08a086
--- /dev/null
+++ b/src/mem/slicc/ast/CheckAllocateStatementAST.hh
@@ -0,0 +1,82 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#ifndef CHECKALLOCATESTATEMENTAST_H
+#define CHECKALLOCATESTATEMENTAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+#include "TypeAST.hh"
+
+class VarExprAST;
+class Var;
+
+class CheckAllocateStatementAST : public StatementAST {
+public:
+ // Constructors
+ CheckAllocateStatementAST(VarExprAST* variable);
+
+ // Destructor
+ ~CheckAllocateStatementAST();
+
+ // Public Methods
+ void generate(string& code, Type* return_type_ptr) const;
+ void findResources(Map<Var*, string>& resource_list) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ CheckAllocateStatementAST(const CheckAllocateStatementAST& obj);
+ CheckAllocateStatementAST& operator=(const CheckAllocateStatementAST& obj);
+
+ // Data Members (m_ prefix)
+ VarExprAST* m_variable;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const CheckAllocateStatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const CheckAllocateStatementAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //CHECKALLOCATESTATEMENTAST_H
diff --git a/src/mem/slicc/ast/CheckStopSlotsStatementAST.cc b/src/mem/slicc/ast/CheckStopSlotsStatementAST.cc
new file mode 100644
index 000000000..f102e8894
--- /dev/null
+++ b/src/mem/slicc/ast/CheckStopSlotsStatementAST.cc
@@ -0,0 +1,115 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "CheckStopSlotsStatementAST.hh"
+#include "SymbolTable.hh"
+#include "VarExprAST.hh"
+#include "PairListAST.hh"
+
+CheckStopSlotsStatementAST::CheckStopSlotsStatementAST(VarExprAST* variable, string* condStr, string* bankStr)
+ : StatementAST()
+{
+ m_variable = variable;
+ m_condStr_ptr = condStr;
+ m_bankStr_ptr = bankStr;
+}
+
+CheckStopSlotsStatementAST::~CheckStopSlotsStatementAST()
+{
+ delete m_variable;
+ delete m_condStr_ptr;
+ delete m_bankStr_ptr;
+}
+
+void CheckStopSlotsStatementAST::generate(string& code, Type* return_type_ptr) const
+{
+
+ // Make sure the variable is valid
+ m_variable->getVar();
+
+}
+
+void CheckStopSlotsStatementAST::findResources(Map<Var*, string>& resource_list) const
+{
+ Type* type_ptr;
+
+ Var* var_ptr = m_variable->getVar();
+ string check_code;
+
+ if (*m_condStr_ptr == "((*in_msg_ptr)).m_isOnChipSearch") {
+ check_code += " const Response9Msg* in_msg_ptr;\n";
+ check_code += " in_msg_ptr = dynamic_cast<const Response9Msg*>(((*(m_chip_ptr->m_L2Cache_responseToL2Cache9_vec[m_version]))).peek());\n";
+ check_code += " assert(in_msg_ptr != NULL);\n";
+ }
+
+ check_code += " if (";
+ check_code += *m_condStr_ptr;
+ check_code += ") {\n";
+
+ check_code += " if (!";
+ type_ptr = m_variable->generate(check_code);
+ check_code += ".isDisableSPossible((((*(m_chip_ptr->m_DNUCAmover_ptr))).getBankPos(";
+ check_code += *m_bankStr_ptr;
+ check_code += ")))) {\n";
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ check_code += " assert(priority >= ";
+ type_ptr = m_variable->generate(check_code);
+ check_code += ".getPriority());\n";
+ }
+ check_code += " return TransitionResult_ResourceStall;\n";
+ check_code += " }\n";
+ check_code += " } else {\n";
+ check_code += " if (!";
+ type_ptr = m_variable->generate(check_code);
+ check_code += ".isDisableFPossible((((*(m_chip_ptr->m_DNUCAmover_ptr))).getBankPos(";
+ check_code += *m_bankStr_ptr;
+ check_code += ")))) {\n";
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ check_code += " assert(priority >= ";
+ type_ptr = m_variable->generate(check_code);
+ check_code += ".getPriority());\n";
+ }
+ check_code += " return TransitionResult_ResourceStall;\n";
+ check_code += " }\n";
+ check_code += " }\n";
+
+ assert(!resource_list.exist(var_ptr));
+ resource_list.add(var_ptr, check_code);
+
+}
+
+void CheckStopSlotsStatementAST::print(ostream& out) const
+{
+ out << "[CheckStopSlotsStatementAst: " << *m_variable << "]";
+}
diff --git a/src/mem/slicc/ast/CheckStopSlotsStatementAST.hh b/src/mem/slicc/ast/CheckStopSlotsStatementAST.hh
new file mode 100644
index 000000000..131b8cf4d
--- /dev/null
+++ b/src/mem/slicc/ast/CheckStopSlotsStatementAST.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#ifndef CHECKSTOPSLOTSSTATEMENTAST_H
+#define CHECKSTOPSLOTSSTATEMENTAST_H
+
+#include "slicc_global.hh"
+#include "ExprAST.hh"
+#include "StatementAST.hh"
+#include "TypeAST.hh"
+
+class VarExprAST;
+class Var;
+
+// AST node for the "check_stop_slots" statement: its findResources()
+// emits guard code that makes a transition return
+// TransitionResult_ResourceStall when the selected bank's stop slots
+// cannot be disabled (see CheckStopSlotsStatementAST.cc).
+class CheckStopSlotsStatementAST : public StatementAST {
+public:
+ // Constructors
+ CheckStopSlotsStatementAST(VarExprAST* variable, string* condStr, string* bankStr);
+
+ // Destructor
+ ~CheckStopSlotsStatementAST();
+
+ // Public Methods
+ void generate(string& code, Type* return_type_ptr) const;
+ void findResources(Map<Var*, string>& resource_list) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ CheckStopSlotsStatementAST(const CheckStopSlotsStatementAST& obj);
+ CheckStopSlotsStatementAST& operator=(const CheckStopSlotsStatementAST& obj);
+
+ // Data Members (m_ prefix)
+ VarExprAST* m_variable;   // variable whose generated code is checked
+ string* m_condStr_ptr;    // textual condition selecting the S/F branch
+ string* m_bankStr_ptr;    // textual expression passed to getBankPos()
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const CheckStopSlotsStatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const CheckStopSlotsStatementAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //CHECKSTOPSLOTSSTATEMENTAST_H
diff --git a/src/mem/slicc/ast/ChipComponentAccessAST.cc b/src/mem/slicc/ast/ChipComponentAccessAST.cc
new file mode 100644
index 000000000..1ba1b98f2
--- /dev/null
+++ b/src/mem/slicc/ast/ChipComponentAccessAST.cc
@@ -0,0 +1,244 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ChipComponentAccessAST.C
+ *
+ * Description: See ChipComponentAccessAST.h
+ *
+ * $Id: ChipComponentAccessAST.C 1.9 04/06/18 21:00:08-00:00 beckmann@cottons.cs.wisc.edu $
+ *
+ */
+
+#include "ChipComponentAccessAST.hh"
+
+// Method call on a component of the local chip (no explicit chip version).
+ChipComponentAccessAST::ChipComponentAccessAST(VarExprAST* machine, ExprAST* mach_version, VarExprAST* component, string* proc_name, Vector<ExprAST*>* expr_vec_ptr)
+  : ExprAST()
+{
+  m_mach_var_ptr = machine;
+  m_mach_ver_expr_ptr = mach_version;
+  m_comp_var_ptr = component;
+  m_proc_name_ptr = proc_name;
+  m_expr_vec_ptr = expr_vec_ptr;
+  // Not used by this (local-chip, method-call) form.
+  m_chip_ver_expr_ptr = NULL;
+  m_field_name_ptr = NULL;
+}
+
+// Data-member access on a component of the local chip.
+ChipComponentAccessAST::ChipComponentAccessAST(VarExprAST* machine, ExprAST* mach_version, VarExprAST* component, string* field_name)
+  : ExprAST()
+{
+  m_mach_var_ptr = machine;
+  m_mach_ver_expr_ptr = mach_version;
+  m_comp_var_ptr = component;
+  m_field_name_ptr = field_name;
+  // Not used by this (local-chip, field-access) form.
+  m_chip_ver_expr_ptr = NULL;
+  m_expr_vec_ptr = NULL;
+  m_proc_name_ptr = NULL;
+}
+
+// Method call on a component of an explicitly specified chip.
+ChipComponentAccessAST::ChipComponentAccessAST(ExprAST* chip_version, VarExprAST* machine, ExprAST* mach_version, VarExprAST* component, string* proc_name, Vector<ExprAST*>* expr_vec_ptr)
+  : ExprAST()
+{
+  m_chip_ver_expr_ptr = chip_version;
+  m_mach_var_ptr = machine;
+  m_mach_ver_expr_ptr = mach_version;
+  m_comp_var_ptr = component;
+  m_proc_name_ptr = proc_name;
+  m_expr_vec_ptr = expr_vec_ptr;
+  // The method-call form carries no field name.
+  m_field_name_ptr = NULL;
+}
+
+// Data-member access on a component of an explicitly specified chip.
+ChipComponentAccessAST::ChipComponentAccessAST(ExprAST* chip_version, VarExprAST* machine, ExprAST* mach_version, VarExprAST* component, string* field_name)
+  : ExprAST()
+{
+  m_chip_ver_expr_ptr = chip_version;
+  m_mach_var_ptr = machine;
+  m_mach_ver_expr_ptr = mach_version;
+  m_comp_var_ptr = component;
+  m_field_name_ptr = field_name;
+  // The field-access form carries no method name or argument list.
+  m_expr_vec_ptr = NULL;
+  m_proc_name_ptr = NULL;
+}
+
+
+
+// Destructor: this node owns its child expressions and name strings.
+// BUG FIX: the argument Vector's elements were freed but the Vector
+// container itself was leaked; it is now deleted as well (matching
+// DeclListAST::~DeclListAST).  'delete' on a NULL pointer is a no-op,
+// so the optional members need no explicit guards.
+ChipComponentAccessAST::~ChipComponentAccessAST()
+{
+  if (m_expr_vec_ptr != NULL) {
+    int size = m_expr_vec_ptr->size();
+    for(int i=0; i<size; i++) {
+      delete (*m_expr_vec_ptr)[i];
+    }
+    delete m_expr_vec_ptr;  // free the container, not just its elements
+  }
+
+  delete m_mach_var_ptr;
+  delete m_comp_var_ptr;
+  delete m_mach_ver_expr_ptr;
+  delete m_proc_name_ptr;
+  delete m_field_name_ptr;
+  delete m_chip_ver_expr_ptr;
+}
+
+// Generate the C++ expression that accesses a component of a machine on
+// this or another chip.  The component's canonical code (from the symbol
+// table) contains the tokens "m_chip_ptr" and "m_version"; these are
+// textually rewritten to the requested chip and machine version.
+// Returns the type of the resulting method call or field access.
+Type* ChipComponentAccessAST::generate(string& code) const
+{
+  Type* ret_type_ptr;
+
+  code += "(";
+
+  Var* v = g_sym_table.getMachComponentVar(m_mach_var_ptr->getName(), m_comp_var_ptr->getName());
+
+  string orig_code = v->getCode();
+  string working_code;
+
+  if (m_chip_ver_expr_ptr != NULL) {
+    // replace m_chip_ptr with specified chip
+    // BUG FIX: string::find returns string::size_type; storing the result
+    // in an unsigned int truncated string::npos on 64-bit hosts, so the
+    // assert below could never detect a failed search.
+    string::size_type t = orig_code.find("m_chip_ptr");
+    assert(t != string::npos);
+    string code_temp0 = orig_code.substr(0, t);
+    string code_temp1 = orig_code.substr(t+10);  // 10 == strlen("m_chip_ptr")
+
+    working_code += code_temp0;
+    working_code += "g_system_ptr->getChip(";
+    m_chip_ver_expr_ptr->generate(working_code);
+    working_code += ")";
+    working_code += code_temp1;
+  }
+  else {
+    working_code += orig_code;
+  }
+
+  // replace default "m_version" with the version we really want
+  string::size_type tmp_uint = working_code.find("m_version");
+  assert(tmp_uint != string::npos);
+  string code_temp2 = working_code.substr(0, tmp_uint);
+  string code_temp3 = working_code.substr(tmp_uint+9);  // 9 == strlen("m_version")
+
+  code += code_temp2;
+  code += "(";
+  m_mach_ver_expr_ptr->generate(code);
+  code += ")";
+  code += code_temp3;
+  code += ")";
+
+  if (m_proc_name_ptr != NULL) {
+    // method call
+    code += ".";
+
+    Vector <Type*> paramTypes;
+
+    // generate code for each actual argument, collecting its type
+    int actual_size = m_expr_vec_ptr->size();
+    code += (*m_proc_name_ptr) + "(";
+    for(int i=0; i<actual_size; i++) {
+      if (i != 0) {
+        code += ", ";
+      }
+      Type* actual_type_ptr = (*m_expr_vec_ptr)[i]->generate(code);
+      paramTypes.insertAtBottom(actual_type_ptr);
+    }
+    code += ")";
+
+    Type* obj_type_ptr = v->getType();
+    string methodId = obj_type_ptr->methodId(*m_proc_name_ptr, paramTypes);
+
+    // Verify that this is a method of the object
+    if (!obj_type_ptr->methodExist(methodId)) {
+      error("Invalid method call: Type '" + obj_type_ptr->toString() + "' does not have a method '" + methodId + "'");
+    }
+
+    // Verify the argument count matches the declaration
+    int expected_size = obj_type_ptr->methodParamType(methodId).size();
+    if (actual_size != expected_size) {
+      ostringstream err;
+      err << "Wrong number of parameters for function name: '" << *m_proc_name_ptr << "'";
+      err << ", expected: ";
+      err << expected_size;
+      err << ", actual: ";
+      err << actual_size;
+      error(err.str());
+    }
+
+    // Verify each argument's type against the declaration
+    for(int i=0; i<actual_size; i++) {
+      Type* actual_type_ptr = paramTypes[i];
+      Type* expected_type_ptr = obj_type_ptr->methodParamType(methodId)[i];
+      if (actual_type_ptr != expected_type_ptr) {
+        (*m_expr_vec_ptr)[i]->error("Type mismatch: expected: " + expected_type_ptr->toString() +
+                                    " actual: " + actual_type_ptr->toString());
+      }
+    }
+
+    // Return the return type of the method
+    ret_type_ptr = obj_type_ptr->methodReturnType(methodId);
+  }
+  else if (m_field_name_ptr != NULL) {
+    Type* obj_type_ptr = v->getType();
+    code += ").m_" + (*m_field_name_ptr);
+
+    // Verify that this is a valid field name for this type
+    if (!obj_type_ptr->dataMemberExist(*m_field_name_ptr)) {
+      error("Invalid object field: Type '" + obj_type_ptr->toString() + "' does not have data member " + *m_field_name_ptr);
+    }
+
+    // Return the type of the field
+    ret_type_ptr = obj_type_ptr->dataMemberType(*m_field_name_ptr);
+  }
+  else {
+    assert(0);  // constructors guarantee one of proc_name/field_name is set
+  }
+
+  return ret_type_ptr;
+}
+
+// Intentionally empty: a chip component access registers no entries in
+// resource_list.
+void ChipComponentAccessAST::findResources(Map<Var*, string>& resource_list) const
+{
+
+}
+
+// Print a one-line debug description of this node.
+// BUG FIX: m_expr_vec_ptr is NULL for the field-access constructor forms
+// (only proc-call forms carry an argument vector), so the original
+// unconditional dereference crashed when printing a field access.
+void ChipComponentAccessAST::print(ostream& out) const
+{
+  if (m_expr_vec_ptr != NULL) {
+    out << "[ChipAccessExpr: " << *m_expr_vec_ptr << "]";
+  } else if (m_field_name_ptr != NULL) {
+    out << "[ChipAccessExpr: " << *m_field_name_ptr << "]";
+  } else {
+    out << "[ChipAccessExpr]";
+  }
+}
diff --git a/src/mem/slicc/ast/ChipComponentAccessAST.hh b/src/mem/slicc/ast/ChipComponentAccessAST.hh
new file mode 100644
index 000000000..039fece2b
--- /dev/null
+++ b/src/mem/slicc/ast/ChipComponentAccessAST.hh
@@ -0,0 +1,101 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ *
+ *
+ * Description:
+ *
+ * $Id: ChipComponentAccessAST.h 1.8 04/06/18 21:00:08-00:00 beckmann@cottons.cs.wisc.edu $
+ *
+ */
+
+#ifndef ChipComponentAccessAST_H
+#define ChipComponentAccessAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+#include "ExprAST.hh"
+#include "VarExprAST.hh"
+#include "TypeAST.hh"
+
+// AST node for an expression that accesses a component of a machine on
+// the local or an explicitly named chip — either a method call (with an
+// argument vector) or a data-member access (with a field name).  See
+// ChipComponentAccessAST.cc for code generation.
+class ChipComponentAccessAST : public ExprAST {
+public:
+ // Constructors
+
+ // method call from local chip
+ ChipComponentAccessAST(VarExprAST* machine, ExprAST* mach_version, VarExprAST* component, string* proc_name, Vector<ExprAST*>* expr_vec_ptr);
+ // member access from local chip
+ ChipComponentAccessAST(VarExprAST* machine, ExprAST* mach_version, VarExprAST* component, string* field_name);
+
+ // method call from specified chip
+ ChipComponentAccessAST(ExprAST* chip_version, VarExprAST* machine, ExprAST* mach_version, VarExprAST* component, string* proc_name, Vector<ExprAST*>* expr_vec_ptr);
+
+ // member access from specified chip
+ ChipComponentAccessAST(ExprAST* chip_version, VarExprAST* machine, ExprAST* mach_version, VarExprAST* component, string* field_name);
+
+ // Destructor
+ ~ChipComponentAccessAST();
+
+ // Public Methods
+ Type* generate(string& code) const;
+ void findResources(Map<Var*, string>& resource_list) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ ChipComponentAccessAST(const ChipComponentAccessAST& obj);
+ ChipComponentAccessAST& operator=(const ChipComponentAccessAST& obj);
+
+ // Data Members (m_ prefix)
+ VarExprAST* m_mach_var_ptr;    // machine being accessed
+ VarExprAST* m_comp_var_ptr;    // component within that machine
+ ExprAST* m_mach_ver_expr_ptr;  // machine version expression
+ ExprAST* m_chip_ver_expr_ptr;  // chip version; NULL => local chip
+ Vector<ExprAST*>* m_expr_vec_ptr; // call arguments; NULL for field access
+ string* m_proc_name_ptr;       // method name; NULL for field access
+ string* m_field_name_ptr;      // field name; NULL for method call
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const ChipComponentAccessAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const ChipComponentAccessAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif // ChipComponentAccessAST_H
diff --git a/src/mem/slicc/ast/CopyHeadStatementAST.cc b/src/mem/slicc/ast/CopyHeadStatementAST.cc
new file mode 100644
index 000000000..40e61dc07
--- /dev/null
+++ b/src/mem/slicc/ast/CopyHeadStatementAST.cc
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "CopyHeadStatementAST.hh"
+#include "SymbolTable.hh"
+#include "VarExprAST.hh"
+#include "util.hh"
+
+// Construct a copy_head statement over an input and an output queue;
+// the pair list (e.g. the "latency" pair) is stored by the base class.
+CopyHeadStatementAST::CopyHeadStatementAST(VarExprAST* in_queue_ptr,
+                                           VarExprAST* out_queue_ptr,
+                                           PairListAST* pairs_ptr)
+  : StatementAST(pairs_ptr->getPairs())
+{
+  m_out_queue_ptr = out_queue_ptr;
+  m_in_queue_ptr = in_queue_ptr;
+}
+
+// Release both owned queue expressions.
+CopyHeadStatementAST::~CopyHeadStatementAST()
+{
+  delete m_out_queue_ptr;
+  delete m_in_queue_ptr;
+}
+
+// Generate C++ that enqueues a copy of the input port's head message
+// onto the output port.  The enqueue latency comes from the statement's
+// "latency" pair when present, otherwise the COPY_HEAD_LATENCY constant.
+void CopyHeadStatementAST::generate(string& code, Type* return_type_ptr) const
+{
+ // Both operands must be ports of the expected direction.
+ m_in_queue_ptr->assertType("InPort");
+ m_out_queue_ptr->assertType("OutPort");
+
+ code += indent_str();
+ code += m_out_queue_ptr->getVar()->getCode() + ".enqueue(" + m_in_queue_ptr->getVar()->getCode() + ".getMsgPtrCopy()";
+
+ if (getPairs().exist("latency")) {
+ code += ", " + getPairs().lookup("latency");
+ } else {
+ code += ", COPY_HEAD_LATENCY";
+ }
+
+ code += ");\n";
+}
+
+// Record that this statement consumes one slot on the output queue by
+// incrementing the (string-encoded) counter stored against its Var.
+void CopyHeadStatementAST::findResources(Map<Var*, string>& resource_list) const
+{
+  Var* out_var = m_out_queue_ptr->getVar();
+  int count = resource_list.exist(out_var)
+    ? atoi((resource_list.lookup(out_var)).c_str())
+    : 0;
+  resource_list.add(out_var, int_to_string(count + 1));
+}
+
+// One-line debug representation naming both queues.
+void CopyHeadStatementAST::print(ostream& out) const
+{
+  out << "[CopyHeadStatementAst: ";
+  out << *m_in_queue_ptr;
+  out << " ";
+  out << *m_out_queue_ptr;
+  out << "]";
+}
diff --git a/src/mem/slicc/ast/CopyHeadStatementAST.hh b/src/mem/slicc/ast/CopyHeadStatementAST.hh
new file mode 100644
index 000000000..1631395fc
--- /dev/null
+++ b/src/mem/slicc/ast/CopyHeadStatementAST.hh
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#ifndef COPYHEADSTATEMENTAST_H
+// BUG FIX: the guard macro previously read "COPYHEADTATEMENTAST_H"
+// (missing the 'S'); it never matched the #ifndef above, so the header
+// was not actually protected against multiple inclusion.
+#define COPYHEADSTATEMENTAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+#include "StatementListAST.hh"
+#include "TypeAST.hh"
+#include "PairListAST.hh"
+
+class VarExprAST;
+class Var;
+
+// AST node for the "copy_head" statement: copies the message at the head
+// of an input port onto an output port (see CopyHeadStatementAST.cc).
+class CopyHeadStatementAST : public StatementAST {
+public:
+  // Constructors
+  CopyHeadStatementAST(VarExprAST* in_queue_ptr,
+                       VarExprAST* out_queue_ptr,
+                       PairListAST* pairs_ptr);
+
+  // Destructor
+  ~CopyHeadStatementAST();
+
+  // Public Methods
+  void generate(string& code, Type* return_type_ptr) const;
+  void findResources(Map<Var*, string>& resource_list) const;
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  CopyHeadStatementAST(const CopyHeadStatementAST& obj);
+  CopyHeadStatementAST& operator=(const CopyHeadStatementAST& obj);
+
+  // Data Members (m_ prefix)
+  VarExprAST* m_in_queue_ptr;
+  VarExprAST* m_out_queue_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const CopyHeadStatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const CopyHeadStatementAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //COPYHEADSTATEMENTAST_H
diff --git a/src/mem/slicc/ast/DeclAST.cc b/src/mem/slicc/ast/DeclAST.cc
new file mode 100644
index 000000000..4269ed9f4
--- /dev/null
+++ b/src/mem/slicc/ast/DeclAST.cc
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DeclAST.C
+ *
+ * Description: See DeclAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "DeclAST.hh"
diff --git a/src/mem/slicc/ast/DeclAST.hh b/src/mem/slicc/ast/DeclAST.hh
new file mode 100644
index 000000000..e7c8467d6
--- /dev/null
+++ b/src/mem/slicc/ast/DeclAST.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DeclAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef DECLAST_H
+#define DECLAST_H
+
+#include "slicc_global.hh"
+#include "AST.hh"
+#include "PairListAST.hh"
+#include "StateMachine.hh"
+#include "TypeAST.hh"
+
+// Abstract base class for all top-level SLICC declarations; concrete
+// declarations implement generate(), and machine declarations override
+// findMachines().
+class DeclAST : public AST {
+public:
+ // Constructors
+ DeclAST(PairListAST* pairs_ptr) : AST(pairs_ptr->getPairs()) {}
+
+ // Destructor
+ virtual ~DeclAST() {}
+
+ // Public Methods
+ virtual void generate() = 0;
+ // Default is a no-op; machine declarations register themselves here.
+ virtual void findMachines() {};
+
+ // void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // DeclAST(const DeclAST& obj);
+ // DeclAST& operator=(const DeclAST& obj);
+
+ // Data Members (m_ prefix)
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DeclAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition.
+// NOTE(review): print() is commented out above, so this relies on a
+// print() declared in the AST base class — confirm against AST.hh.
+extern inline
+ostream& operator<<(ostream& out, const DeclAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //DECLAST_H
diff --git a/src/mem/slicc/ast/DeclListAST.cc b/src/mem/slicc/ast/DeclListAST.cc
new file mode 100644
index 000000000..6dacd5435
--- /dev/null
+++ b/src/mem/slicc/ast/DeclListAST.cc
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DeclListAST.C
+ *
+ * Description: See DeclListAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "DeclListAST.hh"
+
+// Construct from an existing vector of declarations; the list takes
+// ownership of the vector and its elements (freed in the destructor).
+DeclListAST::DeclListAST(Vector<DeclAST*>* vec_ptr)
+ : AST()
+{
+ assert(vec_ptr != NULL);
+ m_vec_ptr = vec_ptr;
+}
+
+// Singleton constructor: wrap a single declaration in a fresh
+// one-element list that this object owns.
+DeclListAST::DeclListAST(DeclAST* decl)
+  : AST()
+{
+  assert(decl != NULL);
+  m_vec_ptr = new Vector<DeclAST*>;
+  m_vec_ptr->insertAtTop(decl);
+}
+
+// Delete every owned declaration, then the container itself.
+DeclListAST::~DeclListAST()
+{
+  const int count = m_vec_ptr->size();
+  for (int idx = 0; idx < count; ++idx) {
+    delete (*m_vec_ptr)[idx];
+  }
+  delete m_vec_ptr;
+}
+
+// Run code generation over each declaration, in order.
+void DeclListAST::generate() const
+{
+  const int count = m_vec_ptr->size();
+  for (int idx = 0; idx < count; ++idx) {
+    (*m_vec_ptr)[idx]->generate();
+  }
+}
+
+// Let each declaration register any machines it defines, in order.
+void DeclListAST::findMachines() const
+{
+  const int count = m_vec_ptr->size();
+  for (int idx = 0; idx < count; ++idx) {
+    (*m_vec_ptr)[idx]->findMachines();
+  }
+}
+
+// Debug dump of the whole declaration list.
+void DeclListAST::print(ostream& out) const
+{
+  assert(m_vec_ptr != NULL);
+  out << "[DeclListAST: ";
+  out << *m_vec_ptr;
+  out << "]";
+}
diff --git a/src/mem/slicc/ast/DeclListAST.hh b/src/mem/slicc/ast/DeclListAST.hh
new file mode 100644
index 000000000..80c7fffcc
--- /dev/null
+++ b/src/mem/slicc/ast/DeclListAST.hh
@@ -0,0 +1,84 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DeclListAST.h
+ *
+ * Description:
+ *
+ * $Id: DeclListAST.h,v 3.1 2001/12/12 01:00:12 milo Exp $
+ *
+ */
+
+#ifndef DeclListAST_H
+#define DeclListAST_H
+
+#include "slicc_global.hh"
+#include "AST.hh"
+#include "DeclAST.hh"
+
+// Ordered list of top-level declarations; owns the vector and its
+// elements, and forwards generate()/findMachines() to every entry.
+class DeclListAST : public AST {
+public:
+ // Constructors
+ DeclListAST(Vector<DeclAST*>* vec_ptr);
+ DeclListAST(DeclAST* statement_ptr);
+
+ // Destructor
+ ~DeclListAST();
+
+ // Public Methods
+ void generate() const;
+ void findMachines() const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ DeclListAST(const DeclListAST& obj);
+ DeclListAST& operator=(const DeclListAST& obj);
+
+ // Data Members (m_ prefix)
+ Vector<DeclAST*>* m_vec_ptr;  // owned declarations, in source order
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DeclListAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition: delegates to print() and flushes.
+extern inline
+ostream& operator<<(ostream& out, const DeclListAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //DeclListAST_H
diff --git a/src/mem/slicc/ast/EnqueueStatementAST.cc b/src/mem/slicc/ast/EnqueueStatementAST.cc
new file mode 100644
index 000000000..49f2a9233
--- /dev/null
+++ b/src/mem/slicc/ast/EnqueueStatementAST.cc
@@ -0,0 +1,104 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "EnqueueStatementAST.hh"
+#include "SymbolTable.hh"
+#include "VarExprAST.hh"
+#include "PairListAST.hh"
+#include "util.hh"
+
+// Constructor: records the destination queue (an OutPort variable),
+// the message type to construct, and the statement body that fills in
++// the message.  The dtor below deletes queue/type/body, so this node
+// takes ownership of those three; pairs_ptr ownership is not taken
+// here -- presumably managed by the caller/base (TODO confirm).
+EnqueueStatementAST::EnqueueStatementAST(VarExprAST* queue_name_ptr,
+ TypeAST* type_name_ptr,
+ PairListAST* pairs_ptr,
+ StatementListAST* statement_list_ast_ptr)
+ : StatementAST(pairs_ptr->getPairs())
+{
+ m_queue_name_ptr = queue_name_ptr;
+ m_type_name_ptr = type_name_ptr;
+ m_statement_list_ast_ptr = statement_list_ast_ptr;
+}
+
+EnqueueStatementAST::~EnqueueStatementAST()
+{
+ delete m_queue_name_ptr;
+ delete m_type_name_ptr;
+ delete m_statement_list_ast_ptr;
+}
+
+// Emits C++ for an "enqueue" statement: opens a scope, declares a
+// local out_msg of the message type, runs the body statements (which
+// populate out_msg), then emits the queue's enqueue(out_msg[, latency])
+// call.  A symbol-table frame is pushed so "out_msg" is only visible
+// inside the generated scope.  return_type_ptr is unused here.
+void EnqueueStatementAST::generate(string& code, Type* return_type_ptr) const
+{
+ code += indent_str() + "{\n"; // Start scope
+ inc_indent();
+ g_sym_table.pushFrame();
+
+ Type* msg_type_ptr = m_type_name_ptr->lookupType();
+
+ // Add new local var to symbol table
+ g_sym_table.newSym(new Var("out_msg", getLocation(), msg_type_ptr, "out_msg", getPairs()));
+
+ code += indent_str() + msg_type_ptr->cIdent() + " out_msg;\n"; // Declare message
+ m_statement_list_ast_ptr->generate(code, NULL); // The other statements
+
+ code += indent_str();
+
+ // The target of an enqueue must be an OutPort.
+ m_queue_name_ptr->assertType("OutPort");
+ code += "(" + m_queue_name_ptr->getVar()->getCode() + ")";
+ code += ".enqueue(out_msg";
+
+ // Optional "latency" pair becomes an extra enqueue argument,
+ // emitted verbatim as the pair's string value.
+ if (getPairs().exist("latency")) {
+ code += ", " + getPairs().lookup("latency");
+ }
+
+ code += ");\n";
+
+ dec_indent();
+ g_sym_table.popFrame();
+ code += indent_str() + "}\n"; // End scope
+}
+
+// Each enqueue needs one slot on its output queue: bump the per-queue
+// count in resource_list (counts are stored as decimal strings).
+void EnqueueStatementAST::findResources(Map<Var*, string>& resource_list) const
+{
+ Var* var_ptr = m_queue_name_ptr->getVar();
+ int res_count = 0;
+ if (resource_list.exist(var_ptr)) {
+ res_count = atoi((resource_list.lookup(var_ptr)).c_str());
+ }
+ resource_list.add(var_ptr, int_to_string(res_count+1));
+}
+
+// Debug dump of the node (queue, message type, body).
+void EnqueueStatementAST::print(ostream& out) const
+{
+ out << "[EnqueueStatementAst: " << *m_queue_name_ptr << " "
+ << m_type_name_ptr->toString() << " " << *m_statement_list_ast_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/EnqueueStatementAST.hh b/src/mem/slicc/ast/EnqueueStatementAST.hh
new file mode 100644
index 000000000..eb7dad9af
--- /dev/null
+++ b/src/mem/slicc/ast/EnqueueStatementAST.hh
@@ -0,0 +1,93 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * EnqueueStatementAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef ENQUEUESTATEMENTAST_H
+#define ENQUEUESTATEMENTAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+#include "StatementListAST.hh"
+#include "TypeAST.hh"
+
+class VarExprAST;
+class Var;
+class PairListAST;
+
+// AST node for a SLICC "enqueue(queue, MsgType, pairs) { ... }"
+// statement: the body populates a local out_msg which is then pushed
+// onto the named OutPort queue.
+class EnqueueStatementAST : public StatementAST {
+public:
+ // Constructors
+ // Takes ownership of queue_name_ptr, type_name_ptr and
+ // statement_list_ast_ptr (deleted in the dtor, see the .cc file).
+ EnqueueStatementAST(VarExprAST* queue_name_ptr,
+ TypeAST* type_name_ptr,
+ PairListAST* pairs_ptr,
+ StatementListAST* statement_list_ast_ptr);
+
+ // Destructor
+ ~EnqueueStatementAST();
+
+ // Public Methods
+ // Appends the generated C++ for this statement to 'code'.
+ void generate(string& code, Type* return_type_ptr) const;
+ // Accumulates one queue slot per enqueue into resource_list.
+ void findResources(Map<Var*, string>& resource_list) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // (declared, never defined: copying is disallowed)
+ EnqueueStatementAST(const EnqueueStatementAST& obj);
+ EnqueueStatementAST& operator=(const EnqueueStatementAST& obj);
+
+ // Data Members (m_ prefix)
+ VarExprAST* m_queue_name_ptr;
+ TypeAST* m_type_name_ptr;
+ StatementListAST* m_statement_list_ast_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const EnqueueStatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Delegates to print() and flushes the stream.
+extern inline
+ostream& operator<<(ostream& out, const EnqueueStatementAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //ENQUEUESTATEMENTAST_H
diff --git a/src/mem/slicc/ast/EnumDeclAST.cc b/src/mem/slicc/ast/EnumDeclAST.cc
new file mode 100644
index 000000000..c8e033095
--- /dev/null
+++ b/src/mem/slicc/ast/EnumDeclAST.cc
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * EnumDeclAST.C
+ *
+ * Description: See EnumDeclAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "EnumDeclAST.hh"
+#include "main.hh"
+#include "SymbolTable.hh"
+
+// Constructor: records the enum's type name and its (possibly NULL)
+// list of enumerator fields.  Ownership of both pointers is taken
+// (they are deleted in the dtor below).
+EnumDeclAST::EnumDeclAST(TypeAST* type_ast_ptr,
+ PairListAST* pairs_ptr,
+ Vector<TypeFieldAST*>* field_vec_ptr)
+ : DeclAST(pairs_ptr)
+{
+ m_type_ast_ptr = type_ast_ptr;
+ m_field_vec_ptr = field_vec_ptr;
+}
+
+EnumDeclAST::~EnumDeclAST()
+{
+ delete m_type_ast_ptr;
+ // The field vector may be NULL (enum with no body); delete each
+ // field, then the vector itself.
+ if (m_field_vec_ptr != NULL) {
+ int size = m_field_vec_ptr->size();
+ for(int i=0; i<size; i++) {
+ delete (*m_field_vec_ptr)[i];
+ }
+ delete m_field_vec_ptr;
+ }
+}
+
+// Registers the enum as a new Type in the global symbol table, adds
+// each enumerator field to it, and declares the implicit external
+// helper "<Type>_to_string(<Type>) -> string".
+void EnumDeclAST::generate()
+{
+ string machine_name; // NOTE(review): unused local
+ string id = m_type_ast_ptr->toString();
+
+ Vector<Type*> param_type_vec; // used by to_string func call
+
+ // Make the new type
+ // Associated with the current state machine (may matter for
+ // name scoping inside a machine declaration).
+ Type* new_type_ptr = new Type(id, getLocation(), getPairs(),
+ g_sym_table.getStateMachine());
+ g_sym_table.newSym(new_type_ptr);
+
+ // Add all of the fields of the type to it
+ if (m_field_vec_ptr != NULL) {
+ int size = m_field_vec_ptr->size();
+ for(int i=0; i<size; i++) {
+ (*m_field_vec_ptr)[i]->generate(new_type_ptr);
+ }
+ }
+
+ // Add the implicit State_to_string method - FIXME, this is a bit dirty
+ param_type_vec.insertAtBottom(new_type_ptr); // add state to param vector
+ string func_id = new_type_ptr->cIdent()+"_to_string";
+
+ // "external" means no body is generated for it here.
+ Map<string, string> pairs;
+ pairs.add("external", "yes");
+ Vector<string> string_vec;
+ g_sym_table.newSym(new Func(func_id, getLocation(), g_sym_table.getType("string"), param_type_vec, string_vec, string(""), pairs, NULL));
+}
+
+// Debug dump of the node (type name only).
+void EnumDeclAST::print(ostream& out) const
+{
+ out << "[EnumDecl: " << m_type_ast_ptr->toString() << "]";
+}
+
+
diff --git a/src/mem/slicc/ast/EnumDeclAST.hh b/src/mem/slicc/ast/EnumDeclAST.hh
new file mode 100644
index 000000000..4474b69cc
--- /dev/null
+++ b/src/mem/slicc/ast/EnumDeclAST.hh
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * EnumDeclAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef EnumDeclAST_H
+#define EnumDeclAST_H
+
+#include "slicc_global.hh"
+#include "DeclAST.hh"
+#include "TypeAST.hh"
+#include "TypeFieldAST.hh"
+
+// AST node for a SLICC "enumeration" declaration.  generate()
+// registers the new enum Type, its fields, and the implicit
+// <Type>_to_string helper in the global symbol table.
+class EnumDeclAST : public DeclAST {
+public:
+ // Constructors
+ // Takes ownership of type_ast_ptr and field_vec_ptr; the field
+ // vector may be NULL.
+ EnumDeclAST(TypeAST* type_ast_ptr,
+ PairListAST* pairs_ptr,
+ Vector<TypeFieldAST*>* field_vec_ptr);
+
+ // Destructor
+ ~EnumDeclAST();
+
+ // Public Methods
+ virtual void generate();
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // (declared, never defined: copying is disallowed)
+ EnumDeclAST(const EnumDeclAST& obj);
+ EnumDeclAST& operator=(const EnumDeclAST& obj);
+
+ // Data Members (m_ prefix)
+ TypeAST* m_type_ast_ptr;
+ Vector<TypeFieldAST*>* m_field_vec_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const EnumDeclAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Delegates to print() and flushes the stream.
+extern inline
+ostream& operator<<(ostream& out, const EnumDeclAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //EnumDeclAST_H
diff --git a/src/mem/slicc/ast/EnumExprAST.cc b/src/mem/slicc/ast/EnumExprAST.cc
new file mode 100644
index 000000000..90dc4bebf
--- /dev/null
+++ b/src/mem/slicc/ast/EnumExprAST.cc
@@ -0,0 +1,76 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * EnumExprAST.C
+ *
+ * Description: See EnumExprAST.h
+ *
+ * $Id: EnumExprAST.C,v 3.1 2003/07/10 18:08:06 milo Exp $
+ *
+ */
+
+#include "EnumExprAST.hh"
+
+// Constructor: an enum-constant expression "Type:value".  Both
+// pointers must be non-NULL; ownership is taken (deleted in dtor).
+EnumExprAST::EnumExprAST(TypeAST* type_ast_ptr,
+ string* value_ptr)
+ : ExprAST()
+{
+ assert(value_ptr != NULL);
+ assert(type_ast_ptr != NULL);
+ m_type_ast_ptr = type_ast_ptr;
+ m_value_ptr = value_ptr;
+}
+
+EnumExprAST::~EnumExprAST()
+{
+ delete m_type_ast_ptr;
+ delete m_value_ptr;
+}
+
+// Emits the C++ identifier "<TypeCIdent>_<value>" for the enum
+// constant and returns the enum's Type.  Note the code is appended
+// before the enumerator is validated; on failure error() is called
+// (which presumably aborts, so the stale code does not matter).
+Type* EnumExprAST::generate(string& code) const
+{
+ Type* type_ptr = m_type_ast_ptr->lookupType();
+ code += type_ptr->cIdent() + "_" + (*m_value_ptr);
+
+ // Make sure the enumeration value exists
+ if (!type_ptr->enumExist(*m_value_ptr)) {
+ error("Type '" + m_type_ast_ptr->toString() + "' does not have enumeration '" + *m_value_ptr + "'");
+ }
+
+ // Return the proper type
+ return type_ptr;
+}
+
+// Debug dump of the node as "Type:value".
+void EnumExprAST::print(ostream& out) const
+{
+ string str;
+ str += m_type_ast_ptr->toString()+":"+(*m_value_ptr);
+ out << "[EnumExpr: " << str << "]";
+}
diff --git a/src/mem/slicc/ast/EnumExprAST.hh b/src/mem/slicc/ast/EnumExprAST.hh
new file mode 100644
index 000000000..492f9ac33
--- /dev/null
+++ b/src/mem/slicc/ast/EnumExprAST.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * EnumExprAST.h
+ *
+ * Description:
+ *
+ * $Id: EnumExprAST.h,v 3.2 2003/07/10 18:08:06 milo Exp $
+ *
+ */
+
+#ifndef EnumExprAST_H
+#define EnumExprAST_H
+
+#include "slicc_global.hh"
+#include "ExprAST.hh"
+#include "TypeAST.hh"
+
+
+// AST node for an enum-constant expression "Type:value"; generate()
+// emits the C++ enumerator identifier and returns the enum's Type.
+class EnumExprAST : public ExprAST {
+public:
+ // Constructors
+ // Both arguments must be non-NULL; ownership is taken.
+ EnumExprAST(TypeAST* type_ast_ptr,
+ string* value_ptr);
+
+ // Destructor
+ ~EnumExprAST();
+
+ // Public Methods
+ Type* generate(string& code) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // (declared, never defined: copying is disallowed)
+ EnumExprAST(const EnumExprAST& obj);
+ EnumExprAST& operator=(const EnumExprAST& obj);
+
+ // Data Members (m_ prefix)
+ TypeAST* m_type_ast_ptr;
+ string* m_value_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const EnumExprAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Delegates to print() and flushes the stream.
+extern inline
+ostream& operator<<(ostream& out, const EnumExprAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //EnumExprAST_H
diff --git a/src/mem/slicc/ast/ExprAST.cc b/src/mem/slicc/ast/ExprAST.cc
new file mode 100644
index 000000000..e910d688a
--- /dev/null
+++ b/src/mem/slicc/ast/ExprAST.cc
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ExprAST.C
+ *
+ * Description: See ExprAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "ExprAST.hh"
diff --git a/src/mem/slicc/ast/ExprAST.hh b/src/mem/slicc/ast/ExprAST.hh
new file mode 100644
index 000000000..0015cebaa
--- /dev/null
+++ b/src/mem/slicc/ast/ExprAST.hh
@@ -0,0 +1,84 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ExprAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef EXPRAST_H
+#define EXPRAST_H
+
+#include "slicc_global.hh"
+#include "AST.hh"
+
+
+// Abstract base class for all SLICC expression AST nodes.  Concrete
+// expressions implement generate(), which appends C++ to 'code' and
+// returns the expression's Type.
+class ExprAST : public AST {
+public:
+ // Constructors
+ ExprAST() : AST() { }
+
+ // Destructor
+ virtual ~ExprAST() { }
+
+ // Public Methods
+ // Append generated C++ for this expression to 'code'; return its Type.
+ virtual Type* generate(string& code) const = 0;
+ virtual void findResources(Map<Var*, string>& resource_list) const {} // The default is no resources
+
+ // void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // ExprAST(const ExprAST& obj);
+ // ExprAST& operator=(const ExprAST& obj);
+
+ // Data Members (m_ prefix)
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const ExprAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Delegates to the virtual print() (inherited from AST) and flushes.
+extern inline
+ostream& operator<<(ostream& out, const ExprAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //EXPRAST_H
diff --git a/src/mem/slicc/ast/ExprStatementAST.cc b/src/mem/slicc/ast/ExprStatementAST.cc
new file mode 100644
index 000000000..5eb1ce7b4
--- /dev/null
+++ b/src/mem/slicc/ast/ExprStatementAST.cc
@@ -0,0 +1,73 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ExprStatementAST.C
+ *
+ * Description: See ExprStatementAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "ExprStatementAST.hh"
+
+// Constructor: wraps an expression used as a statement.  Takes
+// ownership of expr_ptr (deleted in dtor).
+ExprStatementAST::ExprStatementAST(ExprAST* expr_ptr)
+ : StatementAST()
+{
+ m_expr_ptr = expr_ptr;
+}
+
+ExprStatementAST::~ExprStatementAST()
+{
+ delete m_expr_ptr;
+}
+
+// Emits "<expr>;\n" and enforces that an expression-statement's value
+// is void -- silently discarding a non-void result is an error.
+// return_type_ptr is unused here.
+void ExprStatementAST::generate(string& code, Type* return_type_ptr) const
+{
+ code += indent_str();
+ Type* actual_type_ptr = m_expr_ptr->generate(code);
+ code += ";\n";
+
+ // The return type must be void
+ Type* expected_type_ptr = g_sym_table.getType("void");
+ if (expected_type_ptr != actual_type_ptr) {
+ m_expr_ptr->error("Non-void return must not be ignored, return type is '" + actual_type_ptr->toString() + "'");
+ }
+}
+
+// Resource needs are whatever the wrapped expression needs.
+void ExprStatementAST::findResources(Map<Var*, string>& resource_list) const
+{
+ m_expr_ptr->findResources(resource_list);
+}
+
+// Debug dump of the node.
+void ExprStatementAST::print(ostream& out) const
+{
+ out << "[ExprStatementAST: " << *m_expr_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/ExprStatementAST.hh b/src/mem/slicc/ast/ExprStatementAST.hh
new file mode 100644
index 000000000..925ded72a
--- /dev/null
+++ b/src/mem/slicc/ast/ExprStatementAST.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ExprStatementAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef ExprStatementAST_H
+#define ExprStatementAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+#include "ExprAST.hh"
+
+// AST node for an expression used as a statement (e.g. a bare
+// function call).  generate() requires the expression type be void.
+class ExprStatementAST : public StatementAST {
+public:
+ // Constructors
+ // Takes ownership of expr_ptr.
+ ExprStatementAST(ExprAST* expr_ptr);
+
+ // Destructor
+ ~ExprStatementAST();
+
+ // Public Methods
+ void generate(string& code, Type* return_type_ptr) const;
+ void findResources(Map<Var*, string>& resource_list) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // (declared, never defined: copying is disallowed)
+ ExprStatementAST(const ExprStatementAST& obj);
+ ExprStatementAST& operator=(const ExprStatementAST& obj);
+
+ // Data Members (m_ prefix)
+ ExprAST* m_expr_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const ExprStatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Delegates to print() and flushes the stream.
+extern inline
+ostream& operator<<(ostream& out, const ExprStatementAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //ExprStatementAST_H
diff --git a/src/mem/slicc/ast/FormalParamAST.cc b/src/mem/slicc/ast/FormalParamAST.cc
new file mode 100644
index 000000000..b7dacf8a1
--- /dev/null
+++ b/src/mem/slicc/ast/FormalParamAST.cc
@@ -0,0 +1,61 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * FormalParamAST.C
+ *
+ * Description: See FormalParamAST.h
+ *
+ * $Id: FormalParamAST.C,v 3.1 2000/10/05 21:22:20 milo Exp $
+ *
+ */
+
+#include "FormalParamAST.hh"
+#include "StatementAST.hh"
+#include "SymbolTable.hh"
+
+// Destructor: this node owns its identifier string and type AST
+// (both set by the inline constructor in the header).
+FormalParamAST::~FormalParamAST()
+{
+ delete m_ident_ptr;
+ delete m_type_ast_ptr;
+}
+
+// Emits a C++ formal parameter "<TypeCIdent> param_<ident>" into
+// 'code', registers the parameter in the global symbol table under
+// its SLICC name, and returns the parameter's Type.
+Type* FormalParamAST::generate(string& code) const
+{
+ // Generated parameters get a "param_" prefix to avoid clashing
+ // with other generated identifiers.
+ string param = "param_" + *m_ident_ptr;
+
+ Type* type_ptr = m_type_ast_ptr->lookupType();
+ code += type_ptr->cIdent();
+ code += " ";
+ code += param;
+
+ // Add to symbol table
+ g_sym_table.newSym(new Var(*m_ident_ptr, getLocation(), type_ptr, param, getPairs()));
+ return type_ptr;
+}
diff --git a/src/mem/slicc/ast/FormalParamAST.hh b/src/mem/slicc/ast/FormalParamAST.hh
new file mode 100644
index 000000000..0dc08fabe
--- /dev/null
+++ b/src/mem/slicc/ast/FormalParamAST.hh
@@ -0,0 +1,84 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * FormalParamAST.h
+ *
+ * Description:
+ *
+ * $Id: FormalParamAST.h,v 3.1 2001/12/12 01:00:15 milo Exp $
+ *
+ */
+
+#ifndef FORMALPARAMAST_H
+#define FORMALPARAMAST_H
+
+#include "slicc_global.hh"
+#include "TypeAST.hh"
+
+
+class FormalParamAST : public AST {
+public:
+ // Constructor - takes ownership of both heap pointers; they are deleted in the destructor
+ FormalParamAST(TypeAST* type_ast_ptr, string* ident_ptr) : AST() { m_type_ast_ptr = type_ast_ptr; m_ident_ptr = ident_ptr; }
+
+ // Destructor
+ ~FormalParamAST();
+
+ // Public Methods
+ Type* generate(string& code) const; // appends "<ctype> param_<name>" to code, registers the Var, returns its Type
+ void print(ostream& out) const { out << "[FormalParamAST: " << *m_ident_ptr << "]"; }
+ string getName() const { return *m_ident_ptr; }
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator - declared but not defined to forbid copying
+ FormalParamAST(const FormalParamAST& obj);
+ FormalParamAST& operator=(const FormalParamAST& obj);
+
+ // Data Members (m_ prefix)
+ string* m_ident_ptr; // owned
+ TypeAST* m_type_ast_ptr; // owned
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const FormalParamAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const FormalParamAST& obj) // header-defined; "extern inline" is a dated idiom - plain "inline" is the standard form
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //FORMALPARAMAST_H
diff --git a/src/mem/slicc/ast/FuncCallExprAST.cc b/src/mem/slicc/ast/FuncCallExprAST.cc
new file mode 100644
index 000000000..845d0c8e3
--- /dev/null
+++ b/src/mem/slicc/ast/FuncCallExprAST.cc
@@ -0,0 +1,224 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * FuncCallExprAST.C
+ *
+ * Description: See FuncCallExprAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "FuncCallExprAST.hh"
+#include "SymbolTable.hh"
+
+FuncCallExprAST::FuncCallExprAST(string* proc_name_ptr,
+ Vector<ExprAST*>* expr_vec_ptr)
+ : ExprAST()
+{
+ m_proc_name_ptr = proc_name_ptr; // takes ownership; deleted in destructor
+ m_expr_vec_ptr = expr_vec_ptr; // takes ownership of vector and its elements
+}
+
+FuncCallExprAST::~FuncCallExprAST()
+{
+ delete m_proc_name_ptr;
+ int size = m_expr_vec_ptr->size();
+ for(int i=0; i<size; i++) { // delete each argument expression before the vector itself
+ delete (*m_expr_vec_ptr)[i];
+ }
+ delete m_expr_vec_ptr;
+}
+
+Type* FuncCallExprAST::generate(string& code) const
+{
+ // DEBUG_EXPR is strange since it takes parameters of multiple types, so it
+ // bypasses the normal symbol-table lookup and type check below
+ if (*m_proc_name_ptr == "DEBUG_EXPR") {
+ // FIXME - check for number of parameters
+ code += "DEBUG_SLICC(MedPrio, \"";
+ code += (*m_expr_vec_ptr)[0]->getLocation().toString();
+ code += ": \", ";
+ (*m_expr_vec_ptr)[0]->generate(code);
+ code += ");\n";
+ Type* void_type_ptr = g_sym_table.getType("void");
+ assert(void_type_ptr != NULL);
+ return void_type_ptr;
+ }
+
+ // hack for adding comments to profileTransition - also bypasses type checking
+ if (*m_proc_name_ptr == "APPEND_TRANSITION_COMMENT") {
+ // FIXME - check for number of parameters
+ code += "APPEND_TRANSITION_COMMENT(";
+ //code += (*m_expr_vec_ptr)[0]->getLocation().toString();
+ //code += ": \", ";
+ (*m_expr_vec_ptr)[0]->generate(code);
+ code += ");\n";
+ Type* void_type_ptr = g_sym_table.getType("void");
+ assert(void_type_ptr != NULL);
+ return void_type_ptr;
+ }
+
+ // Look up the function in the symbol table
+ Vector<string> code_vec; // generated C++ text for each argument, in order
+ Func* func_ptr = g_sym_table.getFunc(*m_proc_name_ptr);
+
+ // Check the types and get the code for the parameters
+ if (func_ptr == NULL) {
+ error("Unrecognized function name: '" + *m_proc_name_ptr + "'"); // NOTE(review): code below dereferences func_ptr - presumably error() does not return; verify
+ } else {
+ int size = m_expr_vec_ptr->size();
+
+ Vector<Type*> f = func_ptr->getParamTypes();
+
+ if (size != f.size() ) {
+ error("Wrong number of arguments passed to function : '" + *m_proc_name_ptr + "'");
+ }
+ else {
+ for(int i=0; i<size; i++) {
+
+ // Check the types of the parameter - SLICC requires an exact Type* match (no coercion)
+ string param_code;
+ Type* actual_type_ptr = (*m_expr_vec_ptr)[i]->generate(param_code);
+ Type* expected_type_ptr = func_ptr->getParamTypes()[i];
+ if (actual_type_ptr != expected_type_ptr) {
+ (*m_expr_vec_ptr)[i]->error("Type mismatch: expected: " + expected_type_ptr->toString() +
+ " actual: " + actual_type_ptr->toString());
+ }
+ code_vec.insertAtBottom(param_code);
+ }
+ }
+ }
+
+ /* OK, the semantics of "trigger" here is that, ports in the machine have
+ * different priorities. We always check the first port for doable
+ * transitions. If nothing/stalled, we pick one from the next port.
+ *
+ * One thing we have to be careful as the SLICC protocol writer is :
+ * If a port have two or more transitions can be picked from in one cycle,
+ * they must be independent. Otherwise, if transition A and B mean to be
+ * executed in sequential, and A get stalled, transition B can be issued
+ * erroneously. In practice, in most case, there is only one transition
+ * should be executed in one cycle for a given port. So as most of current
+ * protocols.
+ */
+
+ if (*m_proc_name_ptr == "trigger") { // args: [0]=event, [1]=address
+ code += indent_str() + "{\n";
+ code += indent_str() + " Address addr = ";
+ code += code_vec[1];
+ code += ";\n";
+ code += indent_str() + " TransitionResult result = doTransition(";
+ code += code_vec[0];
+ code += ", " + g_sym_table.getStateMachine()->toString() + "_getState(addr), addr";
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ // FIXME - the current assumption is that in_buffer_rank is declared in the msg buffer peek statement
+ code += ", in_buffer_rank";
+ }
+ code += ");\n";
+ code += indent_str() + " if (result == TransitionResult_Valid) {\n";
+ code += indent_str() + " counter++;\n";
+ code += indent_str() + " continue; // Check the first port again\n";
+ code += indent_str() + " }\n";
+ code += indent_str() + " if (result == TransitionResult_ResourceStall) {\n";
+ code += indent_str() + " g_eventQueue_ptr->scheduleEvent(this, 1);\n";
+ code += indent_str() + " // Cannot do anything with this transition, go check next doable transition (mostly likely of next port)\n";
+ code += indent_str() + " }\n";
+ code += indent_str() + "}\n";
+ } else if (*m_proc_name_ptr == "doubleTrigger") { // args: [0]=event1, [1]=addr1, [2]=event2, [3]=addr2
+ // NOTE: Use the doubleTrigger call with extreme caution
+ // the key to double trigger is the second event triggered cannot fail because the first event cannot be undone
+ assert(code_vec.size() == 4);
+ code += indent_str() + "{\n";
+ code += indent_str() + " Address addr1 = ";
+ code += code_vec[1];
+ code += ";\n";
+ code += indent_str() + " TransitionResult result1 = doTransition(";
+ code += code_vec[0];
+ code += ", " + g_sym_table.getStateMachine()->toString() + "_getState(addr1), addr1";
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ // FIXME - the current assumption is that in_buffer_rank is declared in the msg buffer peek statement
+ code += ", in_buffer_rank";
+ }
+ code += ");\n";
+ code += indent_str() + " if (result1 == TransitionResult_Valid) {\n";
+ code += indent_str() + " //this second event cannont fail because the first event already took effect\n";
+ code += indent_str() + " Address addr2 = ";
+ code += code_vec[3];
+ code += ";\n";
+ code += indent_str() + " TransitionResult result2 = doTransition(";
+ code += code_vec[2];
+ code += ", " + g_sym_table.getStateMachine()->toString() + "_getState(addr2), addr2";
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ // FIXME - the current assumption is that in_buffer_rank is declared in the msg buffer peek statement
+ code += ", in_buffer_rank";
+ }
+ code += ");\n";
+ code += indent_str() + " assert(result2 == TransitionResult_Valid); // ensure the event suceeded\n";
+ code += indent_str() + " counter++;\n";
+ code += indent_str() + " continue; // Check the first port again\n";
+ code += indent_str() + " }\n";
+ code += indent_str() + " if (result1 == TransitionResult_ResourceStall) {\n";
+ code += indent_str() + " g_eventQueue_ptr->scheduleEvent(this, 1);\n";
+ code += indent_str() + " // Cannot do anything with this transition, go check next doable transition (mostly likely of next port)\n";
+ code += indent_str() + " }\n";
+ code += indent_str() + "}\n";
+ } else if (*m_proc_name_ptr == "error") {
+ code += indent_str() + (*m_expr_vec_ptr)[0]->embedError(code_vec[0]) + "\n";
+ } else if (*m_proc_name_ptr == "assert") {
+ code += indent_str() + "if (ASSERT_FLAG && !(" + code_vec[0] + ")) {\n";
+ code += indent_str() + " " + (*m_expr_vec_ptr)[0]->embedError("\"assert failure\"") + "\n";
+ code += indent_str() + "}\n";
+ } else if (*m_proc_name_ptr == "continueProcessing") {
+ code += "counter++; continue; // Check the first port again";
+ } else {
+ // Normal function call, e.g. "(m_chip_ptr->foo(a, b))"
+ code += "(";
+ // if the func is internal to the chip but not the machine then it can only be
+ // accessed through the chip pointer
+ if (!func_ptr->existPair("external") && !func_ptr->isInternalMachineFunc()) {
+ code += "m_chip_ptr->";
+ }
+ code += func_ptr->cIdent() + "(";
+ int size = code_vec.size();
+ for(int i=0; i<size; i++) {
+ if (i != 0) {
+ code += ", "; // comma-separate the generated arguments
+ }
+ code += code_vec[i];
+ }
+ code += "))";
+ }
+ return func_ptr->getReturnType();
+}
+
+void FuncCallExprAST::print(ostream& out) const
+{ // debug dump: function name followed by the raw argument AST vector
+ out << "[FuncCallExpr: " << *m_proc_name_ptr << " " << *m_expr_vec_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/FuncCallExprAST.hh b/src/mem/slicc/ast/FuncCallExprAST.hh
new file mode 100644
index 000000000..edac97a04
--- /dev/null
+++ b/src/mem/slicc/ast/FuncCallExprAST.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * FuncCallExprAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef FUNCCALLEXPRAST_H
+#define FUNCCALLEXPRAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+#include "ExprAST.hh"
+
+
+// ProcGen decl
+class FuncGen;
+
+class FuncCallExprAST : public ExprAST {
+public:
+ // Constructor - takes ownership of both heap pointers (name and argument vector)
+ FuncCallExprAST(string* proc_name_ptr,
+ Vector<ExprAST*>* expr_vec_ptr);
+
+ // Destructor
+ ~FuncCallExprAST();
+
+ // Public Methods
+ Type* generate(string& code) const; // appends the C++ call text to code; returns the callee's return Type
+ void print(ostream& out) const;
+
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator - declared but not defined to forbid copying
+ FuncCallExprAST(const FuncCallExprAST& obj);
+ FuncCallExprAST& operator=(const FuncCallExprAST& obj);
+
+ // Data Members (m_ prefix)
+ string* m_proc_name_ptr; // owned
+ Vector<ExprAST*>* m_expr_vec_ptr; // owned, including elements
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const FuncCallExprAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const FuncCallExprAST& obj) // header-defined; "extern inline" is a dated idiom - plain "inline" is the standard form
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //FUNCCALLEXPRAST_H
diff --git a/src/mem/slicc/ast/FuncDeclAST.cc b/src/mem/slicc/ast/FuncDeclAST.cc
new file mode 100644
index 000000000..9f9dd1f8e
--- /dev/null
+++ b/src/mem/slicc/ast/FuncDeclAST.cc
@@ -0,0 +1,111 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * FuncDeclAST.C
+ *
+ * Description: See FuncDeclAST.h
+ *
+ * $Id: FuncDeclAST.C,v 3.4 2003/08/22 18:19:34 beckmann Exp $
+ *
+ */
+
+#include "FuncDeclAST.hh"
+#include "SymbolTable.hh"
+#include "main.hh"
+
+FuncDeclAST::FuncDeclAST(TypeAST* return_type_ast_ptr,
+ string* ident_ptr,
+ Vector<FormalParamAST*>* formal_vec_ptr,
+ PairListAST* pairs_ptr,
+ StatementListAST* statement_list_ptr)
+ : DeclAST(pairs_ptr)
+{
+ m_return_type_ast_ptr = return_type_ast_ptr; // all four pointers: ownership transferred, freed in destructor
+ m_ident_ptr = ident_ptr;
+ m_formal_vec_ptr = formal_vec_ptr;
+ m_statement_list_ptr = statement_list_ptr; // may be NULL for an external (body-less) declaration
+}
+
+FuncDeclAST::~FuncDeclAST()
+{
+ delete m_return_type_ast_ptr;
+ delete m_ident_ptr;
+
+ int size = m_formal_vec_ptr->size();
+ for(int i=0; i<size; i++) { // delete each formal parameter before the vector itself
+ delete (*m_formal_vec_ptr)[i];
+ }
+ delete m_formal_vec_ptr;
+ delete m_statement_list_ptr; // deleting NULL is a no-op, so the external-decl case is safe
+}
+
+void FuncDeclAST::generate()
+{
+ Vector<Type*> type_vec; // parameter types, in declaration order
+ Vector<string> param_vec; // generated "<ctype> param_<name>" strings, parallel to type_vec
+ Type* void_type_ptr = g_sym_table.getType("void"); // NOTE(review): looked up but never used here - confirm intentional
+
+ // Generate definition code
+ g_sym_table.pushFrame(); // new scope so formals don't leak into the enclosing symbol table
+
+ // Lookup return type
+ Type* return_type_ptr = m_return_type_ast_ptr->lookupType();
+
+ // Generate function header
+ int size = m_formal_vec_ptr->size();
+ for(int i=0; i<size; i++) {
+ // Lookup parameter types
+ string ident;
+ Type* type_ptr = (*m_formal_vec_ptr)[i]->generate(ident); // also registers the formal in the current frame
+ type_vec.insertAtBottom(type_ptr);
+ param_vec.insertAtBottom(ident);
+ }
+
+ string body;
+ if (m_statement_list_ptr == NULL) {
+ getPairs().add("external", "yes"); // no body => external function, implemented elsewhere
+ } else {
+ m_statement_list_ptr->generate(body, return_type_ptr);
+ }
+ g_sym_table.popFrame();
+
+ StateMachine* machine_ptr = g_sym_table.getStateMachine();
+ if (machine_ptr != NULL) { // inside a machine: register as a machine-local function
+ machine_ptr->addFunc(new Func(*m_ident_ptr, getLocation(), return_type_ptr, type_vec, param_vec, body, getPairs(), machine_ptr));
+ } else { // global function: register in the global symbol table
+ g_sym_table.newSym(new Func(*m_ident_ptr, getLocation(), return_type_ptr, type_vec, param_vec, body, getPairs(), machine_ptr));
+ }
+
+}
+
+void FuncDeclAST::print(ostream& out) const
+{ // debug dump: declared function name only
+ out << "[FuncDecl: " << *m_ident_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/FuncDeclAST.hh b/src/mem/slicc/ast/FuncDeclAST.hh
new file mode 100644
index 000000000..4096a8b66
--- /dev/null
+++ b/src/mem/slicc/ast/FuncDeclAST.hh
@@ -0,0 +1,90 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * FuncDeclAST.h
+ *
+ * Description:
+ *
+ * $Id: FuncDeclAST.h,v 3.2 2003/07/10 18:08:06 milo Exp $
+ *
+ */
+
+#ifndef FuncDeclAST_H
+#define FuncDeclAST_H
+
+#include "slicc_global.hh"
+#include "DeclAST.hh"
+#include "TypeFieldAST.hh"
+#include "TypeAST.hh"
+#include "FormalParamAST.hh"
+
+class FuncDeclAST : public DeclAST {
+public:
+ // Constructor - takes ownership of all pointer arguments; statement_list_ptr may be NULL (external function)
+ FuncDeclAST(TypeAST* return_type_ptr,
+ string* ident_ptr,
+ Vector<FormalParamAST*>* formal_vec_ptr,
+ PairListAST* pairs_ptr,
+ StatementListAST* statement_list_ptr);
+ // Destructor
+ ~FuncDeclAST();
+
+ // Public Methods
+ void generate(); // registers a Func with the current StateMachine, or globally if none
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator - declared but not defined to forbid copying
+ FuncDeclAST(const FuncDeclAST& obj);
+ FuncDeclAST& operator=(const FuncDeclAST& obj);
+
+ // Data Members (m_ prefix)
+ string* m_ident_ptr; // owned
+ TypeAST* m_return_type_ast_ptr; // owned
+ Vector<FormalParamAST*>* m_formal_vec_ptr; // owned, including elements
+ StatementListAST* m_statement_list_ptr; // owned; NULL for external declarations
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const FuncDeclAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const FuncDeclAST& obj) // header-defined; "extern inline" is a dated idiom - plain "inline" is the standard form
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //FuncDeclAST_H
diff --git a/src/mem/slicc/ast/IfStatementAST.cc b/src/mem/slicc/ast/IfStatementAST.cc
new file mode 100644
index 000000000..372b213d8
--- /dev/null
+++ b/src/mem/slicc/ast/IfStatementAST.cc
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * IfStatementAST.C
+ *
+ * Description: See IfStatementAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "IfStatementAST.hh"
+
+IfStatementAST::IfStatementAST(ExprAST* cond_ptr,
+ StatementListAST* then_ptr,
+ StatementListAST* else_ptr)
+ : StatementAST()
+{
+ assert(cond_ptr != NULL); // condition and then-branch are mandatory
+ assert(then_ptr != NULL);
+ m_cond_ptr = cond_ptr; // ownership of all three transferred; freed in destructor
+ m_then_ptr = then_ptr;
+ m_else_ptr = else_ptr; // may be NULL - an 'if' with no 'else' clause
+}
+
+IfStatementAST::~IfStatementAST()
+{
+ delete m_cond_ptr;
+ delete m_then_ptr;
+ delete m_else_ptr; // safe when NULL (no else clause): delete of NULL is a no-op
+}
+
+
+void IfStatementAST::generate(string& code, Type* return_type_ptr) const
+{
+  Type* type_ptr;
+
+  // Conditional - must evaluate to the SLICC 'bool' type
+  code += indent_str() + "if (";
+  type_ptr = m_cond_ptr->generate(code);
+  if (type_ptr != g_sym_table.getType("bool")) {
+    m_cond_ptr->error("Condition of if statement must be boolean, type was '" + type_ptr->toString() + "'");
+  }
+  code += ") {\n";
+  // Then part
+  inc_indent();
+  m_then_ptr->generate(code, return_type_ptr); // return_type_ptr lets nested returns be type-checked
+  dec_indent();
+  // Else part - only emitted when an else clause exists
+  if (m_else_ptr != NULL) {
+    code += indent_str() + "} else {\n";
+    inc_indent();
+    m_else_ptr->generate(code, return_type_ptr);
+    dec_indent();
+  }
+  code += indent_str() + "}\n"; // End scope
+}
+
+void IfStatementAST::findResources(Map<Var*, string>& resource_list) const
+{
+  // Take a worst-case look at both paths, since either branch may execute
+  m_then_ptr->findResources(resource_list);
+  if (m_else_ptr != NULL) { // else clause is optional
+    m_else_ptr->findResources(resource_list);
+  }
+}
+
+void IfStatementAST::print(ostream& out) const
+{ // debug dump; m_else_ptr may be NULL (else clause is optional - see constructor/destructor), so guard before dereferencing
+ out << "[IfStatement: " << *m_cond_ptr << *m_then_ptr; if (m_else_ptr != NULL) { out << *m_else_ptr; } out << "]";
+}
diff --git a/src/mem/slicc/ast/IfStatementAST.hh b/src/mem/slicc/ast/IfStatementAST.hh
new file mode 100644
index 000000000..bad7a286d
--- /dev/null
+++ b/src/mem/slicc/ast/IfStatementAST.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * IfStatementAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef IFSTATEMENTAST_H
+#define IFSTATEMENTAST_H
+
+#include "slicc_global.hh"
+#include "ExprAST.hh"
+#include "StatementAST.hh"
+#include "StatementListAST.hh"
+
+
+class IfStatementAST : public StatementAST {
+public:
+ // Constructor - takes ownership of all three pointers; else_ptr may be NULL (no else clause)
+ IfStatementAST(ExprAST* cond_ptr,
+ StatementListAST* then_ptr,
+ StatementListAST* else_ptr);
+
+ // Destructor
+ ~IfStatementAST();
+
+ // Public Methods
+ void generate(string& code, Type* return_type_ptr) const; // emits the C++ if/else into code
+ void findResources(Map<Var*, string>& resource_list) const; // worst-case resources over both branches
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator - declared but not defined to forbid copying
+ IfStatementAST(const IfStatementAST& obj);
+ IfStatementAST& operator=(const IfStatementAST& obj);
+
+ // Data Members (m_ prefix)
+ ExprAST* m_cond_ptr; // owned, never NULL
+ StatementListAST* m_then_ptr; // owned, never NULL
+ StatementListAST* m_else_ptr; // owned, NULL when there is no else clause
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const IfStatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const IfStatementAST& obj) // header-defined stream output, delegates to print()
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //IFSTATEMENTAST_H
diff --git a/src/mem/slicc/ast/InPortDeclAST.cc b/src/mem/slicc/ast/InPortDeclAST.cc
new file mode 100644
index 000000000..6b13fec51
--- /dev/null
+++ b/src/mem/slicc/ast/InPortDeclAST.cc
@@ -0,0 +1,149 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * InPortDeclAST.C
+ *
+ * Description: See InPortDeclAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "InPortDeclAST.hh"
+#include "SymbolTable.hh"
+#include "Var.hh"
+
+InPortDeclAST::InPortDeclAST(string* ident_ptr,
+ TypeAST* msg_type_ptr,
+ ExprAST* var_expr_ptr,
+ PairListAST* pairs_ptr,
+ StatementListAST* statement_list_ptr)
+ : DeclAST(pairs_ptr)
+{
+ m_ident_ptr = ident_ptr; // ownership of all arguments transferred; freed in destructor
+ m_msg_type_ptr = msg_type_ptr;
+ m_var_expr_ptr = var_expr_ptr;
+ m_statement_list_ptr = statement_list_ptr;
+ m_queue_type_ptr = new TypeAST(new string("InPort")); // synthesized type for the port variable itself
+}
+
+InPortDeclAST::~InPortDeclAST()
+{
+ delete m_ident_ptr;
+ delete m_msg_type_ptr;
+ delete m_var_expr_ptr;
+ delete m_statement_list_ptr;
+ delete m_queue_type_ptr; // the TypeAST allocated in the constructor
+}
+
+void InPortDeclAST::generate()
+{
+  string code;
+  Type* queue_type_ptr = m_var_expr_ptr->generate(code); // code receives the C++ expression naming the queue
+  if (!queue_type_ptr->isInPort()) {
+    error("Inport queues must be of a type that has the 'inport' attribute.  The type '" +
+          queue_type_ptr->toString() + "' does not have this attribute.");
+  }
+
+  Type* type_ptr = m_queue_type_ptr->lookupType();
+  Var* in_port_ptr = new Var(*m_ident_ptr, getLocation(), type_ptr, code, getPairs()); // registered below; ownership goes to the symbol table
+  g_sym_table.newSym(in_port_ptr);
+
+  g_sym_table.pushFrame(); // scope for the port-local trigger/doubleTrigger/continueProcessing symbols
+  Vector<Type*> param_type_vec;
+
+  // Check for Event
+  type_ptr = g_sym_table.getType("Event");
+  if (type_ptr == NULL) {
+    error("in_port declarations require 'Event' enumeration to be defined");
+  }
+  param_type_vec.insertAtBottom(type_ptr);
+
+  // Check for Address
+  type_ptr = g_sym_table.getType("Address");
+  if (type_ptr == NULL) {
+    error("in_port declarations require 'Address' type to be defined");
+  }
+  param_type_vec.insertAtBottom(type_ptr);
+
+  // Add the trigger method - FIXME, this is a bit dirty
+  Map<string, string> pairs;
+  pairs.add("external", "yes"); // generated inline by FuncCallExprAST, not called through the chip
+  Vector<string> string_vec;
+  g_sym_table.newSym(new Func("trigger", getLocation(), g_sym_table.getType("void"), param_type_vec, string_vec, string(""), pairs, NULL));
+
+  // Check for Event2 - doubleTrigger takes (Event, Address, Event, Address)
+  type_ptr = g_sym_table.getType("Event");
+  if (type_ptr == NULL) {
+    error("in_port declarations require 'Event' enumeration to be defined");
+  }
+  param_type_vec.insertAtBottom(type_ptr);
+
+  // Check for Address2
+  type_ptr = g_sym_table.getType("Address");
+  if (type_ptr == NULL) {
+    error("in_port declarations require 'Address' type to be defined");
+  }
+  param_type_vec.insertAtBottom(type_ptr);
+
+  // Add the doubleTrigger method - this hack supports triggering two simultaneous events
+  // The key is that the second transition cannot fail because the first event cannot be undone
+  // therefore you must do some checks before calling double trigger to ensure that won't happen
+  g_sym_table.newSym(new Func("doubleTrigger", getLocation(), g_sym_table.getType("void"), param_type_vec, string_vec, string(""), pairs, NULL));
+
+  // Add the continueProcessing method - this hack supports messages that don't trigger events
+  Vector<Type*> empty_param_type_vec;
+  Vector<string> empty_string_vec;
+  g_sym_table.newSym(new Func("continueProcessing", getLocation(), g_sym_table.getType("void"), empty_param_type_vec, empty_string_vec, string(""), pairs, NULL));
+
+  if (m_statement_list_ptr != NULL) {
+    inc_indent();
+    inc_indent();
+    string code; // NOTE: shadows the outer 'code' (the queue expression) - this one holds the port body
+    m_statement_list_ptr->generate(code, NULL);
+    in_port_ptr->addPair("c_code_in_port", code);
+    dec_indent();
+    dec_indent();
+  }
+  g_sym_table.popFrame();
+
+  // Add port to state machine
+  StateMachine* machine_ptr = g_sym_table.getStateMachine();
+  if (machine_ptr == NULL) {
+    error("InPort declaration not part of a machine.");
+  }
+  machine_ptr->addInPort(in_port_ptr);
+}
+
+
+void InPortDeclAST::print(ostream& out) const
+{ // debug dump: declared port name only
+  out << "[InPortDecl: " << *m_ident_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/InPortDeclAST.hh b/src/mem/slicc/ast/InPortDeclAST.hh
new file mode 100644
index 000000000..4f3c25f93
--- /dev/null
+++ b/src/mem/slicc/ast/InPortDeclAST.hh
@@ -0,0 +1,91 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * InPortDeclAST.h
+ *
+ * Description:
+ *
+ * $Id: InPortDeclAST.h,v 3.2 2003/07/10 18:08:06 milo Exp $
+ *
+ */
+
+#ifndef InPortDeclAST_H
+#define InPortDeclAST_H
+
+#include "slicc_global.hh"
+#include "DeclAST.hh"
+#include "StatementListAST.hh"
+#include "VarExprAST.hh"
+
// AST node for a SLICC "in_port" declaration: a named input message queue
// on a state machine.  generate() builds the trigger helper functions and
// registers the port with the enclosing machine (see the .cc file).
class InPortDeclAST : public DeclAST {
public:
  // Constructors
  InPortDeclAST(string* ident_ptr,               // port name (owned)
                TypeAST* msg_type_ptr,           // type of messages on the queue
                ExprAST* var_expr_ptr,           // expression naming the queue variable
                PairListAST* pairs_ptr,          // key/value attributes
                StatementListAST* statement_list_ptr);  // port body; may be NULL

  // Destructor
  ~InPortDeclAST();

  // Public Methods
  void generate();                 // build trigger funcs and add port to the machine
  void print(ostream& out) const;  // debug dump
private:
  // Private Methods

  // Private copy constructor and assignment operator
  InPortDeclAST(const InPortDeclAST& obj);
  InPortDeclAST& operator=(const InPortDeclAST& obj);

  // Data Members (m_ prefix)
  string* m_ident_ptr;
  TypeAST* m_msg_type_ptr;
  ExprAST* m_var_expr_ptr;
  StatementListAST* m_statement_list_ptr;
  TypeAST* m_queue_type_ptr;
};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const InPortDeclAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const InPortDeclAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //InPortDeclAST_H
diff --git a/src/mem/slicc/ast/InfixOperatorExprAST.cc b/src/mem/slicc/ast/InfixOperatorExprAST.cc
new file mode 100644
index 000000000..ba3aa1245
--- /dev/null
+++ b/src/mem/slicc/ast/InfixOperatorExprAST.cc
@@ -0,0 +1,121 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * InfixOperatorExprAST.C
+ *
+ * Description: See InfixOperatorExprAST.h
+ *
+ * $Id: InfixOperatorExprAST.C,v 3.2 2004/01/31 20:46:15 milo Exp $
+ *
+ */
+
+#include "InfixOperatorExprAST.hh"
+
+InfixOperatorExprAST::InfixOperatorExprAST(ExprAST* left_ptr,
+ string* op_ptr,
+ ExprAST* right_ptr)
+ : ExprAST()
+{
+ m_left_ptr = left_ptr;
+ m_op_ptr = op_ptr;
+ m_right_ptr = right_ptr;
+}
+
// Destructor: the node owns its operand subtrees and the operator string,
// so release all three here.
InfixOperatorExprAST::~InfixOperatorExprAST()
{
  delete m_left_ptr;
  delete m_op_ptr;
  delete m_right_ptr;
}
+
+Type* InfixOperatorExprAST::generate(string& code) const
+{
+ code += "(";
+ Type* left_type_ptr = m_left_ptr->generate(code);
+ code += " " + *m_op_ptr + " ";
+ Type* right_type_ptr = m_right_ptr->generate(code);
+ code += ")";
+
+ string inputs, output;
+ // Figure out what the input and output types should be
+ if ((*m_op_ptr == "==" ||
+ *m_op_ptr == "!=")) {
+ output = "bool";
+ if (left_type_ptr != right_type_ptr) {
+ error("Type mismatch: left & right operand of operator '" + *m_op_ptr +
+ "' must be the same type." +
+ "left: '" + left_type_ptr->toString() +
+ "', right: '" + right_type_ptr->toString() + "'");
+ }
+ } else {
+ if ((*m_op_ptr == "&&" ||
+ *m_op_ptr == "||")) {
+ // boolean inputs and output
+ inputs = "bool";
+ output = "bool";
+ } else if ((*m_op_ptr == "==" ||
+ *m_op_ptr == "!=" ||
+ *m_op_ptr == ">=" ||
+ *m_op_ptr == "<=" ||
+ *m_op_ptr == ">" ||
+ *m_op_ptr == "<")) {
+ // Integer inputs, boolean output
+ inputs = "int";
+ output = "bool";
+ } else {
+ // integer inputs and output
+ inputs = "int";
+ output = "int";
+ }
+
+ Type* inputs_type = g_sym_table.getType(inputs);
+
+ if (inputs_type != left_type_ptr) {
+ m_left_ptr->error("Type mismatch: left operand of operator '" + *m_op_ptr +
+ "' expects input type '" + inputs + "', actual was " + left_type_ptr->toString() + "'");
+ }
+
+ if (inputs_type != right_type_ptr) {
+ m_right_ptr->error("Type mismatch: right operand of operator '" + *m_op_ptr +
+ "' expects input type '" + inputs + "', actual was '" + right_type_ptr->toString() + "'");
+ }
+ }
+
+ // All is well
+ Type* output_type = g_sym_table.getType(output);
+ return output_type;
+}
+
+
+void InfixOperatorExprAST::print(ostream& out) const
+{
+ out << "[InfixExpr: " << *m_left_ptr
+ << *m_op_ptr << *m_right_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/InfixOperatorExprAST.hh b/src/mem/slicc/ast/InfixOperatorExprAST.hh
new file mode 100644
index 000000000..451156f6d
--- /dev/null
+++ b/src/mem/slicc/ast/InfixOperatorExprAST.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * InfixOperatorExprAST.h
+ *
+ * Description:
+ *
+ * $Id: InfixOperatorExprAST.h,v 3.1 2001/12/12 01:00:19 milo Exp $
+ *
+ */
+
+#ifndef INFIXOPERATOREXPRAST_H
+#define INFIXOPERATOREXPRAST_H
+
+#include "slicc_global.hh"
+#include "ExprAST.hh"
+
+
// AST node for a binary infix expression ("left op right").  generate()
// emits the parenthesized C++ expression and type-checks the operands
// (per-operator typing rules live in the .cc file).
class InfixOperatorExprAST : public ExprAST {
public:
  // Constructors
  InfixOperatorExprAST(ExprAST* left_ptr, string* op_ptr, ExprAST* right_ptr);  // takes ownership of all three

  // Destructor
  ~InfixOperatorExprAST();

  // Public Methods
  Type* generate(string& code) const;  // appends code; returns the result type
  void print(ostream& out) const;      // debug dump
private:
  // Private Methods

  // Private copy constructor and assignment operator
  InfixOperatorExprAST(const InfixOperatorExprAST& obj);
  InfixOperatorExprAST& operator=(const InfixOperatorExprAST& obj);

  // Data Members (m_ prefix)
  ExprAST* m_left_ptr;
  string* m_op_ptr;
  ExprAST* m_right_ptr;

};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const InfixOperatorExprAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const InfixOperatorExprAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //INFIXOPERATOREXPRAST_H
diff --git a/src/mem/slicc/ast/LiteralExprAST.cc b/src/mem/slicc/ast/LiteralExprAST.cc
new file mode 100644
index 000000000..b3b415bc9
--- /dev/null
+++ b/src/mem/slicc/ast/LiteralExprAST.cc
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * LiteralExprAST.C
+ *
+ * Description: See LiteralExprAST.h
+ *
+ * $Id: LiteralExprAST.C,v 3.1 2002/10/22 21:27:52 milo Exp $
+ *
+ */
+
+#include "LiteralExprAST.hh"
+
+Type* LiteralExprAST::generate(string& code) const
+{
+ if (m_type == "string") {
+ code += "(\"" + *m_lit_ptr + "\")";
+ } else {
+ code += "(" + *m_lit_ptr + ")";
+ }
+
+ Type* type_ptr = g_sym_table.getType(m_type);
+ if (type_ptr == NULL) {
+ // Can't find the type
+ error("Internal: can't primitive type '" + m_type + "'");
+ }
+ return type_ptr;
+}
diff --git a/src/mem/slicc/ast/LiteralExprAST.hh b/src/mem/slicc/ast/LiteralExprAST.hh
new file mode 100644
index 000000000..be433a1e3
--- /dev/null
+++ b/src/mem/slicc/ast/LiteralExprAST.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * LiteralExprAST.h
+ *
+ * Description:
+ *
+ * $Id: LiteralExprAST.h,v 3.1 2001/12/12 01:00:20 milo Exp $
+ *
+ */
+
+#ifndef LITERALEXPRAST_H
+#define LITERALEXPRAST_H
+
+#include "slicc_global.hh"
+#include "ExprAST.hh"
+
+
// AST node for a literal constant.  m_type names the SLICC primitive type
// of the literal; generate() resolves it through the global symbol table.
class LiteralExprAST : public ExprAST {
public:
  // Constructors
  // Takes ownership of lit_ptr (the literal's source text).
  LiteralExprAST(string* lit_ptr, string type) : ExprAST() { m_lit_ptr = lit_ptr; m_type = type; }

  // Destructor
  ~LiteralExprAST() { delete m_lit_ptr; }

  // Public Methods
  Type* generate(string& code) const;  // appends "(lit)" and returns its Type
  void print(ostream& out) const { out << "[Literal: " << *m_lit_ptr << "]"; }
private:
  // Private Methods

  // Private copy constructor and assignment operator
  LiteralExprAST(const LiteralExprAST& obj);
  LiteralExprAST& operator=(const LiteralExprAST& obj);

  // Data Members (m_ prefix)
  string* m_lit_ptr;  // literal text as written in the source
  string m_type;      // name of the primitive type (e.g. "string")
};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const LiteralExprAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const LiteralExprAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //LITERALEXPRAST_H
diff --git a/src/mem/slicc/ast/Location.cc b/src/mem/slicc/ast/Location.cc
new file mode 100644
index 000000000..91b8dbd28
--- /dev/null
+++ b/src/mem/slicc/ast/Location.cc
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Location.C
+ *
+ * Description: See Location.h
+ *
+ * $Id: Location.C,v 3.3 2004/05/30 22:19:02 kmoore Exp $
+ *
+ */
+
+#include "Location.hh"
+
// Current line number and file name, maintained externally (by the scanner);
// each Location object snapshots these values at construction time.
int g_line_number = 0;
string g_file_name("");
+
+Location::Location()
+{
+ m_file_name = g_file_name;
+ m_line_number = g_line_number;
+
+ ostringstream sstr;
+ sstr << getLineNumber();
+ m_line_number_str = sstr.str();
+}
+
+void Location::error(string err_msg) const
+{
+ cerr << endl;
+ cerr << toString() << ": Error: " << err_msg << endl;
+ exit(1);
+}
+
+string Location::embedError(string err_msg) const
+{
+ string code;
+ code += "cerr << \"Runtime Error at ";
+ code += toString() + ", Ruby Time: \" << ";
+ code += "g_eventQueue_ptr->getTime() << \": \" << ";
+ code += err_msg;
+ code += " << \", PID: \" << getpid() << endl;\n";
+ code += "char c; cerr << \"press return to continue.\" << endl; cin.get(c); abort();\n";
+
+ return code;
+}
+
+void Location::warning(string err_msg) const
+{
+ cerr << toString() << ": Warning: "
+ << err_msg << endl;
+}
+
+string Location::toString() const
+{
+ return m_file_name + ":" + m_line_number_str;
+}
diff --git a/src/mem/slicc/ast/Location.hh b/src/mem/slicc/ast/Location.hh
new file mode 100644
index 000000000..6cf2df5a4
--- /dev/null
+++ b/src/mem/slicc/ast/Location.hh
@@ -0,0 +1,93 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Location.h
+ *
+ * Description:
+ *
+ * $Id: Location.h,v 3.1 2001/12/12 01:00:20 milo Exp $
+ *
+ */
+
+#ifndef LOCATION_H
+#define LOCATION_H
+
+#include "slicc_global.hh"
+
+extern int g_line_number;
+extern string g_file_name;
+
// A source location (file name + line number), captured from the scanner
// globals g_file_name/g_line_number at construction time.  Used throughout
// slicc for error and warning reporting.
class Location {
public:
  // Constructors
  Location();

  // Destructor
  //~Location();

  // Public Methods

  // NOTE(review): print() is declared here but no definition appears in
  // Location.cc -- confirm it is defined somewhere before relying on it.
  void print(ostream& out) const;
  void error(string err_msg) const;         // fatal: prints message and exit(1)
  string embedError(string err_msg) const;  // returns generated runtime-error C++ code
  void warning(string err_msg) const;       // non-fatal diagnostic
  string toString() const;                  // "file:line"

private:
  // Private Methods
  const string& getFileName() const { return m_file_name; }
  int getLineNumber() const { return m_line_number; }
  string getLineNumberStr() const { return m_line_number_str; }

  // Private copy constructor and assignment operator
  //Location(const Location& obj);
  //Location& operator=(const Location& obj);

  // Data Members (m_ prefix)
  string m_file_name;
  int m_line_number;
  string m_line_number_str;  // cached string form of m_line_number
};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Location& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Location& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //LOCATION_H
diff --git a/src/mem/slicc/ast/MachineAST.cc b/src/mem/slicc/ast/MachineAST.cc
new file mode 100644
index 000000000..3ee97bc38
--- /dev/null
+++ b/src/mem/slicc/ast/MachineAST.cc
@@ -0,0 +1,96 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MachineAST.C
+ *
+ * Description: See MachineAST.h
+ *
+ * $Id: MachineAST.C,v 3.1 2003/03/17 01:54:25 xu Exp $
+ *
+ */
+
+#include "MachineAST.hh"
+#include "SymbolTable.hh"
+
+MachineAST::MachineAST(string* ident_ptr,
+ PairListAST* pairs_ptr,
+ DeclListAST* decl_list_ptr)
+
+ : DeclAST(pairs_ptr)
+{
+ m_ident_ptr = ident_ptr;
+ m_pairs_ptr = pairs_ptr;
+ m_decl_list_ptr = decl_list_ptr;
+}
+
// Destructor: releases the owned AST fragments.
// NOTE(review): m_pairs_ptr was also handed to the DeclAST base-class
// constructor; confirm DeclAST does not delete it as well (double-free risk).
MachineAST::~MachineAST()
{
  delete m_ident_ptr;
  delete m_pairs_ptr;
  delete m_decl_list_ptr;
}
+
+void MachineAST::generate()
+{
+ StateMachine* machine_ptr;
+
+ // Make a new frame
+ g_sym_table.pushFrame();
+
+ // Create a new machine
+ machine_ptr = new StateMachine(*m_ident_ptr, getLocation(), getPairs());
+ g_sym_table.newCurrentMachine(machine_ptr);
+
+ // Generate code for all the internal decls
+ m_decl_list_ptr->generate();
+
+ // Build the transition table
+ machine_ptr->buildTable();
+
+ // Pop the frame
+ g_sym_table.popFrame();
+}
+
+void MachineAST::findMachines()
+{
+ // Add to MachineType enumeration
+ Type* type_ptr = g_sym_table.getType("MachineType");
+ if (!type_ptr->enumAdd(*m_ident_ptr, m_pairs_ptr->getPairs())) {
+ error("Duplicate machine name: " + type_ptr->toString() + ":" + *m_ident_ptr);
+ }
+
+ // Generate code for all the internal decls
+ m_decl_list_ptr->findMachines();
+}
+
+void MachineAST::print(ostream& out) const
+{
+ out << "[Machine: " << *m_ident_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/MachineAST.hh b/src/mem/slicc/ast/MachineAST.hh
new file mode 100644
index 000000000..c05bcc5f2
--- /dev/null
+++ b/src/mem/slicc/ast/MachineAST.hh
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MachineAST.h
+ *
+ * Description:
+ *
+ * $Id: MachineAST.h,v 3.2 2003/07/10 18:08:06 milo Exp $
+ *
+ */
+
+#ifndef MachineAST_H
+#define MachineAST_H
+
+#include "slicc_global.hh"
+#include "DeclAST.hh"
+#include "DeclListAST.hh"
+#include "StateMachine.hh"
+
// AST node for a "machine(...) { ... }" declaration.  findMachines()
// registers the machine name in the MachineType enumeration; generate()
// builds the StateMachine symbol and its transition table.
class MachineAST : public DeclAST {
public:
  // Constructors
  MachineAST(string* ident_ptr,           // machine name (owned)
             PairListAST* pairs_ptr,      // attributes (also passed to DeclAST)
             DeclListAST* decl_list_ptr); // nested declarations (owned)

  // Destructor
  ~MachineAST();

  // Public Methods
  void print(ostream& out) const;  // debug dump
  void generate();                 // build the StateMachine symbol
  void findMachines();             // register name in MachineType enum
private:
  // Private Methods

  // Private copy constructor and assignment operator
  MachineAST(const MachineAST& obj);
  MachineAST& operator=(const MachineAST& obj);

  // Data Members (m_ prefix)
  string* m_ident_ptr;
  DeclListAST* m_decl_list_ptr;
  PairListAST* m_pairs_ptr;
};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MachineAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const MachineAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //MachineAST_H
diff --git a/src/mem/slicc/ast/MemberExprAST.cc b/src/mem/slicc/ast/MemberExprAST.cc
new file mode 100644
index 000000000..c4e9ce24b
--- /dev/null
+++ b/src/mem/slicc/ast/MemberExprAST.cc
@@ -0,0 +1,72 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MemberExprAST.C
+ *
+ * Description: See MemberExprAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "MemberExprAST.hh"
+
// Construct a member-access node ("expr.field").
// Takes ownership of both pointers; they are released in the destructor.
MemberExprAST::MemberExprAST(ExprAST* expr_ast_ptr, string* field_ptr)
  : ExprAST()
{
  m_expr_ast_ptr = expr_ast_ptr;
  m_field_ptr = field_ptr;
}
+
// Destructor: releases the owned subexpression and field-name string.
MemberExprAST::~MemberExprAST()
{
  delete m_expr_ast_ptr;
  delete m_field_ptr;
}
+
+Type* MemberExprAST::generate(string& code) const
+{
+ code += "(";
+ Type* type_ptr = m_expr_ast_ptr->generate(code);
+ code += ").m_" + (*m_field_ptr);
+
+ // Verify that this is a valid field name for this type
+ if (!type_ptr->dataMemberExist(*m_field_ptr)) {
+ error("Invalid object field: Type '" + type_ptr->toString() + "' does not have data member " + *m_field_ptr);
+ }
+
+ // Return the type of the field
+ return type_ptr->dataMemberType(*m_field_ptr);
+}
+
+void MemberExprAST::print(ostream& out) const
+{
+ out << "[MemberExprAST: " << *m_expr_ast_ptr << "." << *m_field_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/MemberExprAST.hh b/src/mem/slicc/ast/MemberExprAST.hh
new file mode 100644
index 000000000..c5fd3d0dd
--- /dev/null
+++ b/src/mem/slicc/ast/MemberExprAST.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MemberExprAST.h
+ *
+ * Description:
+ *
+ * $Id: MemberExprAST.h,v 3.1 2001/12/12 01:00:21 milo Exp $
+ *
+ */
+
+#ifndef MemberExprAST_H
+#define MemberExprAST_H
+
+#include "slicc_global.hh"
+#include "ExprAST.hh"
+
+
+// AST node for a member-access expression ("obj.field") in SLICC.
+// generate() emits the corresponding C++ accessor text and performs the
+// field-existence check against the receiver's type.
+class MemberExprAST : public ExprAST {
+public:
+  // Constructors
+  // Takes ownership of expr_ast_ptr (the receiver) and field_ptr.
+  MemberExprAST(ExprAST* expr_ast_ptr, string* field_ptr);
+
+  // Destructor
+  ~MemberExprAST();
+
+  // Public Methods
+  Type* generate(string& code) const;  // emits C++; returns the field's type
+  void print(ostream& out) const;      // debug dump
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  MemberExprAST(const MemberExprAST& obj);
+  MemberExprAST& operator=(const MemberExprAST& obj);
+
+  // Data Members (m_ prefix)
+  ExprAST* m_expr_ast_ptr;  // owned receiver expression
+  string* m_field_ptr;      // owned field name
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MemberExprAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const MemberExprAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //MemberExprAST_H
diff --git a/src/mem/slicc/ast/MethodCallExprAST.cc b/src/mem/slicc/ast/MethodCallExprAST.cc
new file mode 100644
index 000000000..fe6505a1a
--- /dev/null
+++ b/src/mem/slicc/ast/MethodCallExprAST.cc
@@ -0,0 +1,150 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MethodCallExprAST.C
+ *
+ * Description: See MethodCallExprAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "MethodCallExprAST.hh"
+
+// Member-method call form: "obj.method(args)".  Takes ownership of all
+// pointer arguments.  m_type_ptr stays NULL in this form.
+MethodCallExprAST::MethodCallExprAST(ExprAST* obj_expr_ptr,
+                                     string* proc_name_ptr,
+                                     Vector<ExprAST*>* expr_vec_ptr)
+  : ExprAST()
+{
+  m_obj_expr_ptr = obj_expr_ptr;
+  m_type_ptr = NULL;
+  m_proc_name_ptr = proc_name_ptr;
+  m_expr_vec_ptr = expr_vec_ptr;
+}
+
+// Class-scoped call form: "Type::method(args)".  Takes ownership of all
+// pointer arguments.  m_obj_expr_ptr stays NULL in this form.
+MethodCallExprAST::MethodCallExprAST(TypeAST* type_ptr,
+                                     string* proc_name_ptr,
+                                     Vector<ExprAST*>* expr_vec_ptr)
+  : ExprAST()
+{
+  m_obj_expr_ptr = NULL;
+  m_type_ptr = type_ptr;
+  m_proc_name_ptr = proc_name_ptr;
+  m_expr_vec_ptr = expr_vec_ptr;
+}
+
+MethodCallExprAST::~MethodCallExprAST()
+{
+  // Exactly one of m_obj_expr_ptr / m_type_ptr is non-NULL, depending on
+  // which constructor ran; deleting a NULL pointer is a no-op.
+  delete m_obj_expr_ptr;
+  delete m_type_ptr;
+  delete m_proc_name_ptr;
+  // The vector owns its element ASTs, so free each before the vector itself.
+  int size = m_expr_vec_ptr->size();
+  for(int i=0; i<size; i++) {
+    delete (*m_expr_vec_ptr)[i];
+  }
+  delete m_expr_vec_ptr;
+}
+
+// Emit the C++ for this call into 'code' and type-check it: the method
+// must exist on the receiver's type, the argument count must match, and
+// each argument's type must exactly equal the declared parameter type.
+// Returns the method's declared return type.
+Type* MethodCallExprAST::generate(string& code) const
+{
+  Type* obj_type_ptr = NULL;
+
+  if(m_obj_expr_ptr) {
+    // member method call
+    code += "((";
+    obj_type_ptr = m_obj_expr_ptr->generate(code);
+
+    code += ").";
+  } else if (m_type_ptr) {
+    // class method call
+    code += "(" + m_type_ptr->toString() + "::";
+    obj_type_ptr = m_type_ptr->lookupType();
+  } else {
+    // impossible
+    assert(0);
+  }
+
+  Vector <Type*> paramTypes;
+
+  // generate code
+  // Arguments are emitted (and their types collected) in source order.
+  int actual_size = m_expr_vec_ptr->size();
+  code += (*m_proc_name_ptr) + "(";
+  for(int i=0; i<actual_size; i++) {
+    if (i != 0) {
+      code += ", ";
+    }
+    // Check the types of the parameter
+    Type* actual_type_ptr = (*m_expr_vec_ptr)[i]->generate(code);
+    paramTypes.insertAtBottom(actual_type_ptr);
+  }
+  code += "))";
+
+  // The lookup key combines the method name with the argument types
+  // (overload resolution by exact type match).
+  string methodId = obj_type_ptr->methodId(*m_proc_name_ptr, paramTypes);
+
+  // Verify that this is a method of the object
+  if (!obj_type_ptr->methodExist(methodId)) {
+    error("Invalid method call: Type '" + obj_type_ptr->toString() + "' does not have a method '" + methodId + "'");
+  }
+
+  int expected_size = obj_type_ptr->methodParamType(methodId).size();
+  if (actual_size != expected_size) {
+    // Right number of parameters
+    ostringstream err;
+    err << "Wrong number of parameters for function name: '" << *m_proc_name_ptr << "'";
+    err << ", expected: ";
+    err << expected_size;
+    err << ", actual: ";
+    err << actual_size;
+    error(err.str());
+  }
+
+  for(int i=0; i<actual_size; i++) {
+    // Check the types of the parameter
+    Type* actual_type_ptr = paramTypes[i];
+    Type* expected_type_ptr = obj_type_ptr->methodParamType(methodId)[i];
+    if (actual_type_ptr != expected_type_ptr) {
+      (*m_expr_vec_ptr)[i]->error("Type mismatch: expected: " + expected_type_ptr->toString() +
+                                  " actual: " + actual_type_ptr->toString());
+    }
+  }
+
+  // Return the return type of the method
+  return obj_type_ptr->methodReturnType(methodId);
+}
+
+// Contributes no entries to the resource list.  NOTE(review): the body is
+// empty and argument sub-expressions are not traversed here -- confirm
+// this is intentional rather than an omission.
+void MethodCallExprAST::findResources(Map<Var*, string>& resource_list) const
+{
+
+}
+
+// Debug dump.  Guards m_obj_expr_ptr before dereferencing: it is NULL
+// when this node was built with the class-scoped (TypeAST*) constructor,
+// and the previous unconditional "*m_obj_expr_ptr" was a NULL-pointer
+// dereference (undefined behavior) for that form.
+void MethodCallExprAST::print(ostream& out) const
+{
+  out << "[MethodCallExpr: " << *m_proc_name_ptr;
+  if (m_obj_expr_ptr != NULL) {
+    out << *m_obj_expr_ptr;
+  }
+  out << " " << *m_expr_vec_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/MethodCallExprAST.hh b/src/mem/slicc/ast/MethodCallExprAST.hh
new file mode 100644
index 000000000..22766e922
--- /dev/null
+++ b/src/mem/slicc/ast/MethodCallExprAST.hh
@@ -0,0 +1,93 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MethodCallExprAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef MethodCallExprAST_H
+#define MethodCallExprAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+#include "ExprAST.hh"
+#include "TypeAST.hh"
+
+// AST node for a method invocation, either on an object expression
+// ("obj.method(args)") or class-scoped ("Type::method(args)").  The two
+// constructors select the form; the unused receiver/type member is NULL.
+class MethodCallExprAST : public ExprAST {
+public:
+  // Constructors
+  // Member call: obj_expr_ptr is the receiver expression.  (Parameter
+  // renamed from "m_obj_expr_ptr" -- the m_ prefix is reserved for data
+  // members by this file's own convention; declaration-only rename, so
+  // callers are unaffected.)
+  MethodCallExprAST(ExprAST* obj_expr_ptr,
+                    string* proc_name_ptr,
+                    Vector<ExprAST*>* expr_vec_ptr);
+
+  // Class-scoped call: type_ptr names the type whose method is invoked.
+  MethodCallExprAST(TypeAST* type_ptr,
+                    string* proc_name_ptr,
+                    Vector<ExprAST*>* expr_vec_ptr);
+
+  // Destructor
+  ~MethodCallExprAST();
+
+  // Public Methods
+  Type* generate(string& code) const;  // emits C++; returns the method's return type
+  void findResources(Map<Var*, string>& resource_list) const;
+  void print(ostream& out) const;      // debug dump
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  MethodCallExprAST(const MethodCallExprAST& obj);
+  MethodCallExprAST& operator=(const MethodCallExprAST& obj);
+
+  // Data Members (m_ prefix)
+  ExprAST* m_obj_expr_ptr;          // owned; NULL for class-scoped calls
+  TypeAST* m_type_ptr;              // owned; NULL for member calls
+  string* m_proc_name_ptr;          // owned method name
+  Vector<ExprAST*>* m_expr_vec_ptr; // owned argument expressions
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MethodCallExprAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const MethodCallExprAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif // MethodCallExprAST_H
diff --git a/src/mem/slicc/ast/ObjDeclAST.cc b/src/mem/slicc/ast/ObjDeclAST.cc
new file mode 100644
index 000000000..106c18cac
--- /dev/null
+++ b/src/mem/slicc/ast/ObjDeclAST.cc
@@ -0,0 +1,148 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ObjDeclAST.C
+ *
+ * Description: See ObjDeclAST.h
+ *
+ * $Id: ObjDeclAST.C,v 3.13 2004/06/24 15:56:14 beckmann Exp $
+ *
+ */
+
+#include "ObjDeclAST.hh"
+#include "SymbolTable.hh"
+#include "main.hh"
+
+// Build an object-declaration node.  Takes ownership of type_ptr and
+// ident_ptr; the attribute pairs are handed to the DeclAST base.
+ObjDeclAST::ObjDeclAST(TypeAST* type_ptr,
+                       string* ident_ptr,
+                       PairListAST* pairs_ptr)
+  : DeclAST(pairs_ptr)
+{
+  m_type_ptr = type_ptr;
+  m_ident_ptr = ident_ptr;
+}
+
+ObjDeclAST::~ObjDeclAST()
+{
+  // Owns the type AST and the identifier string.
+  delete m_type_ptr;
+  delete m_ident_ptr;
+}
+
+// Register this object declaration in the symbol table.  Validates the
+// declaration's attribute pairs, then synthesizes the C++ access
+// expression (c_code) the generated protocol code will use wherever the
+// identifier appears.
+void ObjDeclAST::generate()
+{
+
+  // Set for objects reached through the chip pointer, so other machines
+  // can also access them (see newMachComponentSym below).
+  bool machineComponentSym = false;
+
+  getPairs().add("chip_object", "yes");
+
+  string c_code;
+
+
+  if (getPairs().exist("hack")) {
+    warning("'hack=' is now deprecated");
+  }
+
+  // Network queues must name the virtual network they use.
+  if (getPairs().exist("network")) {
+    if (!getPairs().exist("virtual_network")) {
+      error("Network queues require a 'virtual_network' attribute.");
+    }
+  }
+
+  // Buffer objects must declare ordering explicitly.
+  Type* type_ptr = m_type_ptr->lookupType();
+  if (type_ptr->isBuffer()) {
+    if (!getPairs().exist("ordered")) {
+      error("Buffer object declarations require an 'ordered' attribute.");
+    }
+  }
+
+  // 'ordered' and 'random', when present, must be boolean literals.
+  if (getPairs().exist("ordered")) {
+    string value = getPairs().lookup("ordered");
+    if (value != "true" && value != "false") {
+      error("The 'ordered' attribute must be 'true' or 'false'.");
+    }
+  }
+
+  if (getPairs().exist("random")) {
+    string value = getPairs().lookup("random");
+    if (value != "true" && value != "false") {
+      error("The 'random' attribute must be 'true' or 'false'.");
+    }
+  }
+
+  // Prefix generated member names with the enclosing state machine, if any.
+  string machine;
+  if (g_sym_table.getStateMachine() != NULL) {
+    machine = g_sym_table.getStateMachine()->getIdent() + "_";
+  }
+
+  // FIXME : should all use accessors here to avoid public member variables
+  // Special-case identifiers map to fixed expressions; everything else is
+  // reached through the chip object, per-version unless 'no_vector'.
+  if (*m_ident_ptr == "id") {
+    c_code = "m_chip_ptr->getID()";
+  } else if (*m_ident_ptr == "version") {
+    c_code = "m_version";
+  } else if (*m_ident_ptr == "machineID") {
+    c_code = "m_machineID";
+  } else if (*m_ident_ptr == "sequencer") {
+    c_code = "*(dynamic_cast<"+m_type_ptr->toString()+"*>(m_chip_ptr->getSequencer(m_version)))";
+    machineComponentSym = true;
+  } /*else if (*m_ident_ptr == "xfdr_record_mgr") {
+    c_code = "*(dynamic_cast<"+m_type_ptr->toString()+"*>(m_chip_ptr->getXfdrManager(m_version)))";
+    machineComponentSym = true;
+  } */else if (// getPairs().exist("network") || (m_type_ptr->lookupType()->existPair("cache"))
+// || (m_type_ptr->lookupType()->existPair("tbe")) ||
+// (m_type_ptr->lookupType()->existPair("newtbe")) ||
+// (m_type_ptr->lookupType()->existPair("timer")) ||
+// (m_type_ptr->lookupType()->existPair("dir")) ||
+// (m_type_ptr->lookupType()->existPair("persistent")) ||
+// (m_type_ptr->lookupType()->existPair("filter")) ||
+// (getPairs().exist("trigger_queue"))
+    getPairs().exist("no_vector")) {
+    c_code = "(*(m_chip_ptr->m_" + machine + *m_ident_ptr + "_ptr))";
+    machineComponentSym = true;
+  } else {
+    c_code = "(*(m_chip_ptr->m_" + machine + *m_ident_ptr + "_vec[m_version]))";
+    machineComponentSym = true;
+  }
+
+  Var* v = new Var(*m_ident_ptr, getLocation(), type_ptr, c_code,
+                   getPairs(), g_sym_table.getStateMachine());
+
+  g_sym_table.newSym(v);
+
+  // used to cheat-- that is, access components in other machines
+  if (machineComponentSym) {
+    g_sym_table.newMachComponentSym(v);
+  }
+
+}
+
+// Debug dump: "[ObjDecl: <ident>]".
+void ObjDeclAST::print(ostream& out) const
+{
+  out << "[ObjDecl: " << *m_ident_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/ObjDeclAST.hh b/src/mem/slicc/ast/ObjDeclAST.hh
new file mode 100644
index 000000000..2d4ac9714
--- /dev/null
+++ b/src/mem/slicc/ast/ObjDeclAST.hh
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ObjDeclAST.h
+ *
+ * Description:
+ *
+ * $Id: ObjDeclAST.h,v 3.2 2003/07/10 18:08:06 milo Exp $
+ *
+ */
+
+#ifndef ObjDeclAST_H
+#define ObjDeclAST_H
+
+#include "slicc_global.hh"
+#include "DeclAST.hh"
+#include "TypeFieldAST.hh"
+#include "TypeAST.hh"
+
+// AST node for a SLICC object declaration ("Type ident, pairs;").
+// generate() validates the attribute pairs and registers the identifier
+// in the global symbol table with a synthesized C++ access expression.
+class ObjDeclAST : public DeclAST {
+public:
+  // Constructors
+  // Takes ownership of type_ptr and ident_ptr; pairs_ptr goes to DeclAST.
+  ObjDeclAST(TypeAST* type_ptr,
+             string* ident_ptr,
+             PairListAST* pairs_ptr);
+
+  // Destructor
+  ~ObjDeclAST();
+
+  // Public Methods
+  void generate();                 // validate pairs, enter symbol table
+  void print(ostream& out) const;  // debug dump
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  ObjDeclAST(const ObjDeclAST& obj);
+  ObjDeclAST& operator=(const ObjDeclAST& obj);
+
+  // Data Members (m_ prefix)
+  string* m_ident_ptr;  // owned identifier
+  TypeAST* m_type_ptr;  // owned declared type
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const ObjDeclAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const ObjDeclAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //ObjDeclAST_H
diff --git a/src/mem/slicc/ast/OutPortDeclAST.cc b/src/mem/slicc/ast/OutPortDeclAST.cc
new file mode 100644
index 000000000..1aa0363aa
--- /dev/null
+++ b/src/mem/slicc/ast/OutPortDeclAST.cc
@@ -0,0 +1,79 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * OutPortDeclAST.C
+ *
+ * Description: See OutPortDeclAST.h
+ *
+ * $Id: OutPortDeclAST.C,v 3.3 2004/02/02 22:37:51 milo Exp $
+ *
+ */
+
+#include "OutPortDeclAST.hh"
+#include "SymbolTable.hh"
+
+// Build an out-port declaration node.  Takes ownership of all pointer
+// arguments and allocates a fixed "OutPort" queue type for the port.
+OutPortDeclAST::OutPortDeclAST(string* ident_ptr,
+                               TypeAST* msg_type_ptr,
+                               ExprAST* var_expr_ptr,
+                               PairListAST* pairs_ptr)
+  : DeclAST(pairs_ptr)
+{
+  m_ident_ptr = ident_ptr;
+  m_msg_type_ptr = msg_type_ptr;
+  m_var_expr_ptr = var_expr_ptr;
+  m_queue_type_ptr = new TypeAST(new string("OutPort"));
+}
+
+OutPortDeclAST::~OutPortDeclAST()
+{
+  // Owns everything passed to (or built by) the constructor.
+  delete m_ident_ptr;
+  delete m_msg_type_ptr;
+  delete m_var_expr_ptr;
+  delete m_queue_type_ptr;
+}
+
+// Check that the referenced queue variable has the 'outport' attribute,
+// then register this port identifier (of the fixed "OutPort" type, set
+// in the constructor) in the global symbol table.
+void OutPortDeclAST::generate()
+{
+  string code;
+  Type* queue_type_ptr = m_var_expr_ptr->generate(code);
+  if (!queue_type_ptr->isOutPort()) {
+    error("Outport queues must be of a type that has the 'outport' attribute. The type '" +
+          queue_type_ptr->toString() + "' does not have this attribute.");
+  }
+
+  // Note: m_msg_type_ptr is not validated here.
+  Type* type_ptr = m_queue_type_ptr->lookupType();
+  g_sym_table.newSym(new Var(*m_ident_ptr, getLocation(), type_ptr, code, getPairs()));
+}
+
+
+// Debug dump: "[OutPortDecl: <ident>]".
+void OutPortDeclAST::print(ostream& out) const
+{
+  out << "[OutPortDecl: " << *m_ident_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/OutPortDeclAST.hh b/src/mem/slicc/ast/OutPortDeclAST.hh
new file mode 100644
index 000000000..3fcd755ef
--- /dev/null
+++ b/src/mem/slicc/ast/OutPortDeclAST.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * OutPortDeclAST.h
+ *
+ * Description:
+ *
+ * $Id: OutPortDeclAST.h,v 3.2 2003/07/10 18:08:06 milo Exp $
+ *
+ */
+
+#ifndef OutPortDeclAST_H
+#define OutPortDeclAST_H
+
+#include "slicc_global.hh"
+#include "DeclAST.hh"
+#include "StatementListAST.hh"
+#include "VarExprAST.hh"
+
+// AST node for a SLICC out-port declaration: binds an identifier to a
+// message queue variable (which must carry the 'outport' attribute).
+class OutPortDeclAST : public DeclAST {
+public:
+  // Constructors
+  // Takes ownership of all pointer arguments.
+  OutPortDeclAST(string* ident_ptr,
+                 TypeAST* msg_type_ptr,
+                 ExprAST* var_expr_ptr,
+                 PairListAST* pairs_ptr);
+
+  // Destructor
+  ~OutPortDeclAST();
+
+  // Public Methods
+  void generate();                 // validate queue type, enter symbol table
+  void print(ostream& out) const;  // debug dump
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  OutPortDeclAST(const OutPortDeclAST& obj);
+  OutPortDeclAST& operator=(const OutPortDeclAST& obj);
+
+  // Data Members (m_ prefix)
+  string* m_ident_ptr;        // owned port name
+  TypeAST* m_msg_type_ptr;    // owned message type
+  TypeAST* m_queue_type_ptr;  // owned; always the "OutPort" type
+  ExprAST* m_var_expr_ptr;    // owned queue-variable expression
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const OutPortDeclAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const OutPortDeclAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //OutPortDeclAST_H
diff --git a/src/mem/slicc/ast/PairAST.cc b/src/mem/slicc/ast/PairAST.cc
new file mode 100644
index 000000000..da71bdd01
--- /dev/null
+++ b/src/mem/slicc/ast/PairAST.cc
@@ -0,0 +1,72 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PairAST.C
+ *
+ * Description: See PairAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "PairAST.hh"
+
+// Build a key=value attribute pair.  Takes ownership of both strings.
+PairAST::PairAST(string* key_ptr, string* value_ptr)
+  : AST()
+{
+  m_key_ptr = key_ptr;
+  m_value_ptr = value_ptr;
+}
+
+// Convenience overload: copies the key, takes ownership of the value.
+PairAST::PairAST(string key, string* value_ptr)
+  : AST()
+{
+  m_key_ptr = new string(key);
+  m_value_ptr = value_ptr;
+}
+
+// Convenience overload: copies both key and value.
+PairAST::PairAST(string key, string value)
+  : AST()
+{
+  m_key_ptr = new string(key);
+  m_value_ptr = new string(value);
+}
+
+PairAST::~PairAST()
+{
+  // Owns both strings regardless of which constructor ran.
+  delete m_key_ptr;
+  delete m_value_ptr;
+}
+
+// Debug dump: "[key=value]" followed by a newline.
+void PairAST::print(ostream& out) const
+{
+  out << "[" << *m_key_ptr << "=" << *m_value_ptr << "]" << endl;
+}
+
diff --git a/src/mem/slicc/ast/PairAST.hh b/src/mem/slicc/ast/PairAST.hh
new file mode 100644
index 000000000..4cc297dc1
--- /dev/null
+++ b/src/mem/slicc/ast/PairAST.hh
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PairAST.h
+ *
+ * Description:
+ *
+ * $Id: PairAST.h,v 3.1 2001/12/12 01:00:24 milo Exp $
+ *
+ */
+
+#ifndef PAIRAST_H
+#define PAIRAST_H
+
+#include "slicc_global.hh"
+#include "AST.hh"
+
+// AST node for a single key=value attribute pair attached to a SLICC
+// declaration.  Both strings are owned by this node.
+class PairAST : public AST {
+public:
+  // Constructors
+  PairAST(string* key_ptr, string* value_ptr);  // takes ownership of both
+  PairAST(string key, string* value_ptr);       // copies key, owns value
+  PairAST(string key, string value);            // copies both
+
+  // Destructor
+  ~PairAST();
+
+  // Public Methods
+  string key() const { return *m_key_ptr; }      // copy of the key
+  string value() const { return *m_value_ptr; }  // copy of the value
+
+  virtual void print(ostream& out) const;        // debug dump
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  // PairAST(const PairAST& obj);
+  // PairAST& operator=(const PairAST& obj);
+
+  // Data Members (m_ prefix)
+  string* m_key_ptr;    // owned key
+  string* m_value_ptr;  // owned value
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const PairAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const PairAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //PAIRAST_H
diff --git a/src/mem/slicc/ast/PairListAST.cc b/src/mem/slicc/ast/PairListAST.cc
new file mode 100644
index 000000000..9af8b3898
--- /dev/null
+++ b/src/mem/slicc/ast/PairListAST.cc
@@ -0,0 +1,49 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PairListAST.C
+ *
+ * Description: See PairListAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "PairListAST.hh"
+
+// Copies the pair's key and value into this node's inherited pair map
+// (getPairs() returns a mutable reference to it).
+// NOTE(review): pair_ptr itself is neither stored nor freed here —
+// presumably the caller retains ownership; confirm at the call sites.
+void PairListAST::addPair(PairAST* pair_ptr)
+{
+ getPairs().add(pair_ptr->key(), pair_ptr->value());
+}
+
+// Debug dump of the accumulated pair map.
+void PairListAST::print(ostream& out) const
+{
+ out << "[PairListAST] " << getPairs();
+}
diff --git a/src/mem/slicc/ast/PairListAST.hh b/src/mem/slicc/ast/PairListAST.hh
new file mode 100644
index 000000000..9d2700277
--- /dev/null
+++ b/src/mem/slicc/ast/PairListAST.hh
@@ -0,0 +1,82 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PairListAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef PairListAST_H
+#define PairListAST_H
+
+#include "slicc_global.hh"
+#include "AST.hh"
+#include "PairAST.hh"
+
+
+// Collects the key/value pairs parsed from an annotation list;
+// addPair() folds each PairAST into the pair map inherited from AST.
+class PairListAST : public AST {
+public:
+ // Constructors
+ PairListAST() : AST() {}
+
+ // Destructor
+ //~PairListAST();
+
+ // Public Methods
+ void addPair(PairAST* pair_ptr);
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ PairListAST(const PairListAST& obj);
+ PairListAST& operator=(const PairListAST& obj);
+
+ // Data Members (m_ prefix)
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const PairListAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const PairListAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //PairListAST_H
diff --git a/src/mem/slicc/ast/PeekStatementAST.cc b/src/mem/slicc/ast/PeekStatementAST.cc
new file mode 100644
index 000000000..627e85ae8
--- /dev/null
+++ b/src/mem/slicc/ast/PeekStatementAST.cc
@@ -0,0 +1,115 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PeekStatementAST.C
+ *
+ * Description: See PeekStatementAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "PeekStatementAST.hh"
+#include "SymbolTable.hh"
+#include "StatementListAST.hh"
+#include "TypeAST.hh"
+#include "VarExprAST.hh"
+
+// Constructs a peek statement: peek(<queue>, <MsgType>) { <body> }.
+// "method" names the queue accessor the generated code calls to fetch
+// the message (supplied by the parser).  Takes ownership of the three
+// AST pointers; all are freed in the destructor.
+PeekStatementAST::PeekStatementAST(VarExprAST* queue_name_ptr,
+ TypeAST* type_ptr,
+ StatementListAST* statementlist_ptr,
+ string method)
+ : StatementAST()
+{
+ m_queue_name_ptr = queue_name_ptr;
+ m_type_ptr = type_ptr;
+ m_statementlist_ptr = statementlist_ptr;
+ m_method = method;
+}
+
+PeekStatementAST::~PeekStatementAST()
+{
+ delete m_queue_name_ptr;
+ delete m_type_ptr;
+ delete m_statementlist_ptr;
+}
+
+// Emits the C++ for a peek block.  The generated shape is roughly:
+//   {
+//     const MsgType* in_msg_ptr;
+//     in_msg_ptr = dynamic_cast<const MsgType*>((queue).<method>());
+//     assert(in_msg_ptr != NULL);
+//     <body, where the symbol "in_msg" resolves to (*in_msg_ptr)>
+//   }
+// A fresh symbol-table frame scopes "in_msg" to the block.
+void PeekStatementAST::generate(string& code, Type* return_type_ptr) const
+{
+ code += indent_str() + "{\n"; // Start scope
+ inc_indent();
+ g_sym_table.pushFrame();
+
+ Type* msg_type_ptr = m_type_ptr->lookupType();
+
+ // Add new local var to symbol table; its generated code expands to
+ // (*in_msg_ptr), so body statements can say "in_msg" directly.
+ g_sym_table.newSym(new Var("in_msg", getLocation(), msg_type_ptr, "(*in_msg_ptr)", getPairs()));
+
+ // Check the queue type: peek is only legal on an InPort
+ m_queue_name_ptr->assertType("InPort");
+
+ // Declare the new "in_msg_ptr" variable
+ code += indent_str() + "const " + msg_type_ptr->cIdent() + "* in_msg_ptr;\n"; // Declare message
+ // code += indent_str() + "in_msg_ptr = static_cast<const ";
+ // dynamic_cast (rather than the commented-out static_cast) so the
+ // generated assert below catches a message of an unexpected type.
+ code += indent_str() + "in_msg_ptr = dynamic_cast<const ";
+ code += msg_type_ptr->cIdent() + "*>(";
+ code += "(" + m_queue_name_ptr->getVar()->getCode() + ")";
+ code += ".";
+ code += m_method;
+ code += "());\n";
+
+ code += indent_str() + "assert(in_msg_ptr != NULL);\n"; // Check the cast result
+
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ // Declare the "in_buffer_rank" variable for resource-stall checking
+ code += indent_str() + "int in_buffer_rank = "; // Declare message
+ code += "(" + m_queue_name_ptr->getVar()->getCode() + ")";
+ code += ".getPriority();\n";
+ }
+
+ m_statementlist_ptr->generate(code, return_type_ptr); // The other statements
+ dec_indent();
+ g_sym_table.popFrame();
+ code += indent_str() + "}\n"; // End scope
+}
+
+// Resource accounting is delegated to the statements in the body.
+void PeekStatementAST::findResources(Map<Var*, string>& resource_list) const
+{
+ m_statementlist_ptr->findResources(resource_list);
+}
+
+// Debug dump: accessor method, queue, message type, and body.
+void PeekStatementAST::print(ostream& out) const
+{
+ out << "[PeekStatementAST: " << m_method
+ << " queue_name: " << *m_queue_name_ptr
+ << " type: " << m_type_ptr->toString()
+ << " " << *m_statementlist_ptr
+ << "]";
+}
diff --git a/src/mem/slicc/ast/PeekStatementAST.hh b/src/mem/slicc/ast/PeekStatementAST.hh
new file mode 100644
index 000000000..e51a0a6a4
--- /dev/null
+++ b/src/mem/slicc/ast/PeekStatementAST.hh
@@ -0,0 +1,91 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PeekStatementAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef PEEKSTATEMENTAST_H
+#define PEEKSTATEMENTAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+
+class StatementListAST;
+class TypeAST;
+class VarExprAST;
+
+// AST node for SLICC's "peek" statement.  Code generation produces a
+// scoped block that casts the message obtained from an InPort queue to
+// the requested message type and binds it to "in_msg" for the enclosed
+// statements (see PeekStatementAST.C).
+class PeekStatementAST : public StatementAST {
+public:
+ // Constructors
+ PeekStatementAST(VarExprAST* queue_name_ptr,
+ TypeAST* type_ptr,
+ StatementListAST* statementlist_ptr,
+ string method);
+ // Destructor
+ ~PeekStatementAST();
+
+ // Public Methods
+ void generate(string& code, Type* return_type_ptr) const;
+ void findResources(Map<Var*, string>& resource_list) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ PeekStatementAST(const PeekStatementAST& obj);
+ PeekStatementAST& operator=(const PeekStatementAST& obj);
+
+ // Data Members (m_ prefix) — all owned; freed in the destructor
+ VarExprAST* m_queue_name_ptr;
+ TypeAST* m_type_ptr;
+ StatementListAST* m_statementlist_ptr;
+ string m_method;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const PeekStatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const PeekStatementAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //PEEKSTATEMENTAST_H
diff --git a/src/mem/slicc/ast/ReturnStatementAST.cc b/src/mem/slicc/ast/ReturnStatementAST.cc
new file mode 100644
index 000000000..68e81b834
--- /dev/null
+++ b/src/mem/slicc/ast/ReturnStatementAST.cc
@@ -0,0 +1,79 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ReturnStatementAST.C
+ *
+ * Description: See ReturnStatementAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "ReturnStatementAST.hh"
+
+// Constructs a return statement wrapping the returned expression.
+// Takes ownership of expr_ptr (freed in the destructor).
+ReturnStatementAST::ReturnStatementAST(ExprAST* expr_ptr)
+ : StatementAST()
+{
+ m_expr_ptr = expr_ptr;
+}
+
+ReturnStatementAST::~ReturnStatementAST()
+{
+ delete m_expr_ptr;
+}
+
+// Emits "return <expr>;" and type-checks the expression against the
+// enclosing function's declared return type.
+void ReturnStatementAST::generate(string& code, Type* return_type_ptr) const
+{
+ code += indent_str();
+ code += "return ";
+ Type* actual_type_ptr = m_expr_ptr->generate(code);
+ code += ";\n";
+
+ // Is return valid here?  return_type_ptr is NULL when the enclosing
+ // construct has no return type.
+ if (return_type_ptr == NULL) {
+ error("Invalid 'return' statement");
+ }
+
+ // The return type must match the return_type_ptr
+ // NOTE(review): assumes error() above does not return; otherwise
+ // return_type_ptr would be dereferenced below while NULL — confirm
+ // error()'s behavior.
+ if (return_type_ptr != actual_type_ptr) {
+ m_expr_ptr->error("Return type miss-match, expected return type is '" + return_type_ptr->toString() +
+ "', actual is '" + actual_type_ptr->toString() + "'");
+ }
+}
+
+// Resource accounting is delegated to the returned expression.
+void ReturnStatementAST::findResources(Map<Var*, string>& resource_list) const
+{
+ m_expr_ptr->findResources(resource_list);
+}
+
+// Debug dump of the wrapped expression.
+void ReturnStatementAST::print(ostream& out) const
+{
+ out << "[ReturnStatementAST: " << *m_expr_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/ReturnStatementAST.hh b/src/mem/slicc/ast/ReturnStatementAST.hh
new file mode 100644
index 000000000..f8c4812cf
--- /dev/null
+++ b/src/mem/slicc/ast/ReturnStatementAST.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ReturnStatementAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef ReturnStatementAST_H
+#define ReturnStatementAST_H
+
+#include "slicc_global.hh"
+#include "StatementAST.hh"
+#include "ExprAST.hh"
+
+// AST node for SLICC's "return" statement.  Owns the returned
+// expression and type-checks it during code generation (see
+// ReturnStatementAST.C).
+class ReturnStatementAST : public StatementAST {
+public:
+ // Constructors
+ ReturnStatementAST(ExprAST* expr_ptr);
+
+ // Destructor
+ ~ReturnStatementAST();
+
+ // Public Methods
+ void generate(string& code, Type* return_type_ptr) const;
+ void findResources(Map<Var*, string>& resource_list) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ ReturnStatementAST(const ReturnStatementAST& obj);
+ ReturnStatementAST& operator=(const ReturnStatementAST& obj);
+
+ // Data Members (m_ prefix)
+ ExprAST* m_expr_ptr; // owned; freed in the destructor
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const ReturnStatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const ReturnStatementAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //ReturnStatementAST_H
diff --git a/src/mem/slicc/ast/StatementAST.cc b/src/mem/slicc/ast/StatementAST.cc
new file mode 100644
index 000000000..1f3c02474
--- /dev/null
+++ b/src/mem/slicc/ast/StatementAST.cc
@@ -0,0 +1,60 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * StatementAST.C
+ *
+ * Description: See StatementAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "StatementAST.hh"
+
+// Current indentation level for generated C++ code.  File-scope state
+// shared by all StatementAST generate() implementations; starts at 1 —
+// presumably because emitted statements sit inside a function body.
+static int indentation_depth = 1;
+
+// Push one indentation level (called when opening a generated scope).
+void inc_indent()
+{
+ indentation_depth++;
+}
+
+// Pop one indentation level (called when closing a generated scope).
+void dec_indent()
+{
+ indentation_depth--;
+}
+
+// Returns the whitespace prefix for the current indentation level
+// (one fixed-width unit per level).
+string indent_str()
+{
+ string temp;
+ for(int i=0; i<indentation_depth; i++) {
+ temp += " ";
+ }
+ return temp;
+}
diff --git a/src/mem/slicc/ast/StatementAST.hh b/src/mem/slicc/ast/StatementAST.hh
new file mode 100644
index 000000000..16a1ecafe
--- /dev/null
+++ b/src/mem/slicc/ast/StatementAST.hh
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * StatementAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef STATEMENTAST_H
+#define STATEMENTAST_H
+
+#include "slicc_global.hh"
+#include "AST.hh"
+
+// Global indentation helpers used while emitting generated C++ code
+// (state and implementations live in StatementAST.C).
+void inc_indent();
+void dec_indent();
+string indent_str();
+
+// Abstract base class for all SLICC statement AST nodes.  Subclasses
+// emit C++ into "code" via generate(); findResources() lets a statement
+// report the resources it consumes (default: none).
+// NOTE(review): statements are deleted through StatementAST* (see
+// StatementListAST's destructor) but no virtual destructor is declared
+// here — presumably the AST base class provides one; confirm in AST.hh.
+class StatementAST : public AST {
+public:
+ // Constructors
+ StatementAST() : AST() {}
+ StatementAST(Map<string, string> pairs) : AST(pairs) {}
+
+ // Destructor
+ //~StatementAST();
+
+ // Public Methods
+ virtual void generate(string& code, Type* return_type_ptr) const = 0;
+ // Default: a statement consumes no resources.
+ virtual void findResources(Map<Var*, string>& resource_list) const { }
+
+ //void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ //StatementAST(const StatementAST& obj);
+ //StatementAST& operator=(const StatementAST& obj);
+
+ // Data Members (m_ prefix)
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StatementAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const StatementAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //STATEMENTAST_H
diff --git a/src/mem/slicc/ast/StatementListAST.cc b/src/mem/slicc/ast/StatementListAST.cc
new file mode 100644
index 000000000..f70d5b1fa
--- /dev/null
+++ b/src/mem/slicc/ast/StatementListAST.cc
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * StatementListAST.C
+ *
+ * Description: See StatementListAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "StatementListAST.hh"
+
+// Wraps an existing vector of statements.  Takes ownership of the
+// vector and of every statement in it.
+StatementListAST::StatementListAST(Vector<StatementAST*>* vec_ptr)
+ : AST()
+{
+ assert(vec_ptr != NULL);
+ m_vec_ptr = vec_ptr;
+}
+
+// Singleton constructor.
+StatementListAST::StatementListAST(StatementAST* statement_ptr)
+ : AST()
+{
+ assert(statement_ptr != NULL);
+ m_vec_ptr = new Vector<StatementAST*>;
+ m_vec_ptr->insertAtTop(statement_ptr);
+}
+
+// Frees each owned statement, then the vector itself.
+// NOTE(review): deletes derived statements through StatementAST*; this
+// relies on the AST/StatementAST hierarchy having a virtual destructor
+// (not visible here) — confirm.
+StatementListAST::~StatementListAST()
+{
+ int size = m_vec_ptr->size();
+ for(int i=0; i<size; i++) {
+ delete (*m_vec_ptr)[i];
+ }
+ delete m_vec_ptr;
+}
+
+// Emits each statement, in list order, into "code".
+void StatementListAST::generate(string& code, Type* return_type_ptr) const
+{
+ int size = m_vec_ptr->size();
+ for(int i=0; i<size; i++) {
+ (*m_vec_ptr)[i]->generate(code, return_type_ptr);
+ }
+}
+
+// Accumulates the resources required by every statement in the list.
+void StatementListAST::findResources(Map<Var*, string>& resource_list) const
+{
+ int size = m_vec_ptr->size();
+ for(int i=0; i<size; i++) {
+ (*m_vec_ptr)[i]->findResources(resource_list);
+ }
+}
+
+// Debug dump of the whole statement vector.
+void StatementListAST::print(ostream& out) const
+{
+ assert(m_vec_ptr != NULL);
+ out << "[StatementListAST: " << *m_vec_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/StatementListAST.hh b/src/mem/slicc/ast/StatementListAST.hh
new file mode 100644
index 000000000..3b3968211
--- /dev/null
+++ b/src/mem/slicc/ast/StatementListAST.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * StatementListAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef STATEMENTLISTAST_H
+#define STATEMENTLISTAST_H
+
+#include "slicc_global.hh"
+#include "AST.hh"
+#include "StatementAST.hh"
+class Var;
+
+// Ordered sequence of statements (a statement block).  Owns the vector
+// and every StatementAST in it; generate() emits them in order and
+// findResources() accumulates their resource requirements.
+class StatementListAST : public AST {
+public:
+ // Constructors
+ StatementListAST(Vector<StatementAST*>* vec_ptr);
+ StatementListAST(StatementAST* statement_ptr); // single-statement list
+
+ // Destructor
+ ~StatementListAST();
+
+ // Public Methods
+ void generate(string& code, Type* return_type_ptr) const;
+ void findResources(Map<Var*, string>& resource_list) const;
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ StatementListAST(const StatementListAST& obj);
+ StatementListAST& operator=(const StatementListAST& obj);
+
+ // Data Members (m_ prefix)
+ Vector<StatementAST*>* m_vec_ptr; // owned, including elements
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StatementListAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const StatementListAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //STATEMENTLISTAST_H
diff --git a/src/mem/slicc/ast/TransitionDeclAST.cc b/src/mem/slicc/ast/TransitionDeclAST.cc
new file mode 100644
index 000000000..83361e233
--- /dev/null
+++ b/src/mem/slicc/ast/TransitionDeclAST.cc
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TransitionDeclAST.C
+ *
+ * Description: See TransitionDeclAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "TransitionDeclAST.hh"
+#include "Transition.hh"
+
+// Constructs a transition declaration:
+//   transition(states, events [, next_state]) { actions }
+// next_state_ptr may be NULL, meaning each transition stays in its own
+// start state (see generate()).  Takes ownership of the list/string
+// pointers; pairs_ptr is handed to the DeclAST base class.
+TransitionDeclAST::TransitionDeclAST(Vector<string>* state_list_ptr,
+ Vector<string>* event_list_ptr,
+ string* next_state_ptr,
+ PairListAST* pairs_ptr,
+ Vector<string>* action_list_ptr)
+ : DeclAST(pairs_ptr)
+{
+ m_state_list_ptr = state_list_ptr;
+ m_event_list_ptr = event_list_ptr;
+ m_next_state_ptr = next_state_ptr;
+ m_action_list_ptr = action_list_ptr;
+}
+
+TransitionDeclAST::~TransitionDeclAST()
+{
+ delete m_state_list_ptr;
+ delete m_event_list_ptr;
+ delete m_next_state_ptr; // deleting NULL is safe when no next state was given
+ delete m_action_list_ptr;
+}
+
+// Registers one Transition with the current state machine for every
+// (state, event) pair in the cross product of the two lists.  When no
+// explicit next state was given (m_next_state_ptr == NULL), each
+// transition's next state is its own start state (a self-loop).
+void TransitionDeclAST::generate()
+{
+ Vector<string>& states = *m_state_list_ptr;
+ Vector<string>& events = *m_event_list_ptr;
+
+ StateMachine* machine_ptr = g_sym_table.getStateMachine();
+ if (machine_ptr == NULL) {
+ error("Transition declaration not part of a machine.");
+ } else if (m_next_state_ptr == NULL) {
+ // No next state: stay in the originating state.
+ for (int i=0; i<states.size(); i++) {
+ for (int j=0; j<events.size(); j++) {
+ machine_ptr->addTransition(new Transition(states[i], events[j], states[i], *m_action_list_ptr, getLocation(), getPairs()));
+ }
+ }
+ } else {
+ // Explicit next state shared by every (state, event) pair.
+ for (int i=0; i<states.size(); i++) {
+ for (int j=0; j<events.size(); j++) {
+ machine_ptr->addTransition(new Transition(states[i], events[j], *m_next_state_ptr, *m_action_list_ptr, getLocation(), getPairs()));
+ }
+ }
+ }
+}
+
+void TransitionDeclAST::print(ostream& out) const
+{
+ out << "[TransitionDecl: ]";
+}
diff --git a/src/mem/slicc/ast/TransitionDeclAST.hh b/src/mem/slicc/ast/TransitionDeclAST.hh
new file mode 100644
index 000000000..10ca067d3
--- /dev/null
+++ b/src/mem/slicc/ast/TransitionDeclAST.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TransitionDeclAST.h
+ *
+ * Description:
+ *
+ * $Id: TransitionDeclAST.h,v 3.2 2003/07/10 18:08:07 milo Exp $
+ *
+ */
+
+#ifndef TransitionDeclAST_H
+#define TransitionDeclAST_H
+
+#include "slicc_global.hh"
+#include "DeclAST.hh"
+#include "StatementListAST.hh"
+
+// AST node for a SLICC "transition" declaration: a set of start states,
+// a set of triggering events, an optional next state, and an action
+// list.  generate() expands the state/event cross product into
+// individual Transition objects on the current machine.
+class TransitionDeclAST : public DeclAST {
+public:
+  // Constructors
+  // Takes ownership of all pointer arguments.  next_state_ptr may be
+  // NULL, meaning each generated transition stays in its start state.
+  TransitionDeclAST(Vector<string>* state_list_ptr,
+                    Vector<string>* event_list_ptr,
+                    string* next_state_ptr,
+                    PairListAST* pairs_ptr,
+                    Vector<string>* action_list_ptr);
+
+  // Destructor
+  ~TransitionDeclAST();
+
+  // Public Methods
+  void generate();
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  TransitionDeclAST(const TransitionDeclAST& obj);
+  TransitionDeclAST& operator=(const TransitionDeclAST& obj);
+
+  // Data Members (m_ prefix) -- all owned by this node
+  Vector<string>* m_state_list_ptr;
+  Vector<string>* m_event_list_ptr;
+  string* m_next_state_ptr;
+  Vector<string>* m_action_list_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const TransitionDeclAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const TransitionDeclAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //TransitionDeclAST_H
diff --git a/src/mem/slicc/ast/TypeAST.cc b/src/mem/slicc/ast/TypeAST.cc
new file mode 100644
index 000000000..82364732b
--- /dev/null
+++ b/src/mem/slicc/ast/TypeAST.cc
@@ -0,0 +1,67 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeAST.C
+ *
+ * Description: See TypeAST.h
+ *
+ * $Id: TypeAST.C,v 3.1 2003/03/22 15:15:16 xu Exp $
+ *
+ */
+
+#include "TypeAST.hh"
+
+// Constructor: takes ownership of the identifier string.
+TypeAST::TypeAST(string* ident_ptr)
+  : AST()
+{
+  m_ident_ptr = ident_ptr;
+}
+
+// Destructor: frees the owned identifier.
+TypeAST::~TypeAST()
+{
+  delete m_ident_ptr;
+
+}
+
+// Returns the type name exactly as written in the source.
+string TypeAST::toString() const
+{
+  return *m_ident_ptr;
+}
+
+// Resolves the identifier to a Type via the global symbol table.
+// Undeclared types are reported through error(), which does not return
+// (note the "Not reached" below).
+Type* TypeAST::lookupType() const
+{
+  Type* type_ptr = g_sym_table.getType(*m_ident_ptr);
+  if (type_ptr != NULL) {
+    return type_ptr;
+  } else {
+    error("Type '" + *m_ident_ptr + "' not declared.");
+  }
+  return NULL; // Not reached
+}
diff --git a/src/mem/slicc/ast/TypeAST.hh b/src/mem/slicc/ast/TypeAST.hh
new file mode 100644
index 000000000..a0fea56c3
--- /dev/null
+++ b/src/mem/slicc/ast/TypeAST.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeAST.h
+ *
+ * Description:
+ *
+ * $Id: TypeAST.h,v 3.2 2003/03/22 15:15:17 xu Exp $
+ *
+ */
+
+#ifndef TYPEAST_H
+#define TYPEAST_H
+
+#include "slicc_global.hh"
+#include "AST.hh"
+
+// AST node wrapping a type name.  toString() returns the identifier;
+// lookupType() resolves it through the global symbol table.
+class TypeAST : public AST {
+public:
+  // Constructors
+  // Takes ownership of ident_ptr (deleted in the destructor).
+  TypeAST(string* ident_ptr);
+
+  // Destructor
+  ~TypeAST();
+
+  // Public Methods
+  string toString() const;
+  Type* lookupType() const;
+
+  virtual void print(ostream& out) const {}
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  TypeAST(const TypeAST& obj);
+  TypeAST& operator=(const TypeAST& obj);
+
+  // Data Members (m_ prefix)
+  string* m_ident_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const TypeAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const TypeAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //TYPEAST_H
diff --git a/src/mem/slicc/ast/TypeDeclAST.cc b/src/mem/slicc/ast/TypeDeclAST.cc
new file mode 100644
index 000000000..5d2b19bac
--- /dev/null
+++ b/src/mem/slicc/ast/TypeDeclAST.cc
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeDeclAST.C
+ *
+ * Description: See TypeDeclAST.h
+ *
+ * $Id: TypeDeclAST.C,v 3.1 2003/03/22 15:15:17 xu Exp $
+ *
+ */
+
+#include "TypeDeclAST.hh"
+#include "main.hh"
+#include "SymbolTable.hh"
+
+// Constructor.  Takes ownership of the type AST and the field vector;
+// field_vec_ptr may be NULL for a type declared without fields.
+TypeDeclAST::TypeDeclAST(TypeAST* type_ast_ptr,
+                         PairListAST* pairs_ptr,
+                         Vector<TypeFieldAST*>* field_vec_ptr)
+  : DeclAST(pairs_ptr)
+{
+  m_type_ast_ptr = type_ast_ptr;
+  m_field_vec_ptr = field_vec_ptr;
+}
+
+// Destructor: deletes the type AST and, when present, every field AST
+// plus the vector holding them.
+TypeDeclAST::~TypeDeclAST()
+{
+  delete m_type_ast_ptr;
+  if (m_field_vec_ptr != NULL) {
+    int size = m_field_vec_ptr->size();
+    for(int i=0; i<size; i++) {
+      delete (*m_field_vec_ptr)[i];
+    }
+    delete m_field_vec_ptr;
+  }
+}
+
+// Creates the new Type, enters it into the global symbol table, then
+// generates each field into that type.
+// NOTE(review): the local machine_name is never used.
+void TypeDeclAST::generate()
+{
+  string machine_name;
+  string id = m_type_ast_ptr->toString();
+
+  // Make the new type
+  Type* new_type_ptr = new Type(id, getLocation(), getPairs(),
+                                g_sym_table.getStateMachine());
+  g_sym_table.newSym(new_type_ptr);
+
+  // Add all of the fields of the type to it
+  if (m_field_vec_ptr != NULL) {
+    int size = m_field_vec_ptr->size();
+    for(int i=0; i<size; i++) {
+      (*m_field_vec_ptr)[i]->generate(new_type_ptr);
+    }
+  }
+}
+
+// Writes a short diagnostic tag including the declared type's name.
+void TypeDeclAST::print(ostream& out) const
+{
+  out << "[TypeDecl: " << m_type_ast_ptr->toString() << "]";
+}
diff --git a/src/mem/slicc/ast/TypeDeclAST.hh b/src/mem/slicc/ast/TypeDeclAST.hh
new file mode 100644
index 000000000..1928c1bde
--- /dev/null
+++ b/src/mem/slicc/ast/TypeDeclAST.hh
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeDeclAST.h
+ *
+ * Description:
+ *
+ * $Id: TypeDeclAST.h,v 3.2 2003/03/17 01:55:28 xu Exp $
+ *
+ */
+
+#ifndef TypeDeclAST_H
+#define TypeDeclAST_H
+
+#include "slicc_global.hh"
+#include "DeclAST.hh"
+#include "TypeAST.hh"
+#include "TypeFieldAST.hh"
+
+// AST node for a type declaration: on generate() it creates the Type
+// symbol in the global symbol table and generates each of its fields.
+class TypeDeclAST : public DeclAST {
+public:
+  // Constructors
+  // Takes ownership of both pointers; field_vec_ptr may be NULL.
+  TypeDeclAST(TypeAST* type_ast_ptr,
+              PairListAST* pairs_ptr,
+              Vector<TypeFieldAST*>* field_vec_ptr);
+
+  // Destructor
+  ~TypeDeclAST();
+
+  // Public Methods
+  virtual void generate();
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  TypeDeclAST(const TypeDeclAST& obj);
+  TypeDeclAST& operator=(const TypeDeclAST& obj);
+
+  // Data Members (m_ prefix)
+  TypeAST* m_type_ast_ptr;
+  Vector<TypeFieldAST*>* m_field_vec_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const TypeDeclAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const TypeDeclAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //TypeDeclAST_H
diff --git a/src/mem/slicc/ast/TypeFieldAST.cc b/src/mem/slicc/ast/TypeFieldAST.cc
new file mode 100644
index 000000000..9dfe45397
--- /dev/null
+++ b/src/mem/slicc/ast/TypeFieldAST.cc
@@ -0,0 +1,44 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeFieldAST.C
+ *
+ * Description: See TypeFieldAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "TypeFieldAST.hh"
+
+// Base-class constructor: forwards the declaration's key/value pairs to
+// the AST base so every type-field node carries its annotations.
+TypeFieldAST::TypeFieldAST(PairListAST* pairs_ptr)
+  : AST(pairs_ptr->getPairs()) {
+}
+
diff --git a/src/mem/slicc/ast/TypeFieldAST.hh b/src/mem/slicc/ast/TypeFieldAST.hh
new file mode 100644
index 000000000..69d6d8b87
--- /dev/null
+++ b/src/mem/slicc/ast/TypeFieldAST.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeFieldAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef TypeFieldAST_H
+#define TypeFieldAST_H
+
+#include "slicc_global.hh"
+#include "StateMachine.hh"
+#include "StatementListAST.hh"
+#include "PairListAST.hh"
+#include "ExprAST.hh"
+
+// Abstract base class for the fields of a type declaration (data
+// members, enumerants, methods).  Subclasses implement generate() to
+// add themselves to a Type, and print() for diagnostics.
+class TypeFieldAST : public AST {
+public:
+  // Constructors
+  TypeFieldAST(PairListAST* pairs_ptr);
+
+  // Destructor
+  virtual ~TypeFieldAST() {}
+
+  // Public Methods
+  virtual void generate(Type *type_ptr) = 0;
+  virtual void print(ostream& out) const = 0;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  // TypeFieldAST(const TypeFieldAST& obj);
+  // TypeFieldAST& operator=(const TypeFieldAST& obj);
+
+  // Data Members (m_ prefix)
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const TypeFieldAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const TypeFieldAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //TypeFieldAST_H
diff --git a/src/mem/slicc/ast/TypeFieldEnumAST.cc b/src/mem/slicc/ast/TypeFieldEnumAST.cc
new file mode 100644
index 000000000..dbb27c3ae
--- /dev/null
+++ b/src/mem/slicc/ast/TypeFieldEnumAST.cc
@@ -0,0 +1,82 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeFieldEnumAST.C
+ *
+ * Description: See TypeFieldEnumAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "TypeFieldEnumAST.hh"
+#include "State.hh"
+#include "Event.hh"
+
+// Constructor.  Takes ownership of field_id_ptr.
+// NOTE(review): m_pairs_ptr is kept but never deleted here --
+// presumably owned elsewhere; confirm against the parser.
+TypeFieldEnumAST::TypeFieldEnumAST(string* field_id_ptr,
+                                   PairListAST* pairs_ptr)
+  : TypeFieldAST(pairs_ptr)
+{
+  m_field_id_ptr = field_id_ptr;
+  m_pairs_ptr = pairs_ptr;
+}
+
+// Destructor: frees the owned enumerant name.
+TypeFieldEnumAST::~TypeFieldEnumAST()
+{
+  delete m_field_id_ptr;
+}
+
+// Adds this enumerant to the enclosing Type.  If the type is the
+// special "State" or "Event" enumeration, also registers a matching
+// State/Event with the current machine; both cases require being
+// inside a machine declaration.
+void TypeFieldEnumAST::generate(Type *type_ptr)
+{
+  // Add enumeration; duplicate enumerant names are an error
+  if (!type_ptr->enumAdd(*m_field_id_ptr, m_pairs_ptr->getPairs())) {
+    error("Duplicate enumeration: " + type_ptr->toString() + ":" + *m_field_id_ptr);
+  }
+
+  // Fill machine info
+  StateMachine* machine_ptr = g_sym_table.getStateMachine();
+  if (type_ptr->toString() == "State") {
+    if (machine_ptr == NULL) {
+      error("State declaration not part of a machine.");
+    }
+    machine_ptr->addState(new State(*m_field_id_ptr, getLocation(), getPairs()));
+  }
+  if (type_ptr->toString() == "Event") {
+    if (machine_ptr == NULL) {
+      error("Event declaration not part of a machine.");
+    }
+    machine_ptr->addEvent(new Event(*m_field_id_ptr, getLocation(), getPairs()));
+  }
+}
+
+// Writes a short diagnostic tag including the enumerant name.
+void TypeFieldEnumAST::print(ostream& out) const
+{
+  out << "[TypeFieldEnum: " << *m_field_id_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/TypeFieldEnumAST.hh b/src/mem/slicc/ast/TypeFieldEnumAST.hh
new file mode 100644
index 000000000..f9c111762
--- /dev/null
+++ b/src/mem/slicc/ast/TypeFieldEnumAST.hh
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeFieldEnumAST.h
+ *
+ * Description:
+ *
+ * $Id: TypeFieldEnumAST.h,v 3.2 2003/07/10 18:08:07 milo Exp $
+ *
+ */
+
+#ifndef TypeFieldEnumAST_H
+#define TypeFieldEnumAST_H
+
+#include "slicc_global.hh"
+#include "StateMachine.hh"
+#include "TypeFieldAST.hh"
+#include "StatementListAST.hh"
+#include "PairListAST.hh"
+
+// Type field representing a single enumerant.  For the special "State"
+// and "Event" types, generate() also registers the enumerant with the
+// current state machine.
+class TypeFieldEnumAST : public TypeFieldAST {
+public:
+  // Constructors
+  // Takes ownership of field_id_ptr; keeps pairs_ptr as an alias.
+  TypeFieldEnumAST(string* field_id_ptr,
+                   PairListAST* pairs_ptr);
+
+  // Destructor
+  ~TypeFieldEnumAST();
+
+  // Public Methods
+  void generate(Type *type_ptr);
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  TypeFieldEnumAST(const TypeFieldEnumAST& obj);
+  TypeFieldEnumAST& operator=(const TypeFieldEnumAST& obj);
+
+  // Data Members (m_ prefix)
+  string* m_field_id_ptr;
+  PairListAST* m_pairs_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const TypeFieldEnumAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const TypeFieldEnumAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //TypeFieldEnumAST_H
diff --git a/src/mem/slicc/ast/TypeFieldMemberAST.cc b/src/mem/slicc/ast/TypeFieldMemberAST.cc
new file mode 100644
index 000000000..40109851b
--- /dev/null
+++ b/src/mem/slicc/ast/TypeFieldMemberAST.cc
@@ -0,0 +1,84 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeFieldMemberAST.C
+ *
+ * Description: See TypeFieldMemberAST.h
+ *
+ * $Id: TypeFieldMemberAST.C,v 3.1 2003/03/27 22:58:54 xu Exp $
+ *
+ */
+
+#include "TypeFieldMemberAST.hh"
+
+// Constructor.  Takes ownership of all pointers; rvalue_ptr may be
+// NULL when the member has no initializer.
+TypeFieldMemberAST::TypeFieldMemberAST(TypeAST* type_ast_ptr,
+                                       string* field_id_ptr,
+                                       PairListAST* pairs_ptr,
+                                       ExprAST* rvalue_ptr)
+  : TypeFieldAST(pairs_ptr)
+{
+  m_type_ast_ptr = type_ast_ptr;
+  m_field_id_ptr = field_id_ptr;
+  m_rvalue_ptr = rvalue_ptr;
+}
+
+// Destructor: frees the owned ASTs and name (initializer is optional).
+TypeFieldMemberAST::~TypeFieldMemberAST()
+{
+  delete m_type_ast_ptr;
+  delete m_field_id_ptr;
+  if(m_rvalue_ptr) delete m_rvalue_ptr;
+}
+
+// Adds this data member to the parent type.  If an initializer was
+// given, its code is generated and its type must equal the declared
+// field type (compared as Type* identity).
+void TypeFieldMemberAST::generate(Type *type_ptr)
+{
+  // Lookup type
+  Type* field_type_ptr = m_type_ast_ptr->lookupType();
+
+  // Check the initializer's type if this is an initialization
+  string* init_code = NULL;
+  if(m_rvalue_ptr) {
+    init_code = new string();
+    Type* rvalue_type_ptr = m_rvalue_ptr->generate(*init_code);
+    if(field_type_ptr != rvalue_type_ptr) {
+      error("Initialization type mismatch '" + field_type_ptr->toString() + "' and '" + rvalue_type_ptr->toString() + "'");
+    }
+  }
+
+  // Add data member to the parent type; duplicate names are an error
+  if (!type_ptr->dataMemberAdd(*m_field_id_ptr, field_type_ptr, getPairs(),
+                               init_code)) {
+    error("Duplicate data member: " + type_ptr->toString() + ":" + *m_field_id_ptr);
+  }
+}
+
+// Writes a short diagnostic tag including the member name.
+void TypeFieldMemberAST::print(ostream& out) const
+{
+  out << "[TypeFieldMember: " << *m_field_id_ptr << "]";
+}
diff --git a/src/mem/slicc/ast/TypeFieldMemberAST.hh b/src/mem/slicc/ast/TypeFieldMemberAST.hh
new file mode 100644
index 000000000..5e275bb96
--- /dev/null
+++ b/src/mem/slicc/ast/TypeFieldMemberAST.hh
@@ -0,0 +1,91 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeFieldMemberAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef TypeFieldMemberAST_H
+#define TypeFieldMemberAST_H
+
+#include "slicc_global.hh"
+#include "TypeFieldAST.hh"
+#include "StateMachine.hh"
+#include "StatementListAST.hh"
+#include "PairListAST.hh"
+#include "ExprAST.hh"
+#include "TypeAST.hh"
+
+// Type field representing a data member, optionally with an
+// initializing rvalue expression whose type must match the declared
+// field type.
+class TypeFieldMemberAST : public TypeFieldAST {
+public:
+  // Constructors
+  // Takes ownership of all pointers; rvalue_ptr may be NULL.
+  TypeFieldMemberAST(TypeAST* type_ast_ptr,
+                     string* field_id_ptr,
+                     PairListAST* pairs_ptr,
+                     ExprAST* rvalue_ptr);
+
+  // Destructor
+  ~TypeFieldMemberAST();
+
+  // Public Methods
+  void generate(Type *type_ptr);
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  TypeFieldMemberAST(const TypeFieldMemberAST& obj);
+  TypeFieldMemberAST& operator=(const TypeFieldMemberAST& obj);
+
+  // Data Members (m_ prefix)
+  TypeAST* m_type_ast_ptr;
+  string* m_field_id_ptr;
+  ExprAST* m_rvalue_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const TypeFieldMemberAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const TypeFieldMemberAST& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //TypeFieldMemberAST_H
diff --git a/src/mem/slicc/ast/TypeFieldMethodAST.cc b/src/mem/slicc/ast/TypeFieldMethodAST.cc
new file mode 100644
index 000000000..5229d4a08
--- /dev/null
+++ b/src/mem/slicc/ast/TypeFieldMethodAST.cc
@@ -0,0 +1,81 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeFieldMethodAST.C
+ *
+ * Description: See TypeFieldMethodAST.h
+ *
+ * $Id: TypeFieldMethodAST.C,v 3.1 2003/07/10 18:08:07 milo Exp $
+ *
+ */
+
+#include "TypeFieldMethodAST.hh"
+
+// Constructor.  Takes ownership of all pointer arguments (the return
+// type AST, the identifier string, and the parameter-type vector);
+// they are deleted in the destructor below.  pairs_ptr is forwarded
+// to the TypeFieldAST base.
+TypeFieldMethodAST::TypeFieldMethodAST(TypeAST* return_type_ast_ptr,
+ string* ident_ptr,
+ Vector<TypeAST*>* type_ast_vec_ptr,
+ PairListAST* pairs_ptr)
+ : TypeFieldAST(pairs_ptr)
+{
+ m_return_type_ast_ptr = return_type_ast_ptr;
+ m_ident_ptr = ident_ptr;
+ m_type_ast_vec_ptr = type_ast_vec_ptr;
+}
+
+// Destructor.  Frees the owned return-type AST and identifier string,
+// then every parameter-type AST element before the vector itself.
+TypeFieldMethodAST::~TypeFieldMethodAST()
+{
+ delete m_return_type_ast_ptr;
+ delete m_ident_ptr;
+
+ int size = m_type_ast_vec_ptr->size();
+ for(int i=0; i<size; i++) {
+ delete (*m_type_ast_vec_ptr)[i];
+ }
+ delete m_type_ast_vec_ptr;
+}
+
+// Generation pass: resolve the method's return type and each
+// parameter type via lookupType(), then register the method on the
+// enclosing type.  Reports an error on a duplicate method name.
+void TypeFieldMethodAST::generate(Type *type_ptr)
+{
+ // Lookup return type
+ Type* return_type_ptr = m_return_type_ast_ptr->lookupType();
+
+ // Lookup parameter types
+ Vector<Type*> type_vec;
+ int size = m_type_ast_vec_ptr->size();
+ for(int i=0; i<size; i++) {
+ // Renamed from 'type_ptr' so the loop-local no longer shadows the
+ // 'type_ptr' parameter of this function
+ Type* param_type_ptr = (*m_type_ast_vec_ptr)[i]->lookupType();
+ type_vec.insertAtBottom(param_type_ptr);
+ }
+
+ // Add method; methodAdd() returns false on error (duplicate)
+ if (!type_ptr->methodAdd(*m_ident_ptr, return_type_ptr, type_vec)) {
+ error("Duplicate method: " + type_ptr->toString() + ":" + *m_ident_ptr + "()");
+ }
+}
diff --git a/src/mem/slicc/ast/TypeFieldMethodAST.hh b/src/mem/slicc/ast/TypeFieldMethodAST.hh
new file mode 100644
index 000000000..a276cc5f8
--- /dev/null
+++ b/src/mem/slicc/ast/TypeFieldMethodAST.hh
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TypeFieldMethodAST.h
+ *
+ * Description:
+ *
+ * $Id: TypeFieldMethodAST.h,v 3.2 2003/07/10 18:08:07 milo Exp $
+ *
+ */
+
+#ifndef TYPEFIELDMETHODAST_H
+#define TYPEFIELDMETHODAST_H
+
+#include "slicc_global.hh"
+#include "TypeFieldAST.hh"
+#include "TypeAST.hh"
+
+// AST node for a method declared inside a SLICC type ("structure")
+// declaration: a return type, an identifier, a vector of parameter
+// types, and attribute pairs.  generate() registers the method on the
+// enclosing Type (see TypeFieldMethodAST.cc).
+class TypeFieldMethodAST : public TypeFieldAST {
+public:
+ // Constructors
+ TypeFieldMethodAST(TypeAST* return_type_ast_ptr,
+ string* ident_ptr,
+ Vector<TypeAST*>* type_ast_vec_ptr,
+ PairListAST* pairs_ptr);
+ // Destructor
+ ~TypeFieldMethodAST();
+
+ // Public Methods
+
+ void generate(Type *type_ptr);
+ // print() is intentionally a no-op for this node
+ void print(ostream& out) const {}
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator (declared but
+ // never defined: copying is intentionally disallowed)
+ TypeFieldMethodAST(const TypeFieldMethodAST& obj);
+ TypeFieldMethodAST& operator=(const TypeFieldMethodAST& obj);
+
+ // Data Members (m_ prefix)
+ TypeAST* m_return_type_ast_ptr;        // method return type
+ string* m_ident_ptr;                   // method name
+ Vector<TypeAST*>* m_type_ast_vec_ptr;  // parameter types, in order
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const TypeFieldMethodAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition.  Note that print() is empty for this
+// class, so this emits nothing beyond a stream flush.
+extern inline
+ostream& operator<<(ostream& out, const TypeFieldMethodAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //TYPEFIELDMETHODAST_H
diff --git a/src/mem/slicc/ast/VarExprAST.cc b/src/mem/slicc/ast/VarExprAST.cc
new file mode 100644
index 000000000..865cc71b0
--- /dev/null
+++ b/src/mem/slicc/ast/VarExprAST.cc
@@ -0,0 +1,76 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * VarExprAST.C
+ *
+ * Description: See VarExprAST.h
+ *
+ * $Id$
+ *
+ */
+
+#include "VarExprAST.hh"
+#include "StatementAST.hh"
+#include "SymbolTable.hh"
+
+// Destructor.  Frees the owned identifier string (taken over in the
+// constructor defined in the header).
+VarExprAST::~VarExprAST()
+{
+ delete m_var_ptr;
+}
+
+
+// Look up this expression's identifier in the global symbol table and
+// return the Var.  Calls error() on an unknown identifier; error()
+// presumably does not return (otherwise NULL would be returned here)
+// -- TODO confirm error() semantics.
+Var* VarExprAST::getVar() const
+{
+ string var = *m_var_ptr;
+ Var* var_ptr = g_sym_table.getVar(var);
+ if (var_ptr == NULL) {
+ error("Unrecognized variable: " + var);
+ }
+ return var_ptr;
+}
+
+// Verify that this variable's type is exactly the type named
+// type_ident (compared by Type pointer identity against the symbol
+// table entry).  Calls error() if the type is undeclared in this
+// scope or if the variable's type does not match.
+void VarExprAST::assertType(string type_ident) const
+{
+ Type* expected_type_ptr = g_sym_table.getType(type_ident);
+ if (expected_type_ptr == NULL) {
+ error("There must be a type '" + type_ident + "' declared in this scope");
+ }
+
+ if (getVar()->getType() != expected_type_ptr) {
+ error("Incorrect type: '" + getVar()->getIdent() + "' is expected to be type '" + expected_type_ptr->toString() + "'");
+ }
+}
+
+// Code generation: append the variable's generated C++ code to the
+// output string and return the variable's type for the caller's
+// type checking.
+Type* VarExprAST::generate(string& code) const
+{
+ Var* var_ptr = getVar();
+ code += var_ptr->getCode();
+ return var_ptr->getType();
+}
diff --git a/src/mem/slicc/ast/VarExprAST.hh b/src/mem/slicc/ast/VarExprAST.hh
new file mode 100644
index 000000000..ce2f68875
--- /dev/null
+++ b/src/mem/slicc/ast/VarExprAST.hh
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * VarExprAST.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef VAREXPRAST_H
+#define VAREXPRAST_H
+
+#include "slicc_global.hh"
+#include "ExprAST.hh"
+class Var;
+
+// AST node for a bare variable reference in a SLICC expression.  It
+// owns only the identifier string; the actual Var is resolved lazily
+// through the global symbol table (see getVar() in the .cc file).
+class VarExprAST : public ExprAST {
+public:
+ // Constructors
+ // Takes ownership of var_ptr; it is freed in the destructor.
+ VarExprAST(string* var_ptr) : ExprAST() { m_var_ptr = var_ptr; }
+
+ // Destructor
+ ~VarExprAST();
+
+ // Public Methods
+ Type* generate(string& code) const;
+ void print(ostream& out) const { out << "[VarExprAST: " << *m_var_ptr << "]"; }
+ string getName() const { return *m_var_ptr; }
+ Var* getVar() const;
+ void assertType(string type_ident) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator (declared but
+ // never defined: copying is intentionally disallowed)
+ VarExprAST(const VarExprAST& obj);
+ VarExprAST& operator=(const VarExprAST& obj);
+
+ // Data Members (m_ prefix)
+ string* m_var_ptr;  // owned identifier naming the variable
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const VarExprAST& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition: delegates to print() and flushes the
+// stream so diagnostic output is visible immediately.
+extern inline
+ostream& operator<<(ostream& out, const VarExprAST& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //VAREXPRAST_H
diff --git a/src/mem/slicc/doc/SLICC_V03.txt b/src/mem/slicc/doc/SLICC_V03.txt
new file mode 100644
index 000000000..1c2a95a39
--- /dev/null
+++ b/src/mem/slicc/doc/SLICC_V03.txt
@@ -0,0 +1,307 @@
+SLICC - Version 0.3 Design Document - January 17, 1999
+Milo Martin and Dan Sorin
+
+Question: Rethinking of support for profiling the transactions
+
+Question: How do we deal with functions/methods and resources
+
+Comment: We need to discuss the sequencer interface so it can work now
+ and for the speculative version buffer.
+
+Overview
+--------
+
+We are in the process of designing and implementing SLICC v0.3, an
+evolution of SLICC v0.2. The new design includes capabilities for
+design of multilevel cache hierarchies including the specification of
+multiple protocol state machines (PSMs) and the queues which connect
+these PSMs and the network. We actually believe that most of the
+network and network topology, including the ordered network, can be
+expressed using the new hierarchical extensions to the language.
+
+In addition, many implicit aspects of the language will be eliminated
+in favor of explicit declarations. For example functions, queues, and
+objects declarations such as "cacheMemory" and "TBETable" will be
+explicitly declared. This will allow for full static type checking
+and easier extension for the language. Event triggering will be part
+of "in_port" declarations and not "event" declarations. Finally, many
+less fundamental, but important, features and internal code
+improvements will be enhanced.
+
+SLICC History
+-------------
+
+v0.1 - Initially the language only handled the generation of the PSM
+ transition table logic. All actions and event triggering were
+ still coded in C++. At this point it was still called, "the
+ language."
+
+v0.2 - Extended the language to include a simple C like syntax for
+ specifying actions, event triggering, and manipulating queues
+ and state elements. This version was the first version of the
+ language known as SLICC (suggested by Amir) and was used for
+ the Multifacet ISCA 2000 submission.
+
+v0.3 - Development effort started January 2000. Intended features and
+ enhancements are described by this document.
+
+Specifying Hierarchical Designs
+-------------------------------
+
+Right now all of our protocols have two tables, a processor/cache PSM
+and a directory PSM. In v0.2 this is a rigid requirement and
+the names are implicit. SLICC v0.3 will allow for an arbitrary number
+of different PSMs.
+
+The most significant improvement in v0.3 is the ability for the user
+to define an arbitrary set of interconnected PSMs. PSMs may include
+an L1 cache controller, L2 cache controller, directory controller,
+speculative version buffer, network interface, etc. There are a
+couple of "primitive PSMs" such as the sequencer.
+
+There will be a notion of a "node" of the system. In a node, each PSM
+will be instantiated and connected together with queues. For example,
+assume we define a PSM and want to create a queue of RequestMessages
+to communicate between it and the network.
+
+ machine(CacheController) {
+ ...
+ out_port(to_network, RequestMessage, "To the network", desc="...");
+ ...
+ }
+
+ CacheController cache, desc="...";
+
+ connect(cache.to_network, network.from_cache, ordered="yes", desc="...");
+
+Explicit State Manipulation
+---------------------------
+
+As before, PSMs have states, events, and transitions. New in v0.3 each
+PSM must have user defined methods for get_state(address) and
+set_state(address, state), and these methods are written explicitly,
+instead of being implicit functions of memory states (e.g., our
+current implementation which implicitly uses the TBE state if there is
+a TBE or uses the cache state). Functions have a return value,
+procedures do not. Function calls are expressions, procedure calls
+are statements. All function and procedure parameters are considered
+pass-by-value.
+
+ procedure set_state(Address addr, State state) {
+ ...
+ }
+
+ function State get_state(Address addr) {
+ ...
+ }
+
+Explicit Declaration
+--------------------
+
+PSMs reference or declare structures, such as queues, ports, cache
+memories, main memory, TBEs, write buffers, etc. These primitive
+types and structures are written in C++, and their semantics are still
+specified by the C++ coder. Examples of these primitive types include
+"CacheMemory," "TBETable," as well as various types of queues.
+
+One major difference is that in v0.3 the interface for all of these
+primitive objects will be declared (but not defined) in the SLICC
+language. This also allows adding primitive structures by defining a
+C++ implementation and a SLICC interface specification. This will
+make the language much more extensible. Specifying the interface of
+these primitive types, structures, and queues in SLICC will eliminate
+much of the implicit semantics that is currently hiding in the
+controllers.
+
+The interface declaration might be in one file and shared between all
+protocols. The object instantiation would be internal to each PSM
+that requires a cache memory. The syntax for messages will also be
+enhanced by using this new syntax. Notice the support for default
+values.
+
+ structure(CacheMemory, "Cache memory", desc="...") {
+ void cache_change_state(Address addr, State state), desc="...";
+ Data dataBlk, default="", desc="";
+ bool cache_avail(Address addr), desc="...";
+ Address cache_probe(Address addr), desc="...";
+ void cache_allocate(Address addr), desc="...";
+ }
+
+ CacheMemory L1cacheMemory, desc="...";
+
+Structure specification is going to require the introduction of an
+object model in the language. The "." (dot) operator is going to be
+extended beyond the use as structure element access, but also allow
+for an object method call syntax similar to C++ and Java.
+
+ L1cacheMemory.cache_allocate(addr);
+
+Polymorphism
+------------
+
+We are also going to want to allow for polymorphism for many of the
+structures. We already have a limited degree of polymorphism between
+different protocols by using the same cache memory structure with
+different "CacheEntry" types in each protocol. Now that we are going
+to have multiple levels of cache, each requiring slightly different
+state bits, we are going to want to specify cache memory structures
+which have different "CacheEntry" types in the same protocol. To do
+this right, this is going to require adding full polymorphism support
+to the language. Right now we imagine something like C++'s templates,
+since they are a more natural fit to hardware synthesis in the future.
+
+Type Checker
+------------
+
+All of the above substantially complicates our type system by
+requiring more types and scoping rules. As a step towards
+understanding the implications of the type system, a type checking
+system will be implemented. This is a hard requirement if we are ever
+to distribute the system since receiving compile time errors in the
+generated code is not acceptable. In order to ensure that we don't
+accidentally design a language that is not statically type checkable,
+it is important to add the type checker sooner rather than later.
+
+Event Triggering
+----------------
+
+In v0.2, PSM events were individually specified as sets of conditions.
+The following SLICC v0.2 code is a simplified example from the Origin
+protocol.
+
+ event(Dir_data_ack_0, "Data ack 0", desc="... ack count == 0") {
+ if (queue_ready(responseNetwork)) {
+ peek(responseNetwork, ResponseMsg) {
+ if(in_msg.NumPendingAcks == 0) {
+ trigger(in_msg.Address);
+ }
+ }
+ }
+ }
+
+ event(Dir_data_ack_not_0, "Data ack not 0", desc="... ack count != 0") {
+ if (queue_ready(responseNetwork)) {
+ peek(responseNetwork, ResponseMsg) {
+ if(in_msg.NumPendingAcks != 0) {
+ trigger(in_msg.Address);
+ }
+ }
+ }
+ }
+
+The above code defines the exact conditions for the events to be
+triggered. This type of event specification led to redundant code and
+numerous bugs where conditions for different events were not
+completely orthogonal.
+
+In v0.3, events will be declared with no accompanying code (similar to
+how states are specified). Instead, the code that determines which
+event is triggered will be part of each incoming port's declaration.
+This approach should eliminate redundancy and bugs in trigger
+conditions. The v0.3 code for the above would look like:
+
+ event(Dir_data_ack_0, "Data ack 0", desc="... ack count = 0");
+ event(Dir_data_ack_not_0, "Data ack not 0", desc="... ack count != 0");
+
+ in_port(responseNetwork, ResponseMsg, "Response Network", desc="...") {
+ if(in_msg.NumPendingAcks == 0) {
+ trigger(Dir_data_ack_0, in_msg.Address);
+ } else {
+ trigger(Dir_data_ack_not_0, in_msg.Address);
+ }
+ }
+
+Notice that one no longer needs to explicitly check if the queue is
+ready or to perform the peek operation.
+
+Also notice that the type of messages that arrives on the port is
+explicitly declared. All ports, incoming and outgoing, are now
+explicitly typed channels. You will still be required to include the
+type of message when manipulating the queue. The type specified will
+be statically type checked and also acts as self-documenting code.
+
+Other Improvements
+------------------
+
+There will be a number of other improvements in v0.3 such as general
+performance tuning and clean up of the internals of the compiler. The
+compiler will be modified to operate on multiple files. In addition,
+the abstract syntax tree internal to the code will need to be extended
+to encompass more information, including information parsed in from
+multiple files.
+
+The affiliates talk and the document for the language should also be
+updated to reflect the changes in the new version.
+
+Looking Forward
+---------------
+
+When designing v0.3 we are keeping future plans in mind.
+
+- When our designs of the multilevel cache hierarchy are complete, we
+ expect to have a large amount of replication between the protocols
+ and caches controllers within a protocol. For v0.4 we hope to look
+ at the patterns that have evolved and look for ways in which the
+ language can capture these patterns. Exploiting reuse will provide
+ quicker protocol development and maintainability.
+
+- By keeping the specification structural, we are looking towards
+ generating VHDL/Verilog from SLICC. The type system will help this,
+ as will more explicit instantiation and declaration of types and
+ structures. The structures now written in C++ (sequencer, network,
+ cache arrays) will be ported to the HDL we select. The rest of the
+ controllers will be generated by the compiler. At first the
+ generated controller will not be optimized. I believe that with
+ more effort we can automatically generate reasonably optimized,
+ pipelined implementation of the controllers.
+
+Implementation Plan
+-------------------
+
+- HTML generator
+- Extend internal parser AST nodes
+- Add get_state function and set_state procedure declarations
+- Move trigger logic from events to in_ports
+- Types
+ - Change type declaration syntax
+ - Declare primitive types and corresponding C++ types
+ - Add default values to structures and types
+ - Add object method call syntax
+ - Write type checker
+- Documentation
+ - Revise document
+ - Update presentation
+
+Document History
+----------------
+
+$Id: SLICC_V03.txt,v 3.0 2000/09/12 20:27:59 sorin Exp $
+
+$Log: SLICC_V03.txt,v $
+Revision 3.0 2000/09/12 20:27:59 sorin
+Version 3.0 signifies a checkpoint of the source tree right after the
+final draft of the ASPLOS '00 paper.
+
+Revision 1.1.1.1 2000/03/09 10:18:38 milo
+Initial import
+
+Revision 2.0 2000/01/19 07:21:13 milo
+Version 2.0
+
+Revision 1.5 2000/01/18 10:26:24 milo
+Changed the SLICC parser so that it generates a full AST. This is the
+first step in moving towards v0.3
+
+Revision 1.4 2000/01/17 18:36:15 sorin
+*** empty log message ***
+
+Revision 1.3 2000/01/15 10:30:16 milo
+Added implementation list
+
+Revision 1.2 2000/01/15 08:11:44 milo
+Minor revisions
+
+Revision 1.1 2000/01/15 07:14:17 milo
+Converted Dan's first draft into a text file. Significant
+modifications were made.
+
diff --git a/src/mem/slicc/doc/tutorial.tex b/src/mem/slicc/doc/tutorial.tex
new file mode 100644
index 000000000..c20dba156
--- /dev/null
+++ b/src/mem/slicc/doc/tutorial.tex
@@ -0,0 +1,574 @@
+%& latex
+\documentclass[11pt]{article}
+\usepackage{graphics}
+\usepackage{color}
+
+\textheight 9.0 in
+\topmargin -0.5 in
+\textwidth 6.5 in
+\oddsidemargin -0.0 in
+\evensidemargin -0.0 in
+
+\begin{document}
+
+\definecolor{dark}{gray}{0.5}
+
+\newcommand{\syntax}[1]{%
+\begin{center}
+\fbox{\tt \small
+\begin{tabular}{l}
+#1\end{tabular}}\end{center}}
+
+\begin{center}
+{\LARGE Tutorial for SLICC v0.2} \\
+\vspace{.25in}
+{\large Milo Martin} \\
+{\large 8/25/1999}
+\end{center}
+
+\section*{Overview}
+
+This document attempts to illustrate the syntax and expressiveness of
+the cache coherence protocol specification language through a small
+example. A ``stupido'' cache coherence protocol is described in prose
+and then expressed in the language.
+
+The protocol used as the running example is described. Then each of
+the elements of the protocol is discussed and expressed in the
+language: states, events, transitions, and actions.
+
+\section*{Protocol Description}
+
+In order to make this example as simple as possible, the protocol
+described is as simple as possible and makes many simplifying
+assumptions. These simplifications were made only to clarify the
+exposition and are not indications of limitations of the
+expressiveness of the description language. We have already specified
+a more complicated MSI broadcast snooping protocol, a multicast
+snooping protocol, and a directory protocol. The simplifying
+assumptions are listed below. The remaining details of the protocol
+are described in the sections where we give the syntax of the
+language.
+
+\begin{itemize}
+
+\item
+The protocol uses broadcast snooping that assumes that a broadcast can
+only occur if all processors have processed all of their incoming
+address transactions. In essence, when a processor issues an address
+request, that request will be next in the global order. This allows
+us to avoid needing to handle the cases before we have observed our
+request in the global order.
+
+\item
+The protocol has only Modified and Idle stable states. (Note: Even
+the Shared state is omitted.)
+
+\item
+To avoid describing replacement (PutX's) and writebacks, the caches
+are treated as infinite in size.
+
+\item
+No forward progress bit is used, so the protocol as specified does not
+guarantee forward progress.
+
+\item
+Only the mandatory request queue is used. No optional or prefetch
+queue is described.
+
+\item
+The above simplifications reduce the need for TBEs (Translation Buffer
+Entries) and thus the idea of a TBE is not included.
+
+\item
+Each memory module is assumed to have some state associated with each
+cache block in the memory. This requires a simple directory/memory
+state machine to work as a complement to the processor state machine.
+Traditional broadcast snooping protocols often have no ``directory''
+state in the memory.
+
+\end{itemize}
+
+\section*{Protocol Messages}
+
+Cache coherence protocols communicate by sending well defined
+messages. To fully specify a cache coherence protocol we need to be
+able to specify the message types and fields. For this protocol we
+have address messages ({\tt AddressMsg}) which are broadcast, and data
+messages ({\tt DataMsg}) which are point-to-point. Address messages
+have an address field ({\tt Address}), a request type ({\tt Type}),
+and which processor made the request ({\tt Requestor}). Data message
+have an address field ({\tt Address}), a destination ({\tt
+Destination}), and the actual cache block being transfered ({\tt
+DataBlk}). The names in parentheses are important because those are
+the names by which code later in the specification will reference
+various message types and fields in the messages.
+
+Messages are declared by creating types with {\tt new\_type()} and
+adding fields with {\tt type\_field()}. The exact syntax for
+declaring types and message is a bit ugly right now and is going to be
+changed in the near future. If you wish, please see the appendix for
+an example of the current syntax.
+
+
+
+\section*{Cache States}
+
+Idle and Modified are the two stable states in our protocol. In
+addition the we have a single transient processor state. This state
+is used after a processor has issued a request and is waiting for the
+data response to arrive.
+
+Declaring states in the language is the first of a number of
+declarations we will be using. All of these declarations have a
+similar format. Below is the format for a state declaration.
+
+\syntax{
+{\tt state({\em identifier}, {\em shorthand}, {\em pair1}, {\em pair2}, ...);}
+}
+
+{\em identifier} is a name that is used to
+refer to this state later in the description. It must start with a
+letter and after that can have any combination of letters, numbers,
+and the underscore character. {\em shorthand} is a quoted string
+which contains the shorthand that should be used for the state when
+generating tables and such.
+
+The {\em pair}'s are used to associate arbitrary information with each
+state. Zero or more pairs can be included in each declaration. For
+example we want to have a more verbose description of each state when
+we generate the table which contains the states and descriptions.
+This information is encoded in the language by adding a {\tt desc}
+parameter to a declaration. The name of the parameter is followed by
+an equal sign and a string with the description. The {\tt desc} pair is
+technically optional, however the table generation tool will complain
+about a missing description if it is not present.
+
+The three states for our protocol are expressed as follows:
+
+\begin{verbatim}
+state(I, "I", desc="Idle");
+state(M, "M", desc="Modified");
+state(IM, "IM", desc="Idle, issued request but have not seen data yet");
+\end{verbatim}
+
+\section*{Cache Events}
+
+Events are external stimulus that cause the state machine to take
+action. This is most often a message in one of the queues from the
+network or processor. Events form the columns of the protocol table.
+Our simple protocol has one event per incoming queue. When a message
+is waiting in one of these queues an event can occur. We can see a
+request from the processor in the mandatory queue, another processor's
+request, or a data response.
+
+Events are declared in the language similarly to states. The {\em
+identifier}, {\em shorthand}, and {\em pair}'s have the same purpose
+as in a state declaration.
+
+\syntax{
+event({\em identifier}, {\em shorthand}, {\em pair1}, {\em pair2}, ...) \{ \\
+\hspace{.1in} {\em statement\_list} \\
+\} \\
+}
+
+Events are different in that they have a list of statements which
+allows exact specification of when the event should ``trigger'' a
+transition. These statements are mini-programming language with
+syntax similar to that of C. For example the {\tt peek} construct in
+this context checks to see if there is a message at the head of the
+specified queue, and if so, conceptually copies the message to a
+temporary variable accessed as {\tt in\_msg}. The language also
+supports various procedure calls, functions, conditional statements,
+assignment, and queue operations such as peek, enqueue and dequeue.
+The {\tt trigger()} construct takes an address as the only parameter.
+This is the address that should be triggered for the event. To give
+you a feel for what this code looks like, the three events for our
+simple protocol are below.
+
+\begin{verbatim}
+event(LoadStore, "LoadStore", desc="Load or Store request from local processor") {
+ peek(mandatoryQueue_ptr, CacheMsg) {
+ trigger(in_msg.Address);
+ }
+}
+
+event(Other_GETX, "Other GETX", desc="Observed a GETX request from another processor") {
+ peek(addressNetwork_ptr, AddressMsg) {
+ if (in_msg.Requestor != id) {
+ trigger(in_msg.Address);
+ }
+ }
+}
+
+event(Data, "Data", desc="Data for this block from the data network") {
+ peek(dataNetwork_ptr, DataMsg) {
+ trigger(in_msg.Address);
+ }
+}
+\end{verbatim}
+
+\section*{Cache Actions}
+
+Actions are the primitive operations that are performed by various
+state transitions. These correspond (by convention) to the lower case
+letters in the tables. We need several actions in our protocol
+including issuing a GetX request, servicing a cache hit, sending data
+from the cache to the requestor, writing data into the cache, and
+popping the various queues.
+
+The syntax of an action declaration is similar to an event
+declaration. The difference is that statements in the statement list
+are used to implement the desired action, and not triggering an event.
+
+\syntax{action({\em identifier}, {\em shorthand}, {\em pair1}, {\em pair2}, ...) \{ \\
+\hspace{.1in} {\em statement\_list} \\
+\}
+}
+
+The actions for this protocol use more of the features of the
+language. Some of the interesting case are discussed below.
+
+\begin{itemize}
+
+\item
+To manipulate values we need assignment statements (notice the use of
+{\verb+:=+} as the assignment operator). The action to write data
+into the cache looks at the incoming data message and puts the data in
+the cache. Notice the use of square brackets to lookup the block in
+the cache based on the address of the block.
+
+\begin{verbatim}
+action(w_writeDataToCache, "w", desc="Write data from data message into cache") {
+ peek(dataNetwork_ptr, DataMsg) {
+ cacheMemory_ptr[address].DataBlk := in_msg.DataBlk;
+ }
+}
+\end{verbatim}
+
+\item
+In addition to peeking at queues, we also enqueue messages. The {\tt
+enqueue} construct works similarly to the {\tt peek} construct. {\tt
+enqueue} creates a temporary called {\tt out\_msg}. You can assign
+the fields of this message. At the end of the {\tt enqueue} construct
+the message is implicitly inserted in the outgoing queue of the
+specified network. Notice also how the type of the message is
+specified and how the assignment statements use the names of the
+fields of the messages. {\tt address} is the address for which the
+event was {\tt trigger}ed.
+
+\begin{verbatim}
+action(g_issueGETX, "g", desc="Issue GETX.") {
+ enqueue(addressNetwork_ptr, AddressMsg) {
+ out_msg.Address := address;
+ out_msg.Type := "GETX";
+ out_msg.Requestor := id;
+ }
+}
+\end{verbatim}
+
+\item
+Some times we need to use both {\tt peek} and {\tt enqueue} together.
+In this example we look at an incoming address request to figure out
+to whom to forward the data value.
+
+\begin{verbatim}
+action(r_cacheToRequestor, "r", desc="Send data from the cache to the requestor") {
+ peek(addressNetwork_ptr, AddressMsg) {
+ enqueue(dataNetwork_ptr, DataMsg) {
+ out_msg.Address := address;
+ out_msg.Destination := in_msg.Requestor;
+ out_msg.DataBlk := cacheMemory_ptr[address].DataBlk;
+ }
+ }
+}
+\end{verbatim}
+
+\item
+We also need to pop the various queues.
+\begin{verbatim}
+action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ dequeue(mandatoryQueue_ptr);
+}
+\end{verbatim}
+
+\item
+Finally we have the ability to call procedures and functions. The
+following is an example of a procedure call. Currently all of the
+procedures and functions are used to handle all of the more specific
+operations. These are currently hard coded into the generator.
+
+\begin{verbatim}
+action(h_hit, "h", desc="Service load/store from the cache.") {
+ serviceLdSt(address, cacheMemory_ptr[address].DataBlk);
+}
+\end{verbatim}
+
+\end{itemize}
+
+\section*{Cache Transitions}
+
+The cross product of states and events gives us the set of possible
+transitions. For example, for our example protocol the empty table would
+be:
+
+\begin{center}
+\begin{tabular}{|l||l|l|l|} \hline
+ & LoadStore & Other GETX & Data \\ \hline \hline
+I & & & \\ \hline
+M & & & \\ \hline
+IM & & & \\ \hline
+\end{tabular}
+\end{center}
+
+
+Transitions are atomic and are the heart of the protocol
+specification. The transition specifies both the next state and
+the actions performed for each unique state/event pair. The
+transition declaration looks different from the other declarations:
+
+\syntax{
+transition({\em state}, {\em event}, {\em new\_state}, {\em pair1}, {\em pair2}, ...) \{ \\
+\hspace{.1in} {\em action\_identifier\_list}\\
+\}
+}
+
+{\em state} and {\em event} are the pair which uniquely identifies the
+transition. {\em state} corresponds to the row while {\em event}
+selects the column. {\em new\_state} is an optional parameter. If
+{\em new\_state} is specified, that is the state when the atomic
+transition is completed. If the parameter is omitted there is assumed
+to be no state change. An impossible transition is specified by
+simply not declaring an event for that state/event pair.
+
+We also place a list of actions in the curly braces. The {\em
+action\_identifier}'s correspond to the identifier specified as the
+first parameter of an action declaration. The action list is a list
+of operations to be performed when this transition occurs. The
+actions also know what preconditions are required for
+the action to be performed. For example a necessary precondition for
+an action which sends a message is that there is a space available in
+the outgoing queue. Each transition is considered atomic, and thus
+the generated code ensures that all of the actions can be completed
+before performing any of the actions.
+
+In our running example protocol it is only possible to receive data in
+the {\em IM} state. The other seven cases can occur and are declared
+as follows. Below are a couple of examples. See the appendix for a
+complete list.
+
+\newpage
+\begin{verbatim}
+transition(I, LoadStore, IM) {
+ g_issueGETX;
+}
+
+transition(M, LoadStore) {
+ h_hit;
+ k_popMandatoryQueue;
+}
+
+transition(M, Other_GETX, I) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+}
+\end{verbatim}
+
+From the above declarations we can generate a table. Each box can
+have lower case letters which corresponds to the list of actions
+possibly followed by a slash and a state (in uppercase letters). If
+there is no slash and state, the transition does not change the state.
+
+\begin{center}
+\begin{tabular}{|l||l|l|l|} \hline
+ & LoadStore & Other GETX & Data \\ \hline \hline
+I & g/IM & i & (impossible)\\ \hline
+M & hk & ri/I & (impossible)\\ \hline
+IM & z & z & wj/M \\ \hline
+\end{tabular}
+\end{center}
+
+There is a useful shorthand for specifying many transitions with the
+same action. One or both of {\em event} and {\em state} can be a list
+in curly braces. This defines the cross product of the sets in one
+declaration. If no {\em new\_state} is specified none of the
+transitions cause a state change. If {\em new\_state} is specified,
+all of the transitions in the cross product of the sets has the same
+next state. For example, in the below transitions both IM/LoadStore
+and IM/Other\_GETX have the action {\tt z\_delayTrans}.
+\begin{verbatim}
+transition(IM, LoadStore) {
+ z_delayTrans;
+}
+
+transition(IM, Other_GETX) {
+ z_delayTrans;
+}
+\end{verbatim}
+These can be specified in a single declaration:
+\begin{verbatim}
+transition(IM, {LoadStore, Other_GETX}) {
+ z_delayTrans;
+}
+\end{verbatim}
+
+
+\newpage
+\section*{Appendix - Sample Cache Controller Specification}
+
+{\small
+\begin{verbatim}
+machine(processor, "Simple MI Processor") {
+
+ // AddressMsg
+ new_type(AddressMsg, "AddressMsg", message="yes", desc="");
+ type_field(AddressMsg, Address, "address",
+ desc="Physical address for this request",
+ c_type=PhysAddress, c_include="Address.hh", murphi_type="");
+ type_field(AddressMsg, Type, "type",
+ desc="Type of request (GetS, GetX, PutX, etc)",
+ c_type=CoherenceRequestType, c_include="CoherenceRequestType.hh", murphi_type="");
+ type_field(AddressMsg, Requestor, "requestor",
+ desc="Node who initiated the request",
+ c_type=ComponentID, c_include="ComponentID.hh", murphi_type="");
+
+ // DataMsg
+ new_type(DataMsg, "DataMsg", message="yes", desc="");
+ type_field(DataMsg, Address, "address",
+ desc="Physical address for this request",
+ c_type=PhysAddress, c_include="Address.hh", murphi_type="");
+ type_field(DataMsg, Destination, "destination",
+ desc="Node to whom the data is sent",
+ c_type=Set, c_include="Set.hh", murphi_type="");
+ type_field(DataMsg, DataBlk, "data",
+ desc="Node to whom the data is sent",
+ c_type=DataBlock, c_include="DataBlock.hh", murphi_type="");
+
+ // CacheEntry
+ new_type(CacheEntry, "CacheEntry");
+ type_field(CacheEntry, CacheState, "Cache state", desc="cache state",
+ c_type=CacheState, c_include="CacheState.hh", murphi_type="");
+ type_field(CacheEntry, DataBlk, "data", desc="data for the block",
+ c_type=DataBlock, c_include="DataBlock.hh", murphi_type="");
+
+ // DirectoryEntry
+ new_type(DirectoryEntry, "DirectoryEntry");
+ type_field(DirectoryEntry, DirectoryState, "Directory state", desc="Directory state",
+ c_type=DirectoryState, c_include="DirectoryState.hh", murphi_type="");
+ type_field(DirectoryEntry, DataBlk, "data", desc="data for the block",
+ c_type=DataBlock, c_include="DataBlock.hh", murphi_type="");
+
+\end{verbatim}
+\newpage
+\begin{verbatim}
+ // STATES
+ state(I, "I", desc="Idle");
+ state(M, "M", desc="Modified");
+ state(IM, "IM", desc="Idle, issued request but have not seen data yet");
+
+ // EVENTS
+
+ // From processor
+ event(LoadStore, "LoadStore", desc="Load or Store request from local processor") {
+ peek(mandatoryQueue_ptr, CacheMsg) {
+ trigger(in_msg.Address);
+ }
+ }
+
+ // From Address network
+ event(Other_GETX, "Other GETX", desc="Observed a GETX request from another processor") {
+ peek(addressNetwork_ptr, AddressMsg) {
+ if (in_msg.Requestor != id) {
+ trigger(in_msg.Address);
+ }
+ }
+ }
+
+ // From Data network
+ event(Data, "Data", desc="Data for this block from the data network") {
+ peek(dataNetwork_ptr, DataMsg) {
+ trigger(in_msg.Address);
+ }
+ }
+
+ // ACTIONS
+ action(g_issueGETX, "g", desc="Issue GETX.") {
+ enqueue(addressNetwork_ptr, AddressMsg) {
+ out_msg.Address := address;
+ out_msg.Type := "GETX";
+ out_msg.Requestor := id;
+ }
+ }
+
+ action(h_hit, "h", desc="Service load/store from the cache.") {
+ serviceLdSt(address, cacheMemory_ptr[address].DataBlk);
+ }
+
+ action(i_popAddressQueue, "i", desc="Pop incoming address queue.") {
+ dequeue(addressNetwork_ptr);
+ }
+
+ action(j_popDataQueue, "j", desc="Pop incoming data queue.") {
+ dequeue(dataNetwork_ptr);
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ dequeue(mandatoryQueue_ptr);
+ }
+
+ action(r_cacheToRequestor, "r", desc="Send data from the cache to the requestor") {
+ peek(addressNetwork_ptr, AddressMsg) {
+ enqueue(dataNetwork_ptr, DataMsg) {
+ out_msg.Address := address;
+ out_msg.Destination := in_msg.Requestor;
+ out_msg.DataBlk := cacheMemory_ptr[address].DataBlk;
+ }
+ }
+ }
+
+ action(w_writeDataToCache, "w", desc="Write data from data message into cache") {
+ peek(dataNetwork_ptr, DataMsg) {
+ cacheMemory_ptr[address].DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(z_delayTrans, "z", desc="Cannot be handled right now.") {
+ stall();
+ }
+
+ // TRANSITIONS
+
+ // Transitions from Idle
+ transition(I, LoadStore, IM) {
+ g_issueGETX;
+ }
+
+ transition(I, Other_GETX) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, LoadStore) {
+ h_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Other_GETX, I) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ // Transitions from IM
+ transition(IM, {LoadStore, Other_GETX}) {
+ z_delayTrans;
+ }
+
+ transition(IM, Data, M) {
+ w_writeDataToCache;
+ j_popDataQueue;
+ }
+}
+\end{verbatim}
+}
+\end{document}
+
diff --git a/src/mem/slicc/generator/fileio.cc b/src/mem/slicc/generator/fileio.cc
new file mode 100644
index 000000000..1707e5b7a
--- /dev/null
+++ b/src/mem/slicc/generator/fileio.cc
@@ -0,0 +1,66 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * fileio.C
+ *
+ * Description: See fileio.h
+ *
+ * $Id: fileio.C,v 3.3 2003/07/10 18:08:08 milo Exp $
+ *
+ * */
+
+#include "fileio.hh"
+
+void conditionally_write_file(string filename, ostringstream& sstr)
+{
+  // Read the current contents of the file, character by character.
+  // If the file does not exist (or cannot be opened) this simply
+  // leaves 'existing' empty.
+  string existing;
+  ifstream in;
+  in.open(filename.c_str());
+  char ch;
+  while (in.get(ch)) {
+    existing += ch;
+  }
+  in.close();
+
+  // If the generated text matches what is already on disk, keep the
+  // old file (this preserves its timestamp and avoids rebuilds).
+  if (existing == sstr.str()) {
+    return;
+  }
+
+  // Contents differ: replace the old file with the new text.
+  cout << " Overwriting file: " << filename << endl;
+  ofstream out;
+  out.open(filename.c_str());
+  out << sstr.str();
+  out.close();
+}
+
diff --git a/src/mem/slicc/generator/fileio.hh b/src/mem/slicc/generator/fileio.hh
new file mode 100644
index 000000000..3cca7ccaa
--- /dev/null
+++ b/src/mem/slicc/generator/fileio.hh
@@ -0,0 +1,46 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * fileio.h
+ *
+ * Description:
+ *
+ * $Id: fileio.h,v 3.2 2003/02/24 20:54:25 xu Exp $
+ *
+ * */
+
+#ifndef FILEIO_H
+#define FILEIO_H
+
+#include "slicc_global.hh"
+
+// Write sstr's contents to filename only when they differ from what is
+// already on disk; an unchanged file is left untouched (keeps timestamps).
+void conditionally_write_file(string filename, ostringstream& sstr);
+
+#endif //FILEIO_H
diff --git a/src/mem/slicc/generator/html_gen.cc b/src/mem/slicc/generator/html_gen.cc
new file mode 100644
index 000000000..3d17018e1
--- /dev/null
+++ b/src/mem/slicc/generator/html_gen.cc
@@ -0,0 +1,125 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * html_gen.C
+ *
+ * Description: See html_gen.h
+ *
+ * $Id: html_gen.C,v 3.4 2004/01/31 20:46:50 milo Exp $
+ *
+ * */
+
+#include "html_gen.hh"
+#include "fileio.hh"
+#include "SymbolTable.hh"
+
+string formatHTMLShorthand(const string shorthand);
+
+
+void createHTMLSymbol(const Symbol& sym, string title, ostream& out)
+{
+  // Emit a tiny stand-alone HTML page of the form
+  // "<title>: <formatted shorthand> - <description>".
+  out << "<HTML><BODY><BIG>" << endl
+      << title << ": " << endl
+      << formatHTMLShorthand(sym.getShorthand()) << " - "
+      << sym.getDescription()
+      << "</BIG></BODY></HTML>" << endl;
+}
+
+void createHTMLindex(string title, ostream& out)
+{
+  // Generate the top-level frameset page: a large "Table" frame on top
+  // and a short "Status" frame (30 pixels) below it.
+  out << "<html>" << endl;
+  out << "<head>" << endl;
+  out << "<title>" << title << "</title>" << endl;
+  out << "</head>" << endl;
+  out << "<frameset rows=\"*,30\">" << endl;
+  Vector<StateMachine*> machine_vec = g_sym_table.getStateMachines();
+  // Point the main frame at the first machine's table whenever at least
+  // one machine exists.  (Bug fix: this previously tested 'size() > 1',
+  // which showed the empty page when exactly one state machine was
+  // defined.)
+  if (machine_vec.size() > 0) {
+    string machine = machine_vec[0]->getIdent();
+    out << " <frame name=\"Table\" src=\"" << machine << "_table.html\">" << endl;
+  } else {
+    out << " <frame name=\"Table\" src=\"empty.html\">" << endl;
+  }
+
+  out << " <frame name=\"Status\" src=\"empty.html\">" << endl;
+  out << "</frameset>" << endl;
+  out << "</html>" << endl;
+}
+
+string formatHTMLShorthand(const string shorthand)
+{
+  // Translate the shorthand's formatting directives into HTML:
+  //   '_'  -> space
+  //   '^'  -> toggles superscript mode (<SUP> ... </SUP>)
+  //   '\X' -> X rendered bold in a larger font (symbol character)
+  //   '!'  -> logical end of the name; the rest is discarded
+  string result;
+  bool in_superscript = false;
+
+  for (unsigned int pos = 0; pos < shorthand.length(); pos++) {
+    char c = shorthand[pos];
+    if (c == '!') {
+      break; // logical end of shorthand name
+    }
+    switch (c) {
+    case '_':
+      result += " ";
+      break;
+    case '^':
+      // Toggle superscript mode, opening or closing the tag.
+      in_superscript = !in_superscript;
+      result += in_superscript ? "<SUP>" : "</SUP>";
+      break;
+    case '\\':
+      // Symbol character set: the next character is emphasized.
+      if (pos + 1 >= shorthand.length()) {
+        // -- FIXME: Add line number info later
+        cerr << "Encountered a `\\` without anything following it!" << endl;
+        exit( -1 );
+      }
+      pos++; // consume the escaped character
+      result += "<B><FONT size=+1>";
+      result += shorthand[pos];
+      result += "</FONT></B>";
+      break;
+    default:
+      // Ordinary character: pass through unmodified.
+      result += c;
+    }
+  }
+
+  // Close a superscript that was left open at the end of the string.
+  if (in_superscript) {
+    result += "</SUP>";
+  }
+
+  return result;
+}
+
+
diff --git a/src/mem/slicc/generator/html_gen.hh b/src/mem/slicc/generator/html_gen.hh
new file mode 100644
index 000000000..3f0de8df9
--- /dev/null
+++ b/src/mem/slicc/generator/html_gen.hh
@@ -0,0 +1,49 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * html_gen.h
+ *
+ * Description:
+ *
+ * $Id: html_gen.h,v 3.1 2001/12/12 01:00:35 milo Exp $
+ *
+ * */
+
+#ifndef HTML_GEN_H
+#define HTML_GEN_H
+
+#include "slicc_global.hh"
+#include "StateMachine.hh"
+
+// Expand the '_', '^', '\' and '!' directives of a shorthand name into
+// HTML markup (spaces, superscripts, bold symbol characters).
+string formatHTMLShorthand(const string shorthand);
+// Write the top-level frameset index page for the generated HTML tables.
+void createHTMLindex(string title, ostream& out);
+// Write a small HTML page describing one symbol (shorthand + description).
+void createHTMLSymbol(const Symbol& sym, string title, ostream& out);
+
+#endif //HTML_GEN_H
diff --git a/src/mem/slicc/generator/mif_gen.cc b/src/mem/slicc/generator/mif_gen.cc
new file mode 100644
index 000000000..0e6253654
--- /dev/null
+++ b/src/mem/slicc/generator/mif_gen.cc
@@ -0,0 +1,1718 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "mif_gen.hh"
+#include "State.hh"
+#include "Event.hh"
+#include "Action.hh"
+#include "Transition.hh"
+
+// -- Helper functions
+string formatShorthand(const string shorthand);
+string formatCellRuling(const string shorthand);
+
+// Emit a FrameMaker MIF document containing a two-column table that
+// lists every state of 'sm': shorthand in column one, description in
+// column two.  The MIF markup is kept in fixed literal fragments; only
+// the table title and the per-state rows vary.
+void printStateTableMIF(const StateMachine& sm, ostream& out)
+{
+ // MIF header, table/column formats, and the title paragraph up to the
+ // opening quote of the title string (the machine shorthand follows).
+ const string mif_prolog1 =
+"<MIFFile 5.50> # Generated by Multifacet MIF Mungers Inc\n\
+<Tbls\n\
+ <Tbl\n\
+ <TblID 1>\n\
+ <TblTag `Format A'>\n\
+ <TblFormat\n\
+\n\
+ <TblAlignment Center>\n\
+\n\
+ # <TblXColumnNum 0>\n\
+ <TblXColumnRuling `Medium'>\n\
+\n\
+ <TblLRuling `Medium'>\n\
+ <TblRRuling `Medium'>\n\
+ <TblTRuling `Medium'>\n\
+ <TblBRuling `Medium'>\n\
+\n\
+ <TblColumn\n\
+ <TblColumnNum 0>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnH\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnH\n\
+ <TblColumnBody\n\
+ <PgfTag `CellBody'>\n\
+ > # end of TblColumnBody\n\
+ <TblColumnF\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnF\n\
+ > # end of TblColumn\n\
+ <TblColumn\n\
+ <TblColumnNum 1>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnH\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnH\n\
+ <TblColumnBody\n\
+ <PgfTag `CellBody'>\n\
+ > # end of TblColumnBody\n\
+ <TblColumnF\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnF\n\
+ > # end of TblColumn\n\
+ > # end of TblFormat\n\
+\n\
+ <TblNumColumns 2>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnWidth 6.00\">\n\
+ <TblTitle\n\
+ <TblTitleContent\n\
+ <Para\n\
+ <PgfTag `TableTitle'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ > # end of Pgf\n\
+ <PgfNumString `TABLE 1. '>\n\
+ <ParaLine\n\
+ <Marker\n\
+ <MType 9>\n\
+ <MTypeName `Cross-Ref'>\n\
+ <MCurrPage `1'>\n\
+ > # end of Marker\n\
+ <String `";
+
+ // Closes the title string, then emits the header row ("State",
+ // "Description") and opens the table body.
+ const string mif_prolog2 =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of TblTitleContent\n\
+ > # end of TblTitle\n\
+\n\
+ <TblH\n\
+ <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.44444\">\n\
+ <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `State'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ <ParaLine\n\
+ <String `Description'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ > # end of Row\n\
+ > # end of TblH\n\
+\n\
+ <TblBody\n\
+";
+
+ // Per-row markup emitted before each state's shorthand string.
+ const string row_before_state =
+" <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.22222\">\n\
+ <Cell\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `";
+
+ // Markup between the shorthand cell and the description cell.
+ const string row_between_state_desc =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ <Cell\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <ParaLine\n\
+ <String `";
+
+ // Markup that closes a row after the description string.
+ const string row_after_desc =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ > # end of Row\n\
+";
+
+ // Closes the table body and anchors the table in a paragraph.
+ const string mif_epilog =
+" > # end of TblBody\n\
+ > # end of Tbl\n\
+> # end of Tbls\n\
+\n\
+ <Para\n\
+ <ParaLine\n\
+ <ATbl 1>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+\n\
+# End of MIFFile\n\
+";
+
+ // Table title: "<machine shorthand> states".
+ out << mif_prolog1;
+ out << formatShorthand( sm.getShorthand() );
+ out << " states";
+ out << mif_prolog2;
+
+ // One body row per state: shorthand, then description.
+ for( int i = 0; i < sm.numStates(); i++ )
+ {
+ out << row_before_state;
+ out << formatShorthand( sm.getState( i ).getShorthand() );
+ out << row_between_state_desc;
+ out << sm.getState( i ).getDescription();
+ out << row_after_desc;
+ }
+
+ out << mif_epilog;
+}
+
+
+void printEventTableMIF(const StateMachine& sm, ostream& out)
+{
+ const string mif_prolog1 =
+"<MIFFile 5.50> # Generated by Multifacet MIF Mungers Inc\n\
+<Tbls\n\
+ <Tbl\n\
+ <TblID 1>\n\
+ <TblTag `Format A'>\n\
+ <TblFormat\n\
+\n\
+ <TblAlignment Center>\n\
+\n\
+ # <TblXColumnNum 0>\n\
+ <TblXColumnRuling `Medium'>\n\
+\n\
+ <TblLRuling `Medium'>\n\
+ <TblRRuling `Medium'>\n\
+ <TblTRuling `Medium'>\n\
+ <TblBRuling `Medium'>\n\
+\n\
+ <TblColumn\n\
+ <TblColumnNum 0>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnH\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnH\n\
+ <TblColumnBody\n\
+ <PgfTag `CellBody'>\n\
+ > # end of TblColumnBody\n\
+ <TblColumnF\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnF\n\
+ > # end of TblColumn\n\
+ <TblColumn\n\
+ <TblColumnNum 1>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnH\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnH\n\
+ <TblColumnBody\n\
+ <PgfTag `CellBody'>\n\
+ > # end of TblColumnBody\n\
+ <TblColumnF\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnF\n\
+ > # end of TblColumn\n\
+ > # end of TblFormat\n\
+\n\
+ <TblNumColumns 2>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnWidth 6.00\">\n\
+ <TblTitle\n\
+ <TblTitleContent\n\
+ <Para\n\
+ <PgfTag `TableTitle'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ > # end of Pgf\n\
+ <PgfNumString `TABLE 1. '>\n\
+ <ParaLine\n\
+ <Marker\n\
+ <MType 9>\n\
+ <MTypeName `Cross-Ref'>\n\
+ <MCurrPage `1'>\n\
+ > # end of Marker\n\
+ <String `";
+ const string mif_prolog2 =
+"'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of TblTitleContent\n\
+ > # end of TblTitle\n\
+\n\
+ <TblH\n\
+ <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.44444\">\n\
+ <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `Event'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <ParaLine\n\
+ <String `Description'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ > # end of Row\n\
+ > # end of TblH\n\
+\n\
+ <TblBody\n\
+";
+
+ const string row_before_event =
+" <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.22222\">\n\
+ <Cell\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `";
+
+ const string row_between_event_desc =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ <Cell\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <ParaLine\n\
+ <String `";
+
+ const string row_after_desc =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ > # end of Row\n\
+";
+
+ const string mif_epilog =
+" > # end of TblBody\n\
+ > # end of Tbl\n\
+> # end of Tbls\n\
+\n\
+ <Para\n\
+ <ParaLine\n\
+ <ATbl 1>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+\n\
+# End of MIFFile\n\
+";
+
+ out << mif_prolog1;
+ out << formatShorthand( sm.getShorthand() );
+ out << " events";
+ out << mif_prolog2;
+
+ for( int i = 0; i < sm.numEvents(); i++ )
+ {
+ out << row_before_event;
+ out << formatShorthand( sm.getEvent( i ).getShorthand() );
+ out << row_between_event_desc;
+ out << sm.getEvent( i ).getDescription();
+ out << row_after_desc;
+ }
+
+ out << mif_epilog;
+}
+
+
+void printActionTableMIF(const StateMachine& sm, ostream& out)
+{
+ const string mif_prolog1 =
+"<MIFFile 5.50> # Generated by Multifacet MIF Mungers Inc\n\
+<Tbls\n\
+ <Tbl\n\
+ <TblID 1>\n\
+ <TblTag `Format A'>\n\
+ <TblFormat\n\
+\n\
+ <TblAlignment Center>\n\
+\n\
+ # <TblXColumnNum 0>\n\
+ <TblXColumnRuling `Medium'>\n\
+\n\
+ <TblLRuling `Medium'>\n\
+ <TblRRuling `Medium'>\n\
+ <TblTRuling `Medium'>\n\
+ <TblBRuling `Medium'>\n\
+\n\
+ <TblColumn\n\
+ <TblColumnNum 0>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnH\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnH\n\
+ <TblColumnBody\n\
+ <PgfTag `CellBody'>\n\
+ > # end of TblColumnBody\n\
+ <TblColumnF\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnF\n\
+ > # end of TblColumn\n\
+ <TblColumn\n\
+ <TblColumnNum 1>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnH\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnH\n\
+ <TblColumnBody\n\
+ <PgfTag `CellBody'>\n\
+ > # end of TblColumnBody\n\
+ <TblColumnF\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnF\n\
+ > # end of TblColumn\n\
+ > # end of TblFormat\n\
+\n\
+ <TblNumColumns 2>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnWidth 6.00\">\n\
+ <TblTitle\n\
+ <TblTitleContent\n\
+ <Para\n\
+ <PgfTag `TableTitle'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ > # end of Pgf\n\
+ <PgfNumString `TABLE 1. '>\n\
+ <ParaLine\n\
+ <Marker\n\
+ <MType 9>\n\
+ <MTypeName `Cross-Ref'>\n\
+ <MCurrPage `1'>\n\
+ > # end of Marker\n\
+ <String `";
+ const string mif_prolog2 =
+"'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of TblTitleContent\n\
+ > # end of TblTitle\n\
+\n\
+ <TblH\n\
+ <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.44444\">\n\
+ <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `Action'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <ParaLine\n\
+ <String `Description'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ > # end of Row\n\
+ > # end of TblH\n\
+\n\
+ <TblBody\n\
+";
+
+ const string row_before_action =
+" <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.22222\">\n\
+ <Cell\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `";
+
+ const string row_between_action_desc =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ <Cell\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <ParaLine\n\
+ <String `";
+
+ const string row_after_desc =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ > # end of Row\n\
+";
+
+ const string mif_epilog =
+" > # end of TblBody\n\
+ > # end of Tbl\n\
+> # end of Tbls\n\
+\n\
+ <Para\n\
+ <ParaLine\n\
+ <ATbl 1>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+\n\
+# End of MIFFile\n\
+";
+
+ out << mif_prolog1;
+ out << formatShorthand( sm.getShorthand() );
+ out << " actions";
+ out << mif_prolog2;
+
+ for( int i = 0; i < sm.numActions(); i++ )
+ {
+ out << row_before_action;
+ out << formatShorthand( sm.getAction( i ).getShorthand() );
+ out << row_between_action_desc;
+ out << sm.getAction( i ).getDescription();
+ out << row_after_desc;
+ }
+
+ out << mif_epilog;
+}
+
+
+void printTransitionTableMIF(const StateMachine& sm, ostream& out)
+{
+ const string mif_prolog =
+"<MIFFile 5.50> # Generated by Multifacet MIF Mungers Inc\n\
+<Tbls\n\
+ <Tbl\n\
+ <TblID 1>\n\
+ <TblTag `Format A'>\n\
+ <TblFormat\n\
+\n\
+ <TblAlignment Center>\n\
+\n\
+ # <TblXColumnNum 0>\n\
+ <TblXColumnRuling `Medium'>\n\
+\n\
+ <TblLRuling `Medium'>\n\
+ <TblRRuling `Medium'>\n\
+ <TblTRuling `Medium'>\n\
+ <TblBRuling `Medium'>\n\
+ \n\
+";
+
+ const string tbl_fmt_before_col_num =
+" <TblColumn\n\
+ <TblColumnNum ";
+
+ const string tbl_fmt_after_col_num =
+ ">\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnH\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnH\n\
+ <TblColumnBody\n\
+ <PgfTag `CellBody'>\n\
+ > # end of TblColumnBody\n\
+ <TblColumnF\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnF\n\
+ > # end of TblColumn\n\
+";
+
+ const string tbl_fmt_before_num_cols =
+" > # end of TblFormat\n\
+\n\
+ <TblNumColumns ";
+
+ const string tbl_fmt_each_col_width_begin =
+ ">\n\
+ <TblColumnWidth ";
+
+ const string tbl_fmt_each_col_width_end = "\"";
+
+ const string tbl_before_first_header1 =
+ ">\n\
+ <TblTitle\n\
+ <TblTitleContent\n\
+ <Para\n\
+ <PgfTag `TableTitle'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ > # end of Pgf\n\
+ <PgfNumString `TABLE 1. '>\n\
+ <ParaLine\n\
+ <Marker\n\
+ <MType 9>\n\
+ <MTypeName `Cross-Ref'>\n\
+ <MCurrPage `1'>\n\
+ > # end of Marker\n\
+ <String `";
+
+ const string tbl_before_first_header2 =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of TblTitleContent\n\
+ > # end of TblTitle\n\
+\n\
+ <TblH\n\
+ <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.44444\">";
+
+ const string tbl_before_each_header =
+" <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `";
+
+ const string tbl_before_each_rot_header =
+" <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellAngle 270>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <ParaLine\n\
+ <String `";
+
+ const string tbl_after_each_header =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+";
+
+ const string before_first_row =
+" > # end of Row\n\
+ > # end of TblH\n\
+\n\
+ <TblBody\n\
+";
+
+ const string row_before_first_cell =
+" <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.22222\">";
+
+ const string row_cell_before_ruling =
+" <Cell\n\
+";
+
+ const string row_cell_before_contents =
+" <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `";
+
+ const string row_cell_after_contents =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+";
+
+ const string row_empty_cell =
+" <CellFill 5>\n\
+ <CellColor `Cyan'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <ParaLine\n\
+ <String `'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+";
+
+ const string row_after_last_cell =
+" > # end of Row\n\
+";
+
+
+ const string mif_epilog =
+" > # end of TblBody\n\
+ > # end of Tbl\n\
+> # end of Tbls\n\
+\n\
+ <Para\n\
+ <ParaLine\n\
+ <ATbl 1>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+\n\
+# End of MIFFile\n\
+";
+
+ int i, j, num_rows, num_cols;
+ string row_ruling;
+ string col_ruling;
+
+ num_rows = sm.numStates();
+ num_cols = sm.numEvents() + 1;
+
+ // -- Prolog
+ out << mif_prolog;
+
+ // -- Table format (for each column)
+ for( i = 0; i < num_cols; i++ )
+ {
+ out << tbl_fmt_before_col_num;
+ out << i;
+ out << tbl_fmt_after_col_num;
+ }
+
+ // -- Spell out width of each column
+
+ // -- FIXME: make following constants into parameters
+ const float total_table_width = 7.5; // -- Total page width = 7.5" (portrait mode)
+ const float min_col_width = 0.35; // -- Min col width (for legibility)
+ const float max_col_width = 0.75; // -- Max col width (for aesthetics)
+ float column_width;
+
+ // -- Calculate column width and clamp it within a range
+ column_width = total_table_width / num_cols;
+ column_width = ((column_width < min_col_width)
+ ? min_col_width
+ : ((column_width > max_col_width)
+ ? max_col_width
+ : column_width));
+
+ out << tbl_fmt_before_num_cols;
+ out << num_cols;
+ for( i = 0; i < num_cols; i++ )
+ {
+ out << tbl_fmt_each_col_width_begin << column_width << tbl_fmt_each_col_width_end;
+ }
+
+ // -- Column headers
+ out << tbl_before_first_header1;
+ out << formatShorthand( sm.getShorthand() );
+ out << " transitions";
+ out << tbl_before_first_header2;
+
+ out << tbl_before_each_header;
+ out << "State";
+ out << tbl_after_each_header;
+
+ for( i = 0; i < sm.numEvents(); i++ )
+ {
+ out << tbl_before_each_rot_header;
+ out << formatShorthand( sm.getEvent(i).getShorthand() );
+ out << tbl_after_each_header;
+ }
+ out << before_first_row;
+
+
+ // -- Body of table
+ for( i = 0; i < num_rows; i++ )
+ {
+ // -- Each row
+ out << row_before_first_cell;
+
+ // -- Figure out ruling
+ if (sm.getState(i).existPair("format")) {
+ row_ruling = formatCellRuling( sm.getState(i).lookupPair("format"));
+ } else {
+ row_ruling = "";
+ }
+
+ // -- First column = state
+ out << row_cell_before_ruling;
+ out << row_ruling;
+ out << row_cell_before_contents;
+ out << formatShorthand( sm.getState(i).getShorthand() );
+ out << row_cell_after_contents;
+
+ // -- One column for each event
+ for( j = 0; j < sm.numEvents(); j++ )
+ {
+ const Transition* trans_ptr = sm.getTransPtr( i, j );
+
+ // -- Figure out ruling
+ if (sm.getEvent(j).existPair("format")) {
+ col_ruling = formatCellRuling(sm.getEvent(j).lookupPair("format"));
+ } else {
+ col_ruling = "";
+ }
+
+ out << row_cell_before_ruling;
+ out << row_ruling;
+ out << col_ruling;
+
+ if( trans_ptr != NULL )
+ {
+ string actions;
+ string nextState;
+
+ // -- Get the actions
+ actions = formatShorthand( trans_ptr->getActionShorthands() );
+
+ // -- Get the next state
+ // FIXME: should compare index, not the string
+ if (trans_ptr->getNextStateShorthand() !=
+ sm.getState(i).getShorthand() )
+ {
+ nextState = formatShorthand( trans_ptr->getNextStateShorthand() );
+ } else
+ {
+ nextState = "";
+ }
+
+ // -- Print out "actions/next-state"
+ out << row_cell_before_contents;
+ out << actions;
+ if ((nextState.length() != 0) && (actions.length() != 0)) {
+ out << "/";
+ }
+ out << nextState;
+ out << row_cell_after_contents;
+ }
+ else
+ {
+ out << row_empty_cell;
+ }
+
+ }
+
+ out << row_after_last_cell;
+ }
+
+ // -- Epilog
+ out << mif_epilog;
+
+}
+/*
+void printTBETableMIF(const StateMachine& sm, const Vector<Field>& fields, ostream& out)
+{
+ const string mif_prolog1 =
+"<MIFFile 5.50> # Generated by Multifacet MIF Mungers Inc\n\
+<Tbls\n\
+ <Tbl\n\
+ <TblID 1>\n\
+ <TblTag `Format A'>\n\
+ <TblFormat\n\
+\n\
+ <TblAlignment Center>\n\
+\n\
+ # # <TblXColumnNum 0>\n\
+ <TblXColumnRuling `Medium'>\n\
+\n\
+ <TblLRuling `Medium'>\n\
+ <TblRRuling `Medium'>\n\
+ <TblTRuling `Medium'>\n\
+ <TblBRuling `Medium'>\n\
+\n\
+ <TblColumn\n\
+ <TblColumnNum 0>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnH\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnH\n\
+ <TblColumnBody\n\
+ <PgfTag `CellBody'>\n\
+ > # end of TblColumnBody\n\
+ <TblColumnF\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnF\n\
+ > # end of TblColumn\n\
+ <TblColumn\n\
+ <TblColumnNum 1>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnH\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnH\n\
+ <TblColumnBody\n\
+ <PgfTag `CellBody'>\n\
+ > # end of TblColumnBody\n\
+ <TblColumnF\n\
+ <PgfTag `CellHeading'>\n\
+ > # end of TblColumnF\n\
+ > # end of TblColumn\n\
+ > # end of TblFormat\n\
+\n\
+ <TblNumColumns 2>\n\
+ <TblColumnWidth 0.51\">\n\
+ <TblColumnWidth 6.00\">\n\
+ <TblTitle\n\
+ <TblTitleContent\n\
+ <Para\n\
+ <PgfTag `TableTitle'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ > # end of Pgf\n\
+ <PgfNumString `TABLE 1. '>\n\
+ <ParaLine\n\
+ <Marker\n\
+ <MType 9>\n\
+ <MTypeName `Cross-Ref'>\n\
+ <MCurrPage `1'>\n\
+ > # end of Marker\n\
+ <String `";
+
+ const string mif_prolog2 =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of TblTitleContent\n\
+ > # end of TblTitle\n\
+\n\
+ <TblH\n\
+ <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.44444\">\n\
+ <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `Field'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ <Cell\n\
+ <CellLRuling `Medium'>\n\
+ <CellBRuling `Medium'>\n\
+ <CellRRuling `Medium'>\n\
+ <CellTRuling `Medium'>\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellHeading'>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ <ParaLine\n\
+ <String `Description'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ > # end of Row\n\
+ > # end of TblH\n\
+\n\
+ <TblBody\n\
+";
+
+ const string row_before_state =
+" <Row\n\
+ <RowMaxHeight 14.0\">\n\
+ <RowHeight 0.22222\">\n\
+ <Cell\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <Pgf\n\
+ <PgfAlignment Center>\n\
+ <PgfFont \n\
+ <FTag `'>\n\
+ <FFamily `Times'>\n\
+ <FVar `Regular'>\n\
+ <FWeight `Regular'>\n\
+ <FAngle `Regular'>\n\
+ <FPostScriptName `Times-Roman'>\n\
+ <FEncoding `FrameRoman'>\n\
+ <FSize 11.0 pt>\n\
+ <FUnderlining FNoUnderlining>\n\
+ <FOverline No>\n\
+ <FStrike No>\n\
+ <FChangeBar No>\n\
+ <FOutline No>\n\
+ <FShadow No>\n\
+ <FPairKern Yes>\n\
+ <FTsume No>\n\
+ <FCase FAsTyped>\n\
+ <FPosition FNormal>\n\
+ <FDX 0.0%>\n\
+ <FDY 0.0%>\n\
+ <FDW 0.0%>\n\
+ <FStretch 100.0%>\n\
+ <FLanguage USEnglish>\n\
+ <FLocked No>\n\
+ <FSeparation 0>\n\
+ <FColor `Black'>\n\
+ > # end of PgfFont\n\
+ >\n\
+ <ParaLine\n\
+ <String `";
+
+ const string row_between_state_desc =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ <Cell\n\
+ <CellContent\n\
+ <Para\n\
+ <PgfTag `CellBody'>\n\
+ <ParaLine\n\
+ <String `";
+
+ const string row_after_desc =
+ "'>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+ > # end of CellContent\n\
+ > # end of Cell\n\
+ > # end of Row\n\
+";
+
+ const string mif_epilog =
+" > # end of TblBody\n\
+ > # end of Tbl\n\
+> # end of Tbls\n\
+\n\
+ <Para\n\
+ <ParaLine\n\
+ <ATbl 1>\n\
+ > # end of ParaLine\n\
+ > # end of Para\n\
+\n\
+# End of MIFFile\n\
+";
+
+ out << mif_prolog1;
+ out << sm.getShorthand();
+ out << " TBE";
+ out << mif_prolog2;
+
+ for( int i = 0; i < fields.size(); i++ ) {
+ out << row_before_state;
+ out << formatShorthand(fields[i].getShorthand());
+ out << row_between_state_desc;
+ out << fields[i].getDescription();
+ out << row_after_desc;
+ }
+
+ out << mif_epilog;
+}
+*/
+// --
+// -- Helper function to do some shorthand formatting (kludge before we
+// -- get the tuple attributes into the state machine language.
+// -- Current convention:
+// -- - each `^' indicates a toggle between normal mode and superscript
+// -- - each escaped (using `\') character indicates a letter formatted
+// -- using the Symbol character set. \a = alpha, \b = beta, \c = chi etc.
+// -- See the FrameMaker character sets manual in the Online Manuals.
+// -- - a `!' indicates extra stuff at the end which can be ignored (used
+// -- for determining cell ruling and so on)
+// --
+string formatShorthand(const string shorthand)
+{
+ string munged_shorthand = "";
+ bool mode_is_normal = true;
+ const string mif_superscript = "'> <Font <FPosition FSuperscript> <FLocked No> > <String `";
+ const string mif_normal = "'> <Font <FPosition FNormal> <FLocked No> > <String `";
+ const string mif_symbol = "'> <Font <FFamily `Symbol'> <FPostScriptName `Symbol'> <FEncoding `FrameRoman'> <FLocked No> > <String `";
+ const string mif_times = "'> <Font <FFamily `Times'> <FPostScriptName `Times-Roman'> <FEncoding `FrameRoman'> <FLocked No> > <String `";
+
+
+ // -- Walk over the string, processing superscript directives
+ for( unsigned int i = 0; i < shorthand.length(); i++ )
+ {
+ if( shorthand[i] == '!' )
+ {
+ // -- Reached logical end of shorthand name
+ break;
+ }
+ else if( shorthand[i] == '^' )
+ {
+ // -- Process super/subscript formatting
+
+ mode_is_normal = !mode_is_normal;
+ if( mode_is_normal )
+ {
+ // -- Back to normal mode
+ munged_shorthand += mif_normal;
+ }
+ else
+ {
+ // -- Going to superscript mode
+ munged_shorthand += mif_superscript;
+ }
+
+ }
+ else if( shorthand[i] == '\\' )
+ {
+ // -- Process Symbol character set
+ if( (i + 1) < shorthand.length() )
+ {
+ i++; // -- Proceed to next char. Yes I know that changing the loop var is ugly!
+ munged_shorthand += mif_symbol;
+ munged_shorthand += shorthand[i];
+ munged_shorthand += mif_times;
+ }
+ else
+ {
+ // -- FIXME: Add line number info later
+ cerr << "Encountered a `\\` without anything following it!" << endl;
+ exit( -1 );
+ }
+
+ }
+ else
+ {
+ // -- Pass on un-munged
+ munged_shorthand += shorthand[i];
+ }
+
+ } // -- end for all characters in shorthand
+
+ // -- Do any other munging
+
+ // -- Return the formatted shorthand name
+ return munged_shorthand;
+}
+
+
+// --
+// -- Helper function to figure out where to put rules in the table (kludge before we
+// -- get the tuple attributes into the state machine language.
+// -- Current convention:
+// -- - a `!' in the shorthand indicates the beginning of ruling information
+// -- - `b' => bottom of this row is ruled
+// -- - `r' => right of this column is ruled
+// --
+string formatCellRuling( const string shorthand)
+{
+ for( unsigned int i = 0; i < shorthand.length(); i++ )
+ {
+ if( shorthand[i] == '!' )
+ {
+ // -- OK, found beginning of ruling information
+ for( unsigned int j = i+1; j < shorthand.length(); j++ )
+ {
+ if( shorthand[j] == 'b')
+ {
+ // -- Rule the bottom
+ return "<CellBRuling `Medium'>\n";
+ }
+ else if( shorthand[j] == 'r')
+ {
+            // -- Rule the right
+ return "<CellRRuling `Medium'>\n";
+ }
+
+ }
+
+ // -- No ruling directives recognized, return default ruling
+ return "";
+ }
+
+ }
+
+ // -- No ruling information found, return default ruling
+ return "";
+}
diff --git a/src/mem/slicc/generator/mif_gen.hh b/src/mem/slicc/generator/mif_gen.hh
new file mode 100644
index 000000000..ba1dc0b0b
--- /dev/null
+++ b/src/mem/slicc/generator/mif_gen.hh
@@ -0,0 +1,45 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: mif_gen.h,v 3.1 2001/12/12 01:00:35 milo Exp $
+ *
+ */
+
+#ifndef MIF_GEN_H
+#define MIF_GEN_H
+
+#include "StateMachine.hh"
+
+void printStateTableMIF(const StateMachine& sm, ostream& out);
+void printEventTableMIF(const StateMachine& sm, ostream& out);
+void printActionTableMIF(const StateMachine& sm, ostream& out);
+void printTransitionTableMIF(const StateMachine& sm, ostream& out);
+
+#endif //MIF_GEN_H
diff --git a/src/mem/slicc/main.cc b/src/mem/slicc/main.cc
new file mode 100644
index 000000000..acd9e73f9
--- /dev/null
+++ b/src/mem/slicc/main.cc
@@ -0,0 +1,246 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * */
+
+#include "main.hh"
+#include "StateMachine.hh"
+#include "mif_gen.hh"
+#include "html_gen.hh"
+#include "fileio.hh"
+#include "DeclListAST.hh"
+#include "Type.hh"
+#include "SymbolTable.hh"
+#include "Event.hh"
+#include "State.hh"
+#include "Action.hh"
+#include "Transition.hh"
+
+// -- Main conversion functions
+
+void printDotty(const StateMachine& sm, ostream& out);
+void printTexTable(const StateMachine& sm, ostream& out);
+
+DeclListAST* g_decl_list_ptr;
+DeclListAST* parse(string filename);
+
+// SLICC driver.  Parses the .sm protocol files named on the command line,
+// runs two passes over the resulting ASTs (pass 1 discovers the machine
+// declarations, pass 2 generates code), then writes the generated C++
+// files -- plus optional HTML and the MIF documentation -- into the given
+// output directories.  Exits with status 1 on usage or parse errors.
+//
+//   argv[1]  directory that receives the generated C++ code
+//   argv[2]  directory that receives the generated HTML (and MIF) files
+//   argv[3]  protocol identifier (embedded into protocol_name.hh)
+//   argv[4]  "html" to generate HTML tables, "no_html" to suppress them
+//   argv[5+] the .sm input files to parse
+int main(int argc, char *argv[])
+{
+ cerr << "SLICC v0.3" << endl;
+
+ if (argc < 5) {
+ cerr << " Usage: generator.exec <code path> <html path> <ident> <html direction> files ... " << endl;
+ exit(1);
+ }
+
+ // The path we should place the generated code
+ string code_path(argv[1]);
+ code_path += "/";
+
+ // The path we should place the generated html
+ string html_path(argv[2]);
+ html_path += "/";
+
+ string ident(argv[3]);
+
+ string html_generate(argv[4]);
+
+ // One AST per input file; the ASTs are kept alive across both passes.
+ Vector<DeclListAST*> decl_list_vec;
+
+ // Parse
+ cerr << "Parsing..." << endl;
+ for(int i=5; i<argc; i++) {
+ cerr << " " << argv[i] << endl;
+ DeclListAST* decl_list_ptr = parse(argv[i]);
+ decl_list_vec.insertAtBottom(decl_list_ptr);
+ }
+
+ // Find machines
+ // Pass 1 must complete over ALL files before pass 2 starts, so that
+ // machines declared in one file are visible when generating another.
+ cerr << "Generator pass 1..." << endl;
+ int size = decl_list_vec.size();
+ for(int i=0; i<size; i++) {
+ DeclListAST* decl_list_ptr = decl_list_vec[i];
+ decl_list_ptr->findMachines();
+ }
+
+ // Generate Code
+ cerr << "Generator pass 2..." << endl;
+ for(int i=0; i<size; i++) {
+ DeclListAST* decl_list_ptr = decl_list_vec[i];
+ decl_list_ptr->generate();
+ delete decl_list_ptr;
+ }
+
+ // Generate C/C++ files
+ cerr << "Writing C files..." << endl;
+
+ {
+ // Generate the name of the protocol
+ // Emit protocol_name.hh, a tiny header exposing the protocol ident as
+ // a C string; conditionally_write_file() avoids touching the file (and
+ // triggering rebuilds) when the content is unchanged.
+ ostringstream sstr;
+ sstr << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<<endl;
+ sstr << endl;
+ sstr << "#ifndef PROTOCOL_NAME_H" << endl;
+ sstr << "#define PROTOCOL_NAME_H" << endl;
+ sstr << endl;
+ sstr << "const char CURRENT_PROTOCOL[] = \"";
+ sstr << ident << "\";\n";
+ sstr << "#endif // PROTOCOL_NAME_H" << endl;
+ conditionally_write_file(code_path + "/protocol_name.hh", sstr);
+ }
+
+ g_sym_table.writeCFiles(code_path);
+
+ // Generate HTML files
+ if (html_generate == "html") {
+ cerr << "Writing HTML files..." << endl;
+ g_sym_table.writeHTMLFiles(html_path);
+ } else if (html_generate == "no_html") {
+ cerr << "No HTML files generated" << endl;
+ } else {
+ // Unrecognized argv[4]: warn but continue -- code generation above has
+ // already succeeded.
+ cerr << "ERROR, unidentified html direction" << endl;
+ }
+
+ cerr << "Done..." << endl;
+
+ // Generate MIF files
+ // MIF output always goes next to the HTML output.
+ cerr << "Writing MIF files..." << endl;
+ g_sym_table.writeMIFFiles(html_path);
+
+ cerr << "Done..." << endl;
+
+}
+ /*
+ if(!strcmp(argv[2], "parse")) {
+ // Parse only
+ } else if(!strcmp(argv[2], "state")) {
+ printStateTableMIF(s, cout);
+ } else if(!strcmp( argv[2], "event")) {
+ printEventTableMIF(s, cout);
+ } else if(!strcmp( argv[2], "action")) {
+ printActionTableMIF(s, cout);
+ } else if(!strcmp( argv[2], "transition")) {
+ printTransitionTableMIF(s, cout);
+ } else if(!strcmp( argv[2], "tbe")) {
+ for(int i=0; i<s.numTypes(); i++) {
+ if (s.getType(i).getIdent() == "TBE") {
+ printTBETableMIF(s, s.getTypeFields(i), cout);
+ }
+ }
+ } else if(!strcmp( argv[2], "dot")) {
+ printDotty(s, cout);
+ } else if(!strcmp( argv[2], "latex")) {
+ printTexTable(s, cout);
+ } else if (!strcmp( argv[2], "murphi")) {
+ printMurphi(s, cout);
+ } else if (!strcmp( argv[2], "html")) {
+ printHTML(s);
+ } else if(!strcmp( argv[2], "code")) {
+ if (argc < 4) {
+ cerr << "Error: Wrong number of command line parameters!" << endl;
+ exit(1);
+ }
+ */
+
+
+// Write the state machine's transition graph in GraphViz "dot" format:
+// one directed edge per transition, from current state to next state,
+// labelled "event/actions".  Transitions whose action shorthand is "--"
+// or "z" are omitted (presumably ignore/stall entries with no real
+// actions -- confirm against the protocol tables).
+void printDotty(const StateMachine& sm, ostream& out)
+{
+ out << "digraph " << sm.getIdent() << " {" << endl;
+ for(int i=0; i<sm.numTransitions(); i++) {
+ const Transition& t = sm.getTransition(i);
+ // Don't print ignored transitions
+ if ((t.getActionShorthands() != "--") && (t.getActionShorthands() != "z")) {
+ // if (t.getStateShorthand() != t.getNextStateShorthand()) {
+ out << " " << t.getStateShorthand() << " -> ";
+ out << t.getNextStateShorthand() << "[label=\"";
+ out << t.getEventShorthand() << "/"
+ << t.getActionShorthands() << "\"]" << endl;
+ }
+ }
+ out << "}" << endl;
+}
+
+// Write the state machine's transition table as a standalone LaTeX
+// document containing one tabular: a row per state, a column per event
+// (column headers rotated 90 degrees), each cell holding
+// "actions/next-state".  The next state is omitted when the transition
+// stays in the same state, and undefined (state,event) cells are left
+// empty.
+void printTexTable(const StateMachine& sm, ostream& out)
+{
+ const Transition* trans_ptr;
+ int stateIndex, eventIndex;
+ string actions;
+ string nextState;
+
+ out << "%& latex" << endl;
+ out << "\\documentclass[12pt]{article}" << endl;
+ out << "\\usepackage{graphics}" << endl;
+ out << "\\begin{document}" << endl;
+ // out << "{\\large" << endl;
+ // Column spec: one 'l' column for the state name, then one per event.
+ out << "\\begin{tabular}{|l||";
+ for(eventIndex=0; eventIndex < sm.numEvents(); eventIndex++) {
+ out << "l";
+ }
+ out << "|} \\hline" << endl;
+
+ // Header row: rotated event shorthands.
+ for(eventIndex=0; eventIndex < sm.numEvents(); eventIndex++) {
+ out << " & \\rotatebox{90}{";
+ out << sm.getEvent(eventIndex).getShorthand();
+ out << "}";
+ }
+ out << "\\\\ \\hline \\hline" << endl;
+
+ for(stateIndex=0; stateIndex < sm.numStates(); stateIndex++) {
+ out << sm.getState(stateIndex).getShorthand();
+ for(eventIndex=0; eventIndex < sm.numEvents(); eventIndex++) {
+ out << " & ";
+ trans_ptr = sm.getTransPtr(stateIndex, eventIndex);
+ if (trans_ptr == NULL) {
+ // Undefined transition: leave the cell empty.
+ } else {
+ actions = trans_ptr->getActionShorthands();
+ // FIXME: should compare index, not the string
+ if (trans_ptr->getNextStateShorthand() !=
+ sm.getState(stateIndex).getShorthand() ) {
+ nextState = trans_ptr->getNextStateShorthand();
+ } else {
+ // Self-transition: suppress the redundant next-state.
+ nextState = "";
+ }
+
+ out << actions;
+ // Separator only when both halves of the cell are present.
+ if ((nextState.length() != 0) && (actions.length() != 0)) {
+ out << "/";
+ }
+ out << nextState;
+ }
+ }
+ out << "\\\\" << endl;
+ }
+ out << "\\hline" << endl;
+ out << "\\end{tabular}" << endl;
+ // out << "}" << endl;
+ out << "\\end{document}" << endl;
+}
+
diff --git a/src/mem/slicc/main.hh b/src/mem/slicc/main.hh
new file mode 100644
index 000000000..e9fdee034
--- /dev/null
+++ b/src/mem/slicc/main.hh
@@ -0,0 +1,48 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * main.h
+ *
+ * Description:
+ *
+ * $Id: main.h,v 3.2 2003/03/17 01:50:01 xu Exp $
+ *
+ * */
+
+#ifndef MAIN_H
+#define MAIN_H
+
+#include "slicc_global.hh"
+#include "DeclListAST.hh"
+#include "Map.hh"
+
+// Global list of parsed top-level declarations.  Filled in by the bison
+// 'file' rule (see parser.yy) and returned by parse(); consumed by main().
+extern DeclListAST* g_decl_list_ptr;
+
+#endif //MAIN_H
diff --git a/src/mem/slicc/parser/lexer.ll b/src/mem/slicc/parser/lexer.ll
new file mode 100644
index 000000000..7903b0ee7
--- /dev/null
+++ b/src/mem/slicc/parser/lexer.ll
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+%{
+
+#include <assert.h>
+#include "ASTs.hh"
+#include "parser.h"
+#include <string>
+
+/*
+ * SLICC token scanner.  g_line_number is maintained here (newlines inside
+ * code and comments) so yyerror() can report accurate locations.  String
+ * tokens are returned with their surrounding quotes stripped.
+ *
+ * Note: the IMBEDED start state is declared below but no rule in this
+ * file ever enters it -- presumably a leftover; confirm before removing.
+ */
+
+extern "C" int yylex();
+extern "C" void yyerror();
+extern "C" int yywrap()
+{
+ return 1;
+}
+
+%}
+%x CMNT
+%x IMBEDED
+%%
+
+[\t ]+ /* Ignore whitespace */
+[\n] { g_line_number++; }
+"//".*[\n] { g_line_number++; } /* C++ style comments */
+
+"/*" BEGIN CMNT;
+<CMNT>. ;
+<CMNT>\n { g_line_number++; }
+<CMNT>"*/" { BEGIN INITIAL; } /* end of C-style block comment */
+
+true { yylval.str_ptr = new string(yytext); return LIT_BOOL; }
+false { yylval.str_ptr = new string(yytext); return LIT_BOOL; }
+global { return GLOBAL_DECL; }
+machine { return MACHINE_DECL; }
+in_port { return IN_PORT_DECL; }
+out_port { return OUT_PORT_DECL; }
+action { return ACTION_DECL; }
+transition { return TRANSITION_DECL; }
+structure { return STRUCT_DECL; }
+external_type { return EXTERN_TYPE_DECL; }
+enumeration { return ENUM_DECL; }
+peek { return PEEK; }
+enqueue { return ENQUEUE; }
+copy_head { return COPY_HEAD; }
+check_allocate { return CHECK_ALLOCATE; }
+check_stop_slots { return CHECK_STOP_SLOTS; }
+if { return IF; }
+else { return ELSE; }
+return { return RETURN; }
+THIS { return THIS; }
+CHIP { return CHIP; }
+void { yylval.str_ptr = new string(yytext); return VOID; }
+
+== { yylval.str_ptr = new string(yytext); return EQ; }
+!= { yylval.str_ptr = new string(yytext); return NE; }
+[<] { yylval.str_ptr = new string(yytext); return '<'; }
+[>] { yylval.str_ptr = new string(yytext); return '>'; }
+[<][<] { yylval.str_ptr = new string(yytext); return LEFTSHIFT; }
+[>][>] { yylval.str_ptr = new string(yytext); return RIGHTSHIFT; }
+[<][=] { yylval.str_ptr = new string(yytext); return LE; }
+[>][=] { yylval.str_ptr = new string(yytext); return GE; }
+[!] { yylval.str_ptr = new string(yytext); return NOT; }
+[&][&] { yylval.str_ptr = new string(yytext); return AND; }
+[|][|] { yylval.str_ptr = new string(yytext); return OR; }
+[+] { yylval.str_ptr = new string(yytext); return PLUS; }
+[-] { yylval.str_ptr = new string(yytext); return DASH; }
+[*] { yylval.str_ptr = new string(yytext); return STAR; }
+[/] { yylval.str_ptr = new string(yytext); return SLASH; }
+:: { return DOUBLE_COLON; }
+[:] { return ':'; }
+[;] { return SEMICOLON; }
+[[] { return '['; }
+[]] { return ']'; }
+[{] { return '{'; }
+[}] { return '}'; }
+[(] { return '('; }
+[)] { return ')'; }
+[,] { return ','; }
+[=] { return '='; }
+:= { return ASSIGN; }
+[.] { return DOT; }
+
+[0-9]*[.][0-9]* { yylval.str_ptr = new string(yytext); return FLOATNUMBER; } /* NOTE(review): '*' on both sides also matches a lone "." -- '+' was probably intended */
+[0-9]* { yylval.str_ptr = new string(yytext); return NUMBER; } /* NOTE(review): '*' permits a zero-length match; '+' was probably intended */
+[a-zA-Z_][a-zA-Z_0-9]{0,50} { yylval.str_ptr = new string(yytext); return IDENT; }
+\"[^"\n]*\" { yytext[strlen(yytext)-1] = '\0'; yylval.str_ptr = new string(yytext+1); return STRING; } /* strip surrounding double quotes */
+\'[^'\n]*\' { yytext[strlen(yytext)-1] = '\0'; yylval.str_ptr = new string(yytext+1); return STRING; } /* strip surrounding single quotes */
+
+. { return OTHER; } /* Need so that we handle all characters */
+
+%%
+
diff --git a/src/mem/slicc/parser/parser.yy b/src/mem/slicc/parser/parser.yy
new file mode 100644
index 000000000..f4d9a1a62
--- /dev/null
+++ b/src/mem/slicc/parser/parser.yy
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+%{
+#include <string>
+#include <stdio.h>
+#include <assert.h>
+#include "ASTs.hh"
+
+// Deep grammars (long declaration/statement lists) need a large parser
+// stack; YYERROR_VERBOSE improves the messages passed to yyerror().
+#define YYMAXDEPTH 100000
+#define YYERROR_VERBOSE
+
+extern char* yytext;
+
+extern "C" void yyerror(char*);
+extern "C" int yylex();
+
+%}
+
+// Semantic values: every production builds AST nodes (or Vectors of
+// them); the lexer supplies raw strings via str_ptr.
+%union {
+ string* str_ptr;
+ Vector<string>* string_vector_ptr;
+
+ // Decls
+ DeclAST* decl_ptr;
+ DeclListAST* decl_list_ptr;
+ Vector<DeclAST*>* decl_vector_ptr;
+
+ // TypeField
+ TypeFieldAST* type_field_ptr;
+ Vector<TypeFieldAST*>* type_field_vector_ptr;
+
+ // Type
+ TypeAST* type_ptr;
+ Vector<TypeAST*>* type_vector_ptr;
+
+ // Formal Params
+ FormalParamAST* formal_param_ptr;
+ Vector<FormalParamAST*>* formal_param_vector_ptr;
+
+ // Statements
+ StatementAST* statement_ptr;
+ StatementListAST* statement_list_ptr;
+ Vector<StatementAST*>* statement_vector_ptr;
+
+ // Pairs
+ PairAST* pair_ptr;
+ PairListAST* pair_list_ptr;
+
+ // Expressions
+ VarExprAST* var_expr_ptr;
+ ExprAST* expr_ptr;
+ Vector<ExprAST*>* expr_vector_ptr;
+}
+
+%type <type_ptr> type void type_or_void
+%type <type_vector_ptr> types type_list
+
+ // Formal Params
+%type <formal_param_ptr> formal_param
+%type <formal_param_vector_ptr> formal_params formal_param_list
+
+%type <str_ptr> ident field
+%type <string_vector_ptr> ident_list idents
+
+%type <statement_ptr> statement if_statement
+%type <statement_list_ptr> statement_list
+%type <statement_vector_ptr> statements
+
+%type <decl_ptr> decl
+%type <decl_list_ptr> decl_list
+%type <decl_vector_ptr> decls
+
+%type <type_field_vector_ptr> type_members type_enums type_methods
+%type <type_field_ptr> type_member type_enum type_method
+
+%type <var_expr_ptr> var
+%type <expr_ptr> expr literal enumeration
+%type <expr_vector_ptr> expr_list
+
+%type <pair_ptr> pair
+%type <pair_list_ptr> pair_list pairs
+
+%token <str_ptr> IDENT STRING NUMBER FLOATNUMBER LIT_BOOL VOID
+%token <str_ptr> IMBED IMBED_TYPE
+%token CHIP THIS
+%token ASSIGN DOUBLE_COLON DOT SEMICOLON COLON
+%token GLOBAL_DECL MACHINE_DECL IN_PORT_DECL OUT_PORT_DECL
+%token PEEK ENQUEUE COPY_HEAD CHECK_ALLOCATE CHECK_STOP_SLOTS
+//%token DEQUEUE REMOVE_EARLY SKIP_EARLY PEEK_EARLY
+%token DEBUG_EXPR_TOKEN DEBUG_MSG_TOKEN
+%token ACTION_DECL TRANSITION_DECL TYPE_DECL STRUCT_DECL EXTERN_TYPE_DECL ENUM_DECL
+%token TYPE_FIELD OTHER IF ELSE RETURN
+
+%token <str_ptr> EQ NE '<' '>' LE GE NOT AND OR PLUS DASH STAR SLASH RIGHTSHIFT LEFTSHIFT
+
+// Operator precedence, lowest to highest; comparisons are non-associative.
+%left OR
+%left AND
+%nonassoc EQ NE
+%nonassoc '<' '>' GE LE
+%left PLUS DASH
+%left STAR SLASH
+%nonassoc NOT
+%nonassoc DOUBLE_COLON DOT '['
+
+%%
+
+// Entry point: the whole file is a declaration list, published through
+// the global g_decl_list_ptr so parse() (below, after the second %%) can
+// return it.
+file: decl_list { g_decl_list_ptr = $1; }
+
+decl_list: decls { $$ = new DeclListAST($1); }
+
+// List rules throughout this grammar are right-recursive: the empty
+// alternative allocates the Vector and each element is prepended with
+// insertAtTop(), preserving source order.
+decls: decl decls { $2->insertAtTop($1); $$ = $2; }
+ | { $$ = new Vector<DeclAST*>; }
+ ;
+
+decl: MACHINE_DECL '(' ident pair_list ')' '{' decl_list '}' { $$ = new MachineAST($3, $4, $7); }
+ | ACTION_DECL '(' ident pair_list ')' statement_list { $$ = new ActionDeclAST($3, $4, $6); }
+ | IN_PORT_DECL '(' ident ',' type ',' var pair_list ')' statement_list { $$ = new InPortDeclAST($3, $5, $7, $8, $10); }
+ | OUT_PORT_DECL '(' ident ',' type ',' var pair_list ')' SEMICOLON { $$ = new OutPortDeclAST($3, $5, $7, $8); }
+ | TRANSITION_DECL '(' ident_list ',' ident_list ',' ident pair_list ')' ident_list { $$ = new TransitionDeclAST($3, $5, $7, $8, $10); }
+ | TRANSITION_DECL '(' ident_list ',' ident_list pair_list ')' ident_list { $$ = new TransitionDeclAST($3, $5, NULL, $6, $8); }
+ | EXTERN_TYPE_DECL '(' type pair_list ')' SEMICOLON { $4->addPair(new PairAST("external", "yes")); $$ = new TypeDeclAST($3, $4, NULL); }
+ | EXTERN_TYPE_DECL '(' type pair_list ')' '{' type_methods '}' { $4->addPair(new PairAST("external", "yes")); $$ = new TypeDeclAST($3, $4, $7); }
+ | GLOBAL_DECL '(' type pair_list ')' '{' type_members '}' { $4->addPair(new PairAST("global", "yes"));$$ = new TypeDeclAST($3, $4, $7); }
+ | STRUCT_DECL '(' type pair_list ')' '{' type_members '}' { $$ = new TypeDeclAST($3, $4, $7); }
+ | ENUM_DECL '(' type pair_list ')' '{' type_enums '}' { $4->addPair(new PairAST("enumeration", "yes")); $$ = new EnumDeclAST($3, $4, $7); }
+ | type ident pair_list SEMICOLON { $$ = new ObjDeclAST($1, $2, $3); }
+ | type ident '(' formal_param_list ')' pair_list SEMICOLON { $$ = new FuncDeclAST($1, $2, $4, $6, NULL); } // non-void function
+ | void ident '(' formal_param_list ')' pair_list SEMICOLON { $$ = new FuncDeclAST($1, $2, $4, $6, NULL); } // void function
+ | type ident '(' formal_param_list ')' pair_list statement_list { $$ = new FuncDeclAST($1, $2, $4, $6, $7); } // non-void function
+ | void ident '(' formal_param_list ')' pair_list statement_list { $$ = new FuncDeclAST($1, $2, $4, $6, $7); } // void function
+ ;
+
+// Type fields
+
+type_members: type_member type_members { $2->insertAtTop($1); $$ = $2; }
+ | { $$ = new Vector<TypeFieldAST*>; }
+ ;
+
+type_member: type ident pair_list SEMICOLON { $$ = new TypeFieldMemberAST($1, $2, $3, NULL); }
+ | type ident ASSIGN expr SEMICOLON { $$ = new TypeFieldMemberAST($1, $2, new PairListAST(), $4); }
+ ;
+
+// Methods
+type_methods: type_method type_methods { $2->insertAtTop($1); $$ = $2; }
+ | { $$ = new Vector<TypeFieldAST*>; }
+ ;
+
+type_method: type_or_void ident '(' type_list ')' pair_list SEMICOLON { $$ = new TypeFieldMethodAST($1, $2, $4, $6); }
+ ;
+
+// Enum fields
+type_enums: type_enum type_enums { $2->insertAtTop($1); $$ = $2; }
+ | { $$ = new Vector<TypeFieldAST*>; }
+ ;
+
+type_enum: ident pair_list SEMICOLON { $$ = new TypeFieldEnumAST($1, $2); }
+ ;
+
+// Type
+type_list : types { $$ = $1; }
+ | { $$ = new Vector<TypeAST*>; }
+ ;
+
+types : type ',' types { $3->insertAtTop($1); $$ = $3; }
+ | type { $$ = new Vector<TypeAST*>; $$->insertAtTop($1); }
+ ;
+
+type: ident { $$ = new TypeAST($1); }
+ ;
+
+void: VOID { $$ = new TypeAST($1); }
+ ;
+
+type_or_void: type { $$ = $1; }
+ | void { $$ = $1; }
+ ;
+
+// Formal Param
+formal_param_list : formal_params { $$ = $1; }
+ | { $$ = new Vector<FormalParamAST*>; }
+ ;
+
+formal_params : formal_param ',' formal_params { $3->insertAtTop($1); $$ = $3; }
+ | formal_param { $$ = new Vector<FormalParamAST*>; $$->insertAtTop($1); }
+ ;
+
+formal_param : type ident { $$ = new FormalParamAST($1, $2); }
+ ;
+
+// Idents and lists
+ident: IDENT { $$ = $1; } ;
+
+ident_list: '{' idents '}' { $$ = $2; }
+ | ident { $$ = new Vector<string>; $$->insertAtTop(*($1)); delete $1; }
+ ;
+
+// idents copies the strings into the Vector and frees the lexer-allocated
+// originals; separators (';' or ',') are optional and interchangeable.
+idents: ident SEMICOLON idents { $3->insertAtTop(*($1)); $$ = $3; delete $1; }
+ | ident ',' idents { $3->insertAtTop(*($1)); $$ = $3; delete $1; }
+ | ident idents { $2->insertAtTop(*($1)); $$ = $2; delete $1; }
+ | { $$ = new Vector<string>; }
+ ;
+
+// Pair and pair lists
+pair_list: ',' pairs { $$ = $2; }
+ | { $$ = new PairListAST(); }
+ // NOTE(review): this rule lacks the terminating ';' that the other
+ // rules have; bison accepts it, but it is worth adding for consistency.
+
+pairs : pair ',' pairs { $3->addPair($1); $$ = $3; }
+ | pair { $$ = new PairListAST(); $$->addPair($1); }
+ ;
+
+pair : ident '=' STRING { $$ = new PairAST($1, $3); }
+ | ident '=' ident { $$ = new PairAST($1, $3); }
+ | STRING { $$ = new PairAST(new string("short"), $1); } // bare string == "short" pair
+ ;
+
+// Below are the rules for action descriptions
+
+statement_list: '{' statements '}' { $$ = new StatementListAST($2); }
+ ;
+
+statements: statement statements { $2->insertAtTop($1); $$ = $2; }
+ | { $$ = new Vector<StatementAST*>; }
+ ;
+
+expr_list: expr ',' expr_list { $3->insertAtTop($1); $$ = $3; }
+ | expr { $$ = new Vector<ExprAST*>; $$->insertAtTop($1); }
+ | { $$ = new Vector<ExprAST*>; }
+ ;
+
+statement: expr SEMICOLON { $$ = new ExprStatementAST($1); }
+ | expr ASSIGN expr SEMICOLON { $$ = new AssignStatementAST($1, $3); }
+ | ENQUEUE '(' var ',' type pair_list ')' statement_list { $$ = new EnqueueStatementAST($3, $5, $6, $8); }
+ | PEEK '(' var ',' type ')' statement_list { $$ = new PeekStatementAST($3, $5, $7, "peek"); }
+// | PEEK_EARLY '(' var ',' type ')' statement_list { $$ = new PeekStatementAST($3, $5, $7, "peekEarly"); }
+ | COPY_HEAD '(' var ',' var pair_list ')' SEMICOLON { $$ = new CopyHeadStatementAST($3, $5, $6); }
+ | CHECK_ALLOCATE '(' var ')' SEMICOLON { $$ = new CheckAllocateStatementAST($3); }
+ | CHECK_STOP_SLOTS '(' var ',' STRING ',' STRING ')' SEMICOLON { $$ = new CheckStopSlotsStatementAST($3, $5, $7); }
+ | if_statement { $$ = $1; }
+ | RETURN expr SEMICOLON { $$ = new ReturnStatementAST($2); }
+ ;
+
+// 'else if' chains are represented by wrapping the nested if in a
+// single-statement list.
+if_statement: IF '(' expr ')' statement_list ELSE statement_list { $$ = new IfStatementAST($3, $5, $7); }
+ | IF '(' expr ')' statement_list { $$ = new IfStatementAST($3, $5, NULL); }
+ | IF '(' expr ')' statement_list ELSE if_statement { $$ = new IfStatementAST($3, $5, new StatementListAST($7)); }
+ ;
+
+expr: var { $$ = $1; }
+ | literal { $$ = $1; }
+ | enumeration { $$ = $1; }
+ | ident '(' expr_list ')' { $$ = new FuncCallExprAST($1, $3); }
+
+
+// globally access a local chip component and call a method
+ | THIS DOT var '[' expr ']' DOT var DOT ident '(' expr_list ')' { $$ = new ChipComponentAccessAST($3, $5, $8, $10, $12 ); }
+// globally access a local chip component and access a data member
+ | THIS DOT var '[' expr ']' DOT var DOT field { $$ = new ChipComponentAccessAST($3, $5, $8, $10 ); }
+// globally access a specified chip component and call a method
+ | CHIP '[' expr ']' DOT var '[' expr ']' DOT var DOT ident '(' expr_list ')' { $$ = new ChipComponentAccessAST($3, $6, $8, $11, $13, $15 ); }
+// globally access a specified chip component and access a data member
+ | CHIP '[' expr ']' DOT var '[' expr ']' DOT var DOT field { $$ = new ChipComponentAccessAST($3, $6, $8, $11, $13 ); }
+
+
+ | expr DOT field { $$ = new MemberExprAST($1, $3); }
+ | expr DOT ident '(' expr_list ')' { $$ = new MethodCallExprAST($1, $3, $5); }
+ | type DOUBLE_COLON ident '(' expr_list ')' { $$ = new MethodCallExprAST($1, $3, $5); }
+ | expr '[' expr_list ']' { $$ = new MethodCallExprAST($1, new string("lookup"), $3); } // indexing is sugar for .lookup()
+ | expr STAR expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr SLASH expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr PLUS expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr DASH expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr '<' expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr '>' expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr LE expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr GE expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr EQ expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr NE expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr AND expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr OR expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr RIGHTSHIFT expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+ | expr LEFTSHIFT expr { $$ = new InfixOperatorExprAST($1, $2, $3); }
+// | NOT expr { $$ = NULL; } // FIXME - unary not
+// | DASH expr %prec NOT { $$ = NULL; } // FIXME - unary minus
+ | '(' expr ')' { $$ = $2; }
+ ;
+
+literal: STRING { $$ = new LiteralExprAST($1, "string"); }
+ | NUMBER { $$ = new LiteralExprAST($1, "int"); }
+ | FLOATNUMBER { $$ = new LiteralExprAST($1, "int"); } // NOTE(review): "int" for a float literal looks wrong -- confirm whether a float type was intended
+ | LIT_BOOL { $$ = new LiteralExprAST($1, "bool"); }
+ ;
+
+enumeration: ident ':' ident { $$ = new EnumExprAST(new TypeAST($1), $3); }
+ ;
+
+var: ident { $$ = new VarExprAST($1); }
+ ;
+
+field: ident { $$ = $1; }
+ ;
+
+%%
+
+extern FILE *yyin;
+
+// Parse one SLICC (.sm) file.  Resets the global line/file tracking used
+// for error reporting, points the lexer at the file, runs yyparse(), and
+// returns the declaration list that the grammar's 'file' rule stored in
+// g_decl_list_ptr.  Exits the process if the file cannot be opened.
+DeclListAST* parse(string filename)
+{
+ FILE *file;
+ file = fopen(filename.c_str(), "r");
+ if (!file) {
+ cerr << "Error: Could not open file: " << filename << endl;
+ exit(1);
+ }
+ g_line_number = 1;
+ g_file_name = filename;
+ yyin = file;
+ g_decl_list_ptr = NULL;
+ yyparse();
+ // NOTE(review): 'file' is never fclose()d -- harmless for this
+ // short-lived tool, but technically a descriptor leak per input file.
+ return g_decl_list_ptr;
+}
+
+// Bison error callback: report "<file>:<line>: <message> at <token>" on
+// stderr and abort the whole run -- SLICC attempts no error recovery.
+extern "C" void yyerror(char* s)
+{
+ fprintf(stderr, "%s:%d: %s at %s\n", g_file_name.c_str(), g_line_number, s, yytext);
+ exit(1);
+}
+
diff --git a/src/mem/slicc/slicc_global.hh b/src/mem/slicc/slicc_global.hh
new file mode 100644
index 000000000..72af5be58
--- /dev/null
+++ b/src/mem/slicc/slicc_global.hh
@@ -0,0 +1,127 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SLICC_GLOBAL_H
+#define SLICC_GLOBAL_H
+
+#include <assert.h> /* slicc needs to include this in order to use classes in
+ * ../common directory.
+ */
+
+#include "std-includes.hh"
+#include "Map.hh"
+
+// Sized integer aliases used throughout slicc and the gems_common code.
+typedef unsigned char uint8;
+typedef unsigned int uint32;
+typedef unsigned long long uint64;
+
+typedef signed char int8;
+typedef int int32;
+typedef long long int64;
+
+typedef long long integer_t;
+typedef unsigned long long uinteger_t;
+
+// Compile-time switch for the ASSERT macro below; set to false to make
+// all ASSERTs no-ops.
+const bool ASSERT_FLAG = true;
+
+// when CHECK_RESOURCE_DEADLOCK is enabled, slicc will generate additional code
+// that works in conjuction with the resources rank value specified in the protocol
+// to detect invalid resource stalls as soon as they occur.
+const bool CHECK_INVALID_RESOURCE_STALLS = false;
+
+// Replace the standard assert with a version that prints the failing
+// expression, function, file, and line, and -- when stdin is a tty --
+// pauses (printing the PID) so a debugger can be attached before abort().
+#undef assert
+#define assert(EXPR) ASSERT(EXPR)
+
+#define ASSERT(EXPR)\
+{\
+ if (ASSERT_FLAG) {\
+ if (!(EXPR)) {\
+ cerr << "failed assertion '"\
+ << #EXPR << "' at fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << endl;\
+ if(isatty(STDIN_FILENO)) {\
+ cerr << "At this point you might want to attach a debug to ";\
+ cerr << "the running and get to the" << endl;\
+ cerr << "crash site; otherwise press enter to continue" << endl;\
+ cerr << "PID: " << getpid();\
+ cerr << endl << flush; \
+ char c; \
+ cin.get(c); \
+ }\
+ abort();\
+ }\
+ }\
+}
+
+class State;
+class Event;
+class Symbol;
+class Var;
+
+// Allow raw symbol pointers to serve as keys in __gnu_cxx hash containers
+// (pre-C++11 hash_map/hash_set): hash a pointer by its address value.
+namespace __gnu_cxx {
+ template <> struct hash<State*>
+ {
+ size_t operator()(State* s) const { return (size_t) s; }
+ };
+ template <> struct hash<Event*>
+ {
+ size_t operator()(Event* s) const { return (size_t) s; }
+ };
+ template <> struct hash<Symbol*>
+ {
+ size_t operator()(Symbol* s) const { return (size_t) s; }
+ };
+ template <> struct hash<Var*>
+ {
+ size_t operator()(Var* s) const { return (size_t) s; }
+ };
+} // namespace __gnu_cxx
+
+// Matching key-equality for those containers: compare pointers by
+// identity, not by pointed-to contents.
+namespace std {
+ template <> struct equal_to<Event*>
+ {
+ bool operator()(Event* s1, Event* s2) const { return s1 == s2; }
+ };
+ template <> struct equal_to<State*>
+ {
+ bool operator()(State* s1, State* s2) const { return s1 == s2; }
+ };
+ template <> struct equal_to<Symbol*>
+ {
+ bool operator()(Symbol* s1, Symbol* s2) const { return s1 == s2; }
+ };
+ template <> struct equal_to<Var*>
+ {
+ bool operator()(Var* s1, Var* s2) const { return s1 == s2; }
+ };
+} // namespace std
+
+#endif //SLICC_GLOBAL_H
diff --git a/src/mem/slicc/symbols/Action.hh b/src/mem/slicc/symbols/Action.hh
new file mode 100644
index 000000000..0f6e49290
--- /dev/null
+++ b/src/mem/slicc/symbols/Action.hh
@@ -0,0 +1,52 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#ifndef ACTION_H
+#define ACTION_H
+
+#include "Symbol.hh"
+
+class Action : public Symbol {
+public:
+ Action(string id,
+ const Map<Var*, string>& resources,
+ const Location& location,
+ const Map<string, string>& pairs) : Symbol(id, location, pairs) { m_resources = resources; }
+ const Map<Var*, string>& getResources() const { return m_resources; }
+ void print(ostream& out) const { out << "[Action: " << getIdent() << "]"; }
+
+private:
+ Map<Var*, string> m_resources;
+};
+
+#endif //ACTION_H
diff --git a/src/mem/slicc/symbols/Event.hh b/src/mem/slicc/symbols/Event.hh
new file mode 100644
index 000000000..f272e8eb1
--- /dev/null
+++ b/src/mem/slicc/symbols/Event.hh
@@ -0,0 +1,45 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#ifndef EVENT_H
+#define EVENT_H
+
+#include "Symbol.hh"
+
+class Event : public Symbol {
+public:
+ Event(string id, const Location& location, const Map<string, string>& pairs) : Symbol(id, location, pairs) {}
+ void print(ostream& out) const { out << "[Event: " << getIdent() << "]"; }
+};
+
+#endif //EVENT_H
diff --git a/src/mem/slicc/symbols/Func.cc b/src/mem/slicc/symbols/Func.cc
new file mode 100644
index 000000000..1af1e299c
--- /dev/null
+++ b/src/mem/slicc/symbols/Func.cc
@@ -0,0 +1,144 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Func.cc
+ *
+ * Description: See Func.hh
+ *
+ * $Id$
+ *
+ */
+
+#include "Func.hh"
+#include "SymbolTable.hh"
+#include "fileio.hh"
+#include "StateMachine.hh"
+
+Func::Func(string id, const Location& location,
+ Type* type_ptr, const Vector<Type*>& param_type_vec,
+ const Vector<string>& param_string_vec, string body,
+ const Map<string, string>& pairs, StateMachine* machine_ptr)
+ : Symbol(id, location, pairs)
+{
+ m_type_ptr = type_ptr;
+ m_param_type_vec = param_type_vec;
+ m_param_string_vec = param_string_vec;
+ m_body = body;
+ m_isInternalMachineFunc = false;
+
+ if (machine_ptr == NULL) {
+ m_c_ident = id;
+ } else if (existPair("external") || existPair("primitive")) {
+ m_c_ident = id;
+ } else {
+ m_machineStr = machine_ptr->toString();
+ m_c_ident = m_machineStr + "_" + id; // Append with machine name
+ m_isInternalMachineFunc = true;
+ }
+}
+
+void Func::funcPrototype(string& code) const
+{
+ if (isExternal()) {
+ // Do nothing
+ } else {
+ string return_type = m_type_ptr->cIdent();
+ Type* void_type_ptr = g_sym_table.getType("void");
+ if (existPair("return_by_ref") && (m_type_ptr != void_type_ptr)) {
+ return_type += "&";
+ }
+ code += return_type + " " + cIdent() + "(";
+ int size = m_param_string_vec.size();
+ for(int i=0; i<size; i++) {
+ // Generate code
+ if (i != 0) {
+ code += ", ";
+ }
+ code += m_param_string_vec[i];
+ }
+ code += ");\n";
+ }
+}
+
+// This writes out a member function of the Chip object
+void Func::writeCFiles(string path) const
+{
+ if (isExternal()) {
+ // Do nothing
+ } else {
+ ostringstream out;
+
+ // Header
+ out << "/** Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< " */" << endl;
+ out << endl;
+ out << "#include \"Types.hh\"" << endl;
+ out << "#include \"Chip.hh\"" << endl;
+ if (m_isInternalMachineFunc) {
+ out << "#include \"" << m_machineStr << "_Controller.hh\"" << endl;
+ }
+ out << endl;
+
+ // Generate function header
+ string code;
+ Type* void_type_ptr = g_sym_table.getType("void");
+ string return_type = m_type_ptr->cIdent();
+ code += return_type;
+ if (existPair("return_by_ref") && m_type_ptr != void_type_ptr) {
+ code += "&";
+ }
+ if (!m_isInternalMachineFunc) {
+ code += " Chip::" + cIdent() + "(";
+ } else {
+ code += " " + m_machineStr + "_Controller::" + cIdent() + "(";
+ }
+ int size = m_param_type_vec.size();
+ for(int i=0; i<size; i++) {
+ // Generate code
+ if (i != 0) {
+ code += ", ";
+ }
+ code += m_param_string_vec[i];
+ }
+ code += ")";
+
+ // Function body
+ code += "\n{\n";
+ code += m_body;
+ code += "}\n";
+ out << code << endl;
+
+ // Write it out
+ conditionally_write_file(path + cIdent() + ".cc", out);
+ }
+}
+
+void Func::print(ostream& out) const
+{
+}
diff --git a/src/mem/slicc/symbols/Func.hh b/src/mem/slicc/symbols/Func.hh
new file mode 100644
index 000000000..763827701
--- /dev/null
+++ b/src/mem/slicc/symbols/Func.hh
@@ -0,0 +1,96 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Func.hh
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef FUNC_H
+#define FUNC_H
+
+#include "slicc_global.hh"
+#include "Type.hh"
+class StateMachine;
+
+class Func : public Symbol {
+public:
+ // Constructors
+ Func(string id, const Location& location,
+ Type* type_ptr, const Vector<Type*>& param_type_vec, const Vector<string>& param_string_vec,
+ string body, const Map<string, string>& pairs, StateMachine* machine_ptr);
+
+ // Destructor
+ ~Func() {}
+
+ // Public Methods
+ string cIdent() const { return m_c_ident; }
+ const Vector<Type*>& getParamTypes() const { return m_param_type_vec; }
+ Type* getReturnType() const { return m_type_ptr; }
+ void writeCFiles(string path) const;
+ void funcPrototype(string& code) const;
+ bool isExternal() const { return existPair("external"); }
+ bool isInternalMachineFunc() const { return m_isInternalMachineFunc; }
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ Func(const Func& obj);
+ Func& operator=(const Func& obj);
+
+ // Data Members (m_ prefix)
+ Type* m_type_ptr;
+ Vector<Type*> m_param_type_vec;
+ Vector<string> m_param_string_vec;
+ string m_body;
+ string m_c_ident;
+ string m_machineStr;
+ bool m_isInternalMachineFunc;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Func& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Func& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //FUNC_H
diff --git a/src/mem/slicc/symbols/State.hh b/src/mem/slicc/symbols/State.hh
new file mode 100644
index 000000000..cac743e57
--- /dev/null
+++ b/src/mem/slicc/symbols/State.hh
@@ -0,0 +1,45 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#ifndef STATE_H
+#define STATE_H
+
+#include "Symbol.hh"
+
+class State : public Symbol {
+public:
+ State(string id, const Location& location, const Map<string, string>& pairs) : Symbol(id, location, pairs) {}
+ void print(ostream& out) const { out << "[State: " << getIdent() << "]"; }
+};
+
+#endif //STATE_H
diff --git a/src/mem/slicc/symbols/StateMachine.cc b/src/mem/slicc/symbols/StateMachine.cc
new file mode 100644
index 000000000..d4436870e
--- /dev/null
+++ b/src/mem/slicc/symbols/StateMachine.cc
@@ -0,0 +1,993 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * */
+
+#include "StateMachine.hh"
+#include "fileio.hh"
+#include "html_gen.hh"
+#include "Action.hh"
+#include "Event.hh"
+#include "State.hh"
+#include "Transition.hh"
+#include "Var.hh"
+#include "SymbolTable.hh"
+#include "util.hh"
+#include "Vector.hh"
+
+StateMachine::StateMachine(string ident, const Location& location, const Map<string, string>& pairs)
+ : Symbol(ident, location, pairs)
+{
+ m_table_built = false;
+}
+
+StateMachine::~StateMachine()
+{
+ // FIXME
+ // assert(0);
+}
+
+void StateMachine::addState(State* state_ptr)
+{
+ assert(m_table_built == false);
+ m_state_map.add(state_ptr, m_states.size());
+ m_states.insertAtBottom(state_ptr);
+}
+
+void StateMachine::addEvent(Event* event_ptr)
+{
+ assert(m_table_built == false);
+ m_event_map.add(event_ptr, m_events.size());
+ m_events.insertAtBottom(event_ptr);
+}
+
+void StateMachine::addAction(Action* action_ptr)
+{
+ assert(m_table_built == false);
+
+ // Check for duplicate action
+ int size = m_actions.size();
+ for(int i=0; i<size; i++) {
+ if (m_actions[i]->getIdent() == action_ptr->getIdent()) {
+ m_actions[i]->warning("Duplicate action definition: " + m_actions[i]->getIdent());
+ action_ptr->error("Duplicate action definition: " + action_ptr->getIdent());
+ }
+ if (m_actions[i]->getShorthand() == action_ptr->getShorthand()) {
+ m_actions[i]->warning("Duplicate action shorthand: " + m_actions[i]->getIdent());
+ m_actions[i]->warning(" shorthand = " + m_actions[i]->getShorthand());
+ action_ptr->warning("Duplicate action shorthand: " + action_ptr->getIdent());
+ action_ptr->error(" shorthand = " + action_ptr->getShorthand());
+ }
+ }
+
+ m_actions.insertAtBottom(action_ptr);
+}
+
+void StateMachine::addTransition(Transition* trans_ptr)
+{
+ assert(m_table_built == false);
+ trans_ptr->checkIdents(m_states, m_events, m_actions);
+ m_transitions.insertAtBottom(trans_ptr);
+}
+
+void StateMachine::addFunc(Func* func_ptr)
+{
+ // register func in the symbol table
+ g_sym_table.registerSym(func_ptr->toString(), func_ptr);
+ m_internal_func_vec.insertAtBottom(func_ptr);
+}
+
+void StateMachine::buildTable()
+{
+ assert(m_table_built == false);
+ int numStates = m_states.size();
+ int numEvents = m_events.size();
+ int numTransitions = m_transitions.size();
+ int stateIndex, eventIndex;
+
+ for(stateIndex=0; stateIndex < numStates; stateIndex++) {
+ m_table.insertAtBottom(Vector<Transition*>());
+ for(eventIndex=0; eventIndex < numEvents; eventIndex++) {
+ m_table[stateIndex].insertAtBottom(NULL);
+ }
+ }
+
+ for(int i=0; i<numTransitions; i++) {
+ Transition* trans_ptr = m_transitions[i];
+
+ // Track which actions we touch so we know if we use them all --
+ // really this should be done for all symbols as part of the
+ // symbol table, then only trigger it for Actions, States, Events,
+ // etc.
+
+ Vector<Action*> actions = trans_ptr->getActions();
+ for(int actionIndex=0; actionIndex < actions.size(); actionIndex++) {
+ actions[actionIndex]->markUsed();
+ }
+
+ stateIndex = getStateIndex(trans_ptr->getStatePtr());
+ eventIndex = getEventIndex(trans_ptr->getEventPtr());
+ if (m_table[stateIndex][eventIndex] != NULL) {
+ m_table[stateIndex][eventIndex]->warning("Duplicate transition: " + m_table[stateIndex][eventIndex]->toString());
+ trans_ptr->error("Duplicate transition: " + trans_ptr->toString());
+ }
+ m_table[stateIndex][eventIndex] = trans_ptr;
+ }
+
+ // Look at all actions to make sure we used them all
+ for(int actionIndex=0; actionIndex < m_actions.size(); actionIndex++) {
+ Action* action_ptr = m_actions[actionIndex];
+ if (!action_ptr->wasUsed()) {
+ string error_msg = "Unused action: " + action_ptr->getIdent();
+ if (action_ptr->existPair("desc")) {
+ error_msg += ", " + action_ptr->getDescription();
+ }
+ action_ptr->warning(error_msg);
+ }
+ }
+
+ m_table_built = true;
+}
+
+const Transition* StateMachine::getTransPtr(int stateIndex, int eventIndex) const
+{
+ return m_table[stateIndex][eventIndex];
+}
+
+// *********************** //
+// ******* C Files ******* //
+// *********************** //
+
+void StateMachine::writeCFiles(string path) const
+{
+ string comp = getIdent();
+ string filename;
+
+ // Output switch statement for transition table
+ {
+ ostringstream sstr;
+ printCSwitch(sstr, comp);
+ conditionally_write_file(path + comp + "_Transitions.cc", sstr);
+ }
+
+ // Output the actions for performing the actions
+ {
+ ostringstream sstr;
+ printControllerC(sstr, comp);
+ conditionally_write_file(path + comp + "_Controller.cc", sstr);
+ }
+
+ // Output the method declarations for the class declaration
+ {
+ ostringstream sstr;
+ printControllerH(sstr, comp);
+ conditionally_write_file(path + comp + "_Controller.hh", sstr);
+ }
+
+ // Output the wakeup loop for the events
+ {
+ ostringstream sstr;
+ printCWakeup(sstr, comp);
+ conditionally_write_file(path + comp + "_Wakeup.cc", sstr);
+ }
+
+ // Profiling
+ {
+ ostringstream sstr;
+ printProfilerC(sstr, comp);
+ conditionally_write_file(path + comp + "_Profiler.cc", sstr);
+ }
+ {
+ ostringstream sstr;
+ printProfilerH(sstr, comp);
+ conditionally_write_file(path + comp + "_Profiler.hh", sstr);
+ }
+
+ // Write internal func files
+ for(int i=0; i<m_internal_func_vec.size(); i++) {
+ m_internal_func_vec[i]->writeCFiles(path);
+ }
+
+}
+
+void StateMachine::printControllerH(ostream& out, string component) const
+{
+ out << "/** \\file " << getIdent() << ".hh" << endl;
+ out << " * " << endl;
+ out << " * Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
+ out << " * Created by slicc definition of Module \"" << getShorthand() << "\"" << endl;
+ out << " */" << endl;
+ out << endl;
+ out << "#ifndef " << component << "_CONTROLLER_H" << endl;
+ out << "#define " << component << "_CONTROLLER_H" << endl;
+ out << endl;
+ out << "#include \"Global.hh\"" << endl;
+ out << "#include \"Consumer.hh\"" << endl;
+ out << "#include \"TransitionResult.hh\"" << endl;
+ out << "#include \"Types.hh\"" << endl;
+ out << "#include \"" << component << "_Profiler.hh\"" << endl;
+ out << endl;
+
+ // for adding information to the protocol debug trace
+ out << "extern stringstream " << component << "_" << "transitionComment;" << endl;
+
+ out << "class " << component << "_Controller : public Consumer {" << endl;
+
+ /* the coherence checker needs to call isBlockExclusive() and isBlockShared()
+ making the Chip a friend class is an easy way to do this for now */
+ out << "#ifdef CHECK_COHERENCE" << endl;
+ out << " friend class Chip;" << endl;
+ out << "#endif /* CHECK_COHERENCE */" << endl;
+
+ out << "public:" << endl;
+ out << " " << component << "_Controller(Chip* chip_ptr, int version);" << endl;
+ out << " void print(ostream& out) const;" << endl;
+ out << " void wakeup();" << endl;
+ out << " static void dumpStats(ostream& out) { s_profiler.dumpStats(out); }" << endl;
+ out << " static void clearStats() { s_profiler.clearStats(); }" << endl;
+ out << "private:" << endl;
+ out << " TransitionResult doTransition(" << component << "_Event event, " << component
+ << "_State state, const Address& addr";
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ out << ", int priority";
+ }
+ out << "); // in " << component << "_Transitions.cc" << endl;
+ out << " TransitionResult doTransitionWorker(" << component << "_Event event, " << component
+ << "_State state, " << component << "_State& next_state, const Address& addr";
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ out << ", int priority";
+ }
+ out << "); // in " << component << "_Transitions.cc" << endl;
+ out << " Chip* m_chip_ptr;" << endl;
+ out << " NodeID m_id;" << endl;
+ out << " NodeID m_version;" << endl;
+ out << " MachineID m_machineID;" << endl;
+ out << " static " << component << "_Profiler s_profiler;" << endl;
+
+  // internal function prototypes
+ out << " // Internal functions" << endl;
+ for(int i=0; i<m_internal_func_vec.size(); i++) {
+ Func* func = m_internal_func_vec[i];
+ string proto;
+ func->funcPrototype(proto);
+ if (proto != "") {
+ out << " " << proto;
+ }
+ }
+
+ out << " // Actions" << endl;
+ for(int i=0; i < numActions(); i++) {
+ const Action& action = getAction(i);
+ out << "/** \\brief " << action.getDescription() << "*/" << endl;
+ out << " void " << action.getIdent() << "(const Address& addr);" << endl;
+ }
+ out << "};" << endl;
+ out << "#endif // " << component << "_CONTROLLER_H" << endl;
+}
+
+void StateMachine::printControllerC(ostream& out, string component) const
+{
+ out << "/** \\file " << getIdent() << ".cc" << endl;
+ out << " * " << endl;
+ out << " * Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
+ out << " * Created by slicc definition of Module \"" << getShorthand() << "\"" << endl;
+ out << " */" << endl;
+ out << endl;
+ out << "#include \"Global.hh\"" << endl;
+ out << "#include \"RubySlicc_includes.hh\"" << endl;
+ out << "#include \"" << component << "_Controller.hh\"" << endl;
+ out << "#include \"" << component << "_State.hh\"" << endl;
+ out << "#include \"" << component << "_Event.hh\"" << endl;
+ out << "#include \"Types.hh\"" << endl;
+ out << "#include \"System.hh\"" << endl;
+ out << "#include \"Chip.hh\"" << endl;
+ out << endl;
+
+ // for adding information to the protocol debug trace
+ out << "stringstream " << component << "_" << "transitionComment;" << endl;
+ out << "#define APPEND_TRANSITION_COMMENT(str) (" << component << "_" << "transitionComment << str)" << endl;
+
+ out << "/** \\brief static profiler defn */" << endl;
+ out << component << "_Profiler " << component << "_Controller::s_profiler;" << endl;
+ out << endl;
+
+ out << "/** \\brief constructor */" << endl;
+ out << component << "_Controller::" << component
+ << "_Controller(Chip* chip_ptr, int version)" << endl;
+ out << "{" << endl;
+ out << " m_chip_ptr = chip_ptr;" << endl;
+ out << " m_id = m_chip_ptr->getID();" << endl;
+ out << " m_version = version;" << endl;
+ out << " m_machineID.type = MachineType_" << component << ";" << endl;
+ out << " m_machineID.num = m_id*RubyConfig::numberOf"<< component << "PerChip()+m_version;" << endl;
+
+ // Set the queue consumers
+ for(int i=0; i < m_in_ports.size(); i++) {
+ const Var* port = m_in_ports[i];
+ out << " " << port->getCode() << ".setConsumer(this);" << endl;
+ }
+
+ out << endl;
+ // Set the queue descriptions
+ for(int i=0; i < m_in_ports.size(); i++) {
+ const Var* port = m_in_ports[i];
+ out << " " << port->getCode()
+ << ".setDescription(\"[Chip \" + int_to_string(m_chip_ptr->getID()) + \" \" + int_to_string(m_version) + \", "
+ << component << ", " << port->toString() << "]\");" << endl;
+ }
+
+ // Initialize the transition profiling
+ out << endl;
+ for(int i=0; i<numTransitions(); i++) {
+ const Transition& t = getTransition(i);
+ const Vector<Action*>& action_vec = t.getActions();
+ int numActions = action_vec.size();
+
+ // Figure out if we stall
+ bool stall = false;
+ for (int i=0; i<numActions; i++) {
+ if(action_vec[i]->getIdent() == "z_stall") {
+ stall = true;
+ }
+ }
+
+ // Only possible if it is not a 'z' case
+ if (!stall) {
+ out << " s_profiler.possibleTransition(" << component << "_State_"
+ << t.getStatePtr()->getIdent() << ", " << component << "_Event_"
+ << t.getEventPtr()->getIdent() << ");" << endl;
+ }
+ }
+
+ out << "}" << endl;
+
+ out << endl;
+
+ out << "void " << component << "_Controller::print(ostream& out) const { out << \"[" << component
+ << "_Controller \" << m_chip_ptr->getID() << \" \" << m_version << \"]\"; }" << endl;
+
+ out << endl;
+ out << "// Actions" << endl;
+ out << endl;
+
+ for(int i=0; i < numActions(); i++) {
+ const Action& action = getAction(i);
+ if (action.existPair("c_code")) {
+ out << "/** \\brief " << action.getDescription() << "*/" << endl;
+ out << "void " << component << "_Controller::"
+ << action.getIdent() << "(const Address& addr)" << endl;
+ out << "{" << endl;
+ out << " DEBUG_MSG(GENERATED_COMP, HighPrio,\"executing\");" << endl;
+ out << action.lookupPair("c_code");
+ out << "}" << endl;
+ }
+ out << endl;
+ }
+}
+
+void StateMachine::printCWakeup(ostream& out, string component) const
+{
+ out << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
+ out << "// " << getIdent() << ": " << getShorthand() << endl;
+ out << endl;
+ out << "#include \"Global.hh\"" << endl;
+ out << "#include \"RubySlicc_includes.hh\"" << endl;
+ out << "#include \"" << component << "_Controller.hh\"" << endl;
+ out << "#include \"" << component << "_State.hh\"" << endl;
+ out << "#include \"" << component << "_Event.hh\"" << endl;
+ out << "#include \"Types.hh\"" << endl;
+ out << "#include \"System.hh\"" << endl;
+ out << "#include \"Chip.hh\"" << endl;
+ out << endl;
+ out << "void " << component << "_Controller::wakeup()" << endl;
+ out << "{" << endl;
+ // out << " DEBUG_EXPR(GENERATED_COMP, MedPrio,*this);" << endl;
+ // out << " DEBUG_EXPR(GENERATED_COMP, MedPrio,g_eventQueue_ptr->getTime());" << endl;
+ out << endl;
+ out << "int counter = 0;" << endl;
+ out << " while (true) {" << endl;
+ out << " // Some cases will put us into an infinite loop without this limit" << endl;
+ out << " assert(counter <= RubyConfig::" << getIdent() << "TransitionsPerCycle());" << endl;
+ out << " if (counter == RubyConfig::" << getIdent() << "TransitionsPerCycle()) {" << endl;
+ out << " g_system_ptr->getProfiler()->controllerBusy(m_machineID); // Count how often we're fully utilized" << endl;
+ out << " g_eventQueue_ptr->scheduleEvent(this, 1); // Wakeup in another cycle and try again" << endl;
+ out << " break;" << endl;
+ out << " }" << endl;
+
+ // InPorts
+ for(int i=0; i < m_in_ports.size(); i++) {
+ const Var* port = m_in_ports[i];
+ assert(port->existPair("c_code_in_port"));
+ out << " // "
+ << component << "InPort " << port->toString()
+ << endl;
+ out << port->lookupPair("c_code_in_port");
+ out << endl;
+ }
+
+ out << " break; // If we got this far, we have nothing left todo" << endl;
+ out << " }" << endl;
+ // out << " g_eventQueue_ptr->scheduleEvent(this, 1);" << endl;
+ // out << " DEBUG_NEWLINE(GENERATED_COMP, MedPrio);" << endl;
+ out << "}" << endl;
+ out << endl;
+}
+
+void StateMachine::printCSwitch(ostream& out, string component) const
+{
+ out << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
+ out << "// " << getIdent() << ": " << getShorthand() << endl;
+ out << endl;
+ out << "#include \"Global.hh\"" << endl;
+ out << "#include \"" << component << "_Controller.hh\"" << endl;
+ out << "#include \"" << component << "_State.hh\"" << endl;
+ out << "#include \"" << component << "_Event.hh\"" << endl;
+ out << "#include \"Types.hh\"" << endl;
+ out << "#include \"System.hh\"" << endl;
+ out << "#include \"Chip.hh\"" << endl;
+ out << endl;
+ out << "#define HASH_FUN(state, event) ((int(state)*" << component
+ << "_Event_NUM)+int(event))" << endl;
+ out << endl;
+ out << "#define GET_TRANSITION_COMMENT() (" << component << "_" << "transitionComment.str())" << endl;
+ out << "#define CLEAR_TRANSITION_COMMENT() (" << component << "_" << "transitionComment.str(\"\"))" << endl;
+ out << endl;
+ out << "TransitionResult " << component << "_Controller::doTransition("
+ << component << "_Event event, "
+ << component << "_State state, "
+ << "const Address& addr" << endl;
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ out << ", int priority";
+ }
+ out << ")" << endl;
+
+ out << "{" << endl;
+ out << " " << component << "_State next_state = state;" << endl;
+ out << endl;
+ out << " DEBUG_NEWLINE(GENERATED_COMP, MedPrio);" << endl;
+ out << " DEBUG_MSG(GENERATED_COMP, MedPrio,*this);" << endl;
+ out << " DEBUG_EXPR(GENERATED_COMP, MedPrio,g_eventQueue_ptr->getTime());" << endl;
+ out << " DEBUG_EXPR(GENERATED_COMP, MedPrio,state);" << endl;
+ out << " DEBUG_EXPR(GENERATED_COMP, MedPrio,event);" << endl;
+ out << " DEBUG_EXPR(GENERATED_COMP, MedPrio,addr);" << endl;
+ out << endl;
+ out << " TransitionResult result = doTransitionWorker(event, state, next_state, addr";
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ out << ", priority";
+ }
+ out << ");" << endl;
+ out << endl;
+ out << " if (result == TransitionResult_Valid) {" << endl;
+ out << " DEBUG_EXPR(GENERATED_COMP, MedPrio, next_state);" << endl;
+ out << " DEBUG_NEWLINE(GENERATED_COMP, MedPrio);" << endl;
+ out << " s_profiler.countTransition(state, event);" << endl;
+ out << " if (PROTOCOL_DEBUG_TRACE) {" << endl
+ << " g_system_ptr->getProfiler()->profileTransition(\"" << component
+ << "\", m_chip_ptr->getID(), m_version, addr, " << endl
+ << " " << component << "_State_to_string(state), " << endl
+ << " " << component << "_Event_to_string(event), " << endl
+ << " " << component << "_State_to_string(next_state), GET_TRANSITION_COMMENT());" << endl
+ << " }" << endl;
+ out << " CLEAR_TRANSITION_COMMENT();" << endl;
+ out << " " << component << "_setState(addr, next_state);" << endl;
+ out << " " << endl;
+ out << " } else if (result == TransitionResult_ResourceStall) {" << endl;
+ out << " if (PROTOCOL_DEBUG_TRACE) {" << endl
+ << " g_system_ptr->getProfiler()->profileTransition(\"" << component
+ << "\", m_chip_ptr->getID(), m_version, addr, " << endl
+ << " " << component << "_State_to_string(state), " << endl
+ << " " << component << "_Event_to_string(event), " << endl
+ << " " << component << "_State_to_string(next_state), " << endl
+ << " \"Resource Stall\");" << endl
+ << " }" << endl;
+ out << " } else if (result == TransitionResult_ProtocolStall) {" << endl;
+ out << " DEBUG_MSG(GENERATED_COMP,HighPrio,\"stalling\");" << endl
+ << " DEBUG_NEWLINE(GENERATED_COMP, MedPrio);" << endl;
+ out << " if (PROTOCOL_DEBUG_TRACE) {" << endl
+ << " g_system_ptr->getProfiler()->profileTransition(\"" << component
+ << "\", m_chip_ptr->getID(), m_version, addr, " << endl
+ << " " << component << "_State_to_string(state), " << endl
+ << " " << component << "_Event_to_string(event), " << endl
+ << " " << component << "_State_to_string(next_state), " << endl
+ << " \"Protocol Stall\");" << endl
+ << " }" << endl
+ << " }" << endl;
+ out << " return result;" << endl;
+ out << "}" << endl;
+ out << endl;
+ out << "TransitionResult " << component << "_Controller::doTransitionWorker("
+ << component << "_Event event, "
+ << component << "_State state, "
+ << component << "_State& next_state, "
+ << "const Address& addr" << endl;
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ out << ", int priority" << endl;
+ }
+ out << ")" << endl;
+
+ out << "{" << endl;
+ out << "" << endl;
+
+ out << " switch(HASH_FUN(state, event)) {" << endl;
+
+  Map<string, Vector<string> > code_map; // This map allows us to suppress generating duplicate code
+ Vector<string> code_vec;
+
+ for(int i=0; i<numTransitions(); i++) {
+ const Transition& t = getTransition(i);
+ string case_string = component + "_State_" + t.getStatePtr()->getIdent()
+ + ", " + component + "_Event_" + t.getEventPtr()->getIdent();
+
+ string code;
+
+ code += " {\n";
+ // Only set next_state if it changes
+ if (t.getStatePtr() != t.getNextStatePtr()) {
+ code += " next_state = " + component + "_State_" + t.getNextStatePtr()->getIdent() + ";\n";
+ }
+
+ const Vector<Action*>& action_vec = t.getActions();
+ int numActions = action_vec.size();
+
+ // Check for resources
+ Vector<string> code_sorter;
+ const Map<Var*, string>& res = t.getResources();
+ Vector<Var*> res_keys = res.keys();
+ for (int i=0; i<res_keys.size(); i++) {
+ string temp_code;
+ if (res_keys[i]->getType()->cIdent() == "DNUCAStopTable") {
+ temp_code += res.lookup(res_keys[i]);
+ } else {
+ temp_code += " if (!" + (res_keys[i]->getCode()) + ".areNSlotsAvailable(" + res.lookup(res_keys[i]) + ")) {\n";
+ if(CHECK_INVALID_RESOURCE_STALLS) {
+ // assert that the resource stall is for a resource of equal or greater priority
+ temp_code += " assert(priority >= "+ (res_keys[i]->getCode()) + ".getPriority());\n";
+ }
+ temp_code += " return TransitionResult_ResourceStall;\n";
+ temp_code += " }\n";
+ }
+ code_sorter.insertAtBottom(temp_code);
+ }
+
+    // Emit the code sequences in a sorted order. This makes the
+    // output deterministic (without this the output order can vary,
+    // since Map's keys() on a vector of pointers is not deterministic)
+ code_sorter.sortVector();
+ for (int i=0; i<code_sorter.size(); i++) {
+ code += code_sorter[i];
+ }
+
+ // Figure out if we stall
+ bool stall = false;
+ for (int i=0; i<numActions; i++) {
+ if(action_vec[i]->getIdent() == "z_stall") {
+ stall = true;
+ }
+ }
+
+ if (stall) {
+ code += " return TransitionResult_ProtocolStall;\n";
+ } else {
+ for (int i=0; i<numActions; i++) {
+ code += " " + action_vec[i]->getIdent() + "(addr);\n";
+ }
+ code += " return TransitionResult_Valid;\n";
+ }
+ code += " }\n";
+
+
+ // Look to see if this transition code is unique.
+ if (code_map.exist(code)) {
+ code_map.lookup(code).insertAtBottom(case_string);
+ } else {
+ Vector<string> vec;
+ vec.insertAtBottom(case_string);
+ code_map.add(code, vec);
+ code_vec.insertAtBottom(code);
+ }
+ }
+
+ // Walk through all of the unique code blocks and spit out the
+ // corresponding case statement elements
+ for (int i=0; i<code_vec.size(); i++) {
+ string code = code_vec[i];
+
+    // Iterate over all of the transitions that share the same code
+ for (int case_num=0; case_num<code_map.lookup(code).size(); case_num++) {
+ string case_string = code_map.lookup(code)[case_num];
+ out << " case HASH_FUN(" << case_string << "):" << endl;
+ }
+ out << code;
+ }
+
+ out << " default:" << endl;
+ out << " WARN_EXPR(m_id);" << endl;
+ out << " WARN_EXPR(m_version);" << endl;
+ out << " WARN_EXPR(g_eventQueue_ptr->getTime());" << endl;
+ out << " WARN_EXPR(addr);" << endl;
+ out << " WARN_EXPR(event);" << endl;
+ out << " WARN_EXPR(state);" << endl;
+ out << " ERROR_MSG(\"Invalid transition\");" << endl;
+ out << " }" << endl;
+ out << " return TransitionResult_Valid;" << endl;
+ out << "}" << endl;
+}
+
+void StateMachine::printProfilerH(ostream& out, string component) const
+{
+ out << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
+ out << "// " << getIdent() << ": " << getShorthand() << endl;
+ out << endl;
+ out << "#ifndef " << component << "_PROFILER_H" << endl;
+ out << "#define " << component << "_PROFILER_H" << endl;
+ out << endl;
+ out << "#include \"Global.hh\"" << endl;
+ out << "#include \"" << component << "_State.hh\"" << endl;
+ out << "#include \"" << component << "_Event.hh\"" << endl;
+ out << endl;
+ out << "class " << component << "_Profiler {" << endl;
+ out << "public:" << endl;
+ out << " " << component << "_Profiler();" << endl;
+ out << " void countTransition(" << component << "_State state, " << component << "_Event event);" << endl;
+ out << " void possibleTransition(" << component << "_State state, " << component << "_Event event);" << endl;
+ out << " void dumpStats(ostream& out) const;" << endl;
+ out << " void clearStats();" << endl;
+ out << "private:" << endl;
+ out << " int m_counters[" << component << "_State_NUM][" << component << "_Event_NUM];" << endl;
+ out << " int m_event_counters[" << component << "_Event_NUM];" << endl;
+ out << " bool m_possible[" << component << "_State_NUM][" << component << "_Event_NUM];" << endl;
+ out << "};" << endl;
+ out << "#endif // " << component << "_PROFILER_H" << endl;
+}
+
+void StateMachine::printProfilerC(ostream& out, string component) const
+{
+ out << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
+ out << "// " << getIdent() << ": " << getShorthand() << endl;
+ out << endl;
+ out << "#include \"" << component << "_Profiler.hh\"" << endl;
+ out << endl;
+
+ // Constructor
+ out << component << "_Profiler::" << component << "_Profiler()" << endl;
+ out << "{" << endl;
+ out << " for (int state = 0; state < " << component << "_State_NUM; state++) {" << endl;
+ out << " for (int event = 0; event < " << component << "_Event_NUM; event++) {" << endl;
+ out << " m_possible[state][event] = false;" << endl;
+ out << " m_counters[state][event] = 0;" << endl;
+ out << " }" << endl;
+ out << " }" << endl;
+ out << " for (int event = 0; event < " << component << "_Event_NUM; event++) {" << endl;
+ out << " m_event_counters[event] = 0;" << endl;
+ out << " }" << endl;
+ out << "}" << endl;
+
+ // Clearstats
+ out << "void " << component << "_Profiler::clearStats()" << endl;
+ out << "{" << endl;
+ out << " for (int state = 0; state < " << component << "_State_NUM; state++) {" << endl;
+ out << " for (int event = 0; event < " << component << "_Event_NUM; event++) {" << endl;
+ out << " m_counters[state][event] = 0;" << endl;
+ out << " }" << endl;
+ out << " }" << endl;
+ out << " for (int event = 0; event < " << component << "_Event_NUM; event++) {" << endl;
+ out << " m_event_counters[event] = 0;" << endl;
+ out << " }" << endl;
+ out << "}" << endl;
+
+ // Count Transition
+ out << "void " << component << "_Profiler::countTransition(" << component << "_State state, " << component << "_Event event)" << endl;
+ out << "{" << endl;
+ out << " assert(m_possible[state][event]);" << endl;
+ out << " m_counters[state][event]++;" << endl;
+ out << " m_event_counters[event]++;" << endl;
+ out << "}" << endl;
+
+ // Possible Transition
+ out << "void " << component << "_Profiler::possibleTransition(" << component << "_State state, " << component << "_Event event)" << endl;
+ out << "{" << endl;
+ out << " m_possible[state][event] = true;" << endl;
+ out << "}" << endl;
+
+ // dumpStats
+ out << "void " << component << "_Profiler::dumpStats(ostream& out) const" << endl;
+ out << "{" << endl;
+ out << " out << \" --- " << component << " ---\" << endl;" << endl;
+ out << " out << \" - Event Counts -\" << endl;" << endl;
+ out << " for (int event = 0; event < " << component << "_Event_NUM; event++) {" << endl;
+ out << " int count = m_event_counters[event];" << endl;
+ out << " out << (" << component << "_Event) event << \" \" << count << endl;" << endl;
+ out << " }" << endl;
+ out << " out << endl;" << endl;
+ out << " out << \" - Transitions -\" << endl;" << endl;
+ out << " for (int state = 0; state < " << component << "_State_NUM; state++) {" << endl;
+ out << " for (int event = 0; event < " << component << "_Event_NUM; event++) {" << endl;
+ out << " if (m_possible[state][event]) {" << endl;
+ out << " int count = m_counters[state][event];" << endl;
+ out << " out << (" << component << "_State) state << \" \" << (" << component << "_Event) event << \" \" << count;" << endl;
+ out << " if (count == 0) {" << endl;
+ out << " out << \" <-- \";" << endl;
+ out << " }" << endl;
+ out << " out << endl;" << endl;
+ out << " }" << endl;
+ out << " }" << endl;
+ out << " out << endl;" << endl;
+ out << " }" << endl;
+ out << "}" << endl;
+}
+
+
+
+// ************************** //
+// ******* HTML Files ******* //
+// ************************** //
+
+string frameRef(string click_href, string click_target, string over_href, string over_target_num, string text)
+{
+ string temp;
+ temp += "<A href=\"" + click_href + "\" ";
+ temp += "target=\"" + click_target + "\" ";
+ string javascript = "if (parent.frames[" + over_target_num + "].location != parent.location + '" + over_href + "') { parent.frames[" + over_target_num + "].location='" + over_href + "' }";
+ // string javascript = "parent." + target + ".location='" + href + "'";
+ temp += "onMouseOver=\"" + javascript + "\" ";
+ temp += ">" + text + "</A>";
+ return temp;
+}
+
+string frameRef(string href, string target, string target_num, string text)
+{
+ return frameRef(href, target, href, target_num, text);
+}
+
+
+void StateMachine::writeHTMLFiles(string path) const
+{
+ string filename;
+ string component = getIdent();
+
+ /*
+ {
+ ostringstream out;
+ out << "<html>" << endl;
+ out << "<head>" << endl;
+ out << "<title>" << component << "</title>" << endl;
+ out << "</head>" << endl;
+ out << "<frameset rows=\"30,30,*\" frameborder=\"1\">" << endl;
+ out << " <frame name=\"Status\" src=\"empty.html\" marginheight=\"1\">" << endl;
+ out << " <frame name=\"Table\" src=\"" << component << "_table.html\" marginheight=\"1\">" << endl;
+ out << "</frameset>" << endl;
+ out << "</html>" << endl;
+ conditionally_write_file(path + component + ".html", out);
+ }
+ */
+
+  // Create table with no row highlighted
+ {
+ ostringstream out;
+ printHTMLTransitions(out, numStates()+1);
+
+ // -- Write file
+ filename = component + "_table.html";
+ conditionally_write_file(path + filename, out);
+ }
+
+ // Generate transition tables
+ for(int i=0; i<numStates(); i++) {
+ ostringstream out;
+ printHTMLTransitions(out, i);
+
+ // -- Write file
+ filename = component + "_table_" + getState(i).getIdent() + ".html";
+ conditionally_write_file(path + filename, out);
+ }
+
+ // Generate action descriptions
+ for(int i=0; i<numActions(); i++) {
+ ostringstream out;
+ createHTMLSymbol(getAction(i), "Action", out);
+
+ // -- Write file
+ filename = component + "_action_" + getAction(i).getIdent() + ".html";
+ conditionally_write_file(path + filename, out);
+ }
+
+ // Generate state descriptions
+ for(int i=0; i<numStates(); i++) {
+ ostringstream out;
+ createHTMLSymbol(getState(i), "State", out);
+
+ // -- Write file
+ filename = component + "_State_" + getState(i).getIdent() + ".html";
+ conditionally_write_file(path + filename, out);
+ }
+
+ // Generate event descriptions
+ for(int i=0; i<numEvents(); i++) {
+ ostringstream out;
+ createHTMLSymbol(getEvent(i), "Event", out);
+
+ // -- Write file
+ filename = component + "_Event_" + getEvent(i).getIdent() + ".html";
+ conditionally_write_file(path + filename, out);
+ }
+}
+
+void StateMachine::printHTMLTransitions(ostream& out, int active_state) const
+{
+ // -- Prolog
+ out << "<HTML><BODY link=\"blue\" vlink=\"blue\">" << endl;
+
+ // -- Header
+ out << "<H1 align=\"center\">" << formatHTMLShorthand(getShorthand()) << ": " << endl;
+ Vector<StateMachine*> machine_vec = g_sym_table.getStateMachines();
+ for (int i=0; i<machine_vec.size(); i++) {
+ StateMachine* type = machine_vec[i];
+ if (i != 0) {
+ out << " - ";
+ }
+ if (type == this) {
+ out << type->getIdent() << endl;
+ } else {
+ out << "<A target=\"Table\"href=\"" + type->getIdent() + "_table.html\">" + type->getIdent() + "</A> " << endl;
+ }
+ }
+ out << "</H1>" << endl;
+
+ // -- Table header
+ out << "<TABLE border=1>" << endl;
+
+ // -- Column headers
+ out << "<TR>" << endl;
+
+ // -- First column header
+ out << " <TH> </TH>" << endl;
+
+ for(int event = 0; event < numEvents(); event++ ) {
+ out << " <TH bgcolor=white>";
+ out << frameRef(getIdent() + "_Event_" + getEvent(event).getIdent() + ".html", "Status", "1", formatHTMLShorthand(getEvent(event).getShorthand()));
+ out << "</TH>" << endl;
+ }
+
+ out << "</TR>" << endl;
+
+ // -- Body of table
+ for(int state = 0; state < numStates(); state++ ) {
+ out << "<TR>" << endl;
+
+ // -- Each row
+ if (state == active_state) {
+ out << " <TH bgcolor=yellow>";
+ } else {
+ out << " <TH bgcolor=white>";
+ }
+
+ string click_href = getIdent() + "_table_" + getState(state).getIdent() + ".html";
+ string text = formatHTMLShorthand(getState(state).getShorthand());
+
+ out << frameRef(click_href, "Table", getIdent() + "_State_" + getState(state).getIdent() + ".html", "1", formatHTMLShorthand(getState(state).getShorthand()));
+ out << "</TH>" << endl;
+
+ // -- One column for each event
+ for(int event = 0; event < numEvents(); event++ ) {
+ const Transition* trans_ptr = getTransPtr(state, event);
+
+ if( trans_ptr != NULL ) {
+ bool stall_action = false;
+ string nextState;
+ string actions_str;
+
+ // -- Get the actions
+ // actions = trans_ptr->getActionShorthands();
+ const Vector<Action*> actions = trans_ptr->getActions();
+ for (int action=0; action < actions.size(); action++) {
+ if ((actions[action]->getIdent() == "z_stall") ||
+ (actions[action]->getIdent() == "zz_recycleMandatoryQueue")) {
+ stall_action = true;
+ }
+ actions_str += " ";
+ actions_str += frameRef(getIdent() + "_action_" + actions[action]->getIdent() + ".html", "Status", "1",
+ formatHTMLShorthand(actions[action]->getShorthand()));
+ actions_str += "\n";
+ }
+
+ // -- Get the next state
+ if (trans_ptr->getNextStatePtr()->getIdent() != getState(state).getIdent()) {
+ string click_href = getIdent() + "_table_" + trans_ptr->getNextStatePtr()->getIdent() + ".html";
+ nextState = frameRef(click_href, "Table", getIdent() + "_State_" + trans_ptr->getNextStatePtr()->getIdent() + ".html", "1",
+ formatHTMLShorthand(trans_ptr->getNextStateShorthand()));
+ } else {
+ nextState = "";
+ }
+
+ // -- Print out "actions/next-state"
+ if (stall_action) {
+ if (state == active_state) {
+ out << " <TD bgcolor=#C0C000>";
+ } else {
+ out << " <TD bgcolor=lightgrey>";
+ }
+ } else if (active_state < numStates() && (trans_ptr->getNextStatePtr()->getIdent() == getState(active_state).getIdent())) {
+ out << " <TD bgcolor=aqua>";
+ } else if (state == active_state) {
+ out << " <TD bgcolor=yellow>";
+ } else {
+ out << " <TD bgcolor=white>";
+ }
+
+ out << actions_str;
+ if ((nextState.length() != 0) && (actions_str.length() != 0)) {
+ out << "/";
+ }
+ out << nextState;
+ out << "</TD>" << endl;
+ } else {
+ // This is the no transition case
+ if (state == active_state) {
+ out << " <TD bgcolor=#C0C000>&nbsp;</TD>" << endl;
+ } else {
+ out << " <TD bgcolor=lightgrey>&nbsp;</TD>" << endl;
+ }
+ }
+ }
+ // -- Each row
+ if (state == active_state) {
+ out << " <TH bgcolor=yellow>";
+ } else {
+ out << " <TH bgcolor=white>";
+ }
+
+ click_href = getIdent() + "_table_" + getState(state).getIdent() + ".html";
+ text = formatHTMLShorthand(getState(state).getShorthand());
+
+ out << frameRef(click_href, "Table", getIdent() + "_State_" + getState(state).getIdent() + ".html", "1", formatHTMLShorthand(getState(state).getShorthand()));
+ out << "</TH>" << endl;
+
+ out << "</TR>" << endl;
+ }
+
+ // -- Column footer
+ out << "<TR>" << endl;
+ out << " <TH> </TH>" << endl;
+
+ for(int i = 0; i < numEvents(); i++ ) {
+ out << " <TH bgcolor=white>";
+ out << frameRef(getIdent() + "_Event_" + getEvent(i).getIdent() + ".html", "Status", "1", formatHTMLShorthand(getEvent(i).getShorthand()));
+ out << "</TH>" << endl;
+ }
+ out << "</TR>" << endl;
+
+ // -- Epilog
+ out << "</TABLE>" << endl;
+ out << "</BODY></HTML>" << endl;
+}
+
+
diff --git a/src/mem/slicc/symbols/StateMachine.hh b/src/mem/slicc/symbols/StateMachine.hh
new file mode 100644
index 000000000..9f3663ed4
--- /dev/null
+++ b/src/mem/slicc/symbols/StateMachine.hh
@@ -0,0 +1,141 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * */
+
+#ifndef STATEMACHINE_H
+#define STATEMACHINE_H
+
+#include "slicc_global.hh"
+#include "Vector.hh"
+#include "Map.hh"
+#include "Symbol.hh"
+
+class Transition;
+class Event;
+class State;
+class Action;
+class Var;
+class Func;
+
+class StateMachine : public Symbol {
+public:
+ // Constructors
+ StateMachine(string ident, const Location& location, const Map<string, string>& pairs);
+
+ // Destructor
+ ~StateMachine();
+
+ // Public Methods
+
+ // Add items to the state machine
+ // void setMachine(string ident, const Map<string, string>& pairs);
+ void addState(State* state_ptr);
+ void addEvent(Event* event_ptr);
+ void addAction(Action* action_ptr);
+ void addTransition(Transition* trans_ptr);
+ void addInPort(Var* var) { m_in_ports.insertAtBottom(var); }
+ void addFunc(Func* func);
+
+ // Accessors to vectors
+ const State& getState(int index) const { return *m_states[index]; }
+ const Event& getEvent(int index) const { return *m_events[index]; }
+ const Action& getAction(int index) const { return *m_actions[index]; }
+ const Transition& getTransition(int index) const { return *m_transitions[index]; }
+ const Transition* getTransPtr(int stateIndex, int eventIndex) const;
+
+ // Accessors for size of vectors
+ int numStates() const { return m_states.size(); }
+ int numEvents() const { return m_events.size(); }
+ int numActions() const { return m_actions.size(); }
+ int numTransitions() const { return m_transitions.size(); }
+
+ void buildTable(); // Needs to be called before accessing the table
+
+ // Code generator methods
+ void writeCFiles(string path) const;
+ void writeHTMLFiles(string path) const;
+
+ void print(ostream& out) const { out << "[StateMachine: " << toString() << "]" << endl; }
+private:
+ // Private Methods
+ void checkForDuplicate(const Symbol& sym) const;
+
+ int getStateIndex(State* state_ptr) const { return m_state_map.lookup(state_ptr); }
+ int getEventIndex(Event* event_ptr) const { return m_event_map.lookup(event_ptr); }
+
+ // Private copy constructor and assignment operator
+ // StateMachine(const StateMachine& obj);
+ // StateMachine& operator=(const StateMachine& obj);
+
+ void printControllerH(ostream& out, string component) const;
+ void printControllerC(ostream& out, string component) const;
+ void printCWakeup(ostream& out, string component) const;
+ void printCSwitch(ostream& out, string component) const;
+ void printProfilerH(ostream& out, string component) const;
+ void printProfilerC(ostream& out, string component) const;
+
+ void printHTMLTransitions(ostream& out, int active_state) const;
+
+ // Data Members (m_ prefix)
+ Vector<State*> m_states;
+ Vector<Event*> m_events;
+ Vector<Action*> m_actions;
+ Vector<Transition*> m_transitions;
+ Vector<Func*> m_internal_func_vec;
+
+ Map<State*, int> m_state_map;
+ Map<Event*, int> m_event_map;
+
+ Vector<Var*> m_in_ports;
+
+ // Table variables
+ bool m_table_built;
+ Vector<Vector<Transition*> > m_table;
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StateMachine& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const StateMachine& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //STATEMACHINE_H
diff --git a/src/mem/slicc/symbols/Symbol.cc b/src/mem/slicc/symbols/Symbol.cc
new file mode 100644
index 000000000..3365c94eb
--- /dev/null
+++ b/src/mem/slicc/symbols/Symbol.cc
@@ -0,0 +1,72 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Symbol.hh"
+
+Symbol::Symbol(string id, const Location& location, const Map<string, string>& pairs)
+{
+ m_id = id;
+ m_location = location;
+ m_pairs = pairs;
+ if (!existPair("short")) {
+ addPair("short", m_id);
+ }
+ m_used = false;
+}
+
+Symbol::Symbol(string id, const Location& location)
+{
+ m_id = id;
+ m_location = location;
+ if (!existPair("short")) {
+ addPair("short", m_id);
+ }
+ m_used = false;
+}
+
+const string& Symbol::lookupPair(const string& key) const
+{
+ if (!existPair(key)) {
+ error("Value for pair '" + key + "' missing.");
+ }
+ return m_pairs.lookup(key);
+}
+
+void Symbol::addPair(const string& key, const string& value)
+{
+ if (existPair(key)) {
+ warning("Pair key '" + key + "' re-defined. new: '" + value + "' old: '" + lookupPair(key) + "'");
+ }
+ m_pairs.add(key, value);
+}
diff --git a/src/mem/slicc/symbols/Symbol.hh b/src/mem/slicc/symbols/Symbol.hh
new file mode 100644
index 000000000..bca7d32db
--- /dev/null
+++ b/src/mem/slicc/symbols/Symbol.hh
@@ -0,0 +1,100 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#ifndef SYMBOL_H
+#define SYMBOL_H
+
+#include "slicc_global.hh"
+#include "Map.hh"
+#include "Location.hh"
+
+class Symbol {
+public:
+ // Constructors
+ Symbol(string id, const Location& location, const Map<string, string>& pairs);
+ Symbol(string id, const Location& location);
+ // Destructor
+ virtual ~Symbol() { }
+
+ // Public Methods
+ void error(string err_msg) const { m_location.error(err_msg); }
+ void warning(string err_msg) const { m_location.warning(err_msg); }
+ const Location& getLocation() const { return m_location; }
+
+ const string& toString() const { return m_id; }
+
+ const string& getIdent() const { return m_id; }
+ const string& getShorthand() const { return lookupPair("short"); }
+ const string& getDescription() const { return lookupPair("desc"); }
+
+ void markUsed() { m_used = true; }
+ bool wasUsed() { return m_used; }
+
+ bool existPair(const string& key) const { return m_pairs.exist(key); }
+ const string& lookupPair(const string& key) const;
+ void addPair(const string& key, const string& value);
+
+ // virtual string getCode() const = 0;
+ virtual void writeCFiles(string path) const {}
+ virtual void writeHTMLFiles(string path) const {}
+ virtual void print(ostream& out) const { out << "[Symbol: " << getIdent() << "]"; }
+
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ // Symbol(const Symbol& obj);
+ // Symbol& operator=(const Symbol& obj);
+
+ // Data Members (m_ prefix)
+ string m_id;
+ Map<string, string> m_pairs;
+ Location m_location;
+ bool m_used;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Symbol& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Symbol& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //SYMBOL_H
diff --git a/src/mem/slicc/symbols/SymbolTable.cc b/src/mem/slicc/symbols/SymbolTable.cc
new file mode 100644
index 000000000..37e233e88
--- /dev/null
+++ b/src/mem/slicc/symbols/SymbolTable.cc
@@ -0,0 +1,934 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "SymbolTable.hh"
+#include "fileio.hh"
+#include "html_gen.hh"
+#include "mif_gen.hh"
+#include "Action.hh"
+
+SymbolTable g_sym_table;
+
+// Construct the table with a single (global) scope frame and pre-register
+// the built-in types every protocol relies on: the "MachineType"
+// enumeration and the primitive/external "void" type.
+SymbolTable::SymbolTable()
+{
+ // Frame 0 is the global scope; m_depth always indexes the innermost frame.
+ m_sym_map_vec.setSize(1);
+ m_depth = 0;
+
+ {
+ Map<string, string> pairs;
+ pairs.add("enumeration", "yes");
+ newSym(new Type("MachineType", Location(), pairs));
+ }
+
+ {
+ Map<string, string> pairs;
+ pairs.add("primitive", "yes");
+ pairs.add("external", "yes");
+ newSym(new Type("void", Location(), pairs));
+ }
+}
+
+// The table owns every Symbol handed to newSym()/newCurrentMachine();
+// release them all here.
+SymbolTable::~SymbolTable()
+{
+  const int count = m_sym_vec.size();
+  for (int idx = 0; idx < count; ++idx) {
+    delete m_sym_vec[idx];
+  }
+}
+
+// Register 'sym_ptr' under its own name in the innermost scope and take
+// ownership of it (the destructor deletes everything held in m_sym_vec).
+void SymbolTable::newSym(Symbol* sym_ptr)
+{
+ registerSym(sym_ptr->toString(), sym_ptr);
+ m_sym_vec.insertAtBottom(sym_ptr); // Holder for the allocated Sym objects.
+}
+
+// Record 'sym_ptr' as a component of the machine currently being parsed so
+// other machines can reach into it later via getMachComponentVar().
+// Note: unlike newSym(), this does NOT take ownership (no m_sym_vec insert);
+// silently does nothing when no machine is being parsed.
+void SymbolTable::newMachComponentSym(Symbol* sym_ptr)
+{
+ // used to cheat-- that is, access components in other machines
+ StateMachine* mach_ptr = getStateMachine("current_machine");
+ if (mach_ptr != NULL) {
+ m_machine_component_map_vec.lookup(mach_ptr->toString()).add(sym_ptr->toString(), sym_ptr);
+ }
+}
+
+// Look up component 'ident' inside machine 'mach's component map,
+// narrowed to a Var; returns NULL when the symbol is not a Var.
+Var* SymbolTable::getMachComponentVar(string mach, string ident)
+{
+  return dynamic_cast<Var*>(m_machine_component_map_vec.lookup(mach).lookup(ident));
+}
+
+
+// Bind 'id' to 'sym_ptr' in the innermost scope frame, reporting an error
+// (at the symbol's declaration site) if 'id' is already bound in that frame.
+void SymbolTable::registerSym(string id, Symbol* sym_ptr)
+{
+
+ // Check for redeclaration (in the current frame only)
+ if (m_sym_map_vec[m_depth].exist(id)) {
+ sym_ptr->error("Symbol '" + id + "' redeclared in same scope.");
+ }
+ // FIXME - warn on masking of a declaration in a previous frame
+ m_sym_map_vec[m_depth].add(id, sym_ptr);
+}
+
+// Bind 'id' to 'sym_ptr' directly in the global frame (frame 0), regardless
+// of the current scope depth; errors on global redeclaration.
+void SymbolTable::registerGlobalSym(string id, Symbol* sym_ptr)
+{
+ // Check for redeclaration (global frame only)
+ if (m_sym_map_vec[0].exist(id)) {
+ sym_ptr->error("Global symbol '" + id + "' redeclared in global scope.");
+ }
+ m_sym_map_vec[0].add(id, sym_ptr);
+}
+
+// Resolve 'ident' by walking the scope frames from the innermost outward
+// to the global frame; NULL when no visible scope declares it.
+Symbol* SymbolTable::getSym(string ident) const
+{
+  int frame = m_depth;
+  while (frame >= 0) {
+    if (m_sym_map_vec[frame].exist(ident)) {
+      return m_sym_map_vec[frame].lookup(ident);
+    }
+    frame--;
+  }
+  return NULL;
+}
+
+// Install 'sym_ptr' as the machine now being parsed: globally visible under
+// its own name, aliased as "current_machine" in the current frame, owned by
+// the table, and given a fresh (empty) component map for
+// newMachComponentSym() to populate.
+void SymbolTable::newCurrentMachine(StateMachine* sym_ptr)
+{
+ registerGlobalSym(sym_ptr->toString(), sym_ptr);
+ registerSym("current_machine", sym_ptr);
+ m_sym_vec.insertAtBottom(sym_ptr); // Holder for the allocated Sym objects.
+
+ Map<string, Symbol*> m;
+ m_machine_component_map_vec.add(sym_ptr->toString(),m);
+
+}
+
+// Scope-aware lookup narrowed to a Type; NULL if absent or not a Type.
+Type* SymbolTable::getType(string ident) const
+{
+  Symbol* sym = getSym(ident);
+  return dynamic_cast<Type*>(sym);
+}
+
+// Scope-aware lookup narrowed to a Var; NULL if absent or not a Var.
+Var* SymbolTable::getVar(string ident) const
+{
+  Symbol* sym = getSym(ident);
+  return dynamic_cast<Var*>(sym);
+}
+
+// Scope-aware lookup narrowed to a Func; NULL if absent or not a Func.
+Func* SymbolTable::getFunc(string ident) const
+{
+  Symbol* sym = getSym(ident);
+  return dynamic_cast<Func*>(sym);
+}
+
+// Scope-aware lookup narrowed to a StateMachine; NULL if absent or not one.
+StateMachine* SymbolTable::getStateMachine(string ident) const
+{
+  Symbol* sym = getSym(ident);
+  return dynamic_cast<StateMachine*>(sym);
+}
+
+// Open a new innermost scope frame.
+void SymbolTable::pushFrame()
+{
+ m_depth++;
+ m_sym_map_vec.expand(1); // grow the per-frame map stack by one
+ m_sym_map_vec[m_depth].clear(); // start empty -- in case expand() recycled storage; TODO confirm Vector::expand semantics
+}
+
+// Discard the innermost scope frame created by pushFrame().
+// The precondition (there is a non-global frame to pop) is asserted
+// *before* mutating state; the original decremented first and asserted
+// afterwards, so popping the global frame left m_depth at -1 before the
+// assert fired (and, in NDEBUG builds, went on to expand(-1) with the
+// counter already corrupted).
+void SymbolTable::popFrame()
+{
+  assert(m_depth > 0);
+  m_depth--;
+  m_sym_map_vec.expand(-1); // drop the now-dead frame's map
+}
+
+// Emit all generated C++ for the protocol into 'path': first a Types.hh
+// umbrella header that includes every non-primitive Type, then each
+// symbol's own files, then the monolithic Chip.hh/Chip.cc (writeChipFiles).
+void SymbolTable::writeCFiles(string path) const
+{
+ int size = m_sym_vec.size();
+ {
+ // Write the Types.hh include file for the types
+ ostringstream sstr;
+ sstr << "/** Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< " */" << endl;
+ sstr << endl;
+ sstr << "#include \"RubySlicc_includes.hh\"" << endl;
+ for(int i=0; i<size; i++) {
+ Type* type = dynamic_cast<Type*>(m_sym_vec[i]);
+ if (type != NULL && !type->isPrimitive()) {
+ sstr << "#include \"" << type->cIdent() << ".hh" << "\"" << endl;
+ }
+ }
+ // conditionally_write_file presumably skips rewriting unchanged files to
+ // avoid spurious rebuilds -- TODO confirm in fileio.hh.
+ conditionally_write_file(path + "/Types.hh", sstr);
+ }
+
+ // Write all the symbols
+ for(int i=0; i<size; i++) {
+ m_sym_vec[i]->writeCFiles(path + '/');
+ }
+
+ writeChipFiles(path);
+}
+
+// Generate Chip.hh and Chip.cc into 'path'.  The emitted Chip class owns
+// every SLICC controller, chip-scoped variable, and network port queue,
+// plus stats/config printing and (under CHECK_COHERENCE) block-sharing
+// queries.  This is a string emitter: the exact order of the sstr <<
+// statements *is* the generated program, so do not reorder them.
+void SymbolTable::writeChipFiles(string path) const
+{
+ // Create Chip.cc and Chip.hh
+
+ // FIXME - Note: this method is _really_ ugly. Most of this
+ // functionality should be pushed into each type of symbol and use
+ // virtual methods to get the right behavior for each type of
+ // symbol. This is also more flexible, and much cleaner.
+
+ int size = m_sym_vec.size();
+
+ // Create Chip.h
+ {
+ ostringstream sstr;
+ sstr << "/** \\file Chip.h " << endl;
+ sstr << " * Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<<endl;
+ sstr << " */ " <<endl<<endl;
+
+ sstr << "#ifndef CHIP_H" << endl;
+ sstr << "#define CHIP_H" << endl;
+ sstr << endl;
+
+ // Includes
+ sstr << "#include \"Global.hh\"" << endl;
+ sstr << "#include \"Types.hh\"" << endl;
+ sstr << "#include \"AbstractChip.hh\"" << endl;
+ sstr << "class Network;" << endl;
+ sstr << endl;
+
+ // Class declarations for all Machines/Controllers
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ sstr << "class " << machine->getIdent() << "_Controller;" << endl;
+ }
+ }
+
+ sstr << "class Chip : public AbstractChip {" << endl;
+ sstr << "public:" << endl;
+ sstr << endl;
+ sstr << " // Constructors" << endl;
+ sstr << " Chip(NodeID chip_number, Network* net_ptr);" << endl;
+ sstr << endl;
+ sstr << " // Destructor" << endl;
+ sstr << " ~Chip();" << endl;
+ sstr << endl;
+ sstr << " // Public Methods" << endl;
+ sstr << " void recordCacheContents(CacheRecorder& tr) const;" << endl;
+ sstr << " void dumpCaches(ostream& out) const;" << endl;
+ sstr << " void dumpCacheData(ostream& out) const;" << endl;
+ sstr << " static void printStats(ostream& out);" << endl;
+ sstr << " static void clearStats();" << endl;
+ sstr << " void printConfig(ostream& out);" << endl;
+ sstr << " void print(ostream& out) const;" << endl;
+
+ // Used by coherence checker
+ sstr << "#ifdef CHECK_COHERENCE" << endl;
+ sstr << " bool isBlockShared(const Address& addr) const;" << endl;
+ sstr << " bool isBlockExclusive(const Address& addr) const;" << endl;
+ sstr << "#endif /* CHECK_COHERENCE */" << endl;
+
+ sstr << endl;
+ sstr << "private:" << endl;
+ sstr << " // Private copy constructor and assignment operator" << endl;
+ sstr << " Chip(const Chip& obj);" << endl;
+ sstr << " Chip& operator=(const Chip& obj);" << endl;
+ sstr << endl;
+ sstr << "public: // FIXME - these should not be public" << endl;
+ sstr << " // Data Members (m_ prefix)" << endl;
+ sstr << endl;
+ sstr << " Chip* m_chip_ptr;" << endl;
+ sstr << endl;
+ sstr << " // SLICC object variables" << endl;
+ sstr << endl;
+
+ // Look at all 'Vars': emit either a single pointer ("no_vector") or a
+ // per-controller Vector of pointers for each chip-scoped variable.
+ for(int i=0; i<size; i++) {
+ Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
+ if (var != NULL) {
+ if (var->existPair("chip_object")) {
+ if (var->existPair("no_chip_object")) {
+ // Do nothing
+ } else {
+ string template_hack = "";
+ if (var->existPair("template_hack")) {
+ template_hack = var->lookupPair("template_hack");
+ }
+ if (// var->existPair("network") || var->getType()->existPair("cache") ||
+// var->getType()->existPair("tbe") || var->getType()->existPair("newtbe") ||
+// var->getType()->existPair("dir") || var->getType()->existPair("persistent") ||
+// var->getType()->existPair("filter") || var->getType()->existPair("timer") ||
+// var->existPair("trigger_queue")
+ var->existPair("no_vector")
+ ) {
+ sstr << " " << var->getType()->cIdent() << template_hack << "* m_"
+ << var->cIdent() << "_ptr;" << endl;
+ } else {
+ // create pointer except those created in AbstractChip
+ if (!(var->existPair("abstract_chip_ptr"))) {
+ sstr << " Vector < " << var->getType()->cIdent() << template_hack
+ << "* > m_" << var->cIdent() << "_vec;" << endl;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ sstr << endl;
+ sstr << " // SLICC machine/controller variables" << endl;
+
+ // Look at all 'Machines'
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ string ident = machine->getIdent() + "_Controller";
+ sstr << " Vector < " << ident << "* > m_" << ident << "_vec;\n";
+ }
+ }
+
+ sstr << endl;
+
+ sstr << " // machine external SLICC function decls\n";
+
+ // Look at all 'Functions'
+ for(int i=0; i<size; i++) {
+ Func* func = dynamic_cast<Func*>(m_sym_vec[i]);
+ if (func != NULL) {
+ string proto;
+ func->funcPrototype(proto);
+ if (proto != "") {
+ sstr << " " << proto;
+ }
+ }
+ }
+
+ sstr << "};" << endl;
+ sstr << endl;
+ sstr << "#endif // CHIP_H" << endl;
+
+ conditionally_write_file(path + "/Chip.hh", sstr);
+ }
+ // Create Chip.cc
+ {
+ ostringstream sstr;
+ sstr << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<<endl<<endl;
+ sstr << "#include \"Chip.hh\"" << endl;
+ sstr << "#include \"Network.hh\"" << endl;
+ sstr << "#include \"CacheRecorder.hh\"" << endl;
+ sstr << "" << endl;
+
+ sstr << "// Includes for controllers" << endl;
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ sstr << "#include \"" << machine->getIdent() << "_Controller.hh\"" << endl;
+ }
+ }
+
+ sstr << "" << endl;
+ sstr << "Chip::Chip(NodeID id, Network* net_ptr):AbstractChip(id, net_ptr)" << endl;
+ sstr << "{" << endl;
+ sstr << " m_chip_ptr = this;" << endl;
+
+ // FIXME - WHY IS THIS LOOP HERE?
+ // WE SEEM TO BE CREATING A SEQUENCER HERE THEN OVERWRITTING THAT INSTANITATION
+ // IN THE NEXT LOOP
+// // find sequencer's type
+// for(int i=0; i<size; i++) {
+// Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
+// if(var && var->cIdent() == "sequencer")
+// sstr << " m_sequencer_ptr = new " << var->getType()->cIdent() << "(this);\n";
+// }
+
+ // Look at all 'Vars': emit constructor code allocating each chip-scoped
+ // variable (primitives, normal objects, special-cased "mover"s, and
+ // network port queues each take a different branch below).
+ for(int i=0; i<size; i++) {
+ Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
+ if (var != NULL && var->existPair("chip_object") && !var->existPair("no_chip_object")) {
+
+ sstr << " // " << var->cIdent() << endl;
+ if (!var->existPair("network")) {
+ // Not a network port object
+ if (var->getType()->existPair("primitive")) {
+ // Normal non-object
+ // sstr << " m_" << var->cIdent() << "_ptr = new " << var->getType()->cIdent() << ";\n";
+
+ sstr << " m_" << var->cIdent();
+ sstr << "_vec.setSize(RubyConfig::numberOf";
+ sstr << var->getMachine()->getIdent() << "PerChip(m_id));" << endl;
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " m_" << var->cIdent() << "_vec[i] = new " << var->getType()->cIdent() << ";\n";
+ if (var->existPair("default")) {
+ sstr << " *(m_" << var->cIdent() << "_vec[i]) = " << var->lookupPair("default") << ";\n";
+ }
+ sstr << " }\n";
+
+ } else {
+
+ // Normal Object
+ string template_hack = "";
+ if (var->existPair("template_hack")) {
+ template_hack = var->lookupPair("template_hack");
+ }
+ if (// var->getType()->existPair("cache") || var->getType()->existPair("tbe") ||
+// var->getType()->existPair("newtbe") || var->getType()->existPair("timer") ||
+// var->getType()->existPair("dir") || var->getType()->existPair("persistent") ||
+// var->getType()->existPair("filter") || var->existPair("trigger_queue")
+ var->existPair("no_vector")) {
+ sstr << " m_" << var->cIdent() << "_ptr = new " << var->getType()->cIdent() << template_hack;
+ if (!var->getType()->existPair("non_obj") && (!var->getType()->isEnumeration())) {
+ if (var->existPair("constructor_hack")) {
+ string constructor_hack = var->lookupPair("constructor_hack");
+ sstr << "(this, " << constructor_hack << ")";
+ } else {
+ sstr << "(this)";
+ }
+ }
+ sstr << ";\n";
+ sstr << " assert(m_" << var->cIdent() << "_ptr != NULL);" << endl;
+
+ if (var->existPair("default")) {
+ sstr << " (*m_" << var->cIdent() << "_ptr) = " << var->lookupPair("default")
+ << "; // Object default" << endl;
+ } else if (var->getType()->hasDefault()) {
+ sstr << " (*m_" << var->cIdent() << "_ptr) = " << var->getType()->getDefault()
+ << "; // Type " << var->getType()->getIdent() << " default" << endl;
+ }
+
+ // Set ordering
+ if (var->existPair("ordered") && !var->existPair("trigger_queue")) {
+ // A buffer
+ string ordered = var->lookupPair("ordered");
+ sstr << " m_" << var->cIdent() << "_ptr->setOrdering(" << ordered << ");\n";
+ }
+
+ // Set randomization
+ if (var->existPair("random")) {
+ // A buffer
+ string value = var->lookupPair("random");
+ sstr << " m_" << var->cIdent() << "_ptr->setRandomization(" << value << ");\n";
+ }
+
+ // Set Priority
+ if (var->getType()->isBuffer() && var->existPair("rank") && !var->existPair("trigger_queue")) {
+ string rank = var->lookupPair("rank");
+ sstr << " m_" << var->cIdent() << "_ptr->setPriority(" << rank << ");\n";
+ }
+ } else if ((var->getType()->existPair("mover")) && (var->getMachine()->getIdent() == "L2Cache")) {
+ // FIXME - dnuca mover is a special case
+ sstr << " m_" << var->cIdent() << "_ptr = NULL;" << endl;
+ sstr << " if (RubyConfig::isL2CacheDNUCAMoverChip(m_id)) {" << endl;
+ sstr << " m_" << var->cIdent() << "_ptr = new " << var->getType()->cIdent() << template_hack;
+ if (!var->getType()->existPair("non_obj") && (!var->getType()->isEnumeration())) {
+ if (var->existPair("constructor_hack")) {
+ string constructor_hack = var->lookupPair("constructor_hack");
+ sstr << "(this, " << constructor_hack << ")";
+ } else {
+ sstr << "(this)";
+ }
+ }
+ sstr << ";\n";
+ sstr << " }\n";
+ } else if (var->getType()->existPair("mover") && ((var->getMachine()->getIdent() == "L1Cache") || (var->getMachine()->getIdent() == "Collector"))) {
+ sstr << " m_" << var->cIdent() << "_ptr = NULL;" << endl;
+ sstr << " \n";
+ } else {
+ sstr << " m_" << var->cIdent();
+ sstr << "_vec.setSize(RubyConfig::numberOf";
+ sstr << var->getMachine()->getIdent() << "PerChip(m_id));" << endl;
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+
+
+ // 'tail' is the constructor-call suffix shared by every subtype
+ // allocated in the loop emitted above.
+ ostringstream tail;
+ tail << template_hack;
+ if (!var->getType()->existPair("non_obj") && (!var->getType()->isEnumeration())) {
+ if (var->existPair("constructor_hack")) {
+ string constructor_hack = var->lookupPair("constructor_hack");
+ tail << "(this, " << constructor_hack << ")";
+ } else {
+ tail << "(this)";
+ }
+ }
+ tail << ";\n";
+
+
+ // "child_selector" fans one variable out to different controller
+ // subtypes; "child_types" is parsed from "<id, Type><id2, Type2>..."
+ if(var->existPair("child_selector")){
+ string child_selector = var->lookupPair("child_selector");
+ string child_types = var->lookupPair("child_types");
+ string::iterator it = child_types.begin();
+
+ uint num_types = 0;
+ for(uint t=0;t<child_types.size();t++){
+ if(child_types.at(t) == '<'){
+ num_types++;
+ }
+ }
+
+ // NOTE(review): 'types' and 'ids' are allocated with new[] and
+ // never delete[]d -- leaked once per child_selector variable.
+ string* types = new string[num_types];
+ string* ids = new string[num_types];
+ int type_idx = 0;
+ bool id_done = false;
+ for(uint t=0;t<child_types.size();t++){
+ if(child_types[t] == '<'){
+ id_done = false;
+ uint r;
+ // NOTE(review): the loop condition evaluates at(r) *before* the
+ // r == size() check below, so a missing '>' throws out_of_range
+ // instead of reaching the "Parse error" diagnostic.
+ for(r=t+1;child_types.at(r)!='>';r++){
+ if(r == child_types.size()){
+ cerr << "Parse error in child_types" << endl;
+ exit(EXIT_FAILURE);
+ }
+ if(child_types.at(r) == ' ') continue; //ignore whitespace
+ if(child_types.at(r) == ',') {id_done = true;continue;}
+ if(id_done == true)
+ types[type_idx].push_back(child_types.at(r));
+ else
+ ids[type_idx].push_back(child_types.at(r));
+ }
+ type_idx++;
+ t = r;
+ }
+ }
+
+ // Emit an if/else-if chain that allocates the matching subtype.
+ for(uint t=0;t<num_types;t++){
+ if(t==0)
+ sstr << " if(strcmp(" << child_selector << ", \"" << ids[t] << "\") == 0)" << endl;
+ else
+ sstr << " else if(strcmp(" << child_selector << ", \"" << ids[t] << "\") == 0)" << endl;
+ sstr << " m_" << var->cIdent() << "_vec[i] = new " << types[t] << tail.str() << endl;
+ }
+ }
+ else {
+ sstr << " m_" << var->cIdent() << "_vec[i] = new " << var->getType()->cIdent() << tail.str() << endl;
+ }
+
+ sstr << " assert(m_" << var->cIdent() << "_vec[i] != NULL);" << endl;
+ if (var->existPair("ordered")) {
+ string ordered = var->lookupPair("ordered");
+ sstr << " m_" << var->cIdent() << "_vec[i]->setOrdering(" << ordered << ");\n";
+ }
+ if (var->existPair("rank")) {
+ string rank = var->lookupPair("rank");
+ sstr << " m_" << var->cIdent() << "_vec[i]->setPriority(" << rank << ");\n";
+ }
+
+ // Set buffer size
+ if (var->getType()->isBuffer() && !var->existPair("infinite")) {
+ sstr << " if (FINITE_BUFFERING) {\n";
+ sstr << " m_" << var->cIdent() << "_vec[i]->setSize(PROCESSOR_BUFFER_SIZE);\n";
+ sstr << " }\n";
+ }
+
+ sstr << " }\n";
+ }
+ }
+
+ sstr << endl;
+
+ } else {
+ // Network port object
+ string network = var->lookupPair("network");
+ string ordered = var->lookupPair("ordered");
+ string vnet = var->lookupPair("virtual_network");
+
+ if (var->getMachine() != NULL) {
+ sstr << " m_" << var->cIdent() << "_vec.setSize(RubyConfig::numberOf"
+ << var->getMachine()->getIdent() << "PerChip(m_id));" << endl;
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " m_" << var->cIdent() << "_vec[i] = m_net_ptr->get"
+ << network << "NetQueue(i+m_id*RubyConfig::numberOf" <<var->getMachine()->getIdent()
+ << "PerChip()+MachineType_base_number(string_to_MachineType(\""
+ << var->getMachine()->getIdent() << "\")), "
+ << ordered << ", " << vnet << ");\n";
+ sstr << " assert(m_" << var->cIdent() << "_vec[i] != NULL);" << endl;
+ } else { // old protocol
+ sstr << " m_" << var->cIdent() << "_vec.setSize(1);" << endl;
+ sstr << " for (int i = 0; i < 1; i++) {" << endl;
+ sstr << " m_" << var->cIdent() << "_vec[i] = m_net_ptr->get"
+ << network << "NetQueue(m_id, "
+ << ordered << ", " << vnet << ");\n";
+ sstr << " assert(m_" << var->cIdent() << "_vec[i] != NULL);" << endl;
+ }
+
+ // Set ordering
+ if (var->existPair("ordered")) {
+ // A buffer
+ string ordered = var->lookupPair("ordered");
+ sstr << " m_" << var->cIdent() << "_vec[i]->setOrdering(" << ordered << ");\n";
+ }
+
+ // Set randomization
+ if (var->existPair("random")) {
+ // A buffer
+ string value = var->lookupPair("random");
+ sstr << " m_" << var->cIdent() << "_vec[i]->setRandomization(" << value << ");\n";
+ }
+
+ // Set Priority
+ if (var->existPair("rank")) {
+ string rank = var->lookupPair("rank");
+ sstr << " m_" << var->cIdent() << "_vec[i]->setPriority(" << rank << ");\n";
+ }
+
+ // Set buffer size
+ if (var->getType()->isBuffer()) {
+ sstr << " if (FINITE_BUFFERING) {\n";
+ sstr << " m_" << var->cIdent() << "_vec[i]->setSize(PROTOCOL_BUFFER_SIZE);\n";
+ sstr << " }\n";
+ }
+
+ sstr << " }\n";
+ }
+ }
+ }
+ // Look at all 'Machines': allocate one controller per on-chip instance.
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ string ident = machine->getIdent() + "_Controller";
+ sstr << " m_" << ident << "_vec.setSize(RubyConfig::numberOf" << machine->getIdent()
+ << "PerChip(m_id));" << endl;
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << machine->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " m_" << ident << "_vec[i] = new " << ident << "(this, i);\n";
+ sstr << " assert(m_" << ident << "_vec[i] != NULL);" << endl;
+ sstr << " }\n";
+ sstr << endl;
+ }
+ }
+
+ sstr << "}" << endl;
+ sstr << endl;
+ // Emit ~Chip(): mirrors the constructor branches above when freeing.
+ sstr << "Chip::~Chip()\n";
+ sstr << "{\n";
+
+// // FIXME: sequencer shouldn' be manually handled
+// sstr << " delete m_sequencer_ptr;" << endl;
+
+ // Look at all 'Vars'
+ for(int i=0; i<size; i++) {
+ Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
+ if (var != NULL) {
+ if (var->existPair("chip_object")) {
+ if (var->existPair("no_chip_object")) {
+ // Do nothing
+ } else {
+ string template_hack = "";
+ if (var->existPair("template_hack")) {
+ template_hack = var->lookupPair("template_hack");
+ }
+ if (// var->getType()->existPair("cache") || var->getType()->existPair("tbe") ||
+// var->getType()->existPair("newtbe") || var->getType()->existPair("timer") ||
+// var->getType()->existPair("dir") || var->getType()->existPair("persistent") ||
+// var->getType()->existPair("filter") || var->existPair("trigger_queue")
+ var->existPair("no_vector")) {
+ sstr << " delete m_" << var->cIdent() << "_ptr;\n";
+ } else if ((var->getType()->existPair("mover")) && (var->getMachine()->getIdent() == "L2Cache")) {
+ sstr << " if (RubyConfig::isL2CacheDNUCAMoverChip(m_id)) {" << endl;
+ sstr << " delete m_" << var->cIdent() << "_ptr;\n";
+ sstr << " }\n";
+ } else if (var->getType()->existPair("mover") && ((var->getMachine()->getIdent() == "L1Cache") || (var->getMachine()->getIdent() == "Collector"))) {
+ sstr << " m_" << var->cIdent() << "_ptr = NULL;" << endl;
+ } else if (!var->existPair("network")) {
+ // Normal Object
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " delete m_" << var->cIdent() << "_vec[i];\n";
+ sstr << " }\n";
+ }
+ }
+ }
+ }
+ }
+
+ // Look at all 'Machines'
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ string ident = machine->getIdent() + "_Controller";
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << machine->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " delete m_" << ident << "_vec[i];\n";
+ sstr << " }\n";
+ }
+ }
+ sstr << "}\n";
+
+ // Emit Chip::clearStats(): delegates to each controller class.
+ sstr << "\n";
+ sstr << "void Chip::clearStats()\n";
+ sstr << "{\n";
+
+
+ // Look at all 'Machines'
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ string ident = machine->getIdent() + "_Controller";
+ sstr << " " << ident << "::clearStats();\n";
+ }
+ }
+
+ sstr << "}\n";
+
+ // Emit Chip::printStats().
+ sstr << "\n";
+ sstr << "void Chip::printStats(ostream& out)\n";
+ sstr << "{\n";
+ sstr << " out << endl;\n";
+ sstr << " out << \"Chip Stats\" << endl;\n";
+ sstr << " out << \"----------\" << endl << endl;\n";
+
+ // Look at all 'Machines'
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ string ident = machine->getIdent() + "_Controller";
+ sstr << " " << ident << "::dumpStats(out);\n";
+ }
+ }
+
+ sstr << "}" << endl;
+ sstr << endl;
+ // Emit Chip::printConfig(): one representative instance per variable.
+ sstr << "void Chip::printConfig(ostream& out)\n";
+ sstr << "{\n";
+ sstr << " out << \"Chip Config\" << endl;\n";
+ sstr << " out << \"-----------\" << endl;\n";
+ sstr << " out << \"Total_Chips: \" << RubyConfig::numberOfChips() << endl;\n";
+
+ // Look at all 'Vars'
+ for(int i=0; i<size; i++) {
+ Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
+ if (var != NULL) {
+ if (var->existPair("chip_object")) {
+ if (var->existPair("no_chip_object")) {
+ // Do nothing
+ } else {
+ string template_hack = "";
+ if (var->existPair("template_hack")) {
+ template_hack = var->lookupPair("template_hack");
+ }
+
+ if (!var->existPair("network") && (!var->getType()->existPair("primitive"))) {
+ // Normal Object
+ if (!var->getType()->existPair("non_obj") && (!var->getType()->isEnumeration())) {
+ if (var->existPair("no_vector")) {
+ sstr << " m_" << var->cIdent() << "_ptr->printConfig(out);\n";
+ } else {
+ sstr << " out << \"\\n" << var->cIdent() << " numberPerChip: \" << RubyConfig::numberOf" << var->getMachine()->getIdent()
+ << "PerChip() << endl;\n";
+ sstr << " m_" << var->cIdent() << "_vec[0]->printConfig(out);\n";
+// sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
+// << "PerChip(m_id); i++) {" << endl;
+// sstr << " m_" << var->cIdent() << "_vec[i]->printConfig(out);\n";
+// sstr << " }\n";
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ sstr << " out << endl;\n";
+ sstr << "}" << endl;
+
+ sstr << endl;
+ sstr << "void Chip::print(ostream& out) const\n";
+ sstr << "{\n";
+ sstr << " out << \"Ruby Chip\" << endl;\n";
+ sstr << "}" << endl;
+
+ // Emit the CHECK_COHERENCE helpers: poll every controller instance.
+ sstr << "#ifdef CHECK_COHERENCE" << endl;
+ sstr << endl;
+ sstr << "bool Chip::isBlockShared(const Address& addr) const" << endl;
+ sstr << "{" << endl;
+
+ // Look at all 'Machines'
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ string ident = machine->getIdent() + "_Controller";
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << machine->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " if (m_" << ident << "_vec[i]->" << machine->getIdent() << "_isBlockShared(addr)) {\n";
+ sstr << " return true; \n";
+ sstr << " }\n";
+ sstr << " }\n";
+ }
+ }
+ sstr << " return false;" << endl;
+ sstr << "}" << endl;
+ sstr << endl;
+
+ sstr << endl;
+ sstr << "bool Chip::isBlockExclusive(const Address& addr) const" << endl;
+ sstr << "{" << endl;
+
+ // Look at all 'Machines'
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ string ident = machine->getIdent() + "_Controller";
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << machine->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " if (m_" << ident << "_vec[i]->" << machine->getIdent() << "_isBlockExclusive(addr)) {\n";
+ sstr << " return true; \n";
+ sstr << " }\n";
+ sstr << " }\n";
+ }
+ }
+
+ sstr << " return false;" << endl;
+ sstr << "}" << endl;
+ sstr << endl;
+
+ sstr << "#endif /* CHECK_COHERENCE */ " << endl;
+
+
+ // Emit the cache dump/record helpers over every "cache"-tagged variable.
+ sstr << endl;
+ sstr << "void Chip::dumpCaches(ostream& out) const" << endl;
+ sstr << "{" << endl;
+
+ // Look at all 'Vars'
+ for(int i=0; i<size; i++) {
+ Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
+ if (var != NULL) {
+ if (var->getType()->existPair("cache")){ // caches are partitioned one per controller instaniation
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " m_" << var->cIdent() << "_vec[i]->print(out);\n";
+ sstr << " }\n";
+ }
+ }
+ }
+ sstr << "}" << endl;
+ sstr << endl;
+
+ // Function to dump cache tag and data information
+ sstr << "void Chip::dumpCacheData(ostream& out) const" << endl;
+ sstr << "{" << endl;
+
+ // Look at all 'Vars'
+ for(int i=0; i<size; i++) {
+ Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
+ if (var != NULL) {
+ if (var->getType()->existPair("cache")){ // caches are partitioned one per controller instaniation
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " m_" << var->cIdent() << "_vec[i]->printData(out);\n";
+ sstr << " }\n";
+ }
+ }
+ }
+ sstr << "}" << endl;
+ sstr << endl;
+
+ sstr << "void Chip::recordCacheContents(CacheRecorder& tr) const" << endl;
+ sstr << "{" << endl;
+
+ // Look at all 'Vars'
+ for(int i=0; i<size; i++) {
+ Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
+ if (var != NULL) {
+ if (var->getType()->existPair("cache")){ // caches are partitioned one per controller instaniation
+ sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
+ << "PerChip(m_id); i++) {" << endl;
+ sstr << " m_" << var->cIdent() << "_vec[i]->recordCacheContents(tr);\n";
+ sstr << " }\n";
+ }
+ }
+ }
+ sstr << "}" << endl;
+
+ conditionally_write_file(path + "/Chip.cc", sstr);
+ }
+}
+
+// Collect, in registration order, every symbol that is a StateMachine.
+Vector<StateMachine*> SymbolTable::getStateMachines() const
+{
+  Vector<StateMachine*> machines;
+  const int count = m_sym_vec.size();
+  for (int idx = 0; idx < count; ++idx) {
+    StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[idx]);
+    if (machine != NULL) {
+      machines.insertAtBottom(machine);
+    }
+  }
+  return machines;
+}
+
+// Emit the HTML protocol documentation: an index page, a blank filler page,
+// and each symbol's own pages.  Note: filenames are appended directly to
+// 'path', so 'path' is expected to end with a separator.
+void SymbolTable::writeHTMLFiles(string path) const
+{
+ // Create index.html
+ {
+ ostringstream out;
+ createHTMLindex(path, out);
+ conditionally_write_file(path + "index.html", out);
+ }
+
+ // Create empty.html
+ {
+ ostringstream out;
+ out << "<HTML></HTML>";
+ conditionally_write_file(path + "empty.html", out);
+ }
+
+ // Write all the symbols
+ int size = m_sym_vec.size();
+ for(int i=0; i<size; i++) {
+ m_sym_vec[i]->writeHTMLFiles(path);
+ }
+}
+
+// Unconditionally (re)write 'filename' with the buffered text.  The
+// ofstream is opened by its constructor and closed by its destructor
+// when it leaves scope (RAII), matching the original's explicit
+// open()/close() pair.
+void write_file(string filename, ostringstream& sstr)
+{
+  ofstream out(filename.c_str());
+  out << sstr.str();
+}
+
+// For every state machine, emit four FrameMaker MIF tables (states, events,
+// actions, transitions) into 'path'.  Uses write_file (unconditional), not
+// conditionally_write_file, so the MIF files are always rewritten.
+void SymbolTable::writeMIFFiles(string path) const
+{
+ int size = m_sym_vec.size();
+ for(int i=0; i<size; i++) {
+ ostringstream states, events, actions, transitions;
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ printStateTableMIF(*machine, states);
+ write_file(path + machine->getIdent() + "_states.mif", states);
+ printEventTableMIF(*machine, events);
+ write_file(path + machine->getIdent() + "_events.mif", events);
+ printActionTableMIF(*machine, actions);
+ write_file(path + machine->getIdent() + "_actions.mif", actions);
+ printTransitionTableMIF(*machine, transitions);
+ write_file(path + machine->getIdent() + "_transitions.mif", transitions);
+ }
+ }
+}
+
+
+// Debug printer for the table itself; currently just a placeholder tag.
+void SymbolTable::print(ostream& out) const
+{
+ out << "[SymbolTable]"; // FIXME
+}
diff --git a/src/mem/slicc/symbols/SymbolTable.hh b/src/mem/slicc/symbols/SymbolTable.hh
new file mode 100644
index 000000000..8f40fe8c5
--- /dev/null
+++ b/src/mem/slicc/symbols/SymbolTable.hh
@@ -0,0 +1,121 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * SymbolTable.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ * */
+
+#ifndef SYMBOLTABLE_H
+#define SYMBOLTABLE_H
+
+#include "slicc_global.hh"
+#include "Map.hh"
+#include "Vector.hh"
+
+#include "Symbol.hh"
+#include "Type.hh"
+#include "Var.hh"
+#include "Func.hh"
+#include "StateMachine.hh"
+
+class SymbolTable;
+
+extern SymbolTable g_sym_table;
+
// Global symbol table for the SLICC compiler.
//
// Owns every Symbol parsed from the .sm input and provides frame-based
// (scoped) name lookup, plus the top-level generators that write the
// C++, HTML, and MIF outputs.
class SymbolTable {
public:
  // Constructors
  SymbolTable();

  // Destructor
  ~SymbolTable();

  // Public Methods

  // Record a newly created symbol in the table.
  void newSym(Symbol* sym_ptr);
  // Bind 'id' to 'sym_ptr' in the current frame.
  void registerSym(string id, Symbol* sym_ptr);
  // Look up 'id' (scoped lookup -- presumably innermost frame first;
  // see SymbolTable.cc for the exact rules).
  Symbol* getSym(string id) const;

  // used to cheat-- that is, access components in other machines
  void newMachComponentSym(Symbol* sym_ptr);
  Var* getMachComponentVar(string mach, string ident);

  void newCurrentMachine(StateMachine* machine_ptr);
  StateMachine* getStateMachine(string ident) const;
  // The machine currently being compiled, registered under the
  // reserved name "current_machine".
  StateMachine* getStateMachine() const { return getStateMachine("current_machine"); }
  Type* getType(string ident) const;

  Var* getVar(string ident) const;
  Func* getFunc(string ident) const;

  // Enter / leave a lexical scope (one local binding map per frame).
  void pushFrame();
  void popFrame();

  // All registered symbols that are state machines.
  Vector<StateMachine*> getStateMachines() const;

  // Code-generation entry points.
  void writeCFiles(string path) const;
  void writeHTMLFiles(string path) const;
  void writeMIFFiles(string path) const;

  void print(ostream& out) const;
private:
  // Private Methods
  void registerGlobalSym(string id, Symbol* sym_ptr);
  void writeChipFiles(string path) const;

  // Private copy constructor and assignment operator
  SymbolTable(const SymbolTable& obj);
  SymbolTable& operator=(const SymbolTable& obj);

  // Data Members (m_ prefix)
  Vector<Symbol*> m_sym_vec;                    // all symbols, in creation order
  Vector<Map<string, Symbol*> > m_sym_map_vec;  // one name->symbol map per open frame
  Map<string, Map<string, Symbol*> > m_machine_component_map_vec; // machine name -> component map
  int m_depth;                                  // current frame nesting depth -- TODO confirm in .cc
};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const SymbolTable& obj);
+
+// ******************* Definitions *******************
+
// Output operator definition: stream a SymbolTable by delegating to
// its print() method, then flush.
extern inline
ostream& operator<<(ostream& out, const SymbolTable& obj)
{
  obj.print(out);
  out << flush;
  return out;
}
+
+#endif //SYMBOLTABLE_H
diff --git a/src/mem/slicc/symbols/Transition.cc b/src/mem/slicc/symbols/Transition.cc
new file mode 100644
index 000000000..7c144c101
--- /dev/null
+++ b/src/mem/slicc/symbols/Transition.cc
@@ -0,0 +1,173 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * */
+
+#include "Transition.hh"
+#include "State.hh"
+#include "Event.hh"
+#include "Action.hh"
+#include "util.hh"
+#include "Var.hh"
+
+Transition::Transition(string state, string event, string nextState,
+ const Vector<string>& actionList,
+ const Location& location,
+ const Map<string, string>& pairMap)
+ : Symbol(state + "|" + event, location, pairMap)
+{
+ m_state = state;
+ m_event = event;
+ m_nextState = nextState;
+ m_actionList = actionList;
+
+ // Ptrs are undefined at this point
+ m_statePtr = NULL;
+ m_eventPtr = NULL;
+ m_nextStatePtr = NULL;
+ m_actionPtrsValid = false;
+}
+
// Resolve the textual state/event/action identifiers captured at parse
// time into pointers, and accumulate this transition's resource needs.
//
// findIndex() reports an error for any unknown identifier. For each
// action, every resource entry except the special "DNUCAStopTable"
// type has its count parsed as an integer and summed into m_resources
// across all actions; DNUCAStopTable entries are copied through
// verbatim. Must run before any of the pointer/shorthand accessors.
void Transition::checkIdents(const Vector<State*>& states,
                             const Vector<Event*>& events,
                             const Vector<Action*>& actions)
{
  m_statePtr = findIndex(states, m_state);
  m_eventPtr = findIndex(events, m_event);
  m_nextStatePtr = findIndex(states, m_nextState);

  for(int i=0; i < m_actionList.size(); i++) {
    Action* action_ptr = findIndex(actions, m_actionList[i]);
    int size = action_ptr->getResources().keys().size();
    for (int j=0; j < size; j++) {
      Var* var_ptr = action_ptr->getResources().keys()[j];
      if (var_ptr->getType()->cIdent() != "DNUCAStopTable") {
        // Integer resource count: add this action's count to any
        // total already accumulated for the same resource.
        int num = atoi((action_ptr->getResources().lookup(var_ptr)).c_str());
        if (m_resources.exist(var_ptr)) {
          num += atoi((m_resources.lookup(var_ptr)).c_str());
        }
        m_resources.add(var_ptr, int_to_string(num));
      } else {
        // Stop-table entries are not counts; copy the value as-is.
        m_resources.add(var_ptr, action_ptr->getResources().lookup(var_ptr));
      }
    }
    m_actionPtrs.insertAtBottom(action_ptr);
  }
  m_actionPtrsValid = true;
}
+
// Shorthand accessors: return the abbreviated display name of the
// resolved state / event / next-state. checkIdents() must have run
// first -- the pointers are NULL until then (enforced by the asserts).
const string& Transition::getStateShorthand() const
{
  assert(m_statePtr != NULL);
  return m_statePtr->getShorthand();
}

const string& Transition::getEventShorthand() const
{
  assert(m_eventPtr != NULL);
  return m_eventPtr->getShorthand();
}

const string& Transition::getNextStateShorthand() const
{
  assert(m_nextStatePtr != NULL);
  return m_nextStatePtr->getShorthand();
}
+
+string Transition::getActionShorthands() const
+{
+ assert(m_actionPtrsValid);
+ string str;
+ int numActions = m_actionPtrs.size();
+ for (int i=0; i<numActions; i++) {
+ str += m_actionPtrs[i]->getShorthand();
+ }
+ return str;
+}
+
// Debug dump in the form "[Transition: (state:State, event:Event) ->
// nextState:State, actions]". The pointer-resolved parts are printed
// only once checkIdents() has filled them in.
void Transition::print(ostream& out) const
{
  out << "[Transition: ";
  out << "(" << m_state;
  if (m_statePtr != NULL) {
    out << ":" << *m_statePtr;
  }
  out << ", " << m_event;
  if (m_eventPtr != NULL) {
    out << ":" << *m_eventPtr;
  }
  out << ") -> ";
  out << m_nextState;
  if (m_nextStatePtr != NULL) {
    out << ":" << *m_nextStatePtr;
  }
  out << ", ";
  out << m_actionList;
  out << "]";
}
+
+Event* Transition::findIndex(const Vector<Event*>& vec, string ident)
+{
+ int size = vec.size();
+ for(int i=0; i<size; i++) {
+ if (ident == vec[i]->getIdent()) {
+ return vec[i];
+ }
+ }
+ error("Event not found: " + ident);
+ return NULL;
+}
+
+State* Transition::findIndex(const Vector<State*>& vec, string ident)
+{
+ int size = vec.size();
+ for(int i=0; i<size; i++) {
+ if (ident == vec[i]->getIdent()) {
+ return vec[i];
+ }
+ }
+ error("State not found: " + ident);
+ return NULL;
+}
+
+Action* Transition::findIndex(const Vector<Action*>& vec, string ident)
+{
+ int size = vec.size();
+ for(int i=0; i<size; i++) {
+ if (ident == vec[i]->getIdent()) {
+ return vec[i];
+ }
+ }
+ error("Action not found: " + ident);
+ return NULL;
+}
+
diff --git a/src/mem/slicc/symbols/Transition.hh b/src/mem/slicc/symbols/Transition.hh
new file mode 100644
index 000000000..af8eb3a05
--- /dev/null
+++ b/src/mem/slicc/symbols/Transition.hh
@@ -0,0 +1,120 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Transition.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ * */
+
+#ifndef TRANSITION_H
+#define TRANSITION_H
+
+#include "slicc_global.hh"
+#include "Vector.hh"
+#include "Symbol.hh"
+
+class State;
+class Event;
+class Action;
+class Var;
+
// One row of a state machine's transition table:
// (state, event) -> nextState, plus the ordered list of actions.
//
// Constructed from parsed identifier strings; checkIdents() later
// resolves them into State/Event/Action pointers and accumulates the
// transition's resource requirements.
class Transition : public Symbol {
public:
  // Constructors
  Transition(string state, string event, string nextState,
             const Vector<string>& actionList,
             const Location& location,
             const Map<string, string>& pairMap);
  // Destructor
  ~Transition() { }

  // Public Methods

  // Resolved pointers -- valid only after checkIdents() has run.
  State* getStatePtr() const { assert(m_statePtr != NULL); return m_statePtr; }
  Event* getEventPtr() const { assert(m_eventPtr != NULL); return m_eventPtr; }
  State* getNextStatePtr() const { assert(m_nextStatePtr != NULL); return m_nextStatePtr; }

  // int getStateIndex() const { assert(m_statePtr != NULL); return m_statePtr->getIndex(); }
  // int getEventIndex() const { assert(m_eventPtr != NULL); return m_eventPtr->getIndex(); }
  // int getNextStateIndex() const { assert(m_nextStatePtr != NULL); return m_nextStatePtr->getIndex(); }

  // Resolve identifier strings into pointers; errors on unknown names.
  void checkIdents(const Vector<State*>& states,
                   const Vector<Event*>& events,
                   const Vector<Action*>& actions);

  const string& getStateShorthand() const;
  const string& getEventShorthand() const;
  const string& getNextStateShorthand() const;
  string getActionShorthands() const;
  const Vector<Action*>& getActions() const { assert(m_actionPtrsValid); return m_actionPtrs; }
  const Map<Var*, string>& getResources() const { assert(m_actionPtrsValid); return m_resources; }

  void print(ostream& out) const;

  // Default copy constructor and assignment operator
  // Transition(const Transition& obj);
  // Transition& operator=(const Transition& obj);
private:
  // Private Methods

  // Linear searches; each reports an error when 'ident' is not found.
  Event* findIndex(const Vector<Event*>& vec, string ident);
  State* findIndex(const Vector<State*>& vec, string ident);
  Action* findIndex(const Vector<Action*>& vec, string ident);

  // Data Members (m_ prefix)

  // Identifiers as parsed from the .sm file.
  string m_state;
  string m_event;
  string m_nextState;

  // Resolved by checkIdents(); NULL until then.
  State* m_statePtr;
  Event* m_eventPtr;
  State* m_nextStatePtr;

  Vector<string> m_actionList;   // action names, in firing order
  Vector<Action*> m_actionPtrs;  // resolved actions (valid iff m_actionPtrsValid)
  Map<Var*, string> m_resources; // per-resource values accumulated from actions
  bool m_actionPtrsValid;
};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Transition& obj);
+
+// ******************* Definitions *******************
+
// Output operator definition: stream a Transition by delegating to
// its print() method, then flush.
extern inline
ostream& operator<<(ostream& out, const Transition& obj)
{
  obj.print(out);
  out << flush;
  return out;
}
+
+#endif //TRANSITION_H
diff --git a/src/mem/slicc/symbols/Type.cc b/src/mem/slicc/symbols/Type.cc
new file mode 100644
index 000000000..a49e9d7ba
--- /dev/null
+++ b/src/mem/slicc/symbols/Type.cc
@@ -0,0 +1,746 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Type.hh"
+#include "fileio.hh"
+#include "Map.hh"
+#include "StateMachine.hh"
+
+Type::Type(string id, const Location& location,
+ const Map<string, string>& pairs,
+ StateMachine* machine_ptr)
+ : Symbol(id, location, pairs)
+{
+ if (machine_ptr == NULL) {
+ m_c_id = id;
+ } else if (isExternal() || isPrimitive()) {
+ if (existPair("external_name")) {
+ m_c_id = lookupPair("external_name");
+ } else {
+ m_c_id = id;
+ }
+ } else {
+ m_c_id = machine_ptr->toString() + "_" + id; // Append with machine name
+ }
+
+ if(existPair("desc")){
+ m_desc = lookupPair("desc");
+ } else {
+ m_desc = "No description avaliable";
+ }
+
+ // check for interface that this Type implements
+ if(existPair("interface")) {
+ string interface = lookupPair("interface");
+ if(interface == "Message" || interface == "NetworkMessage") {
+ addPair("message", "yes");
+ }
+ if(interface == "NetworkMessage") {
+ addPair("networkmessage", "yes");
+ }
+ }
+
+ // FIXME - all of the following id comparisons are fragile hacks
+ if ((getIdent() == "CacheMemory") || (getIdent() == "NewCacheMemory") ||
+ (getIdent() == "TLCCacheMemory") || (getIdent() == "DNUCACacheMemory") ||
+ (getIdent() == "DNUCABankCacheMemory") || (getIdent() == "L2BankCacheMemory") ||
+ (getIdent() == "CompressedCacheMemory") || (getIdent() == "PrefetchCacheMemory")) {
+ addPair("cache", "yes");
+ }
+
+ if ((getIdent() == "TBETable") || (getIdent() == "DNUCATBETable") || (getIdent() == "DNUCAStopTable")) {
+ addPair("tbe", "yes");
+ }
+
+ if ((getIdent() == "NewTBETable")) {
+ addPair("newtbe", "yes");
+ }
+
+ if ((getIdent() == "TimerTable")) {
+ addPair("timer", "yes");
+ }
+
+ if ((getIdent() == "DirectoryMemory")) {
+ addPair("dir", "yes");
+ }
+
+ if ((getIdent() == "PersistentTable")) {
+ addPair("persistent", "yes");
+ }
+
+ if ((getIdent() == "Prefetcher")) {
+ addPair("prefetcher", "yes");
+ }
+
+ if ((getIdent() == "DNUCA_Movement")) {
+ addPair("mover", "yes");
+ }
+
+ if (id == "MachineType") {
+ m_isMachineType = true;
+ } else {
+ m_isMachineType = false;
+ }
+}
+
+// Return false on error
+bool Type::dataMemberAdd(string id, Type* type_ptr, Map<string, string>& pairs,
+ string* init_code)
+{
+ if (dataMemberExist(id)) {
+ return false; // Error
+ } else {
+ m_data_member_map.add(id, type_ptr);
+ m_data_member_ident_vec.insertAtBottom(id);
+ m_data_member_type_vec.insertAtBottom(type_ptr);
+ m_data_member_pairs_vec.insertAtBottom(pairs);
+ m_data_member_init_code_vec.insertAtBottom(init_code);
+ }
+
+ return true;
+}
+
+string Type::methodId(string name,
+ const Vector<Type*>& param_type_vec)
+{
+ string paramStr = "";
+ for (int i = 0; i < param_type_vec.size(); i++) {
+ paramStr += "_"+param_type_vec[i]->cIdent();
+ }
+ return name+paramStr;
+}
+
+bool Type::methodAdd(string name,
+ Type* return_type_ptr,
+ const Vector<Type*>& param_type_vec)
+{
+ string id = methodId(name, param_type_vec);
+ if (methodExist(id)) {
+ return false; // Error
+ } else {
+ m_method_return_type_map.add(id, return_type_ptr);
+ m_method_param_type_map.add(id, param_type_vec);
+ return true;
+ }
+}
+
+bool Type::enumAdd(string id, Map<string, string> pairs_map)
+{
+ if (enumExist(id)) {
+ return false;
+ } else {
+ m_enum_map.add(id, true);
+ m_enum_vec.insertAtBottom(id);
+ m_enum_pairs.insertAtBottom(pairs_map);
+
+ // Add default
+ if (!existPair("default")) {
+ addPair("default", cIdent()+"_NUM");
+ }
+
+ return true;
+ }
+}
+
+void Type::writeCFiles(string path) const
+{
+ if (isExternal()) {
+ // Do nothing
+ } else if (isEnumeration()) {
+ printEnumH(path);
+ printEnumC(path);
+ } else { // User defined structs and messages
+ printTypeH(path);
+ printTypeC(path);
+ }
+}
+
// Generate the .hh file for a user-defined struct or message type.
//
// Emits: include guard, #includes for all non-primitive field types
// (plus the declared interface, if any), the class definition with a
// default constructor (applying per-field or per-type defaults), a
// full-initialization constructor, message allocator plumbing
// (clone/destroy) when the type is a message, accessor/mutator methods
// per field, the data members themselves, and the output operator.
// Written via conditionally_write_file, which presumably skips files
// whose content is unchanged.
void Type::printTypeH(string path) const
{
  ostringstream out;
  int size = m_data_member_type_vec.size();
  string type_name = cIdent(); // Identifier for the type in C

  // Header
  out << "/** \\file " << type_name << ".hh" << endl;
  out << " * " << endl;
  out << " * Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
  out << " */" << endl;
  out << endl;
  out << "#ifndef " << type_name << "_H" << endl;
  out << "#define " << type_name << "_H" << endl;
  out << endl;

  // Include all of the #includes needed
  out << "#include \"Global.hh\"" << endl;
  out << "#include \"Allocator.hh\"" << endl;
  for (int i=0; i < size; i++) {
    Type* type = m_data_member_type_vec[i];
    if (!type->isPrimitive()) {
      out << "#include \"" << type->cIdent() << ".hh" << "\"" << endl;
    }
  }
  string interface = "";
  if(existPair("interface")) {
    interface = lookupPair("interface");
    out << "#include \"" << interface << ".hh\"" << endl;
  }

  // Class definition
  out << "class " << type_name;

  if(interface != "") {
    out << " : public " << interface ;
  }

  out << " {" << endl;
  out << "public:" << endl;

  // ******** Default constructor ********

  out << " " << type_name << "() " << endl;

  // Call superclass constructor
  if (interface != "") {
    out << " : " << interface << "()" << endl;
  }

  out << " {" << endl;

  if(!isGlobal()) {
    // Initialize each field from its "default" pair, or failing that
    // from the field type's own default, when either exists.
    for (int i=0; i < size; i++) {

      Type* type_ptr = m_data_member_type_vec[i];
      string id = m_data_member_ident_vec[i];
      if (m_data_member_pairs_vec[i].exist("default")) {
        // look for default value
        string default_value = m_data_member_pairs_vec[i].lookup("default");
        out << " m_" << id << " = " << default_value << "; // default for this field " << endl;
      } else if (type_ptr->hasDefault()) {
        // Look for the type default
        string default_value = type_ptr->getDefault();
        out << " m_" << id << " = " << default_value << "; // default value of " << type_ptr->cIdent() << endl;
      } else {
        out << " // m_" << id << " has no default" << endl;
      }
    }
  } // end of if(!isGlobal())
  out << " }" << endl;

  // ******** Default destructor ********
  out << " ";
  out << "~" << type_name << "() { };" << endl;

  // ******** Full init constructor ********
  if(! isGlobal()) {
    out << " " << type_name << "(";

    for (int i=0; i < size; i++) {
      if (i != 0) {
        out << ", ";
      }
      Type* type = m_data_member_type_vec[i];
      string id = m_data_member_ident_vec[i];
      out << "const " << type->cIdent() << "& local_" << id;
    }
    out << ")" << endl;

    // Call superclass constructor
    if (interface != "") {
      out << " : " << interface << "()" << endl;
    }

    out << " {" << endl;
    for (int i=0; i < size; i++) {
      Type* type_ptr = m_data_member_type_vec[i];
      string id = m_data_member_ident_vec[i];
      out << " m_" << id << " = local_" << id << ";" << endl;
      // "nextLineCallHack" lets a field append an arbitrary call
      // (e.g. a method invocation) right after its assignment.
      if (m_data_member_pairs_vec[i].exist("nextLineCallHack")) {
        string next_line_value = m_data_member_pairs_vec[i].lookup("nextLineCallHack");
        out << " m_" << id << next_line_value << ";" << endl;
      }

    }
    out << " }" << endl;
  } // end of if(!isGlobal())

  // bobba -
  //******** Partial init constructor ********
  //** Constructor needs only the first n-1 data members for init
  //** HACK to create objects with partially specified data members
  //** Need to get rid of this and use hierarchy instead
//   if(! isGlobal()) {
//     out << " " << type_name << "(";

//     for (int i=0; i < size-1; i++) {
//       if (i != 0) {
//         out << ", ";
//       }
//       Type* type = m_data_member_type_vec[i];
//       string id = m_data_member_ident_vec[i];
//       out << "const " << type->cIdent() << "& local_" << id;
//     }
//     out << ")" << endl;

//     // Call superclass constructor
//     if (interface != "") {
//       out << " : " << interface << "()" << endl;
//     }

//     out << " {" << endl;
//     for (int i=0; i < size-1; i++) {
//       Type* type_ptr = m_data_member_type_vec[i];
//       string id = m_data_member_ident_vec[i];
//       out << " m_" << id << " = local_" << id << ";" << endl;
//       if (m_data_member_pairs_vec[i].exist("nextLineCallHack")) {
//         string next_line_value = m_data_member_pairs_vec[i].lookup("nextLineCallHack");
//         out << " m_" << id << next_line_value << ";" << endl;
//       }

//     }
//     out << " }" << endl;
//   } // end of if(!isGlobal())

  // ******** Message member functions ********
  // FIXME: those should be moved into slicc file, slicc should support more of
  // the c++ class inheritance

  if (isMessage()) {
    out << " Message* clone() const { checkAllocator(); return s_allocator_ptr->allocate(*this); }" << endl;
    out << " void destroy() { checkAllocator(); s_allocator_ptr->deallocate(this); }" << endl;
    out << " static Allocator<" << type_name << ">* s_allocator_ptr;" << endl;
    out << " static void checkAllocator() { if (s_allocator_ptr == NULL) { s_allocator_ptr = new Allocator<" << type_name << ">; }}" << endl;
  }

  if(!isGlobal()) {
    // const Get methods for each field
    out << " // Const accessors methods for each field" << endl;
    for (int i=0; i < size; i++) {
      Type* type_ptr = m_data_member_type_vec[i];
      string type = type_ptr->cIdent();
      string id = m_data_member_ident_vec[i];
      out << "/** \\brief Const accessor method for " << id << " field." << endl;
      out << " * \\return " << id << " field" << endl;
      out << " */" << endl;
      out << " const " << type << "& get" << id
          << "() const { return m_" << id << "; }" << endl;
    }

    out << endl;

    // Non-const Get methods for each field
    out << " // Non const Accessors methods for each field" << endl;
    for (int i=0; i < size; i++) {
      Type* type_ptr = m_data_member_type_vec[i];
      string type = type_ptr->cIdent();
      string id = m_data_member_ident_vec[i];
      out << "/** \\brief Non-const accessor method for " << id << " field." << endl;
      out << " * \\return " << id << " field" << endl;
      out << " */" << endl;
      out << " " << type << "& get" << id
          << "() { return m_" << id << "; }" << endl;
    }

    out << endl;

    // Set methods for each field
    out << " // Mutator methods for each field" << endl;
    for (int i=0; i < size; i++) {
      Type* type_ptr = m_data_member_type_vec[i];
      string type = type_ptr->cIdent();
      string id = m_data_member_ident_vec[i];
      out << "/** \\brief Mutator method for " << id << " field */" << endl;
      out << " void set" << id << "(const " << type << "& local_"
          << id << ") { m_" << id << " = local_" << id << "; }" << endl;
    }

    out << endl;
  } // end of if(!isGlobal())

  out << " void print(ostream& out) const;" << endl;
  out << "//private:" << endl;

  // Data members for each field
  for (int i=0; i < size; i++) {
    // "abstract" fields are declared by the interface and not
    // re-emitted here.
    if (!m_data_member_pairs_vec[i].exist("abstract")) {
      out << " ";
      // global structure
      if(isGlobal()) out << "static const ";

      Type* type = m_data_member_type_vec[i];
      string id = m_data_member_ident_vec[i];
      out << type->cIdent() << " m_" << id;

      // init value
      string* init_code = m_data_member_init_code_vec[i];
      if(init_code) {
        // only global structure can have init value here
        assert(isGlobal());
        out << " = " << *init_code << " ";
      }
      out << ";";
      if (m_data_member_pairs_vec[i].exist("desc")) {
        string desc = m_data_member_pairs_vec[i].lookup("desc");
        out << " /**< " << desc << "*/";
      }
      out << endl;
    }
  }

  out << "};" << endl; // End class

  out << "// Output operator declaration" << endl;
  out << "ostream& operator<<(ostream& out, const " << type_name << "& obj);" << endl;
  out << endl;
  out << "// Output operator definition" << endl;
  out << "extern inline" << endl;
  out << "ostream& operator<<(ostream& out, const " << type_name << "& obj)" << endl;
  out << "{" << endl;
  out << " obj.print(out);" << endl;
  out << " out << flush;" << endl;
  out << " return out;" << endl;
  out << "}" << endl;
  out << endl;
  out << "#endif // " << type_name << "_H" << endl;

  // Write it out
  conditionally_write_file(path + type_name + ".hh", out);
}
+
// Generate the .cc file for a user-defined struct or message type.
//
// Emits the static allocator definition (messages only) and the
// print() method that dumps every field; written only when the
// content changes (via conditionally_write_file).
void Type::printTypeC(string path) const
{
  ostringstream out;
  int size = m_data_member_type_vec.size();
  string type_name = cIdent(); // Identifier for the type in C

  // Header
  out << "/** \\file " << type_name << ".cc" << endl;
  out << " * " << endl;
  out << " * Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
  out << " */" << endl;
  out << endl;
  out << "#include \"" << type_name << ".hh\"" << endl;
  out << endl;
  if (isMessage()) {
    // Out-of-class definition of the static allocator declared in the
    // generated header.
    out << "Allocator<" << type_name << ">* " << type_name << "::s_allocator_ptr = NULL;" << endl;
  }
  out << "/** \\brief Print the state of this object */" << endl;
  out << "void " << type_name << "::print(ostream& out) const" << endl;
  out << "{" << endl;
  out << " out << \"[" << type_name << ": \";" << endl;

  // For each field
  for (int i=0; i < size; i++) {
    string id = m_data_member_ident_vec[i];
    out << " out << \"" << id << "=\" << m_" << id << " << \" \";" << endl;
  }

  if (isMessage()) {
    // Messages also report their timestamp.
    out << " out << \"" << "Time" << "=\" << getTime()" << " << \" \";" << endl;
  }

  // Trailer
  out << " out << \"]\";" << endl;
  out << "}" << endl;

  // Write it out
  conditionally_write_file(path + type_name + ".cc", out);
}
+
+void Type::printEnumH(string path) const
+{
+ ostringstream out;
+ int size = m_enum_vec.size();
+ string type_name = cIdent(); // Identifier for the type in C
+ string type_desc = desc();
+
+ // Header
+ out << "/** \\file " << type_name << ".hh" << endl;
+ out << " * " << endl;
+ out << " * Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
+ out << " */" << endl;
+
+ out << "#ifndef " << type_name << "_H" << endl;
+ out << "#define " << type_name << "_H" << endl;
+ out << endl;
+ // Include all of the #includes needed
+ out << "#include \"Global.hh\"" << endl;
+ if (m_isMachineType) {
+ out << "#include \"RubyConfig.hh\"" << endl << endl;
+ }
+ out << endl;
+
+ // Class definition
+ out << "/** \\enum " << type_name << endl;
+ out << " * \\brief " << type_desc << endl;
+ out << " */" << endl;
+ out << "enum " << type_name << " {" << endl;
+
+ out << " " << type_name << "_FIRST," << endl;
+
+ // For each field
+ for(int i = 0; i < size; i++ ) {
+ string id = m_enum_vec[i];
+ string description;
+ if(m_enum_pairs[i].exist("desc")){
+ description = m_enum_pairs[i].lookup("desc");
+ } else {
+ description = "No description avaliable";
+ }
+ if (i == 0) {
+ out << " " << type_name << "_" << id << " = " << type_name << "_FIRST, /**< " << description << " */" << endl;
+ }
+ else {
+ out << " " << type_name << "_" << id << ", /**< " << description << " */" << endl;
+ }
+ }
+ out << " " << type_name << "_NUM" << endl;
+ out << "};" << endl;
+
+ // Code to convert from a string to the enumeration
+ out << type_name << " string_to_" << type_name << "(const string& str);" << endl;
+
+ // Code to convert state to a string
+ out << "string " << type_name << "_to_string(const " << type_name << "& obj);" << endl;
+
+ // Code to increment an enumeration type
+ out << type_name << " &operator++( " << type_name << " &e);" << endl;
+
+ // MachineType hack used to set the base component id for each Machine
+ if (m_isMachineType) {
+ out << "int " << type_name << "_base_level(const " << type_name << "& obj);" << endl;
+ out << "int " << type_name << "_base_number(const " << type_name << "& obj);" << endl;
+ out << "int " << type_name << "_base_count(const " << type_name << "& obj);" << endl;
+ out << "int " << type_name << "_chip_count(const " << type_name << "& obj, NodeID chipID);" << endl;
+
+ for(int i = 0; i < size; i++ ) {
+ string id = m_enum_vec[i];
+ out << "#define MACHINETYPE_" << id << " 1" << endl;
+ }
+ cout << endl;
+ }
+
+ // Trailer
+ out << "ostream& operator<<(ostream& out, const " << type_name << "& obj);" << endl;
+ out << endl;
+ out << "#endif // " << type_name << "_H" << endl;
+
+ // Write the file
+ conditionally_write_file(path + type_name + ".hh", out);
+}
+
+void Type::printEnumC(string path) const
+{
+  ostringstream out;
+  int size = m_enum_vec.size();
+  string type_name = cIdent(); // Identifier for the type in C
+
+  // Header of the generated file (fixed: this generator emits the .cc, not the .hh)
+  out << "/** \\file " << type_name << ".cc" << endl;
+  out << " * " << endl;
+  out << " * Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
+  out << " */" << endl;
+
+  out << endl;
+  out << "#include \"" << type_name << ".hh\"" << endl;
+  out << endl;
+
+  // Code for output operator
+  out << "ostream& operator<<(ostream& out, const " << type_name << "& obj)" << endl;
+  out << "{" << endl;
+  out << "  out << " << type_name << "_to_string(obj);" << endl;
+  out << "  out << flush;" << endl;
+  out << "  return out;" << endl;
+  out << "}" << endl;
+
+  // Code to convert state to a string
+  out << endl;
+  out << "string " << type_name << "_to_string(const " << type_name << "& obj)" << endl;
+  out << "{" << endl;
+  out << "  switch(obj) {" << endl;
+
+  // For each field
+  for( int i = 0; i<size; i++ ) {
+    out << "  case " << type_name << "_" << m_enum_vec[i] << ":" << endl;
+    out << "    return \"" << m_enum_vec[i] << "\";" << endl;
+  }
+
+  // Trailer
+  out << "  default:" << endl;
+  out << "    ERROR_MSG(\"Invalid range for type " << type_name << "\");" << endl;
+  out << "    return \"\";" << endl;
+  out << "  }" << endl;
+  out << "}" << endl;
+
+  // Code to convert from a string to the enumeration
+  out << endl;
+  out << type_name << " string_to_" << type_name << "(const string& str)" << endl;
+  out << "{" << endl;
+  out << "  if (false) {" << endl;
+
+  // For each field
+  for( int i = 0; i<size; i++ ) {
+    out << "  } else if (str == \"" << m_enum_vec[i] << "\") {" << endl;
+    out << "    return " << type_name << "_" << m_enum_vec[i] << ";" << endl;
+  }
+
+  out << "  } else {" << endl;
+  out << "    WARN_EXPR(str);" << endl;
+  out << "    ERROR_MSG(\"Invalid string conversion for type " << type_name << "\");" << endl;
+  out << "  }" << endl;
+  out << "}" << endl;
+
+  // Code to increment an enumeration type
+  out << endl;
+  out << type_name << "& operator++( " << type_name << "& e) {" << endl;
+  out << "  assert(e < " << type_name << "_NUM);" << endl;
+  out << "  return e = " << type_name << "(e+1);" << endl;
+  out << "}" << endl;
+
+  // MachineType hack used to set the base level and number of components for each Machine
+  if (m_isMachineType) {
+    out << endl;
+    out << "/** \\brief returns the base vector index for each machine type to be used by NetDest " << endl;
+    out << " * " << endl;
+    out << " * \\return the base vector index for each machine type to be used by NetDest" << endl;
+    out << " * \\see NetDest.hh" << endl;
+    out << " */" << endl;
+    out << "int " << type_name << "_base_level(const " << type_name << "& obj)" << endl;
+    out << "{" << endl;
+    out << "  switch(obj) {" << endl;
+
+    // For each field: base level is the machine's ordinal position in the enum
+    Vector < string > MachineNames;
+    for( int i = 0; i<size; i++ ) {
+      out << "  case " << type_name << "_" << m_enum_vec[i] << ":" << endl;
+      out << "    return " << MachineNames.size() << ";" << endl;
+      MachineNames.insertAtBottom(m_enum_vec[i]);
+    }
+
+    // total num
+    out << "  case " << type_name << "_NUM:" << endl;
+    out << "    return " << MachineNames.size() << ";" << endl;
+
+    // Trailer
+    out << "  default:" << endl;
+    out << "    ERROR_MSG(\"Invalid range for type " << type_name << "\");" << endl;
+    out << "    return -1;" << endl;
+    out << "  }" << endl;
+    out << "}" << endl;
+
+    out << endl;
+    out << "/** \\brief The return value indicates the number of components created" << endl;
+    out << " * before a particular machine's components" << endl;
+    out << " * " << endl;
+    out << " * \\return the base number of components for each machine" << endl;
+    out << " */" << endl;
+    out << "int " << type_name << "_base_number(const " << type_name << "& obj)" << endl;
+    out << "{" << endl;
+    out << "  switch(obj) {" << endl;
+
+    // For each field: sum of counts of all machine types declared earlier
+    MachineNames.clear();
+    for( int i = 0; i<size; i++ ) {
+      out << "  case " << type_name << "_" << m_enum_vec[i] << ":" << endl;
+      out << "    return 0";
+      for ( int m = 0; m<MachineNames.size(); m++) {
+        out << "+RubyConfig::numberOf" << MachineNames[m] << "()";
+      }
+      out << ";" << endl;
+      MachineNames.insertAtBottom(m_enum_vec[i]);
+    }
+
+    // total num
+    out << "  case " << type_name << "_NUM:" << endl;
+    out << "    return 0";
+    for ( int m = 0; m<MachineNames.size(); m++) {
+      out << "+RubyConfig::numberOf" << MachineNames[m] << "()";
+    }
+    out << ";" << endl;
+
+    // Trailer
+    out << "  default:" << endl;
+    out << "    ERROR_MSG(\"Invalid range for type " << type_name << "\");" << endl;
+    out << "    return -1;" << endl;
+    out << "  }" << endl;
+    out << "}" << endl;
+
+
+    out << endl;
+    out << "/** \\brief returns the total number of components for each machine" << endl;
+    out << " * \\return the total number of components for each machine" << endl;
+    out << " */" << endl;
+    out << "int " << type_name << "_base_count(const " << type_name << "& obj)" << endl;
+    out << "{" << endl;
+    out << "  switch(obj) {" << endl;
+
+    // For each field
+    for( int i = 0; i<size; i++ ) {
+      out << "  case " << type_name << "_" << m_enum_vec[i] << ":" << endl;
+      out << "    return RubyConfig::numberOf" << m_enum_vec[i] << "();" << endl;
+    }
+
+    // total num
+    out << "  case " << type_name << "_NUM:" << endl;
+    // Trailer
+    out << "  default:" << endl;
+    out << "    ERROR_MSG(\"Invalid range for type " << type_name << "\");" << endl;
+    out << "    return -1;" << endl;
+    out << "  }" << endl;
+    out << "}" << endl;
+
+    out << endl;
+    out << "/** \\brief returns the number of components of a machine type on one chip" << endl;
+    out << " * \\return the number of components of the machine type on the given chip" << endl;
+    out << " */" << endl;
+    out << "int " << type_name << "_chip_count(const " << type_name << "& obj, NodeID chipID)" << endl;
+    out << "{" << endl;
+    out << "  switch(obj) {" << endl;
+
+    // For each field: delegate to the per-chip count query
+    for( int i = 0; i<size; i++ ) {
+      out << "  case " << type_name << "_" << m_enum_vec[i] << ":" << endl;
+      out << "    return RubyConfig::numberOf" << m_enum_vec[i] << "PerChip(chipID);" << endl;
+    }
+
+    // total num
+    out << "  case " << type_name << "_NUM:" << endl;
+    // Trailer
+    out << "  default:" << endl;
+    out << "    ERROR_MSG(\"Invalid range for type " << type_name << "\");" << endl;
+    out << "    return -1;" << endl;
+    out << "  }" << endl;
+    out << "}" << endl;
+
+
+  }
+
+  // Write the file (only rewritten when contents changed)
+  conditionally_write_file(path + type_name + ".cc", out);
+}
diff --git a/src/mem/slicc/symbols/Type.hh b/src/mem/slicc/symbols/Type.hh
new file mode 100644
index 000000000..c6f891326
--- /dev/null
+++ b/src/mem/slicc/symbols/Type.hh
@@ -0,0 +1,154 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Type.hh
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ * */
+
+#ifndef TYPE_H
+#define TYPE_H
+
+#include "slicc_global.hh"
+#include "Map.hh"
+#include "Symbol.hh"
+
+class StateMachine;
+
+class Type : public Symbol {
+public:
+  // Constructor; machine_ptr is NULL for types not scoped to a state machine
+  Type(string id, const Location& location,
+       const Map<string, string>& pairs,
+       StateMachine* machine_ptr = NULL);
+
+  // Destructor
+  ~Type() {}
+
+  // Accessors; category predicates below are driven by SLICC attribute pairs
+  string cIdent() const { return m_c_id; } // identifier used in generated C++
+  string desc() const { return m_desc; }   // human-readable description
+
+  bool isPrimitive() const { return existPair("primitive"); }
+  bool isNetworkMessage() const { return existPair("networkmessage"); }
+  bool isMessage() const { return existPair("message"); }
+  bool isBuffer() const { return existPair("buffer"); }
+  bool isInPort() const { return existPair("inport"); }
+  bool isOutPort() const { return existPair("outport"); }
+  bool isEnumeration() const { return existPair("enumeration"); }
+  bool isExternal() const { return existPair("external"); }
+  bool isGlobal() const { return existPair("global"); }
+
+  // The data members of this type - only valid for messages and SLICC
+  // declared structures
+  // Return false on error (e.g. the member id already exists)
+  bool dataMemberAdd(string id, Type* type_ptr, Map<string, string>& pairs,
+                     string* init_code);
+  bool dataMemberExist(string id) const { return m_data_member_map.exist(id); }
+  Type* dataMemberType(string id) const { return m_data_member_map.lookup(id); }
+
+  // The methods of this type - only valid for external types
+  // Return false on error
+  bool methodAdd(string name, Type* return_type_ptr, const Vector<Type*>& param_type_vec);
+  bool methodExist(string id) const { return m_method_return_type_map.exist(id); }
+
+  string methodId(string name, const Vector<Type*>& param_type_vec);
+  Type* methodReturnType(string id) const { return m_method_return_type_map.lookup(id); }
+  const Vector<Type*>& methodParamType(string id) const { return m_method_param_type_map.lookup(id); }
+
+  // The enumeration idents of this type - only valid for enums
+  // Return false on error
+  bool enumAdd(string id, Map<string, string> pairs);
+  bool enumExist(string id) const { return m_enum_map.exist(id); }
+
+  // Write the generated C++ output files for this type under 'path'
+  void writeCFiles(string path) const;
+
+  bool hasDefault() const { return existPair("default"); }
+  string getDefault() const { return lookupPair("default"); }
+
+  void print(ostream& out) const {} // intentionally a no-op
+private:
+  // Private Methods: per-flavor generators invoked by writeCFiles()
+
+  void printTypeH(string path) const;
+  void printTypeC(string path) const;
+  void printEnumC(string path) const;
+  void printEnumH(string path) const;
+
+  // Private copy constructor and assignment operator (declared, never defined)
+  Type(const Type& obj);
+  Type& operator=(const Type& obj);
+
+  // Data Members (m_ prefix)
+  string m_c_id;
+  string m_desc;
+
+  // Data Members: parallel structures describing the declared data members
+  Map<string, Type*> m_data_member_map;
+  Vector<string> m_data_member_ident_vec;
+  Vector<Type*> m_data_member_type_vec;
+  Vector<Map<string, string> > m_data_member_pairs_vec;
+  Vector<string*> m_data_member_init_code_vec;
+  // Needs pairs here
+
+  // Methods (external types only)
+  Map<string, Type*> m_method_return_type_map;
+  Map<string, Vector<Type*> > m_method_param_type_map;
+  // Needs pairs here
+
+  // Enum idents, kept in declaration order in m_enum_vec
+  Map<string, bool> m_enum_map;
+  Vector<string> m_enum_vec;
+  Vector< Map < string, string > > m_enum_pairs;
+
+  // MachineType Hack: enables extra generated helpers (see printEnumC)
+  bool m_isMachineType;
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Type& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Type& obj)
+{
+  obj.print(out); // Type::print is an empty no-op, so nothing is emitted
+  out << flush;
+  return out;
+}
+
+#endif //TYPE_H
diff --git a/src/mem/slicc/symbols/Var.cc b/src/mem/slicc/symbols/Var.cc
new file mode 100644
index 000000000..a16c86967
--- /dev/null
+++ b/src/mem/slicc/symbols/Var.cc
@@ -0,0 +1,57 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * */
+
+#include "Var.hh"
+#include "StateMachine.hh"
+
+Var::Var(string id, const Location& location,
+         Type* type_ptr, string code,
+         const Map<string, string>& pairs,
+         StateMachine* machine_ptr) : Symbol(id, location, pairs)
+{
+  if (machine_ptr == NULL) {
+    m_c_id = id; // global variable: the plain id is the C identifier
+  } else {
+    m_c_id = machine_ptr->toString() + "_" + id; // Append with machine name
+  }
+
+  m_machine_ptr = machine_ptr; // may be NULL for machine-independent variables
+  m_type_ptr = type_ptr;       // declared type of this variable
+  m_code = code;               // raw code string, retrieved via getCode()
+}
+
+void Var::print(ostream& out) const
+{
+  out << "[Var id: " << m_c_id << "]" << endl; // one-line debug dump of this variable
+}
diff --git a/src/mem/slicc/symbols/Var.hh b/src/mem/slicc/symbols/Var.hh
new file mode 100644
index 000000000..277be0f74
--- /dev/null
+++ b/src/mem/slicc/symbols/Var.hh
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Var.hh
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ * */
+
+#ifndef VAR_H
+#define VAR_H
+
+#include "slicc_global.hh"
+#include "Symbol.hh"
+#include "Type.hh"
+
+class StateMachine;
+
+class Var : public Symbol {
+public:
+  // Constructor; machine_ptr is NULL for machine-independent variables
+  Var(string id, const Location& location,
+      Type* type_ptr, string code,
+      const Map<string, string>& pairs,
+      StateMachine* machine_ptr = NULL);
+
+  // Var(string id, const Location& location,
+  //     Type* type_ptr, string code) : Symbol(id, location) { m_type_ptr = type_ptr; m_code = code; }
+
+  // Destructor
+  ~Var() {}
+
+  // Public Methods (simple accessors over the constructor-set state)
+  string cIdent() const { return m_c_id; } // identifier used in generated C++
+  void writeCFiles(string path) const {}   // Vars generate no files of their own
+  string getCode() const { return m_code; }
+  Type* getType() const { return m_type_ptr; }
+  StateMachine* getMachine() const { return m_machine_ptr; }
+
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator (declared, never defined)
+  Var(const Var& obj);
+  Var& operator=(const Var& obj);
+
+  // Data Members (m_ prefix)
+  string m_c_id;
+  Type* m_type_ptr;
+  string m_code;
+  StateMachine* m_machine_ptr; // may be NULL
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Var& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Var& obj)
+{
+  obj.print(out); // emits "[Var id: <c_id>]" per Var::print
+  out << flush;
+  return out;
+}
+
+#endif //VAR_H