 src/arch/arm/fastmodel/CortexA76x1/CortexA76x1.lisa          |  40
 src/arch/arm/fastmodel/CortexA76x1/FastModelCortexA76x1.py   | 220
 src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.cc           | 409
 src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.hh           |  87
 src/arch/arm/fastmodel/iris/Iris.py                          |   6
 src/arch/arm/fastmodel/iris/cpu.cc                           |   9
 src/arch/arm/fastmodel/iris/cpu.hh                           |   4
 7 files changed, 439 insertions, 336 deletions
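This patch splits the old monolithic FastModelCortexA76x1 SystemC wrapper into three objects: a per-core FastModelCortexA76 (which absorbs the former cpu0_* parameters, unprefixed), a FastModelCortexA76Cluster holding the cores plus the exported virtual subsystem (EVS), and a FastModelScxEvsCortexA76x1 wrapper around the generated scx_evs_CortexA76x1 module. The GIC redistributor socket moves from the cluster onto the individual core, matching the per-core redistributor/IRQ port arrays introduced in the LISA file below. As a rough usage sketch only (the class names come from this patch; the surrounding config environment and parameter values are assumed):

```python
# Minimal config sketch (not part of this patch), assuming the usual gem5
# config environment where generated SimObject classes are importable from
# m5.objects. Parameter values here are illustrative only.
from m5.objects import FastModelCortexA76x1

cluster = FastModelCortexA76x1()   # one FastModelCortexA76 core plus the x1 EVS

# Per-core knobs that used to be 'cpu0_*' parameters on the monolithic
# wrapper now live on the core object itself.
cluster.cores[0].RVBARADDR = 0x10000000
cluster.cores[0].semihosting_enable = False

# Cluster-wide settings (the amba initiator socket, the shared cache and
# timing parameters, the timer/PMU interrupt pins) remain parameters of the
# cluster object, as before.
```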
diff --git a/src/arch/arm/fastmodel/CortexA76x1/CortexA76x1.lisa b/src/arch/arm/fastmodel/CortexA76x1/CortexA76x1.lisa index 695ae9ef2..feedaaccc 100644 --- a/src/arch/arm/fastmodel/CortexA76x1/CortexA76x1.lisa +++ b/src/arch/arm/fastmodel/CortexA76x1/CortexA76x1.lisa @@ -61,27 +61,27 @@ component CortexA76x1 ambaBridge.amba_pv_m => self.amba; // Connection to the GIC. - self.redistributor =>core.gicv3_redistributor_s[0]; + self.redistributor => core.gicv3_redistributor_s; // Connections from CPU to adapters core.CNTHPIRQ[0] => CNTHPIRQ.sg_signal_s; - CNTHPIRQ.amba_pv_signal_m => self.cnthpirq; + CNTHPIRQ.amba_pv_signal_m => self.cnthpirq[0]; core.CNTHVIRQ[0] => CNTHVIRQ.sg_signal_s; - CNTHVIRQ.amba_pv_signal_m => self.cnthvirq; + CNTHVIRQ.amba_pv_signal_m => self.cnthvirq[0]; core.CNTPNSIRQ[0] => CNTPNSIRQ.sg_signal_s; - CNTPNSIRQ.amba_pv_signal_m => self.cntpnsirq; + CNTPNSIRQ.amba_pv_signal_m => self.cntpnsirq[0]; core.CNTPSIRQ[0] => CNTPSIRQ.sg_signal_s; - CNTPSIRQ.amba_pv_signal_m => self.cntpsirq; + CNTPSIRQ.amba_pv_signal_m => self.cntpsirq[0]; core.CNTVIRQ[0] => CNTVIRQ.sg_signal_s; - CNTVIRQ.amba_pv_signal_m => self.cntvirq; + CNTVIRQ.amba_pv_signal_m => self.cntvirq[0]; core.commirq[0] => COMMIRQ.sg_signal_s; - COMMIRQ.amba_pv_signal_m => self.commirq; + COMMIRQ.amba_pv_signal_m => self.commirq[0]; core.ctidbgirq[0] => CTIDBGIRQ.sg_signal_s; - CTIDBGIRQ.amba_pv_signal_m => self.ctidbgirq; + CTIDBGIRQ.amba_pv_signal_m => self.ctidbgirq[0]; core.pmuirq[0] => PMUIRQ.sg_signal_s; - PMUIRQ.amba_pv_signal_m => self.pmuirq; + PMUIRQ.amba_pv_signal_m => self.pmuirq[0]; core.vcpumntirq[0] => VCPUMNTIRQ.sg_signal_s; - VCPUMNTIRQ.amba_pv_signal_m => self.vcpumntirq; + VCPUMNTIRQ.amba_pv_signal_m => self.vcpumntirq[0]; // Clocks. clock1Hz.clk_out => clockDiv.clk_in; @@ -103,16 +103,16 @@ component CortexA76x1 clockDiv.rate.set64(mul, div); } } - slave port<GICv3Comms> redistributor; + slave port<GICv3Comms> redistributor[1]; // External ports for CPU-to-GIC signals - master port<AMBAPVSignal> cnthpirq; - master port<AMBAPVSignal> cnthvirq; - master port<AMBAPVSignal> cntpsirq; - master port<AMBAPVSignal> cntvirq; - master port<AMBAPVSignal> commirq; - master port<AMBAPVSignal> ctidbgirq; - master port<AMBAPVSignal> pmuirq; - master port<AMBAPVSignal> vcpumntirq; - master port<AMBAPVSignal> cntpnsirq; + master port<AMBAPVSignal> cnthpirq[1]; + master port<AMBAPVSignal> cnthvirq[1]; + master port<AMBAPVSignal> cntpsirq[1]; + master port<AMBAPVSignal> cntvirq[1]; + master port<AMBAPVSignal> commirq[1]; + master port<AMBAPVSignal> ctidbgirq[1]; + master port<AMBAPVSignal> pmuirq[1]; + master port<AMBAPVSignal> vcpumntirq[1]; + master port<AMBAPVSignal> cntpnsirq[1]; } diff --git a/src/arch/arm/fastmodel/CortexA76x1/FastModelCortexA76x1.py b/src/arch/arm/fastmodel/CortexA76x1/FastModelCortexA76x1.py index 055223f9c..f775f569d 100644 --- a/src/arch/arm/fastmodel/CortexA76x1/FastModelCortexA76x1.py +++ b/src/arch/arm/fastmodel/CortexA76x1/FastModelCortexA76x1.py @@ -26,32 +26,129 @@ # Authors: Gabe Black from m5.params import * +from m5.proxy import * from m5.SimObject import SimObject from m5.objects.ArmInterrupts import ArmInterrupts from m5.objects.ArmISA import ArmISA from m5.objects.FastModel import AmbaInitiatorSocket, AmbaTargetSocket from m5.objects.FastModelArch import FastModelArmCPU -from m5.objects.FastModelGIC import Gicv3CommsInitiatorSocket from m5.objects.FastModelGIC import Gicv3CommsTargetSocket from m5.objects.Gic import ArmPPI from m5.objects.SystemC import SystemC_ScModule 
-class FastModelCortexA76x1(SystemC_ScModule): - type = 'FastModelCortexA76x1' - cxx_class = 'FastModel::CortexA76x1' +class FastModelCortexA76(FastModelArmCPU): + type = 'FastModelCortexA76' + cxx_class = 'FastModel::CortexA76' cxx_header = 'arch/arm/fastmodel/CortexA76x1/cortex_a76x1.hh' - _core_paths = [ 'core.cpu0' ] - cpu_wrapper = FastModelArmCPU( - core_paths=_core_paths, + cntfrq = 0x1800000 - cntfrq = 0x1800000, + # We shouldn't need these, but gem5 gets mad without them. + interrupts = [ ArmInterrupts() ] + isa = [ ArmISA() ] - # We shouldn't need these, but gem5 gets mad without them. - interrupts = [ ArmInterrupts() ], - isa = [ ArmISA() ], - ) + evs = Parent.evs + + redistributor = Gicv3CommsTargetSocket('GIC communication target') + + CFGEND = Param.Bool(False, "Endianness configuration at reset. "\ + "0, little endian. 1, big endian.") + CFGTE = Param.Bool(False, "Instruction set state when resetting "\ + "into AArch32. 0, A32. 1, T32.") + CRYPTODISABLE = Param.Bool(False, "Disable cryptographic features.") + RVBARADDR = Param.Addr(0x0, "Value of RVBAR_ELx register.") + VINITHI = Param.Bool(False, "Reset value of SCTLR.V.") + enable_trace_special_hlt_imm16 = Param.Bool(False, + "Enable usage of parameter trace_special_hlt_imm16") + l2cache_hit_latency = Param.UInt64(0, "L2 Cache timing annotation "\ + "latency for hit. Intended to model the tag-lookup time. This "\ + "is only used when l2cache-state_modelled=true.") + l2cache_maintenance_latency = Param.UInt64(0, "L2 Cache timing "\ + "annotation latency for cache maintenance operations given in "\ + "total ticks. This is only used when dcache-state_modelled=true.") + l2cache_miss_latency = Param.UInt64(0, "L2 Cache timing annotation "\ + "latency for miss. Intended to model the time for failed "\ + "tag-lookup and allocation of intermediate buffers. This is "\ + "only used when l2cache-state_modelled=true.") + l2cache_read_access_latency = Param.UInt64(0, "L2 Cache timing "\ + "annotation latency for read accesses given in ticks per "\ + "access. If this parameter is non-zero, per-access latencies "\ + "will be used instead of per-byte even if l2cache-read_latency "\ + "is set. This is in addition to the hit or miss latency, and "\ + "intended to correspond to the time taken to transfer across the "\ + "cache upstream bus, this is only used when "\ + "l2cache-state_modelled=true.") + l2cache_read_latency = Param.UInt64(0, "L2 Cache timing annotation "\ + "latency for read accesses given in ticks per byte "\ + "accessed.l2cache-read_access_latency must be set to 0 for "\ + "per-byte latencies to be applied. This is in addition to the "\ + "hit or miss latency, and intended to correspond to the time "\ + "taken to transfer across the cache upstream bus. This is only "\ + "used when l2cache-state_modelled=true.") + l2cache_size = Param.MemorySize32('0x80000', "L2 Cache size in bytes.") + l2cache_snoop_data_transfer_latency = Param.UInt64(0, "L2 Cache "\ + "timing annotation latency for received snoop accesses that "\ + "perform a data transfer given in ticks per byte accessed. This "\ + "is only used when dcache-state_modelled=true.") + l2cache_snoop_issue_latency = Param.UInt64(0, "L2 Cache timing "\ + "annotation latency for snoop accesses issued by this cache in "\ + "total ticks. This is only used when dcache-state_modelled=true.") + l2cache_write_access_latency = Param.UInt64(0, "L2 Cache timing "\ + "annotation latency for write accesses given in ticks per "\ + "access. 
If this parameter is non-zero, per-access latencies "\ + "will be used instead of per-byte even if l2cache-write_latency "\ + "is set. This is only used when l2cache-state_modelled=true.") + l2cache_write_latency = Param.UInt64(0, "L2 Cache timing annotation "\ + "latency for write accesses given in ticks per byte accessed. "\ + "l2cache-write_access_latency must be set to 0 for per-byte "\ + "latencies to be applied. This is only used when "\ + "l2cache-state_modelled=true.") + max_code_cache_mb = Param.MemorySize32('0x100', "Maximum size of "\ + "the simulation code cache (MiB). For platforms with more than 2 "\ + "cores this limit will be scaled down. (e.g 1/8 for 16 or more "\ + "cores)") + min_sync_level = Param.Unsigned(0, "Force minimum syncLevel "\ + "(0=off=default,1=syncState,2=postInsnIO,3=postInsnAll)") + semihosting_A32_HLT = Param.UInt16(0xf000, + "A32 HLT number for semihosting calls.") + semihosting_A64_HLT = Param.UInt16(0xf000, + "A64 HLT number for semihosting calls.") + semihosting_ARM_SVC = Param.UInt32(0x123456, + "A32 SVC number for semihosting calls.") + semihosting_T32_HLT = Param.Unsigned(60, + "T32 HLT number for semihosting calls.") + semihosting_Thumb_SVC = Param.Unsigned(171, + "T32 SVC number for semihosting calls.") + semihosting_cmd_line = Param.String("", + "Command line available to semihosting calls.") + semihosting_cwd = Param.String("", + "Base directory for semihosting file access.") + semihosting_enable = Param.Bool(True, "Enable semihosting SVC/HLT traps.") + semihosting_heap_base = Param.Addr(0x0, "Virtual address of heap base.") + semihosting_heap_limit = Param.Addr(0xf000000, + "Virtual address of top of heap.") + semihosting_stack_base = Param.Addr(0x10000000, + "Virtual address of base of descending stack.") + semihosting_stack_limit = Param.Addr(0xf000000, + "Virtual address of stack limit.") + trace_special_hlt_imm16 = Param.UInt16(0xf000, "For this HLT "\ + "number, IF enable_trace_special_hlt_imm16=true, skip performing "\ + "usual HLT execution but call MTI trace if registered") + vfp_enable_at_reset = Param.Bool(False, "Enable VFP in CPACR, "\ + "CPPWR, NSACR at reset. Warning: Arm recommends going through "\ + "the implementation's suggested VFP power-up sequence!") + +class FastModelCortexA76Cluster(SimObject): + type = 'FastModelCortexA76Cluster' + cxx_class = 'FastModel::CortexA76Cluster' + cxx_header = 'arch/arm/fastmodel/CortexA76x1/cortex_a76x1.hh' + + cores = VectorParam.FastModelCortexA76( + 'Core in a given cluster of CortexA76s') + + evs = Param.SystemC_ScModule( + "Fast mo0del exported virtual subsystem holding cores") cnthpirq = Param.ArmInterruptPin(ArmPPI(num=10), "EL2 physical timer event") @@ -71,7 +168,6 @@ class FastModelCortexA76x1(SystemC_ScModule): "Non-secure physical timer event") amba = AmbaInitiatorSocket(64, 'AMBA initiator socket') - redistributor = Gicv3CommsTargetSocket('GIC communication target') # These parameters are described in "Fast Models Reference Manual" section # 3.4.19, "ARMCortexA7x1CT". @@ -258,92 +354,12 @@ class FastModelCortexA76x1(SystemC_ScModule): walk_cache_latency = Param.UInt64(0, "Walk cache latency for TA (Timing "\ "Annotation), expressed in simulation ticks") - cpu0_CFGEND = Param.Bool(False, "Endianness configuration at reset. "\ - "0, little endian. 1, big endian.") - cpu0_CFGTE = Param.Bool(False, "Instruction set state when resetting "\ - "into AArch32. 0, A32. 
1, T32.") - cpu0_CRYPTODISABLE = Param.Bool(False, "Disable cryptographic features.") - cpu0_RVBARADDR = Param.Addr(0x0, "Value of RVBAR_ELx register.") - cpu0_VINITHI = Param.Bool(False, "Reset value of SCTLR.V.") - cpu0_enable_trace_special_hlt_imm16 = Param.Bool(False, - "Enable usage of parameter trace_special_hlt_imm16") - cpu0_l2cache_hit_latency = Param.UInt64(0, "L2 Cache timing annotation "\ - "latency for hit. Intended to model the tag-lookup time. This "\ - "is only used when l2cache-state_modelled=true.") - cpu0_l2cache_maintenance_latency = Param.UInt64(0, "L2 Cache timing "\ - "annotation latency for cache maintenance operations given in "\ - "total ticks. This is only used when dcache-state_modelled=true.") - cpu0_l2cache_miss_latency = Param.UInt64(0, "L2 Cache timing annotation "\ - "latency for miss. Intended to model the time for failed "\ - "tag-lookup and allocation of intermediate buffers. This is "\ - "only used when l2cache-state_modelled=true.") - cpu0_l2cache_read_access_latency = Param.UInt64(0, "L2 Cache timing "\ - "annotation latency for read accesses given in ticks per "\ - "access. If this parameter is non-zero, per-access latencies "\ - "will be used instead of per-byte even if l2cache-read_latency "\ - "is set. This is in addition to the hit or miss latency, and "\ - "intended to correspond to the time taken to transfer across the "\ - "cache upstream bus, this is only used when "\ - "l2cache-state_modelled=true.") - cpu0_l2cache_read_latency = Param.UInt64(0, "L2 Cache timing annotation "\ - "latency for read accesses given in ticks per byte "\ - "accessed.l2cache-read_access_latency must be set to 0 for "\ - "per-byte latencies to be applied. This is in addition to the "\ - "hit or miss latency, and intended to correspond to the time "\ - "taken to transfer across the cache upstream bus. This is only "\ - "used when l2cache-state_modelled=true.") - cpu0_l2cache_size = Param.MemorySize32('0x80000', - "L2 Cache size in bytes.") - cpu0_l2cache_snoop_data_transfer_latency = Param.UInt64(0, "L2 Cache "\ - "timing annotation latency for received snoop accesses that "\ - "perform a data transfer given in ticks per byte accessed. This "\ - "is only used when dcache-state_modelled=true.") - cpu0_l2cache_snoop_issue_latency = Param.UInt64(0, "L2 Cache timing "\ - "annotation latency for snoop accesses issued by this cache in "\ - "total ticks. This is only used when dcache-state_modelled=true.") - cpu0_l2cache_write_access_latency = Param.UInt64(0, "L2 Cache timing "\ - "annotation latency for write accesses given in ticks per "\ - "access. If this parameter is non-zero, per-access latencies "\ - "will be used instead of per-byte even if l2cache-write_latency "\ - "is set. This is only used when l2cache-state_modelled=true.") - cpu0_l2cache_write_latency = Param.UInt64(0, "L2 Cache timing annotation "\ - "latency for write accesses given in ticks per byte accessed. "\ - "l2cache-write_access_latency must be set to 0 for per-byte "\ - "latencies to be applied. This is only used when "\ - "l2cache-state_modelled=true.") - cpu0_max_code_cache_mb = Param.MemorySize32('0x100', "Maximum size of "\ - "the simulation code cache (MiB). For platforms with more than 2 "\ - "cores this limit will be scaled down. 
(e.g 1/8 for 16 or more "\ - "cores)") - cpu0_min_sync_level = Param.Unsigned(0, "Force minimum syncLevel "\ - "(0=off=default,1=syncState,2=postInsnIO,3=postInsnAll)") - cpu0_semihosting_A32_HLT = Param.UInt16(0xf000, - "A32 HLT number for semihosting calls.") - cpu0_semihosting_A64_HLT = Param.UInt16(0xf000, - "A64 HLT number for semihosting calls.") - cpu0_semihosting_ARM_SVC = Param.UInt32(0x123456, - "A32 SVC number for semihosting calls.") - cpu0_semihosting_T32_HLT = Param.Unsigned(60, - "T32 HLT number for semihosting calls.") - cpu0_semihosting_Thumb_SVC = Param.Unsigned(171, - "T32 SVC number for semihosting calls.") - cpu0_semihosting_cmd_line = Param.String("", - "Command line available to semihosting calls.") - cpu0_semihosting_cwd = Param.String("", - "Base directory for semihosting file access.") - cpu0_semihosting_enable = Param.Bool(True, - "Enable semihosting SVC/HLT traps.") - cpu0_semihosting_heap_base = Param.Addr(0x0, - "Virtual address of heap base.") - cpu0_semihosting_heap_limit = Param.Addr(0xf000000, - "Virtual address of top of heap.") - cpu0_semihosting_stack_base = Param.Addr(0x10000000, - "Virtual address of base of descending stack.") - cpu0_semihosting_stack_limit = Param.Addr(0xf000000, - "Virtual address of stack limit.") - cpu0_trace_special_hlt_imm16 = Param.UInt16(0xf000, "For this HLT "\ - "number, IF enable_trace_special_hlt_imm16=true, skip performing "\ - "usual HLT execution but call MTI trace if registered") - cpu0_vfp_enable_at_reset = Param.Bool(False, "Enable VFP in CPACR, "\ - "CPPWR, NSACR at reset. Warning: Arm recommends going through "\ - "the implementation's suggested VFP power-up sequence!") +class FastModelScxEvsCortexA76x1(SystemC_ScModule): + type = 'FastModelScxEvsCortexA76x1' + cxx_class = 'FastModel::ScxEvsCortexA76x1' + cxx_header = 'arch/arm/fastmodel/CortexA76x1/cortex_a76x1.hh' + +class FastModelCortexA76x1(FastModelCortexA76Cluster): + cores = [ FastModelCortexA76(thread_paths=[ 'core.cpu0' ]) ] + + evs = FastModelScxEvsCortexA76x1() diff --git a/src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.cc b/src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.cc index 50d5417a6..898082741 100644 --- a/src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.cc +++ b/src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.cc @@ -33,7 +33,6 @@ #include "arch/arm/fastmodel/iris/cpu.hh" #include "base/logging.hh" #include "dev/arm/base_gic.hh" -#include "params/FastModelCortexA76x1.hh" #include "sim/core.hh" #include "systemc/tlm_bridge/gem5_to_tlm.hh" @@ -41,188 +40,210 @@ namespace FastModel { void -CortexA76x1::clockChangeHandler() +CortexA76::setCluster(CortexA76Cluster *_cluster, int _num) { - clockRateControl->set_mul_div(SimClock::Int::s, clockPeriod.value); + cluster = _cluster; + num = _num; + + set_evs_param("CFGEND", params().CFGEND); + set_evs_param("CFGTE", params().CFGTE); + set_evs_param("CRYPTODISABLE", params().CRYPTODISABLE); + set_evs_param("RVBARADDR", params().RVBARADDR); + set_evs_param("VINITHI", params().VINITHI); + set_evs_param("enable_trace_special_hlt_imm16", + params().enable_trace_special_hlt_imm16); + set_evs_param("l2cache-hit_latency", params().l2cache_hit_latency); + set_evs_param("l2cache-maintenance_latency", + params().l2cache_maintenance_latency); + set_evs_param("l2cache-miss_latency", params().l2cache_miss_latency); + set_evs_param("l2cache-read_access_latency", + params().l2cache_read_access_latency); + set_evs_param("l2cache-read_latency", params().l2cache_read_latency); + set_evs_param("l2cache-size", 
params().l2cache_size); + set_evs_param("l2cache-snoop_data_transfer_latency", + params().l2cache_snoop_data_transfer_latency); + set_evs_param("l2cache-snoop_issue_latency", + params().l2cache_snoop_issue_latency); + set_evs_param("l2cache-write_access_latency", + params().l2cache_write_access_latency); + set_evs_param("l2cache-write_latency", params().l2cache_write_latency); + set_evs_param("max_code_cache_mb", params().max_code_cache_mb); + set_evs_param("min_sync_level", params().min_sync_level); + set_evs_param("semihosting-A32_HLT", params().semihosting_A32_HLT); + set_evs_param("semihosting-A64_HLT", params().semihosting_A64_HLT); + set_evs_param("semihosting-ARM_SVC", params().semihosting_ARM_SVC); + set_evs_param("semihosting-T32_HLT", params().semihosting_T32_HLT); + set_evs_param("semihosting-Thumb_SVC", params().semihosting_Thumb_SVC); + set_evs_param("semihosting-cmd_line", params().semihosting_cmd_line); + set_evs_param("semihosting-cwd", params().semihosting_cwd); + set_evs_param("semihosting-enable", params().semihosting_enable); + set_evs_param("semihosting-heap_base", params().semihosting_heap_base); + set_evs_param("semihosting-heap_limit", params().semihosting_heap_limit); + set_evs_param("semihosting-stack_base", params().semihosting_stack_base); + set_evs_param("semihosting-stack_limit", params().semihosting_stack_limit); + set_evs_param("trace_special_hlt_imm16", params().trace_special_hlt_imm16); + set_evs_param("vfp-enable_at_reset", params().vfp_enable_at_reset); } -CortexA76x1::CortexA76x1(const sc_core::sc_module_name &mod_name, - const FastModelCortexA76x1Params &p) - : scx_evs_CortexA76x1(mod_name), - amba(scx_evs_CortexA76x1::amba, p.name + ".amba", -1), - redistributor(scx_evs_CortexA76x1::redistributor, - p.name + ".redistributor", -1), - cnthpirq("cnthpirq"), - cnthvirq("cnthvirq"), - cntpsirq("cntpsirq"), - cntvirq("cntvirq"), - commirq("commirq"), - ctidbgirq("ctidbgirq"), - pmuirq("pmuirq"), - vcpumntirq("vcpumntirq"), - cntpnsirq("cntpnsirq"), - clockChanged(Iris::ClockEventName.c_str()), - clockPeriod(Iris::PeriodAttributeName.c_str()), - gem5Cpu(Iris::Gem5CpuAttributeName.c_str()), - sendFunctional(Iris::SendFunctionalAttributeName.c_str()), - params(p) +Port & +CortexA76::getPort(const std::string &if_name, PortID idx) { - clockRateControl.bind(clock_rate_s); + if (if_name == "redistributor") + return cluster->getEvs()->gem5_getPort(if_name, num); + else + return ArmCPU::getPort(if_name, idx); +} + +CortexA76Cluster::CortexA76Cluster(Params &p) : + SimObject(&p), _params(p), cores(p.cores), evs(p.evs) +{ + for (int i = 0; i < p.cores.size(); i++) + p.cores[i]->setCluster(this, i); - set_parameter("core.BROADCASTATOMIC", params.BROADCASTATOMIC); - set_parameter("core.BROADCASTCACHEMAINT", params.BROADCASTCACHEMAINT); - set_parameter("core.BROADCASTOUTER", params.BROADCASTOUTER); - set_parameter("core.BROADCASTPERSIST", params.BROADCASTPERSIST); - set_parameter("core.CLUSTER_ID", params.CLUSTER_ID); - set_parameter("core.GICDISABLE", params.GICDISABLE); - set_parameter("core.cpi_div", params.cpi_div); - set_parameter("core.cpi_mul", params.cpi_mul); - set_parameter("core.dcache-hit_latency", params.dcache_hit_latency); - set_parameter("core.dcache-maintenance_latency", - params.dcache_maintenance_latency); - set_parameter("core.dcache-miss_latency", params.dcache_miss_latency); - set_parameter("core.dcache-prefetch_enabled", - params.dcache_prefetch_enabled); - set_parameter("core.dcache-read_access_latency", - params.dcache_read_access_latency); - 
set_parameter("core.dcache-read_latency", params.dcache_read_latency); - set_parameter("core.dcache-snoop_data_transfer_latency", - params.dcache_snoop_data_transfer_latency); - set_parameter("core.dcache-state_modelled", params.dcache_state_modelled); - set_parameter("core.dcache-write_access_latency", - params.dcache_write_access_latency); - set_parameter("core.dcache-write_latency", params.dcache_write_latency); - set_parameter("core.default_opmode", params.default_opmode); - set_parameter("core.diagnostics", params.diagnostics); - set_parameter("core.enable_simulation_performance_optimizations", - params.enable_simulation_performance_optimizations); - set_parameter("core.ext_abort_device_read_is_sync", - params.ext_abort_device_read_is_sync); - set_parameter("core.ext_abort_device_write_is_sync", - params.ext_abort_device_write_is_sync); - set_parameter("core.ext_abort_so_read_is_sync", - params.ext_abort_so_read_is_sync); - set_parameter("core.ext_abort_so_write_is_sync", - params.ext_abort_so_write_is_sync); - set_parameter("core.gicv3.cpuintf-mmap-access-level", - params.gicv3_cpuintf_mmap_access_level); - set_parameter("core.has_peripheral_port", params.has_peripheral_port); - set_parameter("core.has_statistical_profiling", - params.has_statistical_profiling); - set_parameter("core.icache-hit_latency", params.icache_hit_latency); - set_parameter("core.icache-maintenance_latency", - params.icache_maintenance_latency); - set_parameter("core.icache-miss_latency", params.icache_miss_latency); - set_parameter("core.icache-prefetch_enabled", - params.icache_prefetch_enabled); - set_parameter("core.icache-read_access_latency", - params.icache_read_access_latency); - set_parameter("core.icache-read_latency", params.icache_read_latency); - set_parameter("core.icache-state_modelled", params.icache_state_modelled); - set_parameter("core.l3cache-hit_latency", params.l3cache_hit_latency); - set_parameter("core.l3cache-maintenance_latency", - params.l3cache_maintenance_latency); - set_parameter("core.l3cache-miss_latency", params.l3cache_miss_latency); - set_parameter("core.l3cache-read_access_latency", - params.l3cache_read_access_latency); - set_parameter("core.l3cache-read_latency", params.l3cache_read_latency); - set_parameter("core.l3cache-size", params.l3cache_size); - set_parameter("core.l3cache-snoop_data_transfer_latency", - params.l3cache_snoop_data_transfer_latency); - set_parameter("core.l3cache-snoop_issue_latency", - params.l3cache_snoop_issue_latency); - set_parameter("core.l3cache-write_access_latency", - params.l3cache_write_access_latency); - set_parameter("core.l3cache-write_latency", params.l3cache_write_latency); - set_parameter("core.pchannel_treat_simreset_as_poreset", - params.pchannel_treat_simreset_as_poreset); - set_parameter("core.periph_address_end", params.periph_address_end); - set_parameter("core.periph_address_start", params.periph_address_start); - set_parameter("core.ptw_latency", params.ptw_latency); - set_parameter("core.tlb_latency", params.tlb_latency); - set_parameter("core.treat-dcache-cmos-to-pou-as-nop", - params.treat_dcache_cmos_to_pou_as_nop); - set_parameter("core.walk_cache_latency", params.walk_cache_latency); + sc_core::sc_attr_base *base; - set_parameter("core.cpu0.CFGEND", params.cpu0_CFGEND); - set_parameter("core.cpu0.CFGTE", params.cpu0_CFGTE); - set_parameter("core.cpu0.CRYPTODISABLE", params.cpu0_CRYPTODISABLE); - set_parameter("core.cpu0.RVBARADDR", params.cpu0_RVBARADDR); - set_parameter("core.cpu0.VINITHI", params.cpu0_VINITHI); - 
set_parameter("core.cpu0.enable_trace_special_hlt_imm16", - params.cpu0_enable_trace_special_hlt_imm16); - set_parameter("core.cpu0.l2cache-hit_latency", - params.cpu0_l2cache_hit_latency); - set_parameter("core.cpu0.l2cache-maintenance_latency", - params.cpu0_l2cache_maintenance_latency); - set_parameter("core.cpu0.l2cache-miss_latency", - params.cpu0_l2cache_miss_latency); - set_parameter("core.cpu0.l2cache-read_access_latency", - params.cpu0_l2cache_read_access_latency); - set_parameter("core.cpu0.l2cache-read_latency", - params.cpu0_l2cache_read_latency); - set_parameter("core.cpu0.l2cache-size", params.cpu0_l2cache_size); - set_parameter("core.cpu0.l2cache-snoop_data_transfer_latency", - params.cpu0_l2cache_snoop_data_transfer_latency); - set_parameter("core.cpu0.l2cache-snoop_issue_latency", - params.cpu0_l2cache_snoop_issue_latency); - set_parameter("core.cpu0.l2cache-write_access_latency", - params.cpu0_l2cache_write_access_latency); - set_parameter("core.cpu0.l2cache-write_latency", - params.cpu0_l2cache_write_latency); - set_parameter("core.cpu0.max_code_cache_mb", - params.cpu0_max_code_cache_mb); - set_parameter("core.cpu0.min_sync_level", params.cpu0_min_sync_level); - set_parameter("core.cpu0.semihosting-A32_HLT", - params.cpu0_semihosting_A32_HLT); - set_parameter("core.cpu0.semihosting-A64_HLT", - params.cpu0_semihosting_A64_HLT); - set_parameter("core.cpu0.semihosting-ARM_SVC", - params.cpu0_semihosting_ARM_SVC); - set_parameter("core.cpu0.semihosting-T32_HLT", - params.cpu0_semihosting_T32_HLT); - set_parameter("core.cpu0.semihosting-Thumb_SVC", - params.cpu0_semihosting_Thumb_SVC); - set_parameter("core.cpu0.semihosting-cmd_line", - params.cpu0_semihosting_cmd_line); - set_parameter("core.cpu0.semihosting-cwd", params.cpu0_semihosting_cwd); - set_parameter("core.cpu0.semihosting-enable", - params.cpu0_semihosting_enable); - set_parameter("core.cpu0.semihosting-heap_base", - params.cpu0_semihosting_heap_base); - set_parameter("core.cpu0.semihosting-heap_limit", - params.cpu0_semihosting_heap_limit); - set_parameter("core.cpu0.semihosting-stack_base", - params.cpu0_semihosting_stack_base); - set_parameter("core.cpu0.semihosting-stack_limit", - params.cpu0_semihosting_stack_limit); - set_parameter("core.cpu0.trace_special_hlt_imm16", - params.cpu0_trace_special_hlt_imm16); - set_parameter("core.cpu0.vfp-enable_at_reset", - params.cpu0_vfp_enable_at_reset); + base = evs->get_attribute(Iris::Gem5CpuClusterAttributeName); + auto *gem5_cluster_attr = + dynamic_cast<sc_core::sc_attribute<CortexA76Cluster *> *>(base); + panic_if(base && !gem5_cluster_attr, + "The EVS gem5 CPU cluster attribute was not of type " + "sc_attribute<FastModel::CortexA76Cluster *>."); + if (gem5_cluster_attr) + gem5_cluster_attr->value = this; - add_attribute(gem5Cpu); + set_evs_param("core.BROADCASTATOMIC", p.BROADCASTATOMIC); + set_evs_param("core.BROADCASTCACHEMAINT", p.BROADCASTCACHEMAINT); + set_evs_param("core.BROADCASTOUTER", p.BROADCASTOUTER); + set_evs_param("core.BROADCASTPERSIST", p.BROADCASTPERSIST); + set_evs_param("core.CLUSTER_ID", p.CLUSTER_ID); + set_evs_param("core.GICDISABLE", p.GICDISABLE); + set_evs_param("core.cpi_div", p.cpi_div); + set_evs_param("core.cpi_mul", p.cpi_mul); + set_evs_param("core.dcache-hit_latency", p.dcache_hit_latency); + set_evs_param("core.dcache-maintenance_latency", + p.dcache_maintenance_latency); + set_evs_param("core.dcache-miss_latency", p.dcache_miss_latency); + set_evs_param("core.dcache-prefetch_enabled", + p.dcache_prefetch_enabled); + 
set_evs_param("core.dcache-read_access_latency", + p.dcache_read_access_latency); + set_evs_param("core.dcache-read_latency", p.dcache_read_latency); + set_evs_param("core.dcache-snoop_data_transfer_latency", + p.dcache_snoop_data_transfer_latency); + set_evs_param("core.dcache-state_modelled", p.dcache_state_modelled); + set_evs_param("core.dcache-write_access_latency", + p.dcache_write_access_latency); + set_evs_param("core.dcache-write_latency", p.dcache_write_latency); + set_evs_param("core.default_opmode", p.default_opmode); + set_evs_param("core.diagnostics", p.diagnostics); + set_evs_param("core.enable_simulation_performance_optimizations", + p.enable_simulation_performance_optimizations); + set_evs_param("core.ext_abort_device_read_is_sync", + p.ext_abort_device_read_is_sync); + set_evs_param("core.ext_abort_device_write_is_sync", + p.ext_abort_device_write_is_sync); + set_evs_param("core.ext_abort_so_read_is_sync", + p.ext_abort_so_read_is_sync); + set_evs_param("core.ext_abort_so_write_is_sync", + p.ext_abort_so_write_is_sync); + set_evs_param("core.gicv3.cpuintf-mmap-access-level", + p.gicv3_cpuintf_mmap_access_level); + set_evs_param("core.has_peripheral_port", p.has_peripheral_port); + set_evs_param("core.has_statistical_profiling", + p.has_statistical_profiling); + set_evs_param("core.icache-hit_latency", p.icache_hit_latency); + set_evs_param("core.icache-maintenance_latency", + p.icache_maintenance_latency); + set_evs_param("core.icache-miss_latency", p.icache_miss_latency); + set_evs_param("core.icache-prefetch_enabled", + p.icache_prefetch_enabled); + set_evs_param("core.icache-read_access_latency", + p.icache_read_access_latency); + set_evs_param("core.icache-read_latency", p.icache_read_latency); + set_evs_param("core.icache-state_modelled", p.icache_state_modelled); + set_evs_param("core.l3cache-hit_latency", p.l3cache_hit_latency); + set_evs_param("core.l3cache-maintenance_latency", + p.l3cache_maintenance_latency); + set_evs_param("core.l3cache-miss_latency", p.l3cache_miss_latency); + set_evs_param("core.l3cache-read_access_latency", + p.l3cache_read_access_latency); + set_evs_param("core.l3cache-read_latency", p.l3cache_read_latency); + set_evs_param("core.l3cache-size", p.l3cache_size); + set_evs_param("core.l3cache-snoop_data_transfer_latency", + p.l3cache_snoop_data_transfer_latency); + set_evs_param("core.l3cache-snoop_issue_latency", + p.l3cache_snoop_issue_latency); + set_evs_param("core.l3cache-write_access_latency", + p.l3cache_write_access_latency); + set_evs_param("core.l3cache-write_latency", p.l3cache_write_latency); + set_evs_param("core.pchannel_treat_simreset_as_poreset", + p.pchannel_treat_simreset_as_poreset); + set_evs_param("core.periph_address_end", p.periph_address_end); + set_evs_param("core.periph_address_start", p.periph_address_start); + set_evs_param("core.ptw_latency", p.ptw_latency); + set_evs_param("core.tlb_latency", p.tlb_latency); + set_evs_param("core.treat-dcache-cmos-to-pou-as-nop", + p.treat_dcache_cmos_to_pou_as_nop); + set_evs_param("core.walk_cache_latency", p.walk_cache_latency); +} + +Port & +CortexA76Cluster::getPort(const std::string &if_name, PortID idx) +{ + if (if_name == "amba") { + return evs->gem5_getPort(if_name, idx); + } else { + return SimObject::getPort(if_name, idx); + } +} + +void +ScxEvsCortexA76x1::clockChangeHandler() +{ + clockRateControl->set_mul_div(SimClock::Int::s, clockPeriod.value); +} + +ScxEvsCortexA76x1::ScxEvsCortexA76x1(const sc_core::sc_module_name &mod_name, + const Params &p) : + 
scx_evs_CortexA76x1(mod_name), + amba(scx_evs_CortexA76x1::amba, p.name + ".amba", -1), + redist { + new TlmGicTarget(redistributor[0], + csprintf("%s.redistributor[%d]", name(), 0), 0) + }, + cnthpirq("cnthpirq"), cnthvirq("cnthvirq"), cntpsirq("cntpsirq"), + cntvirq("cntvirq"), commirq("commirq"), ctidbgirq("ctidbgirq"), + pmuirq("pmuirq"), vcpumntirq("vcpumntirq"), cntpnsirq("cntpnsirq"), + clockChanged(Iris::ClockEventName.c_str()), + clockPeriod(Iris::PeriodAttributeName.c_str()), + gem5CpuCluster(Iris::Gem5CpuClusterAttributeName.c_str()), + sendFunctional(Iris::SendFunctionalAttributeName.c_str()), + params(p) +{ + clockRateControl.bind(clock_rate_s); + + add_attribute(gem5CpuCluster); add_attribute(clockPeriod); SC_METHOD(clockChangeHandler); dont_initialize(); sensitive << clockChanged; + scx_evs_CortexA76x1::cnthpirq[0].bind(cnthpirq.signal_in); + scx_evs_CortexA76x1::cnthvirq[0].bind(cnthvirq.signal_in); + scx_evs_CortexA76x1::cntpsirq[0].bind(cntpsirq.signal_in); + scx_evs_CortexA76x1::cntvirq[0].bind(cntvirq.signal_in); + scx_evs_CortexA76x1::commirq[0].bind(commirq.signal_in); + scx_evs_CortexA76x1::ctidbgirq[0].bind(ctidbgirq.signal_in); + scx_evs_CortexA76x1::pmuirq[0].bind(pmuirq.signal_in); + scx_evs_CortexA76x1::vcpumntirq[0].bind(vcpumntirq.signal_in); + scx_evs_CortexA76x1::cntpnsirq[0].bind(cntpnsirq.signal_in); + sendFunctional.value = [this](PacketPtr pkt) { sendFunc(pkt); }; add_attribute(sendFunctional); - - scx_evs_CortexA76x1::cnthpirq.bind(cnthpirq.signal_in); - scx_evs_CortexA76x1::cnthvirq.bind(cnthvirq.signal_in); - scx_evs_CortexA76x1::cntpsirq.bind(cntpsirq.signal_in); - scx_evs_CortexA76x1::cntvirq.bind(cntvirq.signal_in); - scx_evs_CortexA76x1::commirq.bind(commirq.signal_in); - scx_evs_CortexA76x1::ctidbgirq.bind(ctidbgirq.signal_in); - scx_evs_CortexA76x1::pmuirq.bind(pmuirq.signal_in); - scx_evs_CortexA76x1::vcpumntirq.bind(vcpumntirq.signal_in); - scx_evs_CortexA76x1::cntpnsirq.bind(cntpnsirq.signal_in); } void -CortexA76x1::sendFunc(PacketPtr pkt) +ScxEvsCortexA76x1::sendFunc(PacketPtr pkt) { auto *trans = sc_gem5::packet2payload(pkt); panic_if(scx_evs_CortexA76x1::amba->transport_dbg(*trans) != @@ -231,16 +252,16 @@ CortexA76x1::sendFunc(PacketPtr pkt) } void -CortexA76x1::before_end_of_elaboration() +ScxEvsCortexA76x1::before_end_of_elaboration() { scx_evs_CortexA76x1::before_end_of_elaboration(); - auto *gem5_cpu = gem5Cpu.value; - auto set_on_change = [gem5_cpu](SignalReceiver &recv, - ArmInterruptPinGen *gen, - int ctx_num) + auto *cluster = gem5CpuCluster.value; + + auto set_on_change = [cluster]( + SignalReceiver &recv, ArmInterruptPinGen *gen, int num) { - auto *pin = gen->get(gem5_cpu->getContext(ctx_num)); + auto *pin = gen->get(cluster->getCore(num)->getContext(0)); auto handler = [pin](bool status) { status ? 
pin->raise() : pin->clear(); @@ -248,32 +269,44 @@ CortexA76x1::before_end_of_elaboration() recv.onChange(handler); }; - set_on_change(cnthpirq, params.cnthpirq, 0); - set_on_change(cnthvirq, params.cnthvirq, 0); - set_on_change(cntpsirq, params.cntpsirq, 0); - set_on_change(cntvirq, params.cntvirq, 0); - set_on_change(commirq, params.commirq, 0); - set_on_change(ctidbgirq, params.ctidbgirq, 0); - set_on_change(pmuirq, params.pmuirq, 0); - set_on_change(vcpumntirq, params.vcpumntirq, 0); - set_on_change(cntpnsirq, params.cntpnsirq, 0); + set_on_change(cnthpirq, cluster->params().cnthpirq, 0); + set_on_change(cnthvirq, cluster->params().cnthvirq, 0); + set_on_change(cntpsirq, cluster->params().cntpsirq, 0); + set_on_change(cntvirq, cluster->params().cntvirq, 0); + set_on_change(commirq, cluster->params().commirq, 0); + set_on_change(ctidbgirq, cluster->params().ctidbgirq, 0); + set_on_change(pmuirq, cluster->params().pmuirq, 0); + set_on_change(vcpumntirq, cluster->params().vcpumntirq, 0); + set_on_change(cntpnsirq, cluster->params().cntpnsirq, 0); } Port & -CortexA76x1::gem5_getPort(const std::string &if_name, int idx) +ScxEvsCortexA76x1::gem5_getPort(const std::string &if_name, int idx) { - if (if_name == "amba") + if (if_name == "redistributor") + return *redist.at(idx); + else if (if_name == "amba") return amba; - else if (if_name == "redistributor") - return redistributor; else return scx_evs_CortexA76x1::gem5_getPort(if_name, idx); } } // namespace FastModel -FastModel::CortexA76x1 * -FastModelCortexA76x1Params::create() +FastModel::CortexA76 * +FastModelCortexA76Params::create() +{ + return new FastModel::CortexA76(*this); +} + +FastModel::CortexA76Cluster * +FastModelCortexA76ClusterParams::create() +{ + return new FastModel::CortexA76Cluster(*this); +} + +FastModel::ScxEvsCortexA76x1 * +FastModelScxEvsCortexA76x1Params::create() { - return new FastModel::CortexA76x1(name.c_str(), *this); + return new FastModel::ScxEvsCortexA76x1(name.c_str(), *this); } diff --git a/src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.hh b/src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.hh index c4ea98317..096991ad8 100644 --- a/src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.hh +++ b/src/arch/arm/fastmodel/CortexA76x1/cortex_a76x1.hh @@ -31,10 +31,13 @@ #define __ARCH_ARM_FASTMODEL_CORTEXA76X1_CORETEX_A76X1_HH__ #include "arch/arm/fastmodel/amba_ports.hh" +#include "arch/arm/fastmodel/arm/cpu.hh" #include "arch/arm/fastmodel/common/signal_receiver.hh" #include "arch/arm/fastmodel/protocol/exported_clock_rate_control.hh" #include "mem/port_proxy.hh" -#include "params/FastModelCortexA76x1.hh" +#include "params/FastModelCortexA76.hh" +#include "params/FastModelCortexA76Cluster.hh" +#include "params/FastModelScxEvsCortexA76x1.hh" #include "scx_evs_CortexA76x1.h" #include "systemc/ext/core/sc_event.hh" #include "systemc/ext/core/sc_module.hh" @@ -49,10 +52,64 @@ namespace FastModel // ports and interface with its peer gem5 CPU. The gem5 CPU inherits from the // gem5 BaseCPU class and implements its API, while this class actually does // the work. 
-class CortexA76x1 : public scx_evs_CortexA76x1 +class CortexA76Cluster; + +class CortexA76 : public ArmCPU +{ + protected: + typedef FastModelCortexA76Params Params; + const Params &_params; + + CortexA76Cluster *cluster = nullptr; + int num = 0; + + const Params ¶ms() { return _params; } + + public: + CortexA76(Params &p) : ArmCPU(&p), _params(p) {} + + template <class T> + void set_evs_param(const std::string &n, T val); + + void setCluster(CortexA76Cluster *_cluster, int _num); + + Port &getPort(const std::string &if_name, + PortID idx=InvalidPortID) override; +}; + +class CortexA76Cluster : public SimObject { private: - SC_HAS_PROCESS(CortexA76x1); + typedef FastModelCortexA76ClusterParams Params; + const Params &_params; + + std::vector<CortexA76 *> cores; + sc_core::sc_module *evs; + + public: + template <class T> + void + set_evs_param(const std::string &n, T val) + { + scx::scx_set_parameter(evs->name() + std::string(".") + n, val); + } + + CortexA76 *getCore(int num) { return cores.at(num); } + sc_core::sc_module *getEvs() { return evs; } + + CortexA76Cluster(Params &p); + const Params ¶ms() { return _params; } + + Port &getPort(const std::string &if_name, + PortID idx=InvalidPortID) override; +}; + +class ScxEvsCortexA76x1 : public scx_evs_CortexA76x1 +{ + private: + SC_HAS_PROCESS(ScxEvsCortexA76x1); + + ClockRateControlInitiatorSocket clockRateControl; typedef sc_gem5::TlmTargetBaseWrapper< 64, svp_gicv3_comms::gicv3_comms_fw_if, @@ -60,9 +117,7 @@ class CortexA76x1 : public scx_evs_CortexA76x1 sc_core::SC_ONE_OR_MORE_BOUND> TlmGicTarget; AmbaInitiator amba; - TlmGicTarget redistributor; - - ClockRateControlInitiatorSocket clockRateControl; + std::vector<TlmGicTarget *> redist; SignalReceiver cnthpirq; SignalReceiver cnthvirq; @@ -76,22 +131,22 @@ class CortexA76x1 : public scx_evs_CortexA76x1 sc_core::sc_event clockChanged; sc_core::sc_attribute<Tick> clockPeriod; - sc_core::sc_attribute<::BaseCPU *> gem5Cpu; + sc_core::sc_attribute<CortexA76Cluster *> gem5CpuCluster; sc_core::sc_attribute<PortProxy::SendFunctionalFunc> sendFunctional; void sendFunc(PacketPtr pkt); void clockChangeHandler(); - const FastModelCortexA76x1Params ¶ms; + typedef FastModelScxEvsCortexA76x1Params Params; + const Params ¶ms; public: - CortexA76x1(const sc_core::sc_module_name &mod_name, - const FastModelCortexA76x1Params &p); - - Port &gem5_getPort(const std::string &if_name, int idx=-1) override; + ScxEvsCortexA76x1( + const sc_core::sc_module_name &mod_name, const Params &p); void before_end_of_elaboration() override; + Port &gem5_getPort(const std::string &if_name, int idx) override; void end_of_elaboration() override @@ -102,6 +157,14 @@ class CortexA76x1 : public scx_evs_CortexA76x1 void start_of_simulation() override {} }; +template <class T> +inline void +CortexA76::set_evs_param(const std::string &n, T val) +{ + for (auto &path: params().thread_paths) + cluster->set_evs_param(path + "." + n, val); +} + } // namespace FastModel #endif // __ARCH_ARM_FASTMODEL_CORTEXA76X1_CORETEX_A76X1_HH__ diff --git a/src/arch/arm/fastmodel/iris/Iris.py b/src/arch/arm/fastmodel/iris/Iris.py index 781a146ad..b6dbf8726 100644 --- a/src/arch/arm/fastmodel/iris/Iris.py +++ b/src/arch/arm/fastmodel/iris/Iris.py @@ -49,7 +49,7 @@ class IrisBaseCPU(BaseCPU): #TODO Make this work. 
return False - evs = Param.SystemC_ScModule(Parent.any, + evs = Param.SystemC_ScModule( "Fast model exported virtual subsystem holding cores") - core_paths = VectorParam.String( - "Sub-paths to elements in the EVS which are cores") + thread_paths = VectorParam.String( + "Sub-paths to elements in the EVS which support a thread context") diff --git a/src/arch/arm/fastmodel/iris/cpu.cc b/src/arch/arm/fastmodel/iris/cpu.cc index 63cd731dc..8284d1717 100644 --- a/src/arch/arm/fastmodel/iris/cpu.cc +++ b/src/arch/arm/fastmodel/iris/cpu.cc @@ -40,15 +40,6 @@ BaseCPU::BaseCPU(BaseCPUParams *params, sc_core::sc_module *_evs) : { sc_core::sc_attr_base *base; - base = evs->get_attribute(Gem5CpuAttributeName); - auto *gem5_cpu_attr = - dynamic_cast<sc_core::sc_attribute<::BaseCPU *> *>(base); - panic_if(base && !gem5_cpu_attr, - "The EVS gem5 CPU attribute was not of type " - "sc_attribute<::BaesCPU *>."); - if (gem5_cpu_attr) - gem5_cpu_attr->value = this; - const auto &event_vec = evs->get_child_events(); auto event_it = std::find_if(event_vec.begin(), event_vec.end(), [](const sc_core::sc_event *e) -> bool { diff --git a/src/arch/arm/fastmodel/iris/cpu.hh b/src/arch/arm/fastmodel/iris/cpu.hh index f7be5cb76..911743b74 100644 --- a/src/arch/arm/fastmodel/iris/cpu.hh +++ b/src/arch/arm/fastmodel/iris/cpu.hh @@ -48,7 +48,7 @@ static const std::string ClockEventName = "gem5_clock_period_event"; static const std::string PeriodAttributeName = "gem5_clock_period_attribute"; // The name of the attribute the subsystem should create which will be set to // a pointer to its corresponding gem5 CPU. -static const std::string Gem5CpuAttributeName = "gem5_cpu"; +static const std::string Gem5CpuClusterAttributeName = "gem5_cpu_cluster"; // The name of the attribute the subsystem should create to hold the // sendFunctional delegate for port proxies. static const std::string SendFunctionalAttributeName = "gem5_send_functional"; @@ -133,7 +133,7 @@ class CPU : public Iris::BaseCPU System *sys = params->system; int thread_id = 0; - for (const std::string &sub_path: params->core_paths) { + for (const std::string &sub_path: params->thread_paths) { std::string path = parent_path + "." + sub_path; auto *tc = new TC(this, thread_id++, sys, iris_if, path); threadContexts.push_back(tc); |
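The renamed thread_paths parameter and the new set_evs_param() helpers work together to address per-core Fast Model parameters by their full path inside the EVS: CortexA76::set_evs_param() prefixes each of the core's thread paths, and CortexA76Cluster::set_evs_param() prepends the EVS module name before calling scx::scx_set_parameter(). A small, self-contained Python model of just that name-joining scheme follows; the module and path names are made up, only the joining order comes from the patch.

```python
# Pure-Python model of the per-core parameter forwarding added by this patch:
# the core prefixes each entry of thread_paths, the cluster prepends the EVS
# module name, and the result is handed to scx::scx_set_parameter().
def evs_param_names(evs_name, thread_paths, param):
    return ['.'.join((evs_name, path, param)) for path in thread_paths]

print(evs_param_names('system.cpu.evs', ['core.cpu0'], 'semihosting-cmd_line'))
# ['system.cpu.evs.core.cpu0.semihosting-cmd_line']
```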