Diffstat (limited to 'src/python')
 src/python/SConscript                    |  21
 src/python/m5/__init__.py                | 131
 src/python/m5/attrdict.py                |  61
 src/python/m5/config.py                  | 542
 src/python/m5/main.py                    | 321
 src/python/m5/objects/BaseCPU.py         |   2
 src/python/m5/objects/BaseCache.py       |  12
 src/python/m5/objects/Bridge.py          |   2
 src/python/m5/objects/Bus.py             |   2
 src/python/m5/objects/Device.py          |   2
 src/python/m5/objects/FuncUnit.py        |  17
 src/python/m5/objects/O3CPU.py (renamed from src/python/m5/objects/AlphaFullCPU.py) |  17
 src/python/m5/objects/OzoneCPU.py        |   6
 src/python/m5/objects/Pci.py             |  10
 src/python/m5/objects/PhysicalMemory.py  |   1
 src/python/m5/objects/System.py          |   3
 16 files changed, 876 insertions(+), 274 deletions(-)
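The headline change in this diff is the port-based configuration flow: SimObject classes grow Port/VectorPort attributes (BaseCache, Bus, PhysicalMemory, the CPU models), port assignments are recorded per instance in _port_map, and m5.instantiate() now emits config.ini, creates the C++ objects, and wires the ports through cc_main. A minimal user script against this revision might look like the sketch below; it assumes an m5 build of this changeset, the parameter values are placeholders, and most required parameters of the real objects are omitted for brevity.

    import m5
    from m5.objects import *

    # build a tiny memory system using the ports added by this change
    system = System(mem_mode='atomic')                          # new MemoryMode parameter
    system.physmem = PhysicalMemory(range=AddrRange('128MB'))   # now exposes 'port'
    system.membus = Bus()                                       # 'port' is a VectorPort
    system.physmem.port = system.membus.port                    # assignment binds the peers

    root = Root(system=system)    # Root's default clock is assumed here
    m5.instantiate(root)          # write config.ini, create C++ objects, connect ports
    m5.simulate()
    print 'simulation stopped @ tick', m5.curTick()
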
diff --git a/src/python/SConscript b/src/python/SConscript index 7b0f591eb..c9e713199 100644 --- a/src/python/SConscript +++ b/src/python/SConscript @@ -75,24 +75,35 @@ def addPkg(pkgdir): # build_env flags. def MakeDefinesPyFile(target, source, env): f = file(str(target[0]), 'w') - print >>f, "m5_build_env = ", - print >>f, source[0] + print >>f, "m5_build_env = ", source[0] f.close() optionDict = dict([(opt, env[opt]) for opt in env.ExportOptions]) env.Command('m5/defines.py', Value(optionDict), MakeDefinesPyFile) +def MakeInfoPyFile(target, source, env): + f = file(str(target[0]), 'w') + for src in source: + data = ''.join(file(src.srcnode().abspath, 'r').xreadlines()) + print >>f, "%s = %s" % (src, repr(data)) + f.close() + +env.Command('m5/info.py', + [ '#/AUTHORS', '#/LICENSE', '#/README', '#/RELEASE_NOTES' ], + MakeInfoPyFile) + # Now specify the packages & files for the zip archive. addPkg('m5') pyzip_files.append('m5/defines.py') +pyzip_files.append('m5/info.py') pyzip_files.append(join(env['ROOT'], 'util/pbs/jobfile.py')) -env.Command(['swig/main_wrap.cc', 'm5/main.py'], - 'swig/main.i', +env.Command(['swig/cc_main_wrap.cc', 'm5/cc_main.py'], + 'swig/cc_main.i', '$SWIG $SWIGFLAGS -outdir ${TARGETS[1].dir} ' '-o ${TARGETS[0]} $SOURCES') -pyzip_dep_files.append('m5/main.py') +pyzip_dep_files.append('m5/cc_main.py') # Action function to build the zip archive. Uses the PyZipFile module # included in the standard Python library. diff --git a/src/python/m5/__init__.py b/src/python/m5/__init__.py index 60a61d66e..3d0e3defa 100644 --- a/src/python/m5/__init__.py +++ b/src/python/m5/__init__.py @@ -27,14 +27,14 @@ # Authors: Nathan Binkert # Steve Reinhardt -import sys, os, time, atexit, optparse +import atexit, os, sys # import the SWIG-wrapped main C++ functions -import main +import cc_main # import a few SWIG-wrapped items (those that are likely to be used # directly by user scripts) completely into this module for # convenience -from main import simulate, SimLoopExitEvent +from cc_main import simulate, SimLoopExitEvent # import the m5 compile options import defines @@ -57,20 +57,6 @@ def AddToPath(path): # so place the new dir right after that. sys.path.insert(1, path) - -# Callback to set trace flags. Not necessarily the best way to do -# things in the long run (particularly if we change how these global -# options are handled). -def setTraceFlags(option, opt_str, value, parser): - objects.Trace.flags = value - -# Standard optparse options. Need to be explicitly included by the -# user script when it calls optparse.OptionParser(). -standardOptions = [ - optparse.make_option("--traceflags", type="string", action="callback", - callback=setTraceFlags) - ] - # make a SmartDict out of the build options for our local use import smartdict build_env = smartdict.SmartDict() @@ -80,16 +66,26 @@ build_env.update(defines.m5_build_env) env = smartdict.SmartDict() env.update(os.environ) +# Function to provide to C++ so it can look up instances based on paths +def resolveSimObject(name): + obj = config.instanceDict[name] + return obj.getCCObject() + +from main import options, arguments, main + # The final hook to generate .ini files. Called from the user script # once the config is built. 
def instantiate(root): config.ticks_per_sec = float(root.clock.frequency) # ugly temporary hack to get output to config.ini - sys.stdout = file('config.ini', 'w') + sys.stdout = file(os.path.join(options.outdir, 'config.ini'), 'w') root.print_ini() sys.stdout.close() # close config.ini sys.stdout = sys.__stdout__ # restore to original - main.initialize() # load config.ini into C++ and process it + cc_main.loadIniFile(resolveSimObject) # load config.ini into C++ + root.createCCObject() + root.connectPorts() + cc_main.finalInit() noDot = True # temporary until we fix dot if not noDot: dot = pydot.Dot() @@ -103,12 +99,105 @@ def instantiate(root): # Export curTick to user script. def curTick(): - return main.cvar.curTick + return cc_main.cvar.curTick # register our C++ exit callback function with Python -atexit.register(main.doExitCleanup) +atexit.register(cc_main.doExitCleanup) # This import allows user scripts to reference 'm5.objects.Foo' after # just doing an 'import m5' (without an 'import m5.objects'). May not # matter since most scripts will probably 'from m5.objects import *'. import objects + +# This loops until all objects have been fully drained. +def doDrain(root): + all_drained = drain(root) + while (not all_drained): + all_drained = drain(root) + +# Tries to drain all objects. Draining might not be completed unless +# all objects return that they are drained on the first call. This is +# because as objects drain they may cause other objects to no longer +# be drained. +def drain(root): + all_drained = False + drain_event = cc_main.createCountedDrain() + unready_objects = root.startDrain(drain_event, True) + # If we've got some objects that can't drain immediately, then simulate + if unready_objects > 0: + drain_event.setCount(unready_objects) + simulate() + else: + all_drained = True + cc_main.cleanupCountedDrain(drain_event) + return all_drained + +def resume(root): + root.resume() + +def checkpoint(root, dir): + if not isinstance(root, objects.Root): + raise TypeError, "Object is not a root object. Checkpoint must be called on a root object." + doDrain(root) + print "Writing checkpoint" + cc_main.serializeAll(dir) + resume(root) + +def restoreCheckpoint(root, dir): + print "Restoring from checkpoint" + cc_main.unserializeAll(dir) + resume(root) + +def changeToAtomic(system): + if not isinstance(system, objects.Root) and not isinstance(system, System): + raise TypeError, "Object is not a root or system object. Checkpoint must be " + "called on a root object." + doDrain(system) + print "Changing memory mode to atomic" + system.changeTiming(cc_main.SimObject.Atomic) + resume(system) + +def changeToTiming(system): + if not isinstance(system, objects.Root) and not isinstance(system, System): + raise TypeError, "Object is not a root or system object. Checkpoint must be " + "called on a root object." 
+ doDrain(system) + print "Changing memory mode to timing" + system.changeTiming(cc_main.SimObject.Timing) + resume(system) + +def switchCpus(cpuList): + if not isinstance(cpuList, list): + raise RuntimeError, "Must pass a list to this function" + for i in cpuList: + if not isinstance(i, tuple): + raise RuntimeError, "List must have tuples of (oldCPU,newCPU)" + + [old_cpus, new_cpus] = zip(*cpuList) + + for cpu in old_cpus: + if not isinstance(cpu, objects.BaseCPU): + raise TypeError, "%s is not of type BaseCPU", cpu + for cpu in new_cpus: + if not isinstance(cpu, objects.BaseCPU): + raise TypeError, "%s is not of type BaseCPU", cpu + + # Drain all of the individual CPUs + drain_event = cc_main.createCountedDrain() + unready_cpus = 0 + for old_cpu in old_cpus: + unready_cpus += old_cpu.startDrain(drain_event, False) + # If we've got some objects that can't drain immediately, then simulate + if unready_cpus > 0: + drain_event.setCount(unready_cpus) + simulate() + cc_main.cleanupCountedDrain(drain_event) + # Now all of the CPUs are ready to be switched out + for old_cpu in old_cpus: + old_cpu._ccObject.switchOut() + index = 0 + print "Switching CPUs" + for new_cpu in new_cpus: + new_cpu.takeOverFrom(old_cpus[index]) + new_cpu._ccObject.resume() + index += 1 diff --git a/src/python/m5/attrdict.py b/src/python/m5/attrdict.py new file mode 100644 index 000000000..4ee7f1b8c --- /dev/null +++ b/src/python/m5/attrdict.py @@ -0,0 +1,61 @@ +# Copyright (c) 2006 The Regents of The University of Michigan +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# Authors: Nathan Binkert + +__all__ = [ 'attrdict' ] + +class attrdict(dict): + def __getattr__(self, attr): + if attr in self: + return self.__getitem__(attr) + return super(attrdict, self).__getattribute__(attr) + + def __setattr__(self, attr, value): + if attr in dir(self): + return super(attrdict, self).__setattr__(attr, value) + return self.__setitem__(attr, value) + + def __delattr__(self, attr): + if attr in self: + return self.__delitem__(attr) + return super(attrdict, self).__delattr__(attr, value) + +if __name__ == '__main__': + x = attrdict() + x.y = 1 + x['z'] = 2 + print x['y'], x.y + print x['z'], x.z + print dir(x) + print x + + print + + del x['y'] + del x.z + print dir(x) + print(x) diff --git a/src/python/m5/config.py b/src/python/m5/config.py index 97e13c900..8eed28dcc 100644 --- a/src/python/m5/config.py +++ b/src/python/m5/config.py @@ -1,4 +1,4 @@ -# Copyright (c) 2004-2005 The Regents of The University of Michigan +# Copyright (c) 2004-2006 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,10 +27,10 @@ # Authors: Steve Reinhardt # Nathan Binkert -import os, re, sys, types, inspect +import os, re, sys, types, inspect, copy import m5 -from m5 import panic +from m5 import panic, cc_main from convert import * from multidict import multidict @@ -84,65 +84,22 @@ class Singleton(type): # # Once a set of Python objects have been instantiated in a hierarchy, # calling 'instantiate(obj)' (where obj is the root of the hierarchy) -# will generate a .ini file. See simple-4cpu.py for an example -# (corresponding to m5-test/simple-4cpu.ini). +# will generate a .ini file. # ##################################################################### -##################################################################### -# -# ConfigNode/SimObject classes -# -# The Python class hierarchy rooted by ConfigNode (which is the base -# class of SimObject, which in turn is the base class of all other M5 -# SimObject classes) has special attribute behavior. In general, an -# object in this hierarchy has three categories of attribute-like -# things: -# -# 1. Regular Python methods and variables. These must start with an -# underscore to be treated normally. -# -# 2. SimObject parameters. These values are stored as normal Python -# attributes, but all assignments to these attributes are checked -# against the pre-defined set of parameters stored in the class's -# _params dictionary. Assignments to attributes that do not -# correspond to predefined parameters, or that are not of the correct -# type, incur runtime errors. +# dict to look up SimObjects based on path +instanceDict = {} + +############################# # -# 3. Hierarchy children. The child nodes of a ConfigNode are stored -# in the node's _children dictionary, but can be accessed using the -# Python attribute dot-notation (just as they are printed out by the -# simulator). Children cannot be created using attribute assigment; -# they must be added by specifying the parent node in the child's -# constructor or using the '+=' operator. - -# The SimObject parameters are the most complex, for a few reasons. -# First, both parameter descriptions and parameter values are -# inherited. Thus parameter description lookup must go up the -# inheritance chain like normal attribute lookup, but this behavior -# must be explicitly coded since the lookup occurs in each class's -# _params attribute. 
Second, because parameter values can be set -# on SimObject classes (to implement default values), the parameter -# checking behavior must be enforced on class attribute assignments as -# well as instance attribute assignments. Finally, because we allow -# class specialization via inheritance (e.g., see the L1Cache class in -# the simple-4cpu.py example), we must do parameter checking even on -# class instantiation. To provide all these features, we use a -# metaclass to define most of the SimObject parameter behavior for -# this class hierarchy. +# Utility methods # -##################################################################### +############################# def isSimObject(value): return isinstance(value, SimObject) -def isSimObjectClass(value): - try: - return issubclass(value, SimObject) - except TypeError: - # happens if value is not a class at all - return False - def isSimObjectSequence(value): if not isinstance(value, (list, tuple)) or len(value) == 0: return False @@ -153,22 +110,9 @@ def isSimObjectSequence(value): return True -def isSimObjectClassSequence(value): - if not isinstance(value, (list, tuple)) or len(value) == 0: - return False - - for val in value: - if not isNullPointer(val) and not isSimObjectClass(val): - return False - - return True - def isSimObjectOrSequence(value): return isSimObject(value) or isSimObjectSequence(value) -def isSimObjectClassOrSequence(value): - return isSimObjectClass(value) or isSimObjectClassSequence(value) - def isNullPointer(value): return isinstance(value, NullSimObject) @@ -188,40 +132,36 @@ def applyOrMap(objOrSeq, meth, *args, **kwargs): return [applyMethod(o, meth, *args, **kwargs) for o in objOrSeq] -# The metaclass for ConfigNode (and thus for everything that derives -# from ConfigNode, including SimObject). This class controls how new -# classes that derive from ConfigNode are instantiated, and provides -# inherited class behavior (just like a class controls how instances -# of that class are instantiated, and provides inherited instance -# behavior). +# The metaclass for SimObject. This class controls how new classes +# that derive from SimObject are instantiated, and provides inherited +# class behavior (just like a class controls how instances of that +# class are instantiated, and provides inherited instance behavior). class MetaSimObject(type): # Attributes that can be set only at initialization time init_keywords = { 'abstract' : types.BooleanType, 'type' : types.StringType } # Attributes that can be set any time - keywords = { 'check' : types.FunctionType, - 'children' : types.ListType } + keywords = { 'check' : types.FunctionType } # __new__ is called before __init__, and is where the statements # in the body of the class definition get loaded into the class's - # __dict__. We intercept this to filter out parameter assignments + # __dict__. We intercept this to filter out parameter & port assignments # and only allow "private" attributes to be passed to the base # __new__ (starting with underscore). def __new__(mcls, name, bases, dict): - if dict.has_key('_init_dict'): - # must have been called from makeSubclass() rather than - # via Python class declaration; bypass filtering process. - cls_dict = dict - else: - # Copy "private" attributes (including special methods - # such as __new__) to the official dict. Everything else - # goes in _init_dict to be filtered in __init__. 
- cls_dict = {} - for key,val in dict.items(): - if key.startswith('_'): - cls_dict[key] = val - del dict[key] - cls_dict['_init_dict'] = dict + # Copy "private" attributes, functions, and classes to the + # official dict. Everything else goes in _init_dict to be + # filtered in __init__. + cls_dict = {} + value_dict = {} + for key,val in dict.items(): + if key.startswith('_') or isinstance(val, (types.FunctionType, + types.TypeType)): + cls_dict[key] = val + else: + # must be a param/port setting + value_dict[key] = val + cls_dict['_value_dict'] = value_dict return super(MetaSimObject, mcls).__new__(mcls, name, bases, cls_dict) # subclass initialization @@ -231,10 +171,15 @@ class MetaSimObject(type): super(MetaSimObject, cls).__init__(name, bases, dict) # initialize required attributes - cls._params = multidict() - cls._values = multidict() - cls._instantiated = False # really instantiated or subclassed - cls._anon_subclass_counter = 0 + + # class-only attributes + cls._params = multidict() # param descriptions + cls._ports = multidict() # port descriptions + + # class or instance attributes + cls._values = multidict() # param values + cls._port_map = multidict() # port bindings + cls._instantiated = False # really instantiated, cloned, or subclassed # We don't support multiple inheritance. If you want to, you # must fix multidict to deal with it properly. @@ -243,22 +188,34 @@ class MetaSimObject(type): base = bases[0] - # the only time the following is not true is when we define - # the SimObject class itself + # Set up general inheritance via multidicts. A subclass will + # inherit all its settings from the base class. The only time + # the following is not true is when we define the SimObject + # class itself (in which case the multidicts have no parent). if isinstance(base, MetaSimObject): cls._params.parent = base._params + cls._ports.parent = base._ports cls._values.parent = base._values + cls._port_map.parent = base._port_map + # mark base as having been subclassed base._instantiated = True - # now process the _init_dict items - for key,val in cls._init_dict.items(): - if isinstance(val, (types.FunctionType, types.TypeType)): - type.__setattr__(cls, key, val) - + # Now process the _value_dict items. They could be defining + # new (or overriding existing) parameters or ports, setting + # class keywords (e.g., 'abstract'), or setting parameter + # values or port bindings. The first 3 can only be set when + # the class is defined, so we handle them here. The others + # can be set later too, so just emulate that by calling + # setattr(). + for key,val in cls._value_dict.items(): # param descriptions - elif isinstance(val, ParamDesc): + if isinstance(val, ParamDesc): cls._new_param(key, val) + # port objects + elif isinstance(val, Port): + cls._ports[key] = val + # init-time-only keywords elif cls.init_keywords.has_key(key): cls._set_keyword(key, val, cls.init_keywords[key]) @@ -267,27 +224,6 @@ class MetaSimObject(type): else: setattr(cls, key, val) - # Pull the deep-copy memoization dict out of the class dict if - # it's there... - memo = cls.__dict__.get('_memo', {}) - - # Handle SimObject values - for key,val in cls._values.iteritems(): - # SimObject instances need to be promoted to classes. - # Existing classes should not have any instance values, so - # these can only occur at the lowest level dict (the - # parameters just being set in this class definition). 
- if isSimObjectOrSequence(val): - assert(val == cls._values.local[key]) - cls._values[key] = applyOrMap(val, 'makeClass', memo) - # SimObject classes need to be subclassed so that - # parameters that get set at this level only affect this - # level and derivatives. - elif isSimObjectClassOrSequence(val): - assert(not cls._values.local.has_key(key)) - cls._values[key] = applyOrMap(val, 'makeSubclass', {}, memo) - - def _set_keyword(cls, keyword, val, kwtype): if not isinstance(val, kwtype): raise TypeError, 'keyword %s has bad type %s (expecting %s)' % \ @@ -313,15 +249,19 @@ class MetaSimObject(type): cls._set_keyword(attr, value, cls.keywords[attr]) return - # must be SimObject param - param = cls._params.get(attr, None) - if param: - # It's ok: set attribute by delegating to 'object' class. - if isSimObjectOrSequence(value) and cls._instantiated: - raise AttributeError, \ - "Cannot set SimObject parameter '%s' after\n" \ + if cls._ports.has_key(attr): + self._ports[attr].connect(self, attr, value) + return + + if isSimObjectOrSequence(value) and cls._instantiated: + raise RuntimeError, \ + "cannot set SimObject parameter '%s' after\n" \ " class %s has been instantiated or subclassed" \ % (attr, cls.__name__) + + # check for param + param = cls._params.get(attr, None) + if param: try: cls._values[attr] = param.convert(value) except Exception, e: @@ -329,12 +269,12 @@ class MetaSimObject(type): (e, cls.__name__, attr, value) e.args = (msg, ) raise - # I would love to get rid of this elif isSimObjectOrSequence(value): - cls._values[attr] = value + # if RHS is a SimObject, it's an implicit child assignment + cls._values[attr] = value else: raise AttributeError, \ - "Class %s has no parameter %s" % (cls.__name__, attr) + "Class %s has no parameter \'%s\'" % (cls.__name__, attr) def __getattr__(cls, attr): if cls._values.has_key(attr): @@ -343,23 +283,7 @@ class MetaSimObject(type): raise AttributeError, \ "object '%s' has no attribute '%s'" % (cls.__name__, attr) - # Create a subclass of this class. Basically a function interface - # to the standard Python class definition mechanism, primarily for - # internal use. 'memo' dict param supports "deep copy" (really - # "deep subclass") operations... within a given operation, - # multiple references to a class should result in a single - # subclass object with multiple references to it (as opposed to - # mutiple unique subclasses). - def makeSubclass(cls, init_dict, memo = {}): - subcls = memo.get(cls) - if not subcls: - name = cls.__name__ + '_' + str(cls._anon_subclass_counter) - cls._anon_subclass_counter += 1 - subcls = MetaSimObject(name, (cls,), - { '_init_dict': init_dict, '_memo': memo }) - return subcls - -# The ConfigNode class is the root of the special hierarchy. Most of +# The SimObject class is the root of the special hierarchy. Most of # the code in this class deals with the configuration hierarchy itself # (parent/child node relationships). class SimObject(object): @@ -367,82 +291,79 @@ class SimObject(object): # get this metaclass. __metaclass__ = MetaSimObject - # __new__ operator allocates new instances of the class. We - # override it here just to support "deep instantiation" operation - # via the _memo dict. When recursively instantiating an object - # hierarchy we want to make sure that each class is instantiated - # only once, and that if there are multiple references to the same - # original class, we end up with the corresponding instantiated - # references all pointing to the same instance. 
- def __new__(cls, _memo = None, **kwargs): - if _memo is not None and _memo.has_key(cls): - # return previously instantiated object - assert(len(kwargs) == 0) - return _memo[cls] - else: - # Need a new one... if it needs to be memoized, this will - # happen in __init__. We defer the insertion until then - # so __init__ can use the memo dict to tell whether or not - # to perform the initialization. - return super(SimObject, cls).__new__(cls, **kwargs) - - # Initialize new instance previously allocated by __new__. For - # objects with SimObject-valued params, we need to recursively - # instantiate the classes represented by those param values as - # well (in a consistent "deep copy"-style fashion; see comment - # above). - def __init__(self, _memo = None, **kwargs): - if _memo is not None: - # We're inside a "deep instantiation" - assert(isinstance(_memo, dict)) - assert(len(kwargs) == 0) - if _memo.has_key(self.__class__): - # __new__ returned an existing, already initialized - # instance, so there's nothing to do here - assert(_memo[self.__class__] == self) - return - # no pre-existing object, so remember this one here - _memo[self.__class__] = self - else: - # This is a new top-level instantiation... don't memoize - # this objcet, but prepare to memoize any recursively - # instantiated objects. - _memo = {} - - self.__class__._instantiated = True + # Initialize new instance. For objects with SimObject-valued + # children, we need to recursively clone the classes represented + # by those param values as well in a consistent "deep copy"-style + # fashion. That is, we want to make sure that each instance is + # cloned only once, and that if there are multiple references to + # the same original object, we end up with the corresponding + # cloned references all pointing to the same cloned instance. + def __init__(self, **kwargs): + ancestor = kwargs.get('_ancestor') + memo_dict = kwargs.get('_memo') + if memo_dict is None: + # prepare to memoize any recursively instantiated objects + memo_dict = {} + elif ancestor: + # memoize me now to avoid problems with recursive calls + memo_dict[ancestor] = self + + if not ancestor: + ancestor = self.__class__ + ancestor._instantiated = True + # initialize required attributes + self._parent = None self._children = {} + self._ccObject = None # pointer to C++ object + self._instantiated = False # really "cloned" + # Inherit parameter values from class using multidict so # individual value settings can be overridden. - self._values = multidict(self.__class__._values) - # For SimObject-valued parameters, the class should have - # classes (not instances) for the values. We need to - # instantiate these classes rather than just inheriting the - # class object. - for key,val in self.__class__._values.iteritems(): - if isSimObjectClass(val): - setattr(self, key, val(_memo)) - elif isSimObjectClassSequence(val) and len(val): - setattr(self, key, [ v(_memo) for v in val ]) + self._values = multidict(ancestor._values) + # clone SimObject-valued parameters + for key,val in ancestor._values.iteritems(): + if isSimObject(val): + setattr(self, key, val(_memo=memo_dict)) + elif isSimObjectSequence(val) and len(val): + setattr(self, key, [ v(_memo=memo_dict) for v in val ]) + # clone port references. no need to use a multidict here + # since we will be creating new references for all ports. 
+ self._port_map = {} + for key,val in ancestor._port_map.iteritems(): + self._port_map[key] = applyOrMap(val, 'clone', memo_dict) # apply attribute assignments from keyword args, if any for key,val in kwargs.iteritems(): setattr(self, key, val) - # Use this instance as a template to create a new class. - def makeClass(self, memo = {}): - cls = memo.get(self) - if not cls: - cls = self.__class__.makeSubclass(self._values.local) - memo[self] = cls - return cls - - # Direct instantiation of instances (cloning) is no longer - # allowed; must generate class from instance first. + # "Clone" the current instance by creating another instance of + # this instance's class, but that inherits its parameter values + # and port mappings from the current instance. If we're in a + # "deep copy" recursive clone, check the _memo dict to see if + # we've already cloned this instance. def __call__(self, **kwargs): - raise TypeError, "cannot instantiate SimObject; "\ - "use makeClass() to make class first" + memo_dict = kwargs.get('_memo') + if memo_dict is None: + # no memo_dict: must be top-level clone operation. + # this is only allowed at the root of a hierarchy + if self._parent: + raise RuntimeError, "attempt to clone object %s " \ + "not at the root of a tree (parent = %s)" \ + % (self, self._parent) + # create a new dict and use that. + memo_dict = {} + kwargs['_memo'] = memo_dict + elif memo_dict.has_key(self): + # clone already done & memoized + return memo_dict[self] + return self.__class__(_ancestor = self, **kwargs) def __getattr__(self, attr): + if self._ports.has_key(attr): + # return reference that can be assigned to another port + # via __setattr__ + return self._ports[attr].makeRef(self, attr) + if self._values.has_key(attr): return self._values[attr] @@ -457,10 +378,19 @@ class SimObject(object): object.__setattr__(self, attr, value) return + if self._ports.has_key(attr): + # set up port connection + self._ports[attr].connect(self, attr, value) + return + + if isSimObjectOrSequence(value) and self._instantiated: + raise RuntimeError, \ + "cannot set SimObject parameter '%s' after\n" \ + " instance been cloned %s" % (attr, `self`) + # must be SimObject param param = self._params.get(attr, None) if param: - # It's ok: set attribute by delegating to 'object' class. 
try: value = param.convert(value) except Exception, e: @@ -468,7 +398,6 @@ class SimObject(object): (e, self.__class__.__name__, attr, value) e.args = (msg, ) raise - # I would love to get rid of this elif isSimObjectOrSequence(value): pass else: @@ -507,13 +436,13 @@ class SimObject(object): self._children[name] = value def set_path(self, parent, name): - if not hasattr(self, '_parent'): + if not self._parent: self._parent = parent self._name = name parent.add_child(name, self) def path(self): - if not hasattr(self, '_parent'): + if not self._parent: return 'root' ppath = self._parent.path() if ppath == 'root': @@ -554,6 +483,8 @@ class SimObject(object): def print_ini(self): print '[' + self.path() + ']' # .ini section header + instanceDict[self.path()] = self + if hasattr(self, 'type') and not isinstance(self, ParamContext): print 'type=%s' % self.type @@ -585,6 +516,59 @@ class SimObject(object): for child in child_names: self._children[child].print_ini() + # Call C++ to create C++ object corresponding to this object and + # (recursively) all its children + def createCCObject(self): + self.getCCObject() # force creation + for child in self._children.itervalues(): + child.createCCObject() + + # Get C++ object corresponding to this object, calling C++ if + # necessary to construct it. Does *not* recursively create + # children. + def getCCObject(self): + if not self._ccObject: + self._ccObject = -1 # flag to catch cycles in recursion + self._ccObject = cc_main.createSimObject(self.path()) + elif self._ccObject == -1: + raise RuntimeError, "%s: recursive call to getCCObject()" \ + % self.path() + return self._ccObject + + # Create C++ port connections corresponding to the connections in + # _port_map (& recursively for all children) + def connectPorts(self): + for portRef in self._port_map.itervalues(): + applyOrMap(portRef, 'ccConnect') + for child in self._children.itervalues(): + child.connectPorts() + + def startDrain(self, drain_event, recursive): + count = 0 + # ParamContexts don't serialize + if isinstance(self, SimObject) and not isinstance(self, ParamContext): + count += self._ccObject.drain(drain_event) + if recursive: + for child in self._children.itervalues(): + count += child.startDrain(drain_event, True) + return count + + def resume(self): + if isinstance(self, SimObject) and not isinstance(self, ParamContext): + self._ccObject.resume() + for child in self._children.itervalues(): + child.resume() + + def changeTiming(self, mode): + if isinstance(self, System): + self._ccObject.setMemoryMode(mode) + for child in self._children.itervalues(): + child.changeTiming(mode) + + def takeOverFrom(self, old_cpu): + cpu_ptr = cc_main.convertToBaseCPUPtr(old_cpu._ccObject) + self._ccObject.takeOverFrom(cpu_ptr) + # generate output file for 'dot' to display as a pretty graph. # this code is currently broken. def outputDot(self, dot): @@ -675,9 +659,9 @@ class BaseProxy(object): if self._search_up: while not done: - try: obj = obj._parent - except: break - + obj = obj._parent + if not obj: + break result, done = self.find(obj) if not done: @@ -793,16 +777,16 @@ Self = ProxyFactory(search_self = True, search_up = False) # # Parameter description classes # -# The _params dictionary in each class maps parameter names to -# either a Param or a VectorParam object. These objects contain the +# The _params dictionary in each class maps parameter names to either +# a Param or a VectorParam object. 
These objects contain the # parameter description string, the parameter type, and the default -# value (loaded from the PARAM section of the .odesc files). The -# _convert() method on these objects is used to force whatever value -# is assigned to the parameter to the appropriate type. +# value (if any). The convert() method on these objects is used to +# force whatever value is assigned to the parameter to the appropriate +# type. # # Note that the default values are loaded into the class's attribute # space when the parameter dictionary is initialized (in -# MetaConfigNode._setparams()); after that point they aren't used. +# MetaSimObject._new_param()); after that point they aren't used. # ##################################################################### @@ -1419,6 +1403,107 @@ MaxAddr = Addr.max MaxTick = Tick.max AllMemory = AddrRange(0, MaxAddr) + +##################################################################### +# +# Port objects +# +# Ports are used to interconnect objects in the memory system. +# +##################################################################### + +# Port reference: encapsulates a reference to a particular port on a +# particular SimObject. +class PortRef(object): + def __init__(self, simobj, name, isVec): + assert(isSimObject(simobj)) + self.simobj = simobj + self.name = name + self.index = -1 + self.isVec = isVec # is this a vector port? + self.peer = None # not associated with another port yet + self.ccConnected = False # C++ port connection done? + + # Set peer port reference. Called via __setattr__ as a result of + # a port assignment, e.g., "obj1.port1 = obj2.port2". + def setPeer(self, other): + if self.isVec: + curMap = self.simobj._port_map.get(self.name, []) + self.index = len(curMap) + curMap.append(other) + else: + curMap = self.simobj._port_map.get(self.name) + if curMap and not self.isVec: + print "warning: overwriting port", self.simobj, self.name + curMap = other + self.simobj._port_map[self.name] = curMap + self.peer = other + + def clone(self, memo): + newRef = copy.copy(self) + assert(isSimObject(newRef.simobj)) + newRef.simobj = newRef.simobj(_memo=memo) + # Tricky: if I'm the *second* PortRef in the pair to be + # cloned, then my peer is still in the middle of its clone + # method, and thus hasn't returned to its owner's + # SimObject.__init__ to get installed in _port_map. As a + # result I have no way of finding the *new* peer object. So I + # mark myself as "waiting" for my peer, and I let the *first* + # PortRef clone call set up both peer pointers after I return. + newPeer = newRef.simobj._port_map.get(self.name) + if newPeer: + if self.isVec: + assert(self.index != -1) + newPeer = newPeer[self.index] + # other guy is all set up except for his peer pointer + assert(newPeer.peer == -1) # peer must be waiting for handshake + newPeer.peer = newRef + newRef.peer = newPeer + else: + # other guy is in clone; just wait for him to do the work + newRef.peer = -1 # mark as waiting for handshake + return newRef + + # Call C++ to create corresponding port connection between C++ objects + def ccConnect(self): + if self.ccConnected: # already done this + return + peer = self.peer + cc_main.connectPorts(self.simobj.getCCObject(), self.name, self.index, + peer.simobj.getCCObject(), peer.name, peer.index) + self.ccConnected = True + peer.ccConnected = True + +# Port description object. Like a ParamDesc object, this represents a +# logical port in the SimObject class, not a particular port on a +# SimObject instance. 
The latter are represented by PortRef objects. +class Port(object): + def __init__(self, desc): + self.desc = desc + self.isVec = False + + # Generate a PortRef for this port on the given SimObject with the + # given name + def makeRef(self, simobj, name): + return PortRef(simobj, name, self.isVec) + + # Connect an instance of this port (on the given SimObject with + # the given name) with the port described by the supplied PortRef + def connect(self, simobj, name, ref): + if not isinstance(ref, PortRef): + raise TypeError, \ + "assigning non-port reference port '%s'" % name + myRef = self.makeRef(simobj, name) + myRef.setPeer(ref) + ref.setPeer(myRef) + +# VectorPort description object. Like Port, but represents a vector +# of connections (e.g., as on a Bus). +class VectorPort(Port): + def __init__(self, desc): + Port.__init__(self, desc) + self.isVec = True + ##################################################################### # __all__ defines the list of symbols that get exported when @@ -1436,5 +1521,6 @@ __all__ = ['SimObject', 'ParamContext', 'Param', 'VectorParam', 'NetworkBandwidth', 'MemoryBandwidth', 'Range', 'AddrRange', 'MaxAddr', 'MaxTick', 'AllMemory', 'Null', 'NULL', - 'NextEthernetAddr'] + 'NextEthernetAddr', + 'Port', 'VectorPort'] diff --git a/src/python/m5/main.py b/src/python/m5/main.py new file mode 100644 index 000000000..afe73d94c --- /dev/null +++ b/src/python/m5/main.py @@ -0,0 +1,321 @@ +# Copyright (c) 2005 The Regents of The University of Michigan +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# Authors: Nathan Binkert + +import code, optparse, os, socket, sys +from datetime import datetime +from attrdict import attrdict + +try: + import info +except ImportError: + info = None + +__all__ = [ 'options', 'arguments', 'main' ] + +usage="%prog [m5 options] script.py [script options]" +version="%prog 2.0" +brief_copyright=''' +Copyright (c) 2001-2006 +The Regents of The University of Michigan +All Rights Reserved +''' + +# there's only one option parsing done, so make it global and add some +# helper functions to make it work well. +parser = optparse.OptionParser(usage=usage, version=version, + description=brief_copyright, + formatter=optparse.TitledHelpFormatter()) +parser.disable_interspersed_args() + +# current option group +group = None + +def set_group(*args, **kwargs): + '''set the current option group''' + global group + if not args and not kwargs: + group = None + else: + group = parser.add_option_group(*args, **kwargs) + +class splitter(object): + def __init__(self, split): + self.split = split + def __call__(self, option, opt_str, value, parser): + getattr(parser.values, option.dest).extend(value.split(self.split)) + +def add_option(*args, **kwargs): + '''add an option to the current option group, or global none set''' + + # if action=split, but allows the option arguments + # themselves to be lists separated by the split variable''' + + if kwargs.get('action', None) == 'append' and 'split' in kwargs: + split = kwargs.pop('split') + kwargs['default'] = [] + kwargs['type'] = 'string' + kwargs['action'] = 'callback' + kwargs['callback'] = splitter(split) + + if group: + return group.add_option(*args, **kwargs) + + return parser.add_option(*args, **kwargs) + +def bool_option(name, default, help): + '''add a boolean option called --name and --no-name. 
+ Display help depending on which is the default''' + + tname = '--%s' % name + fname = '--no-%s' % name + dest = name.replace('-', '_') + if default: + thelp = optparse.SUPPRESS_HELP + fhelp = help + else: + thelp = help + fhelp = optparse.SUPPRESS_HELP + + add_option(tname, action="store_true", default=default, help=thelp) + add_option(fname, action="store_false", dest=dest, help=fhelp) + +# Help options +add_option('-A', "--authors", action="store_true", default=False, + help="Show author information") +add_option('-C', "--copyright", action="store_true", default=False, + help="Show full copyright information") +add_option('-R', "--readme", action="store_true", default=False, + help="Show the readme") +add_option('-N', "--release-notes", action="store_true", default=False, + help="Show the release notes") + +# Options for configuring the base simulator +add_option('-d', "--outdir", metavar="DIR", default=".", + help="Set the output directory to DIR [Default: %default]") +add_option('-i', "--interactive", action="store_true", default=False, + help="Invoke the interactive interpreter after running the script") +add_option("--pdb", action="store_true", default=False, + help="Invoke the python debugger before running the script") +add_option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':', + help="Prepend PATH to the system path when invoking the script") +add_option('-q', "--quiet", action="count", default=0, + help="Reduce verbosity") +add_option('-v', "--verbose", action="count", default=0, + help="Increase verbosity") + +# Statistics options +set_group("Statistics Options") +add_option("--stats-file", metavar="FILE", default="m5stats.txt", + help="Sets the output file for statistics [Default: %default]") + +# Debugging options +set_group("Debugging Options") +add_option("--debug-break", metavar="TIME[,TIME]", action='append', split=',', + help="Cycle to create a breakpoint") + +# Tracing options +set_group("Trace Options") +add_option("--trace-flags", metavar="FLAG[,FLAG]", action='append', split=',', + help="Sets the flags for tracing") +add_option("--trace-start", metavar="TIME", default='0s', + help="Start tracing at TIME (must have units)") +add_option("--trace-file", metavar="FILE", default="cout", + help="Sets the output file for tracing [Default: %default]") +add_option("--trace-circlebuf", metavar="SIZE", type="int", default=0, + help="If SIZE is non-zero, turn on the circular buffer with SIZE lines") +add_option("--no-trace-circlebuf", action="store_const", const=0, + dest='trace_circlebuf', help=optparse.SUPPRESS_HELP) +bool_option("trace-dumponexit", default=False, + help="Dump trace buffer on exit") +add_option("--trace-ignore", metavar="EXPR", action='append', split=':', + help="Ignore EXPR sim objects") + +# Execution Trace options +set_group("Execution Trace Options") +bool_option("speculative", default=True, + help="Don't capture speculative instructions") +bool_option("print-cycle", default=True, + help="Don't print cycle numbers in trace output") +bool_option("print-symbol", default=True, + help="Disable PC symbols in trace output") +bool_option("print-opclass", default=True, + help="Don't print op class type in trace output") +bool_option("print-thread", default=True, + help="Don't print thread number in trace output") +bool_option("print-effaddr", default=True, + help="Don't print effective address in trace output") +bool_option("print-data", default=True, + help="Don't print result data in trace output") +bool_option("print-iregs", default=False, + 
help="Print fetch sequence numbers in trace output") +bool_option("print-fetch-seq", default=False, + help="Print fetch sequence numbers in trace output") +bool_option("print-cpseq", default=False, + help="Print correct path sequence numbers in trace output") + +options = attrdict() +arguments = [] + +def usage(exitcode=None): + parser.print_help() + if exitcode is not None: + sys.exit(exitcode) + +def parse_args(): + _opts,args = parser.parse_args() + opts = attrdict(_opts.__dict__) + + # setting verbose and quiet at the same time doesn't make sense + if opts.verbose > 0 and opts.quiet > 0: + usage(2) + + # store the verbosity in a single variable. 0 is default, + # negative numbers represent quiet and positive values indicate verbose + opts.verbose -= opts.quiet + + del opts.quiet + + options.update(opts) + arguments.extend(args) + return opts,args + +def main(): + import cc_main + + parse_args() + + done = False + if options.copyright: + done = True + print info.LICENSE + print + + if options.authors: + done = True + print 'Author information:' + print + print info.AUTHORS + print + + if options.readme: + done = True + print 'Readme:' + print + print info.README + print + + if options.release_notes: + done = True + print 'Release Notes:' + print + print info.RELEASE_NOTES + print + + if done: + sys.exit(0) + + if options.verbose >= 0: + print "M5 Simulator System" + print brief_copyright + print + print "M5 compiled %s" % cc_main.cvar.compileDate; + print "M5 started %s" % datetime.now().ctime() + print "M5 executing on %s" % socket.gethostname() + + # check to make sure we can find the listed script + if not arguments or not os.path.isfile(arguments[0]): + usage(2) + + # tell C++ about output directory + cc_main.setOutputDir(options.outdir) + + # update the system path with elements from the -p option + sys.path[0:0] = options.path + + import objects + + # set stats options + objects.Statistics.text_file = options.stats_file + + # set debugging options + objects.Debug.break_cycles = options.debug_break + + # set tracing options + objects.Trace.flags = options.trace_flags + objects.Trace.start = options.trace_start + objects.Trace.file = options.trace_file + objects.Trace.bufsize = options.trace_circlebuf + objects.Trace.dump_on_exit = options.trace_dumponexit + objects.Trace.ignore = options.trace_ignore + + # set execution trace options + objects.ExecutionTrace.speculative = options.speculative + objects.ExecutionTrace.print_cycle = options.print_cycle + objects.ExecutionTrace.pc_symbol = options.print_symbol + objects.ExecutionTrace.print_opclass = options.print_opclass + objects.ExecutionTrace.print_thread = options.print_thread + objects.ExecutionTrace.print_effaddr = options.print_effaddr + objects.ExecutionTrace.print_data = options.print_data + objects.ExecutionTrace.print_iregs = options.print_iregs + objects.ExecutionTrace.print_fetchseq = options.print_fetch_seq + objects.ExecutionTrace.print_cpseq = options.print_cpseq + + sys.argv = arguments + sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path + + scope = { '__file__' : sys.argv[0] } + + # we want readline if we're doing anything interactive + if options.interactive or options.pdb: + exec("import readline", scope) + + # if pdb was requested, execfile the thing under pdb, otherwise, + # just do the execfile normally + if options.pdb: + from pdb import Pdb + debugger = Pdb() + debugger.run('execfile("%s")' % sys.argv[0], scope) + else: + execfile(sys.argv[0], scope) + + # once the script is done + if options.interactive: 
+ interact = code.InteractiveConsole(scope) + interact.interact("M5 Interactive Console") + +if __name__ == '__main__': + from pprint import pprint + + parse_args() + + print 'opts:' + pprint(options, indent=4) + print + + print 'args:' + pprint(arguments, indent=4) diff --git a/src/python/m5/objects/BaseCPU.py b/src/python/m5/objects/BaseCPU.py index 2e78578df..5bf98be9c 100644 --- a/src/python/m5/objects/BaseCPU.py +++ b/src/python/m5/objects/BaseCPU.py @@ -6,10 +6,10 @@ class BaseCPU(SimObject): abstract = True mem = Param.MemObject("memory") + system = Param.System(Parent.any, "system object") if build_env['FULL_SYSTEM']: dtb = Param.AlphaDTB("Data TLB") itb = Param.AlphaITB("Instruction TLB") - system = Param.System(Parent.any, "system object") cpu_id = Param.Int(-1, "CPU identifier") else: workload = VectorParam.Process("processes to run") diff --git a/src/python/m5/objects/BaseCache.py b/src/python/m5/objects/BaseCache.py index 33f44759b..497b2b038 100644 --- a/src/python/m5/objects/BaseCache.py +++ b/src/python/m5/objects/BaseCache.py @@ -1,29 +1,26 @@ from m5.config import * -from BaseMem import BaseMem +from MemObject import MemObject class Prefetch(Enum): vals = ['none', 'tagged', 'stride', 'ghb'] -class BaseCache(BaseMem): +class BaseCache(MemObject): type = 'BaseCache' adaptive_compression = Param.Bool(False, "Use an adaptive compression scheme") assoc = Param.Int("associativity") block_size = Param.Int("block size in bytes") + latency = Param.Int("Latency") compressed_bus = Param.Bool(False, "This cache connects to a compressed memory") compression_latency = Param.Latency('0ns', "Latency in cycles of compression algorithm") do_copy = Param.Bool(False, "perform fast copies in the cache") hash_delay = Param.Int(1, "time in cycles of hash access") - in_bus = Param.Bus(NULL, "incoming bus object") lifo = Param.Bool(False, "whether this NIC partition should use LIFO repl. 
policy") max_miss_count = Param.Counter(0, "number of misses to handle before calling exit") - mem_trace = Param.MemTraceWriter(NULL, - "memory trace writer to record accesses") mshrs = Param.Int("number of MSHRs (max outstanding requests)") - out_bus = Param.Bus("outgoing bus object") prioritizeRequests = Param.Bool(False, "always service demand misses first") protocol = Param.CoherenceProtocol(NULL, "coherence protocol to use") @@ -63,3 +60,6 @@ class BaseCache(BaseMem): "Use the CPU ID to seperate calculations of prefetches") prefetch_data_accesses_only = Param.Bool(False, "Only prefetch on data not on instruction accesses") + hit_latency = Param.Int(1,"Hit Latency of the cache") + cpu_side = Port("Port on side closer to CPU") + mem_side = Port("Port on side closer to MEM") diff --git a/src/python/m5/objects/Bridge.py b/src/python/m5/objects/Bridge.py index 880535755..c9e673afb 100644 --- a/src/python/m5/objects/Bridge.py +++ b/src/python/m5/objects/Bridge.py @@ -3,6 +3,8 @@ from MemObject import MemObject class Bridge(MemObject): type = 'Bridge' + side_a = Port('Side A port') + side_b = Port('Side B port') queue_size_a = Param.Int(16, "The number of requests to buffer") queue_size_b = Param.Int(16, "The number of requests to buffer") delay = Param.Latency('0ns', "The latency of this bridge") diff --git a/src/python/m5/objects/Bus.py b/src/python/m5/objects/Bus.py index c37dab438..e0278e6c3 100644 --- a/src/python/m5/objects/Bus.py +++ b/src/python/m5/objects/Bus.py @@ -3,4 +3,6 @@ from MemObject import MemObject class Bus(MemObject): type = 'Bus' + port = VectorPort("vector port for connecting devices") + default = Port("Default port for requests that aren't handeled by a device.") bus_id = Param.Int(0, "blah") diff --git a/src/python/m5/objects/Device.py b/src/python/m5/objects/Device.py index 7798f5f04..222f750da 100644 --- a/src/python/m5/objects/Device.py +++ b/src/python/m5/objects/Device.py @@ -4,6 +4,7 @@ from MemObject import MemObject class PioDevice(MemObject): type = 'PioDevice' abstract = True + pio = Port("Programmed I/O port") platform = Param.Platform(Parent.any, "Platform this device is part of") system = Param.System(Parent.any, "System this device is part of") @@ -16,3 +17,4 @@ class BasicPioDevice(PioDevice): class DmaDevice(PioDevice): type = 'DmaDevice' abstract = True + dma = Port("DMA port") diff --git a/src/python/m5/objects/FuncUnit.py b/src/python/m5/objects/FuncUnit.py new file mode 100644 index 000000000..f61590ae9 --- /dev/null +++ b/src/python/m5/objects/FuncUnit.py @@ -0,0 +1,17 @@ +from m5.config import * + +class OpType(Enum): + vals = ['(null)', 'IntAlu', 'IntMult', 'IntDiv', 'FloatAdd', + 'FloatCmp', 'FloatCvt', 'FloatMult', 'FloatDiv', 'FloatSqrt', + 'MemRead', 'MemWrite', 'IprAccess', 'InstPrefetch'] + +class OpDesc(SimObject): + type = 'OpDesc' + issueLat = Param.Int(1, "cycles until another can be issued") + opClass = Param.OpType("type of operation") + opLat = Param.Int(1, "cycles until result is available") + +class FUDesc(SimObject): + type = 'FUDesc' + count = Param.Int("number of these FU's available") + opList = VectorParam.OpDesc("operation classes for this FU type") diff --git a/src/python/m5/objects/AlphaFullCPU.py b/src/python/m5/objects/O3CPU.py index 2988305d3..d6bc454ad 100644 --- a/src/python/m5/objects/AlphaFullCPU.py +++ b/src/python/m5/objects/O3CPU.py @@ -2,14 +2,16 @@ from m5 import build_env from m5.config import * from BaseCPU import BaseCPU -class DerivAlphaFullCPU(BaseCPU): - type = 'DerivAlphaFullCPU' +class 
DerivO3CPU(BaseCPU): + type = 'DerivO3CPU' activity = Param.Unsigned("Initial count") numThreads = Param.Unsigned("number of HW thread contexts") checker = Param.BaseCPU(NULL, "checker") cachePorts = Param.Unsigned("Cache Ports") + icache_port = Port("Instruction Port") + dcache_port = Port("Data Port") decodeToFetchDelay = Param.Unsigned("Decode to fetch delay") renameToFetchDelay = Param.Unsigned("Rename to fetch delay") @@ -37,12 +39,10 @@ class DerivAlphaFullCPU(BaseCPU): "Issue/Execute/Writeback delay") issueToExecuteDelay = Param.Unsigned("Issue to execute delay (internal " "to the IEW stage)") + dispatchWidth = Param.Unsigned("Dispatch width") issueWidth = Param.Unsigned("Issue width") - executeWidth = Param.Unsigned("Execute width") - executeIntWidth = Param.Unsigned("Integer execute width") - executeFloatWidth = Param.Unsigned("Floating point execute width") - executeBranchWidth = Param.Unsigned("Branch execute width") - executeMemoryWidth = Param.Unsigned("Memory execute width") + wbWidth = Param.Unsigned("Writeback width") + wbDepth = Param.Unsigned("Writeback depth") fuPool = Param.FUPool(NULL, "Functional Unit pool") iewToCommitDelay = Param.Unsigned("Issue/Execute/Writeback to commit " @@ -53,6 +53,9 @@ class DerivAlphaFullCPU(BaseCPU): trapLatency = Param.Tick("Trap latency") fetchTrapLatency = Param.Tick("Fetch trap latency") + backComSize = Param.Unsigned("Time buffer size for backwards communication") + forwardComSize = Param.Unsigned("Time buffer size for forward communication") + predType = Param.String("Branch predictor type ('local', 'tournament')") localPredictorSize = Param.Unsigned("Size of local predictor") localCtrBits = Param.Unsigned("Bits per counter") diff --git a/src/python/m5/objects/OzoneCPU.py b/src/python/m5/objects/OzoneCPU.py index f2d9aea84..88fb63c74 100644 --- a/src/python/m5/objects/OzoneCPU.py +++ b/src/python/m5/objects/OzoneCPU.py @@ -7,11 +7,11 @@ class DerivOzoneCPU(BaseCPU): numThreads = Param.Unsigned("number of HW thread contexts") - if not build_env['FULL_SYSTEM']: - mem = Param.FunctionalMemory(NULL, "memory") - checker = Param.BaseCPU("Checker CPU") + icache_port = Port("Instruction Port") + dcache_port = Port("Data Port") + width = Param.Unsigned("Width") frontEndWidth = Param.Unsigned("Front end width") backEndWidth = Param.Unsigned("Back end width") diff --git a/src/python/m5/objects/Pci.py b/src/python/m5/objects/Pci.py index 9e1e91b13..29014bb37 100644 --- a/src/python/m5/objects/Pci.py +++ b/src/python/m5/objects/Pci.py @@ -1,5 +1,5 @@ from m5.config import * -from Device import BasicPioDevice, DmaDevice +from Device import BasicPioDevice, DmaDevice, PioDevice class PciConfigData(SimObject): type = 'PciConfigData' @@ -38,18 +38,22 @@ class PciConfigData(SimObject): MaximumLatency = Param.UInt8(0x00, "Maximum Latency") MinimumGrant = Param.UInt8(0x00, "Minimum Grant") -class PciConfigAll(BasicPioDevice): +class PciConfigAll(PioDevice): type = 'PciConfigAll' + pio_latency = Param.Tick(1, "Programmed IO latency in simticks") + bus = Param.UInt8(0x00, "PCI bus to act as config space for") + size = Param.MemorySize32('16MB', "Size of config space") + class PciDevice(DmaDevice): type = 'PciDevice' abstract = True + config = Port("PCI configuration space port") pci_bus = Param.Int("PCI bus") pci_dev = Param.Int("PCI device number") pci_func = Param.Int("PCI function code") pio_latency = Param.Tick(1, "Programmed IO latency in simticks") configdata = Param.PciConfigData(Parent.any, "PCI Config data") - configspace = 
Param.PciConfigAll(Parent.any, "PCI Configspace") class PciFake(PciDevice): type = 'PciFake' diff --git a/src/python/m5/objects/PhysicalMemory.py b/src/python/m5/objects/PhysicalMemory.py index bed90d555..9cc7510a2 100644 --- a/src/python/m5/objects/PhysicalMemory.py +++ b/src/python/m5/objects/PhysicalMemory.py @@ -3,6 +3,7 @@ from MemObject import * class PhysicalMemory(MemObject): type = 'PhysicalMemory' + port = Port("the access port") range = Param.AddrRange("Device Address") file = Param.String('', "memory mapped file") latency = Param.Latency(Parent.clock, "latency of an access") diff --git a/src/python/m5/objects/System.py b/src/python/m5/objects/System.py index 9a1e1d690..386f39277 100644 --- a/src/python/m5/objects/System.py +++ b/src/python/m5/objects/System.py @@ -1,9 +1,12 @@ from m5 import build_env from m5.config import * +class MemoryMode(Enum): vals = ['invalid', 'atomic', 'timing'] + class System(SimObject): type = 'System' physmem = Param.PhysicalMemory(Parent.any, "phsyical memory") + mem_mode = Param.MemoryMode('atomic', "The mode the memory system is in") if build_env['FULL_SYSTEM']: boot_cpu_frequency = Param.Frequency(Self.cpu[0].clock.frequency, "boot processor frequency") |
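
The drain, checkpoint and CPU-switching helpers added to m5/__init__.py are meant to be driven from the user script once the hierarchy is built. The sketch below shows the intended call sequence; it is an illustration only: root, system, simple_cpu and detailed_cpu stand for objects constructed in a script like the one above, and the checkpoint directory name is arbitrary.

    import m5

    # run until the workload raises an exit event, then take a checkpoint;
    # checkpoint() drains the whole tree first and must be given the Root
    m5.simulate()
    m5.checkpoint(root, 'ckpt.dir')

    # switch the memory system to timing mode and swap in the detailed CPU;
    # switchCpus() expects a list of (oldCPU, newCPU) tuples
    m5.changeToTiming(system)
    m5.switchCpus([(simple_cpu, detailed_cpu)])
    m5.simulate()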
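
The option handling in the new m5/main.py extends optparse so that list-valued options such as --trace-flags and --debug-break accept either repeated occurrences or a single delimiter-separated argument, using the splitter callback defined above. The standalone snippet below reproduces that mechanism outside of m5 to show what the accumulated value looks like; the option name is borrowed from main.py, everything else is just a demonstration.

    import optparse

    class splitter(object):
        # optparse callback: split the argument and extend the accumulated list
        def __init__(self, split):
            self.split = split
        def __call__(self, option, opt_str, value, parser):
            getattr(parser.values, option.dest).extend(value.split(self.split))

    parser = optparse.OptionParser()
    parser.add_option('--trace-flags', dest='trace_flags', default=[],
                      type='string', action='callback', callback=splitter(','))

    opts, args = parser.parse_args(['--trace-flags=Bus,Cache',
                                    '--trace-flags=Ethernet'])
    print opts.trace_flags    # ['Bus', 'Cache', 'Ethernet']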