Diffstat (limited to 'src/python')
-rw-r--r--  src/python/SConscript | 117
-rw-r--r--  src/python/m5/SimObject.py | 798
-rw-r--r--  src/python/m5/__init__.py | 212
-rw-r--r--  src/python/m5/attrdict.py | 61
-rw-r--r--  src/python/m5/convert.py | 229
-rw-r--r--  src/python/m5/main.py | 330
-rw-r--r--  src/python/m5/multidict.py | 182
-rw-r--r--  src/python/m5/objects/AlphaConsole.py | 10
-rw-r--r--  src/python/m5/objects/AlphaTLB.py | 14
-rw-r--r--  src/python/m5/objects/BadDevice.py | 6
-rw-r--r--  src/python/m5/objects/BaseCPU.py | 59
-rw-r--r--  src/python/m5/objects/BaseCache.py | 65
-rw-r--r--  src/python/m5/objects/Bridge.py | 11
-rw-r--r--  src/python/m5/objects/Bus.py | 8
-rw-r--r--  src/python/m5/objects/CoherenceProtocol.py | 8
-rw-r--r--  src/python/m5/objects/Device.py | 21
-rw-r--r--  src/python/m5/objects/DiskImage.py | 16
-rw-r--r--  src/python/m5/objects/Ethernet.py | 193
-rw-r--r--  src/python/m5/objects/FUPool.py | 6
-rw-r--r--  src/python/m5/objects/FuncUnit.py | 18
-rw-r--r--  src/python/m5/objects/Ide.py | 40
-rw-r--r--  src/python/m5/objects/IntrControl.py | 6
-rw-r--r--  src/python/m5/objects/MemObject.py | 6
-rw-r--r--  src/python/m5/objects/MemTest.py | 20
-rw-r--r--  src/python/m5/objects/O3CPU.py | 115
-rw-r--r--  src/python/m5/objects/OzoneCPU.py | 95
-rw-r--r--  src/python/m5/objects/Pci.py | 62
-rw-r--r--  src/python/m5/objects/PhysicalMemory.py | 28
-rw-r--r--  src/python/m5/objects/Platform.py | 7
-rw-r--r--  src/python/m5/objects/Process.py | 35
-rw-r--r--  src/python/m5/objects/Repl.py | 11
-rw-r--r--  src/python/m5/objects/Root.py | 25
-rw-r--r--  src/python/m5/objects/SimConsole.py | 14
-rw-r--r--  src/python/m5/objects/SimpleDisk.py | 7
-rw-r--r--  src/python/m5/objects/SimpleOzoneCPU.py | 87
-rw-r--r--  src/python/m5/objects/System.py | 26
-rw-r--r--  src/python/m5/objects/Tsunami.py | 95
-rw-r--r--  src/python/m5/objects/Uart.py | 17
-rw-r--r--  src/python/m5/params.py | 968
-rw-r--r--  src/python/m5/proxy.py | 206
-rw-r--r--  src/python/m5/smartdict.py | 154
-rw-r--r--  src/python/m5/util.py | 59
42 files changed, 4447 insertions, 0 deletions
diff --git a/src/python/SConscript b/src/python/SConscript
new file mode 100644
index 000000000..c9e713199
--- /dev/null
+++ b/src/python/SConscript
@@ -0,0 +1,117 @@
+# -*- mode:python -*-
+
+# Copyright (c) 2004-2005 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+# Nathan Binkert
+
+import os, os.path, re, sys
+from zipfile import PyZipFile
+
+# handy function for path joins
+def join(*args):
+ return os.path.normpath(os.path.join(*args))
+
+Import('env')
+
+# This SConscript is in charge of collecting .py files and generating
+# a zip archive that is appended to the m5 binary.
+
+# List of files & directories to include in the zip file. To include
+# a package, list only the root directory of the package, not any
+# internal .py files (else they will get the path stripped off when
+# they are imported into the zip file).
+pyzip_files = []
+
+# List of additional files on which the zip archive depends, but which
+# are not included in pyzip_files... i.e. individual .py files within
+# a package.
+pyzip_dep_files = []
+
+# Add the specified package to the zip archive. Adds the directory to
+# pyzip_files and all included .py files to pyzip_dep_files.
+def addPkg(pkgdir):
+ pyzip_files.append(pkgdir)
+ origdir = os.getcwd()
+ srcdir = join(Dir('.').srcnode().abspath, pkgdir)
+ os.chdir(srcdir)
+ for path, dirs, files in os.walk('.'):
+ for i,dir in enumerate(dirs):
+ if dir == 'SCCS':
+ del dirs[i]
+ break
+
+ for f in files:
+ if f.endswith('.py'):
+ pyzip_dep_files.append(join(pkgdir, path, f))
+
+ os.chdir(origdir)
+
+# Generate Python file that contains a dict specifying the current
+# build_env flags.
+def MakeDefinesPyFile(target, source, env):
+ f = file(str(target[0]), 'w')
+ print >>f, "m5_build_env = ", source[0]
+ f.close()
+
+optionDict = dict([(opt, env[opt]) for opt in env.ExportOptions])
+env.Command('m5/defines.py', Value(optionDict), MakeDefinesPyFile)
+
+def MakeInfoPyFile(target, source, env):
+ f = file(str(target[0]), 'w')
+ for src in source:
+ data = ''.join(file(src.srcnode().abspath, 'r').xreadlines())
+ print >>f, "%s = %s" % (src, repr(data))
+ f.close()
+
+env.Command('m5/info.py',
+ [ '#/AUTHORS', '#/LICENSE', '#/README', '#/RELEASE_NOTES' ],
+ MakeInfoPyFile)
+
+# Now specify the packages & files for the zip archive.
+addPkg('m5')
+pyzip_files.append('m5/defines.py')
+pyzip_files.append('m5/info.py')
+pyzip_files.append(join(env['ROOT'], 'util/pbs/jobfile.py'))
+
+env.Command(['swig/cc_main_wrap.cc', 'm5/cc_main.py'],
+ 'swig/cc_main.i',
+ '$SWIG $SWIGFLAGS -outdir ${TARGETS[1].dir} '
+ '-o ${TARGETS[0]} $SOURCES')
+
+pyzip_dep_files.append('m5/cc_main.py')
+
+# Action function to build the zip archive. Uses the PyZipFile module
+# included in the standard Python library.
+def buildPyZip(target, source, env):
+ pzf = PyZipFile(str(target[0]), 'w')
+ for s in source:
+ pzf.writepy(str(s))
+
+# Add the zip file target to the environment.
+env.Command('m5py.zip', pyzip_files, buildPyZip)
+env.Depends('m5py.zip', pyzip_dep_files)
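
The SConscript above produces three generated targets (m5/defines.py, m5/info.py, the SWIG wrapper) plus a zip archive built with PyZipFile and appended to the m5 binary. A minimal standalone sketch of that last step, outside SCons (the 'm5' directory and 'm5py.zip' name are just examples of the arguments the build passes in):

    # Plain-Python sketch of the buildPyZip() action above; no SCons involved.
    from zipfile import PyZipFile

    def build_pyzip(target, sources):
        # writepy() byte-compiles each .py it finds and stores the result,
        # preserving package structure when given a directory.
        pzf = PyZipFile(target, 'w')
        for src in sources:
            pzf.writepy(src)
        pzf.close()

    build_pyzip('m5py.zip', ['m5'])   # archive the whole 'm5' package

An archive built this way can sit on sys.path (or be appended to the binary, as the build does) and be imported like any other package.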
diff --git a/src/python/m5/SimObject.py b/src/python/m5/SimObject.py
new file mode 100644
index 000000000..a0d66e643
--- /dev/null
+++ b/src/python/m5/SimObject.py
@@ -0,0 +1,798 @@
+# Copyright (c) 2004-2006 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+# Nathan Binkert
+
+import sys, types
+
+from util import *
+from multidict import multidict
+
+# These utility functions have to come first because they're
+# referenced in params.py... otherwise they won't be defined when we
+# import params below, and the recursive import of this file from
+# params.py will not find these names.
+def isSimObject(value):
+ return isinstance(value, SimObject)
+
+def isSimObjectClass(value):
+ return issubclass(value, SimObject)
+
+def isSimObjectSequence(value):
+ if not isinstance(value, (list, tuple)) or len(value) == 0:
+ return False
+
+ for val in value:
+ if not isNullPointer(val) and not isSimObject(val):
+ return False
+
+ return True
+
+def isSimObjectOrSequence(value):
+ return isSimObject(value) or isSimObjectSequence(value)
+
+# Have to import params up top since Param is referenced on initial
+# load (when SimObject class references Param to create a class
+# variable, the 'name' param)...
+from params import *
+# There are a few things we need that aren't in params.__all__ since
+# normal users don't need them
+from params import ParamDesc, isNullPointer, SimObjVector
+
+noDot = False
+try:
+ import pydot
+except:
+ noDot = True
+
+#####################################################################
+#
+# M5 Python Configuration Utility
+#
+# The basic idea is to write simple Python programs that build Python
+# objects corresponding to M5 SimObjects for the desired simulation
+# configuration. For now, the Python emits a .ini file that can be
+# parsed by M5. In the future, some tighter integration between M5
+# and the Python interpreter may allow bypassing the .ini file.
+#
+# Each SimObject class in M5 is represented by a Python class with the
+# same name. The Python inheritance tree mirrors the M5 C++ tree
+# (e.g., SimpleCPU derives from BaseCPU in both cases, and all
+# SimObjects inherit from a single SimObject base class). To specify
+# an instance of an M5 SimObject in a configuration, the user simply
+# instantiates the corresponding Python object. The parameters for
+# that SimObject are given by assigning to attributes of the Python
+# object, either using keyword assignment in the constructor or in
+# separate assignment statements. For example:
+#
+# cache = BaseCache(size='64KB')
+# cache.hit_latency = 3
+# cache.assoc = 8
+#
+# The magic lies in the mapping of the Python attributes for SimObject
+# classes to the actual SimObject parameter specifications. This
+# allows parameter validity checking in the Python code. Continuing
+# the example above, the statements "cache.blurfl=3" or
+# "cache.assoc='hello'" would both result in runtime errors in Python,
+# since the BaseCache object has no 'blurfl' parameter and the 'assoc'
+# parameter requires an integer, respectively. This magic is done
+# primarily by overriding the special __setattr__ method that controls
+# assignment to object attributes.
+#
+# Once a set of Python objects have been instantiated in a hierarchy,
+# calling 'instantiate(obj)' (where obj is the root of the hierarchy)
+# will generate a .ini file.
+#
+#####################################################################
+
+# dict to look up SimObjects based on path
+instanceDict = {}
+
+# The metaclass for SimObject. This class controls how new classes
+# that derive from SimObject are instantiated, and provides inherited
+# class behavior (just like a class controls how instances of that
+# class are instantiated, and provides inherited instance behavior).
+class MetaSimObject(type):
+ # Attributes that can be set only at initialization time
+ init_keywords = { 'abstract' : types.BooleanType,
+ 'type' : types.StringType }
+ # Attributes that can be set any time
+ keywords = { 'check' : types.FunctionType,
+ 'cxx_type' : types.StringType,
+ 'cxx_predecls' : types.ListType,
+ 'swig_predecls' : types.ListType }
+
+ # __new__ is called before __init__, and is where the statements
+ # in the body of the class definition get loaded into the class's
+ # __dict__. We intercept this to filter out parameter & port assignments
+ # and only allow "private" attributes to be passed to the base
+ # __new__ (starting with underscore).
+ def __new__(mcls, name, bases, dict):
+ # Copy "private" attributes, functions, and classes to the
+ # official dict. Everything else goes in _init_dict to be
+ # filtered in __init__.
+ cls_dict = {}
+ value_dict = {}
+ for key,val in dict.items():
+ if key.startswith('_') or isinstance(val, (types.FunctionType,
+ types.TypeType)):
+ cls_dict[key] = val
+ else:
+ # must be a param/port setting
+ value_dict[key] = val
+ cls_dict['_value_dict'] = value_dict
+ return super(MetaSimObject, mcls).__new__(mcls, name, bases, cls_dict)
+
+ # subclass initialization
+ def __init__(cls, name, bases, dict):
+ # calls type.__init__()... I think that's a no-op, but leave
+ # it here just in case it's not.
+ super(MetaSimObject, cls).__init__(name, bases, dict)
+
+ # initialize required attributes
+
+ # class-only attributes
+ cls._params = multidict() # param descriptions
+ cls._ports = multidict() # port descriptions
+
+ # class or instance attributes
+ cls._values = multidict() # param values
+ cls._port_refs = multidict() # port ref objects
+ cls._instantiated = False # really instantiated, cloned, or subclassed
+
+ # We don't support multiple inheritance. If you want to, you
+ # must fix multidict to deal with it properly.
+ if len(bases) > 1:
+ raise TypeError, "SimObjects do not support multiple inheritance"
+
+ base = bases[0]
+
+ # Set up general inheritance via multidicts. A subclass will
+ # inherit all its settings from the base class. The only time
+ # the following is not true is when we define the SimObject
+ # class itself (in which case the multidicts have no parent).
+ if isinstance(base, MetaSimObject):
+ cls._params.parent = base._params
+ cls._ports.parent = base._ports
+ cls._values.parent = base._values
+ cls._port_refs.parent = base._port_refs
+ # mark base as having been subclassed
+ base._instantiated = True
+
+ # Now process the _value_dict items. They could be defining
+ # new (or overriding existing) parameters or ports, setting
+ # class keywords (e.g., 'abstract'), or setting parameter
+ # values or port bindings. The first 3 can only be set when
+ # the class is defined, so we handle them here. The others
+ # can be set later too, so just emulate that by calling
+ # setattr().
+ for key,val in cls._value_dict.items():
+ # param descriptions
+ if isinstance(val, ParamDesc):
+ cls._new_param(key, val)
+
+ # port objects
+ elif isinstance(val, Port):
+ cls._new_port(key, val)
+
+ # init-time-only keywords
+ elif cls.init_keywords.has_key(key):
+ cls._set_keyword(key, val, cls.init_keywords[key])
+
+ # default: use normal path (ends up in __setattr__)
+ else:
+ setattr(cls, key, val)
+
+ cls.cxx_type = cls.type + '*'
+ # A forward class declaration is sufficient since we are just
+ # declaring a pointer.
+ cls.cxx_predecls = ['class %s;' % cls.type]
+ cls.swig_predecls = cls.cxx_predecls
+
+ def _set_keyword(cls, keyword, val, kwtype):
+ if not isinstance(val, kwtype):
+ raise TypeError, 'keyword %s has bad type %s (expecting %s)' % \
+ (keyword, type(val), kwtype)
+ if isinstance(val, types.FunctionType):
+ val = classmethod(val)
+ type.__setattr__(cls, keyword, val)
+
+ def _new_param(cls, name, pdesc):
+ # each param desc should be uniquely assigned to one variable
+ assert(not hasattr(pdesc, 'name'))
+ pdesc.name = name
+ cls._params[name] = pdesc
+ if hasattr(pdesc, 'default'):
+ cls._set_param(name, pdesc.default, pdesc)
+
+ def _set_param(cls, name, value, param):
+ assert(param.name == name)
+ try:
+ cls._values[name] = param.convert(value)
+ except Exception, e:
+ msg = "%s\nError setting param %s.%s to %s\n" % \
+ (e, cls.__name__, name, value)
+ e.args = (msg, )
+ raise
+
+ def _new_port(cls, name, port):
+ # each port should be uniquely assigned to one variable
+ assert(not hasattr(port, 'name'))
+ port.name = name
+ cls._ports[name] = port
+ if hasattr(port, 'default'):
+ cls._cls_get_port_ref(name).connect(port.default)
+
+ # same as _get_port_ref, effectively, but for classes
+ def _cls_get_port_ref(cls, attr):
+ # Return reference that can be assigned to another port
+ # via __setattr__. There is only ever one reference
+ # object per port, but we create them lazily here.
+ ref = cls._port_refs.get(attr)
+ if not ref:
+ ref = cls._ports[attr].makeRef(cls)
+ cls._port_refs[attr] = ref
+ return ref
+
+ # Set attribute (called on foo.attr = value when foo is an
+ # instance of class cls).
+ def __setattr__(cls, attr, value):
+ # normal processing for private attributes
+ if attr.startswith('_'):
+ type.__setattr__(cls, attr, value)
+ return
+
+ if cls.keywords.has_key(attr):
+ cls._set_keyword(attr, value, cls.keywords[attr])
+ return
+
+ if cls._ports.has_key(attr):
+ cls._cls_get_port_ref(attr).connect(value)
+ return
+
+ if isSimObjectOrSequence(value) and cls._instantiated:
+ raise RuntimeError, \
+ "cannot set SimObject parameter '%s' after\n" \
+ " class %s has been instantiated or subclassed" \
+ % (attr, cls.__name__)
+
+ # check for param
+ param = cls._params.get(attr)
+ if param:
+ cls._set_param(attr, value, param)
+ return
+
+ if isSimObjectOrSequence(value):
+ # If RHS is a SimObject, it's an implicit child assignment.
+ # Classes don't have children, so we just put this object
+ # in _values; later, each instance will do a 'setattr(self,
+ # attr, _values[attr])' in SimObject.__init__ which will
+ # add this object as a child.
+ cls._values[attr] = value
+ return
+
+ # no valid assignment... raise exception
+ raise AttributeError, \
+ "Class %s has no parameter \'%s\'" % (cls.__name__, attr)
+
+ def __getattr__(cls, attr):
+ if cls._values.has_key(attr):
+ return cls._values[attr]
+
+ raise AttributeError, \
+ "object '%s' has no attribute '%s'" % (cls.__name__, attr)
+
+ def __str__(cls):
+ return cls.__name__
+
+ def cxx_decl(cls):
+ code = "#ifndef __PARAMS__%s\n#define __PARAMS__%s\n\n" % (cls, cls)
+
+ if str(cls) != 'SimObject':
+ base = cls.__bases__[0].type
+ else:
+ base = None
+
+ # The 'dict' attribute restricts us to the params declared in
+ # the object itself, not including inherited params (which
+ # will also be inherited from the base class's param struct
+ # here).
+ params = cls._params.dict.values()
+ try:
+ ptypes = [p.ptype for p in params]
+ except:
+ print cls, p, p.ptype_str
+ print params
+ raise
+
+ # get a list of lists of predeclaration lines
+ predecls = [p.cxx_predecls() for p in params]
+ # flatten
+ predecls = reduce(lambda x,y:x+y, predecls, [])
+ # remove redundant lines
+ predecls2 = []
+ for pd in predecls:
+ if pd not in predecls2:
+ predecls2.append(pd)
+ predecls2.sort()
+ code += "\n".join(predecls2)
+ code += "\n\n";
+
+ if base:
+ code += '#include "params/%s.hh"\n\n' % base
+
+ # Generate declarations for locally defined enumerations.
+ enum_ptypes = [t for t in ptypes if issubclass(t, Enum)]
+ if enum_ptypes:
+ code += "\n".join([t.cxx_decl() for t in enum_ptypes])
+ code += "\n\n"
+
+ # now generate the actual param struct
+ code += "struct %sParams" % cls
+ if base:
+ code += " : public %sParams" % base
+ code += " {\n"
+ decls = [p.cxx_decl() for p in params]
+ decls.sort()
+ code += "".join([" %s\n" % d for d in decls])
+ code += "};\n"
+
+ # close #ifndef __PARAMS__* guard
+ code += "\n#endif\n"
+ return code
+
+ def swig_decl(cls):
+
+ code = '%%module %sParams\n' % cls
+
+ if str(cls) != 'SimObject':
+ base = cls.__bases__[0].type
+ else:
+ base = None
+
+ # The 'dict' attribute restricts us to the params declared in
+ # the object itself, not including inherited params (which
+ # will also be inherited from the base class's param struct
+ # here).
+ params = cls._params.dict.values()
+ ptypes = [p.ptype for p in params]
+
+ # get a list of lists of predeclaration lines
+ predecls = [p.swig_predecls() for p in params]
+ # flatten
+ predecls = reduce(lambda x,y:x+y, predecls, [])
+ # remove redundant lines
+ predecls2 = []
+ for pd in predecls:
+ if pd not in predecls2:
+ predecls2.append(pd)
+ predecls2.sort()
+ code += "\n".join(predecls2)
+ code += "\n\n";
+
+ if base:
+ code += '%%import "python/m5/swig/%sParams.i"\n\n' % base
+
+ code += '%{\n'
+ code += '#include "params/%s.hh"\n' % cls
+ code += '%}\n\n'
+ code += '%%include "params/%s.hh"\n\n' % cls
+
+ return code
+
+# The SimObject class is the root of the special hierarchy. Most of
+# the code in this class deals with the configuration hierarchy itself
+# (parent/child node relationships).
+class SimObject(object):
+ # Specify metaclass. Any class inheriting from SimObject will
+ # get this metaclass.
+ __metaclass__ = MetaSimObject
+ type = 'SimObject'
+
+ name = Param.String("Object name")
+
+ # Initialize new instance. For objects with SimObject-valued
+ # children, we need to recursively clone the classes represented
+ # by those param values as well in a consistent "deep copy"-style
+ # fashion. That is, we want to make sure that each instance is
+ # cloned only once, and that if there are multiple references to
+ # the same original object, we end up with the corresponding
+ # cloned references all pointing to the same cloned instance.
+ def __init__(self, **kwargs):
+ ancestor = kwargs.get('_ancestor')
+ memo_dict = kwargs.get('_memo')
+ if memo_dict is None:
+ # prepare to memoize any recursively instantiated objects
+ memo_dict = {}
+ elif ancestor:
+ # memoize me now to avoid problems with recursive calls
+ memo_dict[ancestor] = self
+
+ if not ancestor:
+ ancestor = self.__class__
+ ancestor._instantiated = True
+
+ # initialize required attributes
+ self._parent = None
+ self._children = {}
+ self._ccObject = None # pointer to C++ object
+ self._instantiated = False # really "cloned"
+
+ # Inherit parameter values from class using multidict so
+ # individual value settings can be overridden.
+ self._values = multidict(ancestor._values)
+ # clone SimObject-valued parameters
+ for key,val in ancestor._values.iteritems():
+ if isSimObject(val):
+ setattr(self, key, val(_memo=memo_dict))
+ elif isSimObjectSequence(val) and len(val):
+ setattr(self, key, [ v(_memo=memo_dict) for v in val ])
+ # clone port references. no need to use a multidict here
+ # since we will be creating new references for all ports.
+ self._port_refs = {}
+ for key,val in ancestor._port_refs.iteritems():
+ self._port_refs[key] = val.clone(self, memo_dict)
+ # apply attribute assignments from keyword args, if any
+ for key,val in kwargs.iteritems():
+ setattr(self, key, val)
+
+ # "Clone" the current instance by creating another instance of
+ # this instance's class, but that inherits its parameter values
+ # and port mappings from the current instance. If we're in a
+ # "deep copy" recursive clone, check the _memo dict to see if
+ # we've already cloned this instance.
+ def __call__(self, **kwargs):
+ memo_dict = kwargs.get('_memo')
+ if memo_dict is None:
+ # no memo_dict: must be top-level clone operation.
+ # this is only allowed at the root of a hierarchy
+ if self._parent:
+ raise RuntimeError, "attempt to clone object %s " \
+ "not at the root of a tree (parent = %s)" \
+ % (self, self._parent)
+ # create a new dict and use that.
+ memo_dict = {}
+ kwargs['_memo'] = memo_dict
+ elif memo_dict.has_key(self):
+ # clone already done & memoized
+ return memo_dict[self]
+ return self.__class__(_ancestor = self, **kwargs)
+
+ def _get_port_ref(self, attr):
+ # Return reference that can be assigned to another port
+ # via __setattr__. There is only ever one reference
+ # object per port, but we create them lazily here.
+ ref = self._port_refs.get(attr)
+ if not ref:
+ ref = self._ports[attr].makeRef(self)
+ self._port_refs[attr] = ref
+ return ref
+
+ def __getattr__(self, attr):
+ if self._ports.has_key(attr):
+ return self._get_port_ref(attr)
+
+ if self._values.has_key(attr):
+ return self._values[attr]
+
+ raise AttributeError, "object '%s' has no attribute '%s'" \
+ % (self.__class__.__name__, attr)
+
+ # Set attribute (called on foo.attr = value when foo is an
+ # instance of class cls).
+ def __setattr__(self, attr, value):
+ # normal processing for private attributes
+ if attr.startswith('_'):
+ object.__setattr__(self, attr, value)
+ return
+
+ if self._ports.has_key(attr):
+ # set up port connection
+ self._get_port_ref(attr).connect(value)
+ return
+
+ if isSimObjectOrSequence(value) and self._instantiated:
+ raise RuntimeError, \
+ "cannot set SimObject parameter '%s' after\n" \
+ " instance been cloned %s" % (attr, `self`)
+
+ # must be SimObject param
+ param = self._params.get(attr)
+ if param:
+ try:
+ value = param.convert(value)
+ except Exception, e:
+ msg = "%s\nError setting param %s.%s to %s\n" % \
+ (e, self.__class__.__name__, attr, value)
+ e.args = (msg, )
+ raise
+ self._set_child(attr, value)
+ return
+
+ if isSimObjectOrSequence(value):
+ self._set_child(attr, value)
+ return
+
+ # no valid assignment... raise exception
+ raise AttributeError, "Class %s has no parameter %s" \
+ % (self.__class__.__name__, attr)
+
+
+ # this hack allows tacking a '[0]' onto parameters that may or may
+ # not be vectors, and always getting the first element (e.g. cpus)
+ def __getitem__(self, key):
+ if key == 0:
+ return self
+ raise TypeError, "Non-zero index '%s' to SimObject" % key
+
+ # clear out children with given name, even if it's a vector
+ def clear_child(self, name):
+ if not self._children.has_key(name):
+ return
+ child = self._children[name]
+ if isinstance(child, SimObjVector):
+ for i in xrange(len(child)):
+ del self._children["s%d" % (name, i)]
+ del self._children[name]
+
+ def add_child(self, name, value):
+ self._children[name] = value
+
+ def _maybe_set_parent(self, parent, name):
+ if not self._parent:
+ self._parent = parent
+ self._name = name
+ parent.add_child(name, self)
+
+ def _set_child(self, attr, value):
+ # if RHS is a SimObject, it's an implicit child assignment
+ # clear out old child with this name, if any
+ self.clear_child(attr)
+
+ if isSimObject(value):
+ value._maybe_set_parent(self, attr)
+ elif isSimObjectSequence(value):
+ value = SimObjVector(value)
+ [v._maybe_set_parent(self, "%s%d" % (attr, i))
+ for i,v in enumerate(value)]
+
+ self._values[attr] = value
+
+ def path(self):
+ if not self._parent:
+ return 'root'
+ ppath = self._parent.path()
+ if ppath == 'root':
+ return self._name
+ return ppath + "." + self._name
+
+ def __str__(self):
+ return self.path()
+
+ def ini_str(self):
+ return self.path()
+
+ def find_any(self, ptype):
+ if isinstance(self, ptype):
+ return self, True
+
+ found_obj = None
+ for child in self._children.itervalues():
+ if isinstance(child, ptype):
+ if found_obj != None and child != found_obj:
+ raise AttributeError, \
+ 'parent.any matched more than one: %s %s' % \
+ (found_obj.path(), child.path())
+ found_obj = child
+ # search param space
+ for pname,pdesc in self._params.iteritems():
+ if issubclass(pdesc.ptype, ptype):
+ match_obj = self._values[pname]
+ if found_obj != None and found_obj != match_obj:
+ raise AttributeError, \
+ 'parent.any matched more than one: %s %s' % \
+ (found_obj.path(), match_obj.path())
+ found_obj = match_obj
+ return found_obj, found_obj != None
+
+ def unproxy(self, base):
+ return self
+
+ def unproxy_all(self):
+ for param in self._params.iterkeys():
+ value = self._values.get(param)
+ if value != None and proxy.isproxy(value):
+ try:
+ value = value.unproxy(self)
+ except:
+ print "Error in unproxying param '%s' of %s" % \
+ (param, self.path())
+ raise
+ setattr(self, param, value)
+
+ # Unproxy ports in sorted order so that 'append' operations on
+ # vector ports are done in a deterministic fashion.
+ port_names = self._ports.keys()
+ port_names.sort()
+ for port_name in port_names:
+ port = self._port_refs.get(port_name)
+ if port != None:
+ port.unproxy(self)
+
+ # Unproxy children in sorted order for determinism also.
+ child_names = self._children.keys()
+ child_names.sort()
+ for child in child_names:
+ self._children[child].unproxy_all()
+
+ def print_ini(self):
+ print '[' + self.path() + ']' # .ini section header
+
+ instanceDict[self.path()] = self
+
+ if hasattr(self, 'type') and not isinstance(self, ParamContext):
+ print 'type=%s' % self.type
+
+ child_names = self._children.keys()
+ child_names.sort()
+ np_child_names = [c for c in child_names \
+ if not isinstance(self._children[c], ParamContext)]
+ if len(np_child_names):
+ print 'children=%s' % ' '.join(np_child_names)
+
+ param_names = self._params.keys()
+ param_names.sort()
+ for param in param_names:
+ value = self._values.get(param)
+ if value != None:
+ print '%s=%s' % (param, self._values[param].ini_str())
+
+ port_names = self._ports.keys()
+ port_names.sort()
+ for port_name in port_names:
+ port = self._port_refs.get(port_name, None)
+ if port != None:
+ print '%s=%s' % (port_name, port.ini_str())
+
+ print # blank line between objects
+
+ for child in child_names:
+ self._children[child].print_ini()
+
+ # Call C++ to create C++ object corresponding to this object and
+ # (recursively) all its children
+ def createCCObject(self):
+ self.getCCObject() # force creation
+ for child in self._children.itervalues():
+ child.createCCObject()
+
+ # Get C++ object corresponding to this object, calling C++ if
+ # necessary to construct it. Does *not* recursively create
+ # children.
+ def getCCObject(self):
+ if not self._ccObject:
+ self._ccObject = -1 # flag to catch cycles in recursion
+ self._ccObject = cc_main.createSimObject(self.path())
+ elif self._ccObject == -1:
+ raise RuntimeError, "%s: recursive call to getCCObject()" \
+ % self.path()
+ return self._ccObject
+
+ # Create C++ port connections corresponding to the connections in
+ # _port_refs (& recursively for all children)
+ def connectPorts(self):
+ for portRef in self._port_refs.itervalues():
+ portRef.ccConnect()
+ for child in self._children.itervalues():
+ child.connectPorts()
+
+ def startDrain(self, drain_event, recursive):
+ count = 0
+ # ParamContexts don't serialize
+ if isinstance(self, SimObject) and not isinstance(self, ParamContext):
+ count += self._ccObject.drain(drain_event)
+ if recursive:
+ for child in self._children.itervalues():
+ count += child.startDrain(drain_event, True)
+ return count
+
+ def resume(self):
+ if isinstance(self, SimObject) and not isinstance(self, ParamContext):
+ self._ccObject.resume()
+ for child in self._children.itervalues():
+ child.resume()
+
+ def changeTiming(self, mode):
+ if isinstance(self, System):
+ self._ccObject.setMemoryMode(mode)
+ for child in self._children.itervalues():
+ child.changeTiming(mode)
+
+ def takeOverFrom(self, old_cpu):
+ cpu_ptr = cc_main.convertToBaseCPUPtr(old_cpu._ccObject)
+ self._ccObject.takeOverFrom(cpu_ptr)
+
+ # generate output file for 'dot' to display as a pretty graph.
+ # this code is currently broken.
+ def outputDot(self, dot):
+ label = "{%s|" % self.path
+ if isSimObject(self.realtype):
+ label += '%s|' % self.type
+
+ if self.children:
+ # instantiate children in same order they were added for
+ # backward compatibility (else we can end up with cpu1
+ # before cpu0).
+ for c in self.children:
+ dot.add_edge(pydot.Edge(self.path,c.path, style="bold"))
+
+ simobjs = []
+ for param in self.params:
+ try:
+ if param.value is None:
+ raise AttributeError, 'Parameter with no value'
+
+ value = param.value
+ string = param.string(value)
+ except Exception, e:
+ msg = 'exception in %s:%s\n%s' % (self.name, param.name, e)
+ e.args = (msg, )
+ raise
+
+ if isSimObject(param.ptype) and string != "Null":
+ simobjs.append(string)
+ else:
+ label += '%s = %s\\n' % (param.name, string)
+
+ for so in simobjs:
+ label += "|<%s> %s" % (so, so)
+ dot.add_edge(pydot.Edge("%s:%s" % (self.path, so), so,
+ tailport="w"))
+ label += '}'
+ dot.add_node(pydot.Node(self.path,shape="Mrecord",label=label))
+
+ # recursively dump out children
+ for c in self.children:
+ c.outputDot(dot)
+
+class ParamContext(SimObject):
+ pass
+
+# Function to provide to C++ so it can look up instances based on paths
+def resolveSimObject(name):
+ obj = instanceDict[name]
+ return obj.getCCObject()
+
+# __all__ defines the list of symbols that get exported when
+# 'from config import *' is invoked. Try to keep this reasonably
+# short to avoid polluting other namespaces.
+__all__ = ['SimObject', 'ParamContext']
+
+
+# see comment on imports at end of __init__.py.
+import proxy
+import cc_main
+import m5
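
The long comment near the top of SimObject.py spells out the configuration idiom: Python classes mirror C++ SimObjects, parameters are plain attribute assignments, and MetaSimObject/__setattr__ validate them. A hedged sketch of a user script under that scheme, assuming the m5 package and the m5.objects classes added later in this change (Root, BaseCache) are importable:

    from m5.objects import Root, BaseCache

    root = Root()                        # root of the configuration hierarchy
    root.cache = BaseCache(size='64KB')  # keyword params at construction...
    root.cache.assoc = 8                 # ...or later, via __setattr__

    # Invalid assignments are caught in Python, as the comment describes:
    try:
        root.cache.blurfl = 3            # BaseCache has no 'blurfl' parameter
    except AttributeError, e:
        print 'rejected:', e

Calling m5.instantiate(root) afterwards is what emits config.ini and builds the C++ objects, as defined in __init__.py below.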
diff --git a/src/python/m5/__init__.py b/src/python/m5/__init__.py
new file mode 100644
index 000000000..5717b49b6
--- /dev/null
+++ b/src/python/m5/__init__.py
@@ -0,0 +1,212 @@
+# Copyright (c) 2005 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+# Steve Reinhardt
+
+import atexit, os, sys
+
+# import the SWIG-wrapped main C++ functions
+import cc_main
+# import a few SWIG-wrapped items (those that are likely to be used
+# directly by user scripts) completely into this module for
+# convenience
+from cc_main import simulate, SimLoopExitEvent
+
+# import the m5 compile options
+import defines
+
+# define this here so we can use it right away if necessary
+def panic(string):
+ print >>sys.stderr, 'panic:', string
+ sys.exit(1)
+
+# force scalars to one-element lists for uniformity
+def makeList(objOrList):
+ if isinstance(objOrList, list):
+ return objOrList
+ return [objOrList]
+
+# Prepend given directory to system module search path. We may not
+# need this anymore if we can structure our config library more like a
+# Python package.
+def AddToPath(path):
+ # if it's a relative path and we know what directory the current
+ # python script is in, make the path relative to that directory.
+ if not os.path.isabs(path) and sys.path[0]:
+ path = os.path.join(sys.path[0], path)
+ path = os.path.realpath(path)
+ # sys.path[0] should always refer to the current script's directory,
+ # so place the new dir right after that.
+ sys.path.insert(1, path)
+
+# make a SmartDict out of the build options for our local use
+import smartdict
+build_env = smartdict.SmartDict()
+build_env.update(defines.m5_build_env)
+
+# make a SmartDict out of the OS environment too
+env = smartdict.SmartDict()
+env.update(os.environ)
+
+# The final hook to generate .ini files. Called from the user script
+# once the config is built.
+def instantiate(root):
+ params.ticks_per_sec = float(root.clock.frequency)
+ root.unproxy_all()
+ # ugly temporary hack to get output to config.ini
+ sys.stdout = file(os.path.join(options.outdir, 'config.ini'), 'w')
+ root.print_ini()
+ sys.stdout.close() # close config.ini
+ sys.stdout = sys.__stdout__ # restore to original
+ cc_main.loadIniFile(resolveSimObject) # load config.ini into C++
+ root.createCCObject()
+ root.connectPorts()
+ cc_main.finalInit()
+ noDot = True # temporary until we fix dot
+ if not noDot:
+ dot = pydot.Dot()
+ root.outputDot(dot)
+ dot.orientation = "portrait"
+ dot.size = "8.5,11"
+ dot.ranksep="equally"
+ dot.rank="samerank"
+ dot.write("config.dot")
+ dot.write_ps("config.ps")
+
+# Export curTick to user script.
+def curTick():
+ return cc_main.cvar.curTick
+
+# register our C++ exit callback function with Python
+atexit.register(cc_main.doExitCleanup)
+
+# This loops until all objects have been fully drained.
+def doDrain(root):
+ all_drained = drain(root)
+ while (not all_drained):
+ all_drained = drain(root)
+
+# Tries to drain all objects. Draining might not be completed unless
+# all objects return that they are drained on the first call. This is
+# because as objects drain they may cause other objects to no longer
+# be drained.
+def drain(root):
+ all_drained = False
+ drain_event = cc_main.createCountedDrain()
+ unready_objects = root.startDrain(drain_event, True)
+ # If we've got some objects that can't drain immediately, then simulate
+ if unready_objects > 0:
+ drain_event.setCount(unready_objects)
+ simulate()
+ else:
+ all_drained = True
+ cc_main.cleanupCountedDrain(drain_event)
+ return all_drained
+
+def resume(root):
+ root.resume()
+
+def checkpoint(root, dir):
+ if not isinstance(root, objects.Root):
+ raise TypeError, "Object is not a root object. Checkpoint must be called on a root object."
+ doDrain(root)
+ print "Writing checkpoint"
+ cc_main.serializeAll(dir)
+ resume(root)
+
+def restoreCheckpoint(root, dir):
+ print "Restoring from checkpoint"
+ cc_main.unserializeAll(dir)
+ resume(root)
+
+def changeToAtomic(system):
+ if not isinstance(system, objects.Root) and not isinstance(system, objects.System):
+ raise TypeError, "Object is not a root or system object. " \
+ "changeToAtomic must be called on a root or system object."
+ doDrain(system)
+ print "Changing memory mode to atomic"
+ system.changeTiming(cc_main.SimObject.Atomic)
+ resume(system)
+
+def changeToTiming(system):
+ if not isinstance(system, objects.Root) and not isinstance(system, objects.System):
+ raise TypeError, "Object is not a root or system object. " \
+ "changeToTiming must be called on a root or system object."
+ doDrain(system)
+ print "Changing memory mode to timing"
+ system.changeTiming(cc_main.SimObject.Timing)
+ resume(system)
+
+def switchCpus(cpuList):
+ if not isinstance(cpuList, list):
+ raise RuntimeError, "Must pass a list to this function"
+ for i in cpuList:
+ if not isinstance(i, tuple):
+ raise RuntimeError, "List must have tuples of (oldCPU,newCPU)"
+
+ [old_cpus, new_cpus] = zip(*cpuList)
+
+ for cpu in old_cpus:
+ if not isinstance(cpu, objects.BaseCPU):
+ raise TypeError, "%s is not of type BaseCPU", cpu
+ for cpu in new_cpus:
+ if not isinstance(cpu, objects.BaseCPU):
+ raise TypeError, "%s is not of type BaseCPU", cpu
+
+ # Drain all of the individual CPUs
+ drain_event = cc_main.createCountedDrain()
+ unready_cpus = 0
+ for old_cpu in old_cpus:
+ unready_cpus += old_cpu.startDrain(drain_event, False)
+ # If we've got some objects that can't drain immediately, then simulate
+ if unready_cpus > 0:
+ drain_event.setCount(unready_cpus)
+ simulate()
+ cc_main.cleanupCountedDrain(drain_event)
+ # Now all of the CPUs are ready to be switched out
+ for old_cpu in old_cpus:
+ old_cpu._ccObject.switchOut()
+ index = 0
+ print "Switching CPUs"
+ for new_cpu in new_cpus:
+ new_cpu.takeOverFrom(old_cpus[index])
+ new_cpu._ccObject.resume()
+ index += 1
+
+# Since we have so many mutual imports in this package, we should:
+# 1. Put all intra-package imports at the *bottom* of the file, unless
+# they're absolutely needed before that (for top-level statements
+# or class attributes). Imports of "trivial" packages that don't
+# import other packages (e.g., 'smartdict') can be at the top.
+# 2. Never use 'from foo import *' on an intra-package import since
+# you can get the wrong result if foo is only partially imported
+# at the point you do that (i.e., because foo is in the middle of
+# importing *you*).
+from main import options
+import objects
+import params
+from SimObject import resolveSimObject
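
__init__.py above is the user-facing driver: instantiate() writes config.ini and builds the C++ tree, doDrain()/drain() quiesce objects, and checkpoint(), changeToAtomic()/changeToTiming() and switchCpus() wrap the drain/resume dance. A hedged sketch of a script gluing them together; build_my_config(), root.system and the cpu names are hypothetical placeholders, only the m5.* calls come from the code above:

    import m5

    root = build_my_config()          # hypothetical helper returning an objects.Root

    m5.instantiate(root)              # unproxy, write config.ini, create C++ objects
    m5.simulate()                     # run until an exit event

    m5.checkpoint(root, 'ckpt.1')     # drain, serialize into 'ckpt.1', resume
    m5.changeToTiming(root.system)    # drain, switch memory mode, resume
    m5.switchCpus([(root.system.cpu0, root.system.cpu1)])
    m5.simulate()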
diff --git a/src/python/m5/attrdict.py b/src/python/m5/attrdict.py
new file mode 100644
index 000000000..4ee7f1b8c
--- /dev/null
+++ b/src/python/m5/attrdict.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2006 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+__all__ = [ 'attrdict' ]
+
+class attrdict(dict):
+ def __getattr__(self, attr):
+ if attr in self:
+ return self.__getitem__(attr)
+ return super(attrdict, self).__getattribute__(attr)
+
+ def __setattr__(self, attr, value):
+ if attr in dir(self):
+ return super(attrdict, self).__setattr__(attr, value)
+ return self.__setitem__(attr, value)
+
+ def __delattr__(self, attr):
+ if attr in self:
+ return self.__delitem__(attr)
+ return super(attrdict, self).__delattr__(attr)
+
+if __name__ == '__main__':
+ x = attrdict()
+ x.y = 1
+ x['z'] = 2
+ print x['y'], x.y
+ print x['z'], x.z
+ print dir(x)
+ print x
+
+ print
+
+ del x['y']
+ del x.z
+ print dir(x)
+ print(x)
diff --git a/src/python/m5/convert.py b/src/python/m5/convert.py
new file mode 100644
index 000000000..580a579bc
--- /dev/null
+++ b/src/python/m5/convert.py
@@ -0,0 +1,229 @@
+# Copyright (c) 2005 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+# metric prefixes
+exa = 1.0e18
+peta = 1.0e15
+tera = 1.0e12
+giga = 1.0e9
+mega = 1.0e6
+kilo = 1.0e3
+
+milli = 1.0e-3
+micro = 1.0e-6
+nano = 1.0e-9
+pico = 1.0e-12
+femto = 1.0e-15
+atto = 1.0e-18
+
+# power of 2 prefixes
+kibi = 1024
+mebi = kibi * 1024
+gibi = mebi * 1024
+tebi = gibi * 1024
+pebi = tebi * 1024
+exbi = pebi * 1024
+
+# memory size configuration stuff
+def toFloat(value):
+ if not isinstance(value, str):
+ raise TypeError, "wrong type '%s' should be str" % type(value)
+
+ if value.endswith('Ei'):
+ return float(value[:-2]) * exbi
+ elif value.endswith('Pi'):
+ return float(value[:-2]) * pebi
+ elif value.endswith('Ti'):
+ return float(value[:-2]) * tebi
+ elif value.endswith('Gi'):
+ return float(value[:-2]) * gibi
+ elif value.endswith('Mi'):
+ return float(value[:-2]) * mebi
+ elif value.endswith('ki'):
+ return float(value[:-2]) * kibi
+ elif value.endswith('E'):
+ return float(value[:-1]) * exa
+ elif value.endswith('P'):
+ return float(value[:-1]) * peta
+ elif value.endswith('T'):
+ return float(value[:-1]) * tera
+ elif value.endswith('G'):
+ return float(value[:-1]) * giga
+ elif value.endswith('M'):
+ return float(value[:-1]) * mega
+ elif value.endswith('k'):
+ return float(value[:-1]) * kilo
+ elif value.endswith('m'):
+ return float(value[:-1]) * milli
+ elif value.endswith('u'):
+ return float(value[:-1]) * micro
+ elif value.endswith('n'):
+ return float(value[:-1]) * nano
+ elif value.endswith('p'):
+ return float(value[:-1]) * pico
+ elif value.endswith('f'):
+ return float(value[:-1]) * femto
+ else:
+ return float(value)
+
+def toInteger(value):
+ value = toFloat(value)
+ result = long(value)
+ if value != result:
+ raise ValueError, "cannot convert '%s' to integer" % value
+
+ return result
+
+_bool_dict = {
+ 'true' : True, 't' : True, 'yes' : True, 'y' : True, '1' : True,
+ 'false' : False, 'f' : False, 'no' : False, 'n' : False, '0' : False
+ }
+
+def toBool(value):
+ if not isinstance(value, str):
+ raise TypeError, "wrong type '%s' should be str" % type(value)
+
+ value = value.lower()
+ result = _bool_dict.get(value, None)
+ if result == None:
+ raise ValueError, "cannot convert '%s' to bool" % value
+ return result
+
+def toFrequency(value):
+ if not isinstance(value, str):
+ raise TypeError, "wrong type '%s' should be str" % type(value)
+
+ if value.endswith('THz'):
+ return float(value[:-3]) * tera
+ elif value.endswith('GHz'):
+ return float(value[:-3]) * giga
+ elif value.endswith('MHz'):
+ return float(value[:-3]) * mega
+ elif value.endswith('kHz'):
+ return float(value[:-3]) * kilo
+ elif value.endswith('Hz'):
+ return float(value[:-2])
+
+ raise ValueError, "cannot convert '%s' to frequency" % value
+
+def toLatency(value):
+ if not isinstance(value, str):
+ raise TypeError, "wrong type '%s' should be str" % type(value)
+
+ if value.endswith('ps'):
+ return float(value[:-2]) * pico
+ elif value.endswith('ns'):
+ return float(value[:-2]) * nano
+ elif value.endswith('us'):
+ return float(value[:-2]) * micro
+ elif value.endswith('ms'):
+ return float(value[:-2]) * milli
+ elif value.endswith('s'):
+ return float(value[:-1])
+
+ raise ValueError, "cannot convert '%s' to latency" % value
+
+def toClockPeriod(value):
+ """result is a clock period"""
+
+ if not isinstance(value, str):
+ raise TypeError, "wrong type '%s' should be str" % type(value)
+
+ try:
+ val = toFrequency(value)
+ if val != 0:
+ val = 1 / val
+ return val
+ except ValueError:
+ pass
+
+ try:
+ val = toLatency(value)
+ return val
+ except ValueError:
+ pass
+
+ raise ValueError, "cannot convert '%s' to clock period" % value
+
+
+def toNetworkBandwidth(value):
+ if not isinstance(value, str):
+ raise TypeError, "wrong type '%s' should be str" % type(value)
+
+ if value.endswith('Tbps'):
+ return float(value[:-4]) * tera
+ elif value.endswith('Gbps'):
+ return float(value[:-4]) * giga
+ elif value.endswith('Mbps'):
+ return float(value[:-4]) * mega
+ elif value.endswith('kbps'):
+ return float(value[:-4]) * kilo
+ elif value.endswith('bps'):
+ return float(value[:-3])
+
+ raise ValueError, "cannot convert '%s' to network bandwidth" % value
+
+def toMemoryBandwidth(value):
+ if not isinstance(value, str):
+ raise TypeError, "wrong type '%s' should be str" % type(value)
+
+ if value.endswith('PB/s'):
+ return float(value[:-4]) * pebi
+ elif value.endswith('TB/s'):
+ return float(value[:-4]) * tebi
+ elif value.endswith('GB/s'):
+ return float(value[:-4]) * gibi
+ elif value.endswith('MB/s'):
+ return float(value[:-4]) * mebi
+ elif value.endswith('kB/s'):
+ return float(value[:-4]) * kibi
+ elif value.endswith('B/s'):
+ return float(value[:-3])
+
+ raise ValueError, "cannot convert '%s' to memory bandwidth" % value
+
+def toMemorySize(value):
+ if not isinstance(value, str):
+ raise TypeError, "wrong type '%s' should be str" % type(value)
+
+ if value.endswith('PB'):
+ return long(value[:-2]) * pebi
+ elif value.endswith('TB'):
+ return long(value[:-2]) * tebi
+ elif value.endswith('GB'):
+ return long(value[:-2]) * gibi
+ elif value.endswith('MB'):
+ return long(value[:-2]) * mebi
+ elif value.endswith('kB'):
+ return long(value[:-2]) * kibi
+ elif value.endswith('B'):
+ return long(value[:-1])
+
+ raise ValueError, "cannot convert '%s' to memory size" % value
diff --git a/src/python/m5/main.py b/src/python/m5/main.py
new file mode 100644
index 000000000..e296453db
--- /dev/null
+++ b/src/python/m5/main.py
@@ -0,0 +1,330 @@
+# Copyright (c) 2005 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+import code, optparse, os, socket, sys
+from datetime import datetime
+from attrdict import attrdict
+
+try:
+ import info
+except ImportError:
+ info = None
+
+__all__ = [ 'options', 'arguments', 'main' ]
+
+usage="%prog [m5 options] script.py [script options]"
+version="%prog 2.0"
+brief_copyright='''
+Copyright (c) 2001-2006
+The Regents of The University of Michigan
+All Rights Reserved
+'''
+
+# there's only one option parsing done, so make it global and add some
+# helper functions to make it work well.
+parser = optparse.OptionParser(usage=usage, version=version,
+ description=brief_copyright,
+ formatter=optparse.TitledHelpFormatter())
+parser.disable_interspersed_args()
+
+# current option group
+group = None
+
+def set_group(*args, **kwargs):
+ '''set the current option group'''
+ global group
+ if not args and not kwargs:
+ group = None
+ else:
+ group = parser.add_option_group(*args, **kwargs)
+
+class splitter(object):
+ def __init__(self, split):
+ self.split = split
+ def __call__(self, option, opt_str, value, parser):
+ getattr(parser.values, option.dest).extend(value.split(self.split))
+
+def add_option(*args, **kwargs):
+ '''add an option to the current option group, or globally if no group is set'''
+
+ # If action='append' and a 'split' keyword is given, the option
+ # arguments themselves may be lists separated by the split character.
+
+ if kwargs.get('action', None) == 'append' and 'split' in kwargs:
+ split = kwargs.pop('split')
+ kwargs['default'] = []
+ kwargs['type'] = 'string'
+ kwargs['action'] = 'callback'
+ kwargs['callback'] = splitter(split)
+
+ if group:
+ return group.add_option(*args, **kwargs)
+
+ return parser.add_option(*args, **kwargs)
+
+def bool_option(name, default, help):
+ '''add a boolean option called --name and --no-name.
+ Display help depending on which is the default'''
+
+ tname = '--%s' % name
+ fname = '--no-%s' % name
+ dest = name.replace('-', '_')
+ if default:
+ thelp = optparse.SUPPRESS_HELP
+ fhelp = help
+ else:
+ thelp = help
+ fhelp = optparse.SUPPRESS_HELP
+
+ add_option(tname, action="store_true", default=default, help=thelp)
+ add_option(fname, action="store_false", dest=dest, help=fhelp)
+
+# Help options
+add_option('-A', "--authors", action="store_true", default=False,
+ help="Show author information")
+add_option('-C', "--copyright", action="store_true", default=False,
+ help="Show full copyright information")
+add_option('-R', "--readme", action="store_true", default=False,
+ help="Show the readme")
+add_option('-N', "--release-notes", action="store_true", default=False,
+ help="Show the release notes")
+
+# Options for configuring the base simulator
+add_option('-d', "--outdir", metavar="DIR", default=".",
+ help="Set the output directory to DIR [Default: %default]")
+add_option('-i', "--interactive", action="store_true", default=False,
+ help="Invoke the interactive interpreter after running the script")
+add_option("--pdb", action="store_true", default=False,
+ help="Invoke the python debugger before running the script")
+add_option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':',
+ help="Prepend PATH to the system path when invoking the script")
+add_option('-q', "--quiet", action="count", default=0,
+ help="Reduce verbosity")
+add_option('-v', "--verbose", action="count", default=0,
+ help="Increase verbosity")
+
+# Statistics options
+set_group("Statistics Options")
+add_option("--stats-file", metavar="FILE", default="m5stats.txt",
+ help="Sets the output file for statistics [Default: %default]")
+
+# Debugging options
+set_group("Debugging Options")
+add_option("--debug-break", metavar="TIME[,TIME]", action='append', split=',',
+ help="Cycle to create a breakpoint")
+
+# Tracing options
+set_group("Trace Options")
+add_option("--trace-flags", metavar="FLAG[,FLAG]", action='append', split=',',
+ help="Sets the flags for tracing")
+add_option("--trace-start", metavar="TIME", default='0s',
+ help="Start tracing at TIME (must have units)")
+add_option("--trace-file", metavar="FILE", default="cout",
+ help="Sets the output file for tracing [Default: %default]")
+add_option("--trace-circlebuf", metavar="SIZE", type="int", default=0,
+ help="If SIZE is non-zero, turn on the circular buffer with SIZE lines")
+add_option("--no-trace-circlebuf", action="store_const", const=0,
+ dest='trace_circlebuf', help=optparse.SUPPRESS_HELP)
+bool_option("trace-dumponexit", default=False,
+ help="Dump trace buffer on exit")
+add_option("--trace-ignore", metavar="EXPR", action='append', split=':',
+ help="Ignore EXPR sim objects")
+
+# Execution Trace options
+set_group("Execution Trace Options")
+bool_option("speculative", default=True,
+ help="Don't capture speculative instructions")
+bool_option("print-cycle", default=True,
+ help="Don't print cycle numbers in trace output")
+bool_option("print-symbol", default=True,
+ help="Disable PC symbols in trace output")
+bool_option("print-opclass", default=True,
+ help="Don't print op class type in trace output")
+bool_option("print-thread", default=True,
+ help="Don't print thread number in trace output")
+bool_option("print-effaddr", default=True,
+ help="Don't print effective address in trace output")
+bool_option("print-data", default=True,
+ help="Don't print result data in trace output")
+bool_option("print-iregs", default=False,
+    help="Print integer registers in trace output")
+bool_option("print-fetch-seq", default=False,
+ help="Print fetch sequence numbers in trace output")
+bool_option("print-cpseq", default=False,
+ help="Print correct path sequence numbers in trace output")
+#bool_option("print-reg-delta", default=False,
+# help="Print which registers changed to what in trace output")
+
+options = attrdict()
+arguments = []
+
+def usage(exitcode=None):
+ parser.print_help()
+ if exitcode is not None:
+ sys.exit(exitcode)
+
+def parse_args():
+ _opts,args = parser.parse_args()
+ opts = attrdict(_opts.__dict__)
+
+ # setting verbose and quiet at the same time doesn't make sense
+ if opts.verbose > 0 and opts.quiet > 0:
+ usage(2)
+
+ # store the verbosity in a single variable. 0 is default,
+ # negative numbers represent quiet and positive values indicate verbose
+ opts.verbose -= opts.quiet
+
+ del opts.quiet
+
+ options.update(opts)
+ arguments.extend(args)
+ return opts,args
+
+def main():
+ import cc_main
+
+ parse_args()
+
+ done = False
+ if options.copyright:
+ done = True
+ print info.LICENSE
+ print
+
+ if options.authors:
+ done = True
+ print 'Author information:'
+ print
+ print info.AUTHORS
+ print
+
+ if options.readme:
+ done = True
+ print 'Readme:'
+ print
+ print info.README
+ print
+
+ if options.release_notes:
+ done = True
+ print 'Release Notes:'
+ print
+ print info.RELEASE_NOTES
+ print
+
+ if done:
+ sys.exit(0)
+
+ if options.verbose >= 0:
+ print "M5 Simulator System"
+ print brief_copyright
+ print
+        print "M5 compiled %s" % cc_main.cvar.compileDate
+ print "M5 started %s" % datetime.now().ctime()
+ print "M5 executing on %s" % socket.gethostname()
+ print "command line:",
+ for argv in sys.argv:
+ print argv,
+ print
+
+ # check to make sure we can find the listed script
+ if not arguments or not os.path.isfile(arguments[0]):
+ if arguments and not os.path.isfile(arguments[0]):
+ print "Script %s not found" % arguments[0]
+ usage(2)
+
+ # tell C++ about output directory
+ cc_main.setOutputDir(options.outdir)
+
+ # update the system path with elements from the -p option
+ sys.path[0:0] = options.path
+
+ import objects
+
+ # set stats options
+ objects.Statistics.text_file = options.stats_file
+
+ # set debugging options
+ objects.Debug.break_cycles = options.debug_break
+
+ # set tracing options
+ objects.Trace.flags = options.trace_flags
+ objects.Trace.start = options.trace_start
+ objects.Trace.file = options.trace_file
+ objects.Trace.bufsize = options.trace_circlebuf
+ objects.Trace.dump_on_exit = options.trace_dumponexit
+ objects.Trace.ignore = options.trace_ignore
+
+ # set execution trace options
+ objects.ExecutionTrace.speculative = options.speculative
+ objects.ExecutionTrace.print_cycle = options.print_cycle
+ objects.ExecutionTrace.pc_symbol = options.print_symbol
+ objects.ExecutionTrace.print_opclass = options.print_opclass
+ objects.ExecutionTrace.print_thread = options.print_thread
+ objects.ExecutionTrace.print_effaddr = options.print_effaddr
+ objects.ExecutionTrace.print_data = options.print_data
+ objects.ExecutionTrace.print_iregs = options.print_iregs
+ objects.ExecutionTrace.print_fetchseq = options.print_fetch_seq
+ objects.ExecutionTrace.print_cpseq = options.print_cpseq
+ #objects.ExecutionTrace.print_reg_delta = options.print_reg_delta
+
+ sys.argv = arguments
+ sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path
+
+ scope = { '__file__' : sys.argv[0] }
+
+ # we want readline if we're doing anything interactive
+ if options.interactive or options.pdb:
+ exec("import readline", scope)
+
+ # if pdb was requested, execfile the thing under pdb, otherwise,
+ # just do the execfile normally
+ if options.pdb:
+ from pdb import Pdb
+ debugger = Pdb()
+ debugger.run('execfile("%s")' % sys.argv[0], scope)
+ else:
+ execfile(sys.argv[0], scope)
+
+ # once the script is done
+ if options.interactive:
+ interact = code.InteractiveConsole(scope)
+ interact.interact("M5 Interactive Console")
+
+if __name__ == '__main__':
+ from pprint import pprint
+
+ parse_args()
+
+ print 'opts:'
+ pprint(options, indent=4)
+ print
+
+ print 'args:'
+ pprint(arguments, indent=4)
diff --git a/src/python/m5/multidict.py b/src/python/m5/multidict.py
new file mode 100644
index 000000000..b5cd700ef
--- /dev/null
+++ b/src/python/m5/multidict.py
@@ -0,0 +1,182 @@
+# Copyright (c) 2005 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+__all__ = [ 'multidict' ]
+
+class multidict(object):
+ def __init__(self, parent = {}, **kwargs):
+ self.local = dict(**kwargs)
+ self.parent = parent
+ self.deleted = {}
+
+ def __str__(self):
+ return str(dict(self.items()))
+
+ def __repr__(self):
+ return `dict(self.items())`
+
+ def __contains__(self, key):
+ return self.local.has_key(key) or self.parent.has_key(key)
+
+ def __delitem__(self, key):
+ try:
+ del self.local[key]
+ except KeyError, e:
+ if key in self.parent:
+ self.deleted[key] = True
+ else:
+ raise KeyError, e
+
+ def __setitem__(self, key, value):
+ self.deleted.pop(key, False)
+ self.local[key] = value
+
+ def __getitem__(self, key):
+ try:
+ return self.local[key]
+ except KeyError, e:
+ if not self.deleted.get(key, False) and key in self.parent:
+ return self.parent[key]
+ else:
+ raise KeyError, e
+
+    def __len__(self):
+        # count only keys visible through this dict: shadowed parent keys
+        # are not double-counted and locally-deleted keys are skipped
+        return len(self.keys())
+
+ def next(self):
+ for key,value in self.local.items():
+ yield key,value
+
+ if self.parent:
+ for key,value in self.parent.next():
+ if key not in self.local and key not in self.deleted:
+ yield key,value
+
+ def has_key(self, key):
+ return key in self
+
+ def iteritems(self):
+ for item in self.next():
+ yield item
+
+ def items(self):
+ return [ item for item in self.next() ]
+
+ def iterkeys(self):
+ for key,value in self.next():
+ yield key
+
+ def keys(self):
+ return [ key for key,value in self.next() ]
+
+ def itervalues(self):
+ for key,value in self.next():
+ yield value
+
+ def values(self):
+ return [ value for key,value in self.next() ]
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError, e:
+ return default
+
+ def setdefault(self, key, default):
+ try:
+ return self[key]
+ except KeyError:
+ self.deleted.pop(key, False)
+ self.local[key] = default
+ return default
+
+ def _dump(self):
+ print 'multidict dump'
+ node = self
+ while isinstance(node, multidict):
+ print ' ', node.local
+ node = node.parent
+
+ def _dumpkey(self, key):
+ values = []
+ node = self
+ while isinstance(node, multidict):
+ if key in node.local:
+ values.append(node.local[key])
+ node = node.parent
+ print key, values
+
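+# A minimal usage sketch (it mirrors the self-test below): local assignments
+# shadow the parent, lookups fall through to it, and deletions in the child
+# hide a parent entry without modifying the parent itself:
+#
+#   base = multidict(color='red', size=4)
+#   override = multidict(base, color='blue')
+#   override['color']     # 'blue' -- local value shadows the parent's
+#   override['size']      # 4      -- falls through to the parent
+#   del override['size']
+#   override['size']      # now raises KeyError; base['size'] is still 4
+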
+if __name__ == '__main__':
+ test1 = multidict()
+ test2 = multidict(test1)
+ test3 = multidict(test2)
+ test4 = multidict(test3)
+
+ test1['a'] = 'test1_a'
+ test1['b'] = 'test1_b'
+ test1['c'] = 'test1_c'
+ test1['d'] = 'test1_d'
+ test1['e'] = 'test1_e'
+
+ test2['a'] = 'test2_a'
+ del test2['b']
+ test2['c'] = 'test2_c'
+ del test1['a']
+
+ test2.setdefault('f', multidict)
+
+ print 'test1>', test1.items()
+ print 'test2>', test2.items()
+ #print test1['a']
+ print test1['b']
+ print test1['c']
+ print test1['d']
+ print test1['e']
+
+ print test2['a']
+ #print test2['b']
+ print test2['c']
+ print test2['d']
+ print test2['e']
+
+ for key in test2.iterkeys():
+ print key
+
+ test2.get('g', 'foo')
+ #test2.get('b')
+ test2.get('b', 'bar')
+ test2.setdefault('b', 'blah')
+ print test1
+ print test2
+ print `test2`
+
+ print len(test2)
+
+ test3['a'] = [ 0, 1, 2, 3 ]
+
+ print test4
diff --git a/src/python/m5/objects/AlphaConsole.py b/src/python/m5/objects/AlphaConsole.py
new file mode 100644
index 000000000..1c71493b1
--- /dev/null
+++ b/src/python/m5/objects/AlphaConsole.py
@@ -0,0 +1,10 @@
+from m5.params import *
+from m5.proxy import *
+from Device import BasicPioDevice
+
+class AlphaConsole(BasicPioDevice):
+ type = 'AlphaConsole'
+ cpu = Param.BaseCPU(Parent.any, "Processor")
+ disk = Param.SimpleDisk("Simple Disk")
+ sim_console = Param.SimConsole(Parent.any, "The Simulator Console")
+ system = Param.AlphaSystem(Parent.any, "system object")
diff --git a/src/python/m5/objects/AlphaTLB.py b/src/python/m5/objects/AlphaTLB.py
new file mode 100644
index 000000000..af7c04a84
--- /dev/null
+++ b/src/python/m5/objects/AlphaTLB.py
@@ -0,0 +1,14 @@
+from m5.SimObject import SimObject
+from m5.params import *
+class AlphaTLB(SimObject):
+ type = 'AlphaTLB'
+ abstract = True
+ size = Param.Int("TLB size")
+
+class AlphaDTB(AlphaTLB):
+ type = 'AlphaDTB'
+ size = 64
+
+class AlphaITB(AlphaTLB):
+ type = 'AlphaITB'
+ size = 48
diff --git a/src/python/m5/objects/BadDevice.py b/src/python/m5/objects/BadDevice.py
new file mode 100644
index 000000000..919623887
--- /dev/null
+++ b/src/python/m5/objects/BadDevice.py
@@ -0,0 +1,6 @@
+from m5.params import *
+from Device import BasicPioDevice
+
+class BadDevice(BasicPioDevice):
+ type = 'BadDevice'
+ devicename = Param.String("Name of device to error on")
diff --git a/src/python/m5/objects/BaseCPU.py b/src/python/m5/objects/BaseCPU.py
new file mode 100644
index 000000000..05ccbca6a
--- /dev/null
+++ b/src/python/m5/objects/BaseCPU.py
@@ -0,0 +1,59 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+from m5 import build_env
+from AlphaTLB import AlphaDTB, AlphaITB
+from Bus import Bus
+
+class BaseCPU(SimObject):
+ type = 'BaseCPU'
+ abstract = True
+ mem = Param.MemObject("memory")
+
+ system = Param.System(Parent.any, "system object")
+ if build_env['FULL_SYSTEM']:
+ dtb = Param.AlphaDTB(AlphaDTB(), "Data TLB")
+ itb = Param.AlphaITB(AlphaITB(), "Instruction TLB")
+ cpu_id = Param.Int(-1, "CPU identifier")
+ else:
+ workload = VectorParam.Process("processes to run")
+
+ max_insts_all_threads = Param.Counter(0,
+ "terminate when all threads have reached this inst count")
+ max_insts_any_thread = Param.Counter(0,
+ "terminate when any thread reaches this inst count")
+ max_loads_all_threads = Param.Counter(0,
+ "terminate when all threads have reached this load count")
+ max_loads_any_thread = Param.Counter(0,
+ "terminate when any thread reaches this load count")
+ stats_reset_inst = Param.Counter(0,
+ "reset stats once this many instructions are committed")
+ progress_interval = Param.Tick(0, "interval to print out the progress message")
+
+ defer_registration = Param.Bool(False,
+ "defer registration with system (for sampling)")
+
+ clock = Param.Clock(Parent.clock, "clock speed")
+
+ _mem_ports = []
+
+ def connectMemPorts(self, bus):
+ for p in self._mem_ports:
+ exec('self.%s = bus.port' % p)
+
+ def addPrivateSplitL1Caches(self, ic, dc):
+ assert(len(self._mem_ports) == 2)
+ self.icache = ic
+ self.dcache = dc
+ self.icache_port = ic.cpu_side
+ self.dcache_port = dc.cpu_side
+ self._mem_ports = ['icache.mem_side', 'dcache.mem_side']
+# self.mem = dc
+
+ def addTwoLevelCacheHierarchy(self, ic, dc, l2c):
+ self.addPrivateSplitL1Caches(ic, dc)
+ self.toL2Bus = Bus()
+ self.connectMemPorts(self.toL2Bus)
+ self.l2cache = l2c
+ self.l2cache.cpu_side = self.toL2Bus.port
+ self._mem_ports = ['l2cache.mem_side']
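+
+# Illustrative configuration sketch (the cache objects and the system bus
+# are assumptions, configured elsewhere in a script): build the hierarchy
+# with the helpers above, then hook the resulting memory-side port to the
+# system bus:
+#
+#   cpu = DerivO3CPU()
+#   cpu.addTwoLevelCacheHierarchy(icache, dcache, l2cache)
+#   cpu.connectMemPorts(system.membus)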
diff --git a/src/python/m5/objects/BaseCache.py b/src/python/m5/objects/BaseCache.py
new file mode 100644
index 000000000..db58a177f
--- /dev/null
+++ b/src/python/m5/objects/BaseCache.py
@@ -0,0 +1,65 @@
+from m5.params import *
+from MemObject import MemObject
+
+class Prefetch(Enum): vals = ['none', 'tagged', 'stride', 'ghb']
+
+class BaseCache(MemObject):
+ type = 'BaseCache'
+ adaptive_compression = Param.Bool(False,
+ "Use an adaptive compression scheme")
+ assoc = Param.Int("associativity")
+ block_size = Param.Int("block size in bytes")
+ latency = Param.Int("Latency")
+ compressed_bus = Param.Bool(False,
+ "This cache connects to a compressed memory")
+ compression_latency = Param.Latency('0ns',
+ "Latency in cycles of compression algorithm")
+ do_copy = Param.Bool(False, "perform fast copies in the cache")
+ hash_delay = Param.Int(1, "time in cycles of hash access")
+ lifo = Param.Bool(False,
+ "whether this NIC partition should use LIFO repl. policy")
+ max_miss_count = Param.Counter(0,
+ "number of misses to handle before calling exit")
+ mshrs = Param.Int("number of MSHRs (max outstanding requests)")
+ prioritizeRequests = Param.Bool(False,
+ "always service demand misses first")
+ protocol = Param.CoherenceProtocol(NULL, "coherence protocol to use")
+ repl = Param.Repl(NULL, "replacement policy")
+ size = Param.MemorySize("capacity in bytes")
+ split = Param.Bool(False, "whether or not this cache is split")
+ split_size = Param.Int(0,
+ "How many ways of the cache belong to CPU/LRU partition")
+ store_compressed = Param.Bool(False,
+ "Store compressed data in the cache")
+ subblock_size = Param.Int(0,
+ "Size of subblock in IIC used for compression")
+ tgts_per_mshr = Param.Int("max number of accesses per MSHR")
+ trace_addr = Param.Addr(0, "address to trace")
+ two_queue = Param.Bool(False,
+ "whether the lifo should have two queue replacement")
+ write_buffers = Param.Int(8, "number of write buffers")
+    prefetch_miss = Param.Bool(False,
+        "whether you are using the hardware prefetcher from Miss stream")
+    prefetch_access = Param.Bool(False,
+        "whether you are using the hardware prefetcher from Access stream")
+    prefetcher_size = Param.Int(100,
+        "Number of entries in the hardware prefetch queue")
+ prefetch_past_page = Param.Bool(False,
+ "Allow prefetches to cross virtual page boundaries")
+ prefetch_serial_squash = Param.Bool(False,
+ "Squash prefetches with a later time on a subsequent miss")
+ prefetch_degree = Param.Int(1,
+ "Degree of the prefetch depth")
+ prefetch_latency = Param.Tick(10,
+ "Latency of the prefetcher")
+ prefetch_policy = Param.Prefetch('none',
+ "Type of prefetcher to use")
+    prefetch_cache_check_push = Param.Bool(True,
+        "Check if in cache on push or pop of prefetch queue")
+    prefetch_use_cpu_id = Param.Bool(True,
+        "Use the CPU ID to separate calculations of prefetches")
+ prefetch_data_accesses_only = Param.Bool(False,
+ "Only prefetch on data not on instruction accesses")
+ hit_latency = Param.Int(1,"Hit Latency of the cache")
+ cpu_side = Port("Port on side closer to CPU")
+ mem_side = Port("Port on side closer to MEM")
diff --git a/src/python/m5/objects/Bridge.py b/src/python/m5/objects/Bridge.py
new file mode 100644
index 000000000..ee8e76bff
--- /dev/null
+++ b/src/python/m5/objects/Bridge.py
@@ -0,0 +1,11 @@
+from m5.params import *
+from MemObject import MemObject
+
+class Bridge(MemObject):
+ type = 'Bridge'
+ side_a = Port('Side A port')
+ side_b = Port('Side B port')
+ queue_size_a = Param.Int(16, "The number of requests to buffer")
+ queue_size_b = Param.Int(16, "The number of requests to buffer")
+ delay = Param.Latency('0ns', "The latency of this bridge")
+ write_ack = Param.Bool(False, "Should this bridge ack writes")
diff --git a/src/python/m5/objects/Bus.py b/src/python/m5/objects/Bus.py
new file mode 100644
index 000000000..f6828a0d5
--- /dev/null
+++ b/src/python/m5/objects/Bus.py
@@ -0,0 +1,8 @@
+from m5.params import *
+from MemObject import MemObject
+
+class Bus(MemObject):
+ type = 'Bus'
+ port = VectorPort("vector port for connecting devices")
+    default = Port("Default port for requests that aren't handled by a device.")
+    bus_id = Param.Int(0, "bus identifier")
diff --git a/src/python/m5/objects/CoherenceProtocol.py b/src/python/m5/objects/CoherenceProtocol.py
new file mode 100644
index 000000000..82adb6862
--- /dev/null
+++ b/src/python/m5/objects/CoherenceProtocol.py
@@ -0,0 +1,8 @@
+from m5.SimObject import SimObject
+from m5.params import *
+class Coherence(Enum): vals = ['uni', 'msi', 'mesi', 'mosi', 'moesi']
+
+class CoherenceProtocol(SimObject):
+ type = 'CoherenceProtocol'
+ do_upgrades = Param.Bool(True, "use upgrade transactions?")
+ protocol = Param.Coherence("name of coherence protocol")
diff --git a/src/python/m5/objects/Device.py b/src/python/m5/objects/Device.py
new file mode 100644
index 000000000..4672d1065
--- /dev/null
+++ b/src/python/m5/objects/Device.py
@@ -0,0 +1,21 @@
+from m5.params import *
+from m5.proxy import *
+from MemObject import MemObject
+
+class PioDevice(MemObject):
+ type = 'PioDevice'
+ abstract = True
+ pio = Port("Programmed I/O port")
+ platform = Param.Platform(Parent.any, "Platform this device is part of")
+ system = Param.System(Parent.any, "System this device is part of")
+
+class BasicPioDevice(PioDevice):
+ type = 'BasicPioDevice'
+ abstract = True
+ pio_addr = Param.Addr("Device Address")
+ pio_latency = Param.Latency('1ns', "Programmed IO latency in simticks")
+
+class DmaDevice(PioDevice):
+ type = 'DmaDevice'
+ abstract = True
+ dma = Port(Self.pio.peerObj.port, "DMA port")
diff --git a/src/python/m5/objects/DiskImage.py b/src/python/m5/objects/DiskImage.py
new file mode 100644
index 000000000..d0ada7ee1
--- /dev/null
+++ b/src/python/m5/objects/DiskImage.py
@@ -0,0 +1,16 @@
+from m5.SimObject import SimObject
+from m5.params import *
+class DiskImage(SimObject):
+ type = 'DiskImage'
+ abstract = True
+ image_file = Param.String("disk image file")
+ read_only = Param.Bool(False, "read only image")
+
+class RawDiskImage(DiskImage):
+ type = 'RawDiskImage'
+
+class CowDiskImage(DiskImage):
+ type = 'CowDiskImage'
+ child = Param.DiskImage(RawDiskImage(read_only=True),
+ "child image")
+ table_size = Param.Int(65536, "initial table size")
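+
+# Typical use (the image filename is a made-up example): layer a
+# copy-on-write image over a read-only base so that simulated writes never
+# modify the underlying file:
+#
+#   disk = CowDiskImage(child=RawDiskImage(image_file='linux.img',
+#                                          read_only=True),
+#                       read_only=False)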
diff --git a/src/python/m5/objects/Ethernet.py b/src/python/m5/objects/Ethernet.py
new file mode 100644
index 000000000..f17a6c888
--- /dev/null
+++ b/src/python/m5/objects/Ethernet.py
@@ -0,0 +1,193 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+from m5 import build_env
+from Device import DmaDevice
+from Pci import PciDevice, PciConfigData
+
+class EtherInt(SimObject):
+ type = 'EtherInt'
+ abstract = True
+ peer = Param.EtherInt(NULL, "peer interface")
+
+class EtherLink(SimObject):
+ type = 'EtherLink'
+ int1 = Param.EtherInt("interface 1")
+ int2 = Param.EtherInt("interface 2")
+ delay = Param.Latency('0us', "packet transmit delay")
+ delay_var = Param.Latency('0ns', "packet transmit delay variability")
+ speed = Param.NetworkBandwidth('1Gbps', "link speed")
+ dump = Param.EtherDump(NULL, "dump object")
+
+class EtherBus(SimObject):
+ type = 'EtherBus'
+ loopback = Param.Bool(True, "send packet back to the sending interface")
+ dump = Param.EtherDump(NULL, "dump object")
+ speed = Param.NetworkBandwidth('100Mbps', "bus speed in bits per second")
+
+class EtherTap(EtherInt):
+ type = 'EtherTap'
+ bufsz = Param.Int(10000, "tap buffer size")
+ dump = Param.EtherDump(NULL, "dump object")
+ port = Param.UInt16(3500, "tap port")
+
+class EtherDump(SimObject):
+ type = 'EtherDump'
+ file = Param.String("dump file")
+ maxlen = Param.Int(96, "max portion of packet data to dump")
+
+if build_env['ALPHA_TLASER']:
+
+ class EtherDev(DmaDevice):
+ type = 'EtherDev'
+ hardware_address = Param.EthernetAddr(NextEthernetAddr,
+ "Ethernet Hardware Address")
+
+ dma_data_free = Param.Bool(False, "DMA of Data is free")
+ dma_desc_free = Param.Bool(False, "DMA of Descriptors is free")
+ dma_read_delay = Param.Latency('0us', "fixed delay for dma reads")
+ dma_read_factor = Param.Latency('0us', "multiplier for dma reads")
+ dma_write_delay = Param.Latency('0us', "fixed delay for dma writes")
+ dma_write_factor = Param.Latency('0us', "multiplier for dma writes")
+ dma_no_allocate = Param.Bool(True, "Should we allocate cache on read")
+
+ rx_filter = Param.Bool(True, "Enable Receive Filter")
+ rx_delay = Param.Latency('1us', "Receive Delay")
+ tx_delay = Param.Latency('1us', "Transmit Delay")
+
+ intr_delay = Param.Latency('0us', "Interrupt Delay")
+ payload_bus = Param.Bus(NULL, "The IO Bus to attach to for payload")
+ physmem = Param.PhysicalMemory(Parent.any, "Physical Memory")
+ tlaser = Param.Turbolaser(Parent.any, "Turbolaser")
+
+ class EtherDevInt(EtherInt):
+ type = 'EtherDevInt'
+ device = Param.EtherDev("Ethernet device of this interface")
+
+
+class IGbE(PciDevice):
+ type = 'IGbE'
+ hardware_address = Param.EthernetAddr(NextEthernetAddr, "Ethernet Hardware Address")
+
+class IGbEPciData(PciConfigData):
+ VendorID = 0x8086
+ DeviceID = 0x1026
+ SubsystemID = 0x1008
+ SubsystemVendorID = 0x8086
+ Status = 0x0000
+ SubClassCode = 0x00
+ ClassCode = 0x02
+ ProgIF = 0x00
+ BAR0 = 0x00000000
+ BAR1 = 0x00000000
+ BAR2 = 0x00000000
+ BAR3 = 0x00000000
+ BAR4 = 0x00000000
+ BAR5 = 0x00000000
+ MaximumLatency = 0x00
+ MinimumGrant = 0xff
+ InterruptLine = 0x1e
+ InterruptPin = 0x01
+ BAR0Size = '128kB'
+
+class IGbEInt(EtherInt):
+ type = 'IGbEInt'
+ device = Param.IGbE("Ethernet device of this interface")
+
+
+
+class EtherDevBase(PciDevice):
+ hardware_address = Param.EthernetAddr(NextEthernetAddr,
+ "Ethernet Hardware Address")
+
+ clock = Param.Clock('0ns', "State machine processor frequency")
+
+ dma_read_delay = Param.Latency('0us', "fixed delay for dma reads")
+ dma_read_factor = Param.Latency('0us', "multiplier for dma reads")
+ dma_write_delay = Param.Latency('0us', "fixed delay for dma writes")
+ dma_write_factor = Param.Latency('0us', "multiplier for dma writes")
+
+ rx_delay = Param.Latency('1us', "Receive Delay")
+ tx_delay = Param.Latency('1us', "Transmit Delay")
+ rx_fifo_size = Param.MemorySize('512kB', "max size of rx fifo")
+ tx_fifo_size = Param.MemorySize('512kB', "max size of tx fifo")
+
+ rx_filter = Param.Bool(True, "Enable Receive Filter")
+ intr_delay = Param.Latency('10us', "Interrupt propagation delay")
+    rx_thread = Param.Bool(False, "dedicated kernel thread for receive")
+    tx_thread = Param.Bool(False, "dedicated kernel thread for transmit")
+ rss = Param.Bool(False, "Receive Side Scaling")
+
+class NSGigEPciData(PciConfigData):
+ VendorID = 0x100B
+ DeviceID = 0x0022
+ Status = 0x0290
+ SubClassCode = 0x00
+ ClassCode = 0x02
+ ProgIF = 0x00
+ BAR0 = 0x00000001
+ BAR1 = 0x00000000
+ BAR2 = 0x00000000
+ BAR3 = 0x00000000
+ BAR4 = 0x00000000
+ BAR5 = 0x00000000
+ MaximumLatency = 0x34
+ MinimumGrant = 0xb0
+ InterruptLine = 0x1e
+ InterruptPin = 0x01
+ BAR0Size = '256B'
+ BAR1Size = '4kB'
+
+class NSGigE(EtherDevBase):
+ type = 'NSGigE'
+
+ dma_data_free = Param.Bool(False, "DMA of Data is free")
+ dma_desc_free = Param.Bool(False, "DMA of Descriptors is free")
+ dma_no_allocate = Param.Bool(True, "Should we allocate cache on read")
+
+ configdata = NSGigEPciData()
+
+
+class NSGigEInt(EtherInt):
+ type = 'NSGigEInt'
+ device = Param.NSGigE("Ethernet device of this interface")
+
+class SinicPciData(PciConfigData):
+ VendorID = 0x1291
+ DeviceID = 0x1293
+ Status = 0x0290
+ SubClassCode = 0x00
+ ClassCode = 0x02
+ ProgIF = 0x00
+ BAR0 = 0x00000000
+ BAR1 = 0x00000000
+ BAR2 = 0x00000000
+ BAR3 = 0x00000000
+ BAR4 = 0x00000000
+ BAR5 = 0x00000000
+ MaximumLatency = 0x34
+ MinimumGrant = 0xb0
+ InterruptLine = 0x1e
+ InterruptPin = 0x01
+ BAR0Size = '64kB'
+
+class Sinic(EtherDevBase):
+ type = 'Sinic'
+
+ rx_max_copy = Param.MemorySize('1514B', "rx max copy")
+ tx_max_copy = Param.MemorySize('16kB', "tx max copy")
+ rx_max_intr = Param.UInt32(10, "max rx packets per interrupt")
+ rx_fifo_threshold = Param.MemorySize('384kB', "rx fifo high threshold")
+ rx_fifo_low_mark = Param.MemorySize('128kB', "rx fifo low threshold")
+ tx_fifo_high_mark = Param.MemorySize('384kB', "tx fifo high threshold")
+ tx_fifo_threshold = Param.MemorySize('128kB', "tx fifo low threshold")
+ virtual_count = Param.UInt32(1, "Virtualized SINIC")
+ zero_copy = Param.Bool(False, "Zero copy receive")
+ delay_copy = Param.Bool(False, "Delayed copy transmit")
+ virtual_addr = Param.Bool(False, "Virtual addressing")
+
+ configdata = SinicPciData()
+
+class SinicInt(EtherInt):
+ type = 'SinicInt'
+ device = Param.Sinic("Ethernet device of this interface")
diff --git a/src/python/m5/objects/FUPool.py b/src/python/m5/objects/FUPool.py
new file mode 100644
index 000000000..4b4be79a6
--- /dev/null
+++ b/src/python/m5/objects/FUPool.py
@@ -0,0 +1,6 @@
+from m5.SimObject import SimObject
+from m5.params import *
+
+class FUPool(SimObject):
+ type = 'FUPool'
+ FUList = VectorParam.FUDesc("list of FU's for this pool")
diff --git a/src/python/m5/objects/FuncUnit.py b/src/python/m5/objects/FuncUnit.py
new file mode 100644
index 000000000..f0ad55f7a
--- /dev/null
+++ b/src/python/m5/objects/FuncUnit.py
@@ -0,0 +1,18 @@
+from m5.SimObject import SimObject
+from m5.params import *
+
+class OpType(Enum):
+ vals = ['(null)', 'IntAlu', 'IntMult', 'IntDiv', 'FloatAdd',
+ 'FloatCmp', 'FloatCvt', 'FloatMult', 'FloatDiv', 'FloatSqrt',
+ 'MemRead', 'MemWrite', 'IprAccess', 'InstPrefetch']
+
+class OpDesc(SimObject):
+ type = 'OpDesc'
+ issueLat = Param.Int(1, "cycles until another can be issued")
+ opClass = Param.OpType("type of operation")
+ opLat = Param.Int(1, "cycles until result is available")
+
+class FUDesc(SimObject):
+ type = 'FUDesc'
+ count = Param.Int("number of these FU's available")
+ opList = VectorParam.OpDesc("operation classes for this FU type")
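+
+# Sketch of how these descriptors compose (illustrative values): an FUDesc
+# lists the operation classes one kind of functional unit handles, and an
+# FUPool (see FUPool.py) collects the descriptors for a CPU core:
+#
+#   class IntALU(FUDesc):
+#       opList = [ OpDesc(opClass='IntAlu') ]
+#       count = 4
+#
+#   pool = FUPool(FUList=[ IntALU() ])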
diff --git a/src/python/m5/objects/Ide.py b/src/python/m5/objects/Ide.py
new file mode 100644
index 000000000..ef7e28785
--- /dev/null
+++ b/src/python/m5/objects/Ide.py
@@ -0,0 +1,40 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from Pci import PciDevice, PciConfigData
+
+class IdeID(Enum): vals = ['master', 'slave']
+
+class IdeControllerPciData(PciConfigData):
+ VendorID = 0x8086
+ DeviceID = 0x7111
+ Command = 0x0
+ Status = 0x280
+ Revision = 0x0
+ ClassCode = 0x01
+ SubClassCode = 0x01
+ ProgIF = 0x85
+ BAR0 = 0x00000001
+ BAR1 = 0x00000001
+ BAR2 = 0x00000001
+ BAR3 = 0x00000001
+ BAR4 = 0x00000001
+ BAR5 = 0x00000001
+ InterruptLine = 0x1f
+ InterruptPin = 0x01
+ BAR0Size = '8B'
+ BAR1Size = '4B'
+ BAR2Size = '8B'
+ BAR3Size = '4B'
+ BAR4Size = '16B'
+
+class IdeDisk(SimObject):
+ type = 'IdeDisk'
+ delay = Param.Latency('1us', "Fixed disk delay in microseconds")
+ driveID = Param.IdeID('master', "Drive ID")
+ image = Param.DiskImage("Disk image")
+
+class IdeController(PciDevice):
+ type = 'IdeController'
+ disks = VectorParam.IdeDisk("IDE disks attached to this controller")
+
+    configdata = IdeControllerPciData()
diff --git a/src/python/m5/objects/IntrControl.py b/src/python/m5/objects/IntrControl.py
new file mode 100644
index 000000000..95be0f4df
--- /dev/null
+++ b/src/python/m5/objects/IntrControl.py
@@ -0,0 +1,6 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+class IntrControl(SimObject):
+ type = 'IntrControl'
+ cpu = Param.BaseCPU(Parent.any, "the cpu")
diff --git a/src/python/m5/objects/MemObject.py b/src/python/m5/objects/MemObject.py
new file mode 100644
index 000000000..8982d553d
--- /dev/null
+++ b/src/python/m5/objects/MemObject.py
@@ -0,0 +1,6 @@
+from m5.SimObject import SimObject
+
+class MemObject(SimObject):
+ type = 'MemObject'
+ abstract = True
diff --git a/src/python/m5/objects/MemTest.py b/src/python/m5/objects/MemTest.py
new file mode 100644
index 000000000..97600768f
--- /dev/null
+++ b/src/python/m5/objects/MemTest.py
@@ -0,0 +1,20 @@
+from m5.SimObject import SimObject
+from m5.params import *
+class MemTest(SimObject):
+ type = 'MemTest'
+ cache = Param.BaseCache("L1 cache")
+ check_mem = Param.FunctionalMemory("check memory")
+ main_mem = Param.FunctionalMemory("hierarchical memory")
+ max_loads = Param.Counter("number of loads to execute")
+ memory_size = Param.Int(65536, "memory size")
+ percent_copies = Param.Percent(0, "target copy percentage")
+ percent_dest_unaligned = Param.Percent(50,
+ "percent of copy dest address that are unaligned")
+ percent_reads = Param.Percent(65, "target read percentage")
+ percent_source_unaligned = Param.Percent(50,
+ "percent of copy source address that are unaligned")
+ percent_uncacheable = Param.Percent(10,
+ "target uncacheable percentage")
+ progress_interval = Param.Counter(1000000,
+ "progress report interval (in accesses)")
+ trace_addr = Param.Addr(0, "address to trace")
diff --git a/src/python/m5/objects/O3CPU.py b/src/python/m5/objects/O3CPU.py
new file mode 100644
index 000000000..59b40c6e8
--- /dev/null
+++ b/src/python/m5/objects/O3CPU.py
@@ -0,0 +1,115 @@
+from m5.params import *
+from m5.proxy import *
+from m5 import build_env
+from BaseCPU import BaseCPU
+from Checker import O3Checker
+
+class DerivO3CPU(BaseCPU):
+ type = 'DerivO3CPU'
+ activity = Param.Unsigned(0, "Initial count")
+ numThreads = Param.Unsigned(1, "number of HW thread contexts")
+
+ if build_env['FULL_SYSTEM']:
+ profile = Param.Latency('0ns', "trace the kernel stack")
+ if build_env['USE_CHECKER']:
+ if not build_env['FULL_SYSTEM']:
+ checker = Param.BaseCPU(O3Checker(workload=Parent.workload,
+ exitOnError=True,
+ warnOnlyOnLoadError=False),
+ "checker")
+ else:
+ checker = Param.BaseCPU(O3Checker(exitOnError=True, warnOnlyOnLoadError=False), "checker")
+ checker.itb = Parent.itb
+ checker.dtb = Parent.dtb
+
+ cachePorts = Param.Unsigned("Cache Ports")
+ icache_port = Port("Instruction Port")
+ dcache_port = Port("Data Port")
+ _mem_ports = ['icache_port', 'dcache_port']
+
+ decodeToFetchDelay = Param.Unsigned(1, "Decode to fetch delay")
+ renameToFetchDelay = Param.Unsigned(1 ,"Rename to fetch delay")
+ iewToFetchDelay = Param.Unsigned(1, "Issue/Execute/Writeback to fetch "
+ "delay")
+ commitToFetchDelay = Param.Unsigned(1, "Commit to fetch delay")
+ fetchWidth = Param.Unsigned(8, "Fetch width")
+
+ renameToDecodeDelay = Param.Unsigned(1, "Rename to decode delay")
+ iewToDecodeDelay = Param.Unsigned(1, "Issue/Execute/Writeback to decode "
+ "delay")
+ commitToDecodeDelay = Param.Unsigned(1, "Commit to decode delay")
+ fetchToDecodeDelay = Param.Unsigned(1, "Fetch to decode delay")
+ decodeWidth = Param.Unsigned(8, "Decode width")
+
+ iewToRenameDelay = Param.Unsigned(1, "Issue/Execute/Writeback to rename "
+ "delay")
+ commitToRenameDelay = Param.Unsigned(1, "Commit to rename delay")
+ decodeToRenameDelay = Param.Unsigned(1, "Decode to rename delay")
+ renameWidth = Param.Unsigned(8, "Rename width")
+
+ commitToIEWDelay = Param.Unsigned(1, "Commit to "
+ "Issue/Execute/Writeback delay")
+ renameToIEWDelay = Param.Unsigned(2, "Rename to "
+ "Issue/Execute/Writeback delay")
+ issueToExecuteDelay = Param.Unsigned(1, "Issue to execute delay (internal "
+ "to the IEW stage)")
+ dispatchWidth = Param.Unsigned(8, "Dispatch width")
+ issueWidth = Param.Unsigned(8, "Issue width")
+ wbWidth = Param.Unsigned(8, "Writeback width")
+ wbDepth = Param.Unsigned(1, "Writeback depth")
+ fuPool = Param.FUPool("Functional Unit pool")
+
+ iewToCommitDelay = Param.Unsigned(1, "Issue/Execute/Writeback to commit "
+ "delay")
+ renameToROBDelay = Param.Unsigned(1, "Rename to reorder buffer delay")
+ commitWidth = Param.Unsigned(8, "Commit width")
+ squashWidth = Param.Unsigned(8, "Squash width")
+ trapLatency = Param.Tick(13, "Trap latency")
+ fetchTrapLatency = Param.Tick(1, "Fetch trap latency")
+
+ backComSize = Param.Unsigned(5, "Time buffer size for backwards communication")
+ forwardComSize = Param.Unsigned(5, "Time buffer size for forward communication")
+
+ predType = Param.String("tournament", "Branch predictor type ('local', 'tournament')")
+ localPredictorSize = Param.Unsigned(2048, "Size of local predictor")
+ localCtrBits = Param.Unsigned(2, "Bits per counter")
+ localHistoryTableSize = Param.Unsigned(2048, "Size of local history table")
+ localHistoryBits = Param.Unsigned(11, "Bits for the local history")
+ globalPredictorSize = Param.Unsigned(8192, "Size of global predictor")
+ globalCtrBits = Param.Unsigned(2, "Bits per counter")
+ globalHistoryBits = Param.Unsigned(4096, "Bits of history")
+ choicePredictorSize = Param.Unsigned(8192, "Size of choice predictor")
+ choiceCtrBits = Param.Unsigned(2, "Bits of choice counters")
+
+ BTBEntries = Param.Unsigned(4096, "Number of BTB entries")
+ BTBTagSize = Param.Unsigned(16, "Size of the BTB tags, in bits")
+
+ RASSize = Param.Unsigned(16, "RAS size")
+
+ LQEntries = Param.Unsigned(32, "Number of load queue entries")
+ SQEntries = Param.Unsigned(32, "Number of store queue entries")
+ LFSTSize = Param.Unsigned(1024, "Last fetched store table size")
+ SSITSize = Param.Unsigned(1024, "Store set ID table size")
+
+    numRobs = Param.Unsigned(1, "Number of Reorder Buffers")
+
+ numPhysIntRegs = Param.Unsigned(256, "Number of physical integer registers")
+ numPhysFloatRegs = Param.Unsigned(256, "Number of physical floating point "
+ "registers")
+ numIQEntries = Param.Unsigned(64, "Number of instruction queue entries")
+ numROBEntries = Param.Unsigned(192, "Number of reorder buffer entries")
+
+ instShiftAmt = Param.Unsigned(2, "Number of bits to shift instructions by")
+
+ function_trace = Param.Bool(False, "Enable function trace")
+ function_trace_start = Param.Tick(0, "Cycle to start function trace")
+
+ smtNumFetchingThreads = Param.Unsigned("SMT Number of Fetching Threads")
+ smtFetchPolicy = Param.String("SMT Fetch policy")
+ smtLSQPolicy = Param.String("SMT LSQ Sharing Policy")
+ smtLSQThreshold = Param.String("SMT LSQ Threshold Sharing Parameter")
+ smtIQPolicy = Param.String("SMT IQ Sharing Policy")
+ smtIQThreshold = Param.String("SMT IQ Threshold Sharing Parameter")
+ smtROBPolicy = Param.String("SMT ROB Sharing Policy")
+ smtROBThreshold = Param.String("SMT ROB Threshold Sharing Parameter")
+ smtCommitPolicy = Param.String("SMT Commit Policy")
diff --git a/src/python/m5/objects/OzoneCPU.py b/src/python/m5/objects/OzoneCPU.py
new file mode 100644
index 000000000..0913e044c
--- /dev/null
+++ b/src/python/m5/objects/OzoneCPU.py
@@ -0,0 +1,95 @@
+from m5.params import *
+from m5 import build_env
+from BaseCPU import BaseCPU
+
+class DerivOzoneCPU(BaseCPU):
+ type = 'DerivOzoneCPU'
+
+ numThreads = Param.Unsigned("number of HW thread contexts")
+
+ checker = Param.BaseCPU("Checker CPU")
+ if build_env['FULL_SYSTEM']:
+ profile = Param.Latency('0ns', "trace the kernel stack")
+
+ icache_port = Port("Instruction Port")
+ dcache_port = Port("Data Port")
+
+ width = Param.Unsigned("Width")
+ frontEndWidth = Param.Unsigned("Front end width")
+ frontEndLatency = Param.Unsigned("Front end latency")
+ backEndWidth = Param.Unsigned("Back end width")
+ backEndSquashLatency = Param.Unsigned("Back end squash latency")
+ backEndLatency = Param.Unsigned("Back end latency")
+ maxInstBufferSize = Param.Unsigned("Maximum instruction buffer size")
+ maxOutstandingMemOps = Param.Unsigned("Maximum number of outstanding memory operations")
+ decodeToFetchDelay = Param.Unsigned("Decode to fetch delay")
+ renameToFetchDelay = Param.Unsigned("Rename to fetch delay")
+ iewToFetchDelay = Param.Unsigned("Issue/Execute/Writeback to fetch "
+ "delay")
+ commitToFetchDelay = Param.Unsigned("Commit to fetch delay")
+ fetchWidth = Param.Unsigned("Fetch width")
+
+ renameToDecodeDelay = Param.Unsigned("Rename to decode delay")
+ iewToDecodeDelay = Param.Unsigned("Issue/Execute/Writeback to decode "
+ "delay")
+ commitToDecodeDelay = Param.Unsigned("Commit to decode delay")
+ fetchToDecodeDelay = Param.Unsigned("Fetch to decode delay")
+ decodeWidth = Param.Unsigned("Decode width")
+
+ iewToRenameDelay = Param.Unsigned("Issue/Execute/Writeback to rename "
+ "delay")
+ commitToRenameDelay = Param.Unsigned("Commit to rename delay")
+ decodeToRenameDelay = Param.Unsigned("Decode to rename delay")
+ renameWidth = Param.Unsigned("Rename width")
+
+ commitToIEWDelay = Param.Unsigned("Commit to "
+ "Issue/Execute/Writeback delay")
+ renameToIEWDelay = Param.Unsigned("Rename to "
+ "Issue/Execute/Writeback delay")
+ issueToExecuteDelay = Param.Unsigned("Issue to execute delay (internal "
+ "to the IEW stage)")
+ issueWidth = Param.Unsigned("Issue width")
+ executeWidth = Param.Unsigned("Execute width")
+ executeIntWidth = Param.Unsigned("Integer execute width")
+ executeFloatWidth = Param.Unsigned("Floating point execute width")
+ executeBranchWidth = Param.Unsigned("Branch execute width")
+ executeMemoryWidth = Param.Unsigned("Memory execute width")
+
+ iewToCommitDelay = Param.Unsigned("Issue/Execute/Writeback to commit "
+ "delay")
+ renameToROBDelay = Param.Unsigned("Rename to reorder buffer delay")
+ commitWidth = Param.Unsigned("Commit width")
+ squashWidth = Param.Unsigned("Squash width")
+
+ predType = Param.String("Type of branch predictor ('local', 'tournament')")
+ localPredictorSize = Param.Unsigned("Size of local predictor")
+ localCtrBits = Param.Unsigned("Bits per counter")
+ localHistoryTableSize = Param.Unsigned("Size of local history table")
+ localHistoryBits = Param.Unsigned("Bits for the local history")
+ globalPredictorSize = Param.Unsigned("Size of global predictor")
+ globalCtrBits = Param.Unsigned("Bits per counter")
+ globalHistoryBits = Param.Unsigned("Bits of history")
+ choicePredictorSize = Param.Unsigned("Size of choice predictor")
+ choiceCtrBits = Param.Unsigned("Bits of choice counters")
+
+ BTBEntries = Param.Unsigned("Number of BTB entries")
+ BTBTagSize = Param.Unsigned("Size of the BTB tags, in bits")
+
+ RASSize = Param.Unsigned("RAS size")
+
+ LQEntries = Param.Unsigned("Number of load queue entries")
+ SQEntries = Param.Unsigned("Number of store queue entries")
+ lsqLimits = Param.Bool(True, "LSQ size limits dispatch")
+ LFSTSize = Param.Unsigned("Last fetched store table size")
+ SSITSize = Param.Unsigned("Store set ID table size")
+
+ numPhysIntRegs = Param.Unsigned("Number of physical integer registers")
+ numPhysFloatRegs = Param.Unsigned("Number of physical floating point "
+ "registers")
+ numIQEntries = Param.Unsigned("Number of instruction queue entries")
+ numROBEntries = Param.Unsigned("Number of reorder buffer entries")
+
+ instShiftAmt = Param.Unsigned("Number of bits to shift instructions by")
+
+ function_trace = Param.Bool(False, "Enable function trace")
+ function_trace_start = Param.Tick(0, "Cycle to start function trace")
diff --git a/src/python/m5/objects/Pci.py b/src/python/m5/objects/Pci.py
new file mode 100644
index 000000000..55bf23534
--- /dev/null
+++ b/src/python/m5/objects/Pci.py
@@ -0,0 +1,62 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+from Device import BasicPioDevice, DmaDevice, PioDevice
+
+class PciConfigData(SimObject):
+ type = 'PciConfigData'
+ VendorID = Param.UInt16("Vendor ID")
+ DeviceID = Param.UInt16("Device ID")
+ Command = Param.UInt16(0, "Command")
+ Status = Param.UInt16(0, "Status")
+    Revision = Param.UInt8(0, "Device Revision")
+ ProgIF = Param.UInt8(0, "Programming Interface")
+ SubClassCode = Param.UInt8(0, "Sub-Class Code")
+ ClassCode = Param.UInt8(0, "Class Code")
+ CacheLineSize = Param.UInt8(0, "System Cacheline Size")
+ LatencyTimer = Param.UInt8(0, "PCI Latency Timer")
+ HeaderType = Param.UInt8(0, "PCI Header Type")
+ BIST = Param.UInt8(0, "Built In Self Test")
+
+ BAR0 = Param.UInt32(0x00, "Base Address Register 0")
+ BAR1 = Param.UInt32(0x00, "Base Address Register 1")
+ BAR2 = Param.UInt32(0x00, "Base Address Register 2")
+ BAR3 = Param.UInt32(0x00, "Base Address Register 3")
+ BAR4 = Param.UInt32(0x00, "Base Address Register 4")
+ BAR5 = Param.UInt32(0x00, "Base Address Register 5")
+ BAR0Size = Param.MemorySize32('0B', "Base Address Register 0 Size")
+ BAR1Size = Param.MemorySize32('0B', "Base Address Register 1 Size")
+ BAR2Size = Param.MemorySize32('0B', "Base Address Register 2 Size")
+ BAR3Size = Param.MemorySize32('0B', "Base Address Register 3 Size")
+ BAR4Size = Param.MemorySize32('0B', "Base Address Register 4 Size")
+ BAR5Size = Param.MemorySize32('0B', "Base Address Register 5 Size")
+
+ CardbusCIS = Param.UInt32(0x00, "Cardbus Card Information Structure")
+ SubsystemID = Param.UInt16(0x00, "Subsystem ID")
+ SubsystemVendorID = Param.UInt16(0x00, "Subsystem Vendor ID")
+ ExpansionROM = Param.UInt32(0x00, "Expansion ROM Base Address")
+ InterruptLine = Param.UInt8(0x00, "Interrupt Line")
+ InterruptPin = Param.UInt8(0x00, "Interrupt Pin")
+ MaximumLatency = Param.UInt8(0x00, "Maximum Latency")
+ MinimumGrant = Param.UInt8(0x00, "Minimum Grant")
+
+class PciConfigAll(PioDevice):
+ type = 'PciConfigAll'
+ pio_latency = Param.Tick(1, "Programmed IO latency in simticks")
+ bus = Param.UInt8(0x00, "PCI bus to act as config space for")
+ size = Param.MemorySize32('16MB', "Size of config space")
+
+
+class PciDevice(DmaDevice):
+ type = 'PciDevice'
+ abstract = True
+ config = Port(Self.pio.peerObj.port, "PCI configuration space port")
+ pci_bus = Param.Int("PCI bus")
+ pci_dev = Param.Int("PCI device number")
+ pci_func = Param.Int("PCI function code")
+ pio_latency = Param.Latency('1ns', "Programmed IO latency in simticks")
+ configdata = Param.PciConfigData(Parent.any, "PCI Config data")
+ config_latency = Param.Latency('20ns', "Config read or write latency")
+
+class PciFake(PciDevice):
+ type = 'PciFake'
diff --git a/src/python/m5/objects/PhysicalMemory.py b/src/python/m5/objects/PhysicalMemory.py
new file mode 100644
index 000000000..dd3ffd651
--- /dev/null
+++ b/src/python/m5/objects/PhysicalMemory.py
@@ -0,0 +1,28 @@
+from m5.params import *
+from m5.proxy import *
+from MemObject import *
+
+class PhysicalMemory(MemObject):
+ type = 'PhysicalMemory'
+ port = Port("the access port")
+ range = Param.AddrRange(AddrRange('128MB'), "Device Address")
+ file = Param.String('', "memory mapped file")
+ latency = Param.Latency(Parent.clock, "latency of an access")
+
+class DRAMMemory(PhysicalMemory):
+ type = 'DRAMMemory'
+ # Many of these should be observed from the configuration
+ cpu_ratio = Param.Int(5,"ratio between CPU speed and memory bus speed")
+ mem_type = Param.String("SDRAM", "Type of DRAM (DRDRAM, SDRAM)")
+ mem_actpolicy = Param.String("open", "Open/Close policy")
+ memctrladdr_type = Param.String("interleaved", "Mapping interleaved or direct")
+ bus_width = Param.Int(16, "")
+ act_lat = Param.Int(2, "RAS to CAS delay")
+ cas_lat = Param.Int(1, "CAS delay")
+ war_lat = Param.Int(2, "write after read delay")
+ pre_lat = Param.Int(2, "precharge delay")
+ dpl_lat = Param.Int(2, "data in to precharge delay")
+ trc_lat = Param.Int(6, "row cycle delay")
+ num_banks = Param.Int(4, "Number of Banks")
+ num_cpus = Param.Int(4, "Number of CPUs connected to DRAM")
+
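+# Minimal sketch (the 'system' and 'membus' names are assumptions from a
+# typical configuration script): give the memory an explicit address range
+# and connect its access port to the memory bus:
+#
+#   system.physmem = PhysicalMemory(range=AddrRange('256MB'))
+#   system.physmem.port = system.membus.port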
diff --git a/src/python/m5/objects/Platform.py b/src/python/m5/objects/Platform.py
new file mode 100644
index 000000000..ab2083eea
--- /dev/null
+++ b/src/python/m5/objects/Platform.py
@@ -0,0 +1,7 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+class Platform(SimObject):
+ type = 'Platform'
+ abstract = True
+ intrctrl = Param.IntrControl(Parent.any, "interrupt controller")
diff --git a/src/python/m5/objects/Process.py b/src/python/m5/objects/Process.py
new file mode 100644
index 000000000..771ad4101
--- /dev/null
+++ b/src/python/m5/objects/Process.py
@@ -0,0 +1,35 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+class Process(SimObject):
+ type = 'Process'
+ abstract = True
+ output = Param.String('cout', 'filename for stdout/stderr')
+ system = Param.System(Parent.any, "system process will run on")
+
+class LiveProcess(Process):
+ type = 'LiveProcess'
+ executable = Param.String('', "executable (overrides cmd[0] if set)")
+ cmd = VectorParam.String("command line (executable plus arguments)")
+ env = VectorParam.String('', "environment settings")
+ input = Param.String('cin', "filename for stdin")
+ uid = Param.Int(100, 'user id')
+ euid = Param.Int(100, 'effective user id')
+ gid = Param.Int(100, 'group id')
+ egid = Param.Int(100, 'effective group id')
+ pid = Param.Int(100, 'process id')
+ ppid = Param.Int(99, 'parent process id')
+
+class AlphaLiveProcess(LiveProcess):
+ type = 'AlphaLiveProcess'
+
+class SparcLiveProcess(LiveProcess):
+ type = 'SparcLiveProcess'
+
+class MipsLiveProcess(LiveProcess):
+ type = 'MipsLiveProcess'
+
+class EioProcess(Process):
+ type = 'EioProcess'
+ chkpt = Param.String('', "EIO checkpoint file name (optional)")
+ file = Param.String("EIO trace file name")
diff --git a/src/python/m5/objects/Repl.py b/src/python/m5/objects/Repl.py
new file mode 100644
index 000000000..10892cf6f
--- /dev/null
+++ b/src/python/m5/objects/Repl.py
@@ -0,0 +1,11 @@
+from m5.SimObject import SimObject
+from m5.params import *
+class Repl(SimObject):
+ type = 'Repl'
+ abstract = True
+
+class GenRepl(Repl):
+ type = 'GenRepl'
+ fresh_res = Param.Int("associativity")
+ num_pools = Param.Int("capacity in bytes")
+ pool_res = Param.Int("block size in bytes")
diff --git a/src/python/m5/objects/Root.py b/src/python/m5/objects/Root.py
new file mode 100644
index 000000000..8e8d87f6d
--- /dev/null
+++ b/src/python/m5/objects/Root.py
@@ -0,0 +1,25 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from Serialize import Serialize
+from Serialize import Statreset
+from Statistics import Statistics
+from Trace import Trace
+from ExeTrace import ExecutionTrace
+from Debug import Debug
+
+class Root(SimObject):
+ type = 'Root'
+ clock = Param.RootClock('1THz', "tick frequency")
+ max_tick = Param.Tick('0', "maximum simulation ticks (0 = infinite)")
+ progress_interval = Param.Tick('0',
+ "print a progress message every n ticks (0 = never)")
+ output_file = Param.String('cout', "file to dump simulator output to")
+ checkpoint = Param.String('', "checkpoint file to load")
+# stats = Param.Statistics(Statistics(), "statistics object")
+# trace = Param.Trace(Trace(), "trace object")
+# serialize = Param.Serialize(Serialize(), "checkpoint generation options")
+ stats = Statistics()
+ trace = Trace()
+ exetrace = ExecutionTrace()
+ serialize = Serialize()
+ debug = Debug()
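+
+# Minimal top-level sketch (hypothetical script fragment): a configuration
+# ends up with exactly one Root, which carries the global clock plus the
+# statistics/trace/serialization containers instantiated above:
+#
+#   root = Root(clock='2GHz', max_tick=1000000)
+#   root.system = system    # a System instance built earlier in the script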
diff --git a/src/python/m5/objects/SimConsole.py b/src/python/m5/objects/SimConsole.py
new file mode 100644
index 000000000..bdd7f246d
--- /dev/null
+++ b/src/python/m5/objects/SimConsole.py
@@ -0,0 +1,14 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+class ConsoleListener(SimObject):
+ type = 'ConsoleListener'
+ port = Param.TcpPort(3456, "listen port")
+
+class SimConsole(SimObject):
+ type = 'SimConsole'
+ append_name = Param.Bool(True, "append name() to filename")
+ intr_control = Param.IntrControl(Parent.any, "interrupt controller")
+ listener = Param.ConsoleListener("console listener")
+ number = Param.Int(0, "console number")
+ output = Param.String('console', "file to dump output to")
diff --git a/src/python/m5/objects/SimpleDisk.py b/src/python/m5/objects/SimpleDisk.py
new file mode 100644
index 000000000..099a77dbb
--- /dev/null
+++ b/src/python/m5/objects/SimpleDisk.py
@@ -0,0 +1,7 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+class SimpleDisk(SimObject):
+ type = 'SimpleDisk'
+ disk = Param.DiskImage("Disk Image")
+    system = Param.System(Parent.any, "System Pointer")
diff --git a/src/python/m5/objects/SimpleOzoneCPU.py b/src/python/m5/objects/SimpleOzoneCPU.py
new file mode 100644
index 000000000..193f31b0f
--- /dev/null
+++ b/src/python/m5/objects/SimpleOzoneCPU.py
@@ -0,0 +1,87 @@
+from m5.params import *
+from m5 import build_env
+from BaseCPU import BaseCPU
+
+class SimpleOzoneCPU(BaseCPU):
+ type = 'SimpleOzoneCPU'
+
+ numThreads = Param.Unsigned("number of HW thread contexts")
+
+ if not build_env['FULL_SYSTEM']:
+ mem = Param.FunctionalMemory(NULL, "memory")
+
+ width = Param.Unsigned("Width")
+ frontEndWidth = Param.Unsigned("Front end width")
+ backEndWidth = Param.Unsigned("Back end width")
+ backEndSquashLatency = Param.Unsigned("Back end squash latency")
+ backEndLatency = Param.Unsigned("Back end latency")
+ maxInstBufferSize = Param.Unsigned("Maximum instruction buffer size")
+ decodeToFetchDelay = Param.Unsigned("Decode to fetch delay")
+ renameToFetchDelay = Param.Unsigned("Rename to fetch delay")
+ iewToFetchDelay = Param.Unsigned("Issue/Execute/Writeback to fetch "
+ "delay")
+ commitToFetchDelay = Param.Unsigned("Commit to fetch delay")
+ fetchWidth = Param.Unsigned("Fetch width")
+
+ renameToDecodeDelay = Param.Unsigned("Rename to decode delay")
+ iewToDecodeDelay = Param.Unsigned("Issue/Execute/Writeback to decode "
+ "delay")
+ commitToDecodeDelay = Param.Unsigned("Commit to decode delay")
+ fetchToDecodeDelay = Param.Unsigned("Fetch to decode delay")
+ decodeWidth = Param.Unsigned("Decode width")
+
+ iewToRenameDelay = Param.Unsigned("Issue/Execute/Writeback to rename "
+ "delay")
+ commitToRenameDelay = Param.Unsigned("Commit to rename delay")
+ decodeToRenameDelay = Param.Unsigned("Decode to rename delay")
+ renameWidth = Param.Unsigned("Rename width")
+
+ commitToIEWDelay = Param.Unsigned("Commit to "
+ "Issue/Execute/Writeback delay")
+ renameToIEWDelay = Param.Unsigned("Rename to "
+ "Issue/Execute/Writeback delay")
+ issueToExecuteDelay = Param.Unsigned("Issue to execute delay (internal "
+ "to the IEW stage)")
+ issueWidth = Param.Unsigned("Issue width")
+ executeWidth = Param.Unsigned("Execute width")
+ executeIntWidth = Param.Unsigned("Integer execute width")
+ executeFloatWidth = Param.Unsigned("Floating point execute width")
+ executeBranchWidth = Param.Unsigned("Branch execute width")
+ executeMemoryWidth = Param.Unsigned("Memory execute width")
+
+ iewToCommitDelay = Param.Unsigned("Issue/Execute/Writeback to commit "
+ "delay")
+ renameToROBDelay = Param.Unsigned("Rename to reorder buffer delay")
+ commitWidth = Param.Unsigned("Commit width")
+ squashWidth = Param.Unsigned("Squash width")
+
+ localPredictorSize = Param.Unsigned("Size of local predictor")
+ localCtrBits = Param.Unsigned("Bits per counter")
+ localHistoryTableSize = Param.Unsigned("Size of local history table")
+ localHistoryBits = Param.Unsigned("Bits for the local history")
+ globalPredictorSize = Param.Unsigned("Size of global predictor")
+ globalCtrBits = Param.Unsigned("Bits per counter")
+ globalHistoryBits = Param.Unsigned("Bits of history")
+ choicePredictorSize = Param.Unsigned("Size of choice predictor")
+ choiceCtrBits = Param.Unsigned("Bits of choice counters")
+
+ BTBEntries = Param.Unsigned("Number of BTB entries")
+ BTBTagSize = Param.Unsigned("Size of the BTB tags, in bits")
+
+ RASSize = Param.Unsigned("RAS size")
+
+ LQEntries = Param.Unsigned("Number of load queue entries")
+ SQEntries = Param.Unsigned("Number of store queue entries")
+ LFSTSize = Param.Unsigned("Last fetched store table size")
+ SSITSize = Param.Unsigned("Store set ID table size")
+
+ numPhysIntRegs = Param.Unsigned("Number of physical integer registers")
+ numPhysFloatRegs = Param.Unsigned("Number of physical floating point "
+ "registers")
+ numIQEntries = Param.Unsigned("Number of instruction queue entries")
+ numROBEntries = Param.Unsigned("Number of reorder buffer entries")
+
+ instShiftAmt = Param.Unsigned("Number of bits to shift instructions by")
+
+ function_trace = Param.Bool(False, "Enable function trace")
+ function_trace_start = Param.Tick(0, "Cycle to start function trace")
diff --git a/src/python/m5/objects/System.py b/src/python/m5/objects/System.py
new file mode 100644
index 000000000..e7dd1bc60
--- /dev/null
+++ b/src/python/m5/objects/System.py
@@ -0,0 +1,26 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+from m5 import build_env
+
+class MemoryMode(Enum): vals = ['invalid', 'atomic', 'timing']
+
+class System(SimObject):
+ type = 'System'
+    physmem = Param.PhysicalMemory(Parent.any, "physical memory")
+ mem_mode = Param.MemoryMode('atomic', "The mode the memory system is in")
+ if build_env['FULL_SYSTEM']:
+ boot_cpu_frequency = Param.Frequency(Self.cpu[0].clock.frequency,
+ "boot processor frequency")
+ init_param = Param.UInt64(0, "numerical value to pass into simulator")
+ boot_osflags = Param.String("a", "boot flags to pass to the kernel")
+ kernel = Param.String("file that contains the kernel code")
+ readfile = Param.String("", "file to read startup script from")
+ symbolfile = Param.String("", "file to get the symbols from")
+
+class AlphaSystem(System):
+ type = 'AlphaSystem'
+ console = Param.String("file that contains the console code")
+ pal = Param.String("file that contains palcode")
+ system_type = Param.UInt64("Type of system we are emulating")
+ system_rev = Param.UInt64("Revision of system we are emulating")
diff --git a/src/python/m5/objects/Tsunami.py b/src/python/m5/objects/Tsunami.py
new file mode 100644
index 000000000..0b53153a0
--- /dev/null
+++ b/src/python/m5/objects/Tsunami.py
@@ -0,0 +1,95 @@
+from m5.params import *
+from m5.proxy import *
+from Device import BasicPioDevice
+from Platform import Platform
+from AlphaConsole import AlphaConsole
+from Uart import Uart8250
+from Pci import PciConfigAll
+from BadDevice import BadDevice
+
+class TsunamiCChip(BasicPioDevice):
+ type = 'TsunamiCChip'
+ tsunami = Param.Tsunami(Parent.any, "Tsunami")
+
+class IsaFake(BasicPioDevice):
+ type = 'IsaFake'
+ pio_size = Param.Addr(0x8, "Size of address range")
+
+class TsunamiIO(BasicPioDevice):
+ type = 'TsunamiIO'
+ time = Param.UInt64(1136073600,
+ "System time to use (0 for actual time, default is 1/1/06)")
+ tsunami = Param.Tsunami(Parent.any, "Tsunami")
+ frequency = Param.Frequency('1024Hz', "frequency of interrupts")
+
+class TsunamiPChip(BasicPioDevice):
+ type = 'TsunamiPChip'
+ tsunami = Param.Tsunami(Parent.any, "Tsunami")
+
+class Tsunami(Platform):
+ type = 'Tsunami'
+ system = Param.System(Parent.any, "system")
+
+ cchip = TsunamiCChip(pio_addr=0x801a0000000)
+ pchip = TsunamiPChip(pio_addr=0x80180000000)
+ pciconfig = PciConfigAll()
+ fake_sm_chip = IsaFake(pio_addr=0x801fc000370)
+
+ fake_uart1 = IsaFake(pio_addr=0x801fc0002f8)
+ fake_uart2 = IsaFake(pio_addr=0x801fc0003e8)
+ fake_uart3 = IsaFake(pio_addr=0x801fc0002e8)
+ fake_uart4 = IsaFake(pio_addr=0x801fc0003f0)
+
+ fake_ppc = IsaFake(pio_addr=0x801fc0003bc)
+
+ fake_OROM = IsaFake(pio_addr=0x800000a0000, pio_size=0x60000)
+
+ fake_pnp_addr = IsaFake(pio_addr=0x801fc000279)
+ fake_pnp_write = IsaFake(pio_addr=0x801fc000a79)
+ fake_pnp_read0 = IsaFake(pio_addr=0x801fc000203)
+ fake_pnp_read1 = IsaFake(pio_addr=0x801fc000243)
+ fake_pnp_read2 = IsaFake(pio_addr=0x801fc000283)
+ fake_pnp_read3 = IsaFake(pio_addr=0x801fc0002c3)
+ fake_pnp_read4 = IsaFake(pio_addr=0x801fc000303)
+ fake_pnp_read5 = IsaFake(pio_addr=0x801fc000343)
+ fake_pnp_read6 = IsaFake(pio_addr=0x801fc000383)
+ fake_pnp_read7 = IsaFake(pio_addr=0x801fc0003c3)
+
+ fake_ata0 = IsaFake(pio_addr=0x801fc0001f0)
+ fake_ata1 = IsaFake(pio_addr=0x801fc000170)
+
+ fb = BadDevice(pio_addr=0x801fc0003d0, devicename='FrameBuffer')
+ io = TsunamiIO(pio_addr=0x801fc000000)
+ uart = Uart8250(pio_addr=0x801fc0003f8)
+ console = AlphaConsole(pio_addr=0x80200000000, disk=Parent.simple_disk)
+
+ # Attach I/O devices to specified bus object. Can't do this
+ # earlier, since the bus object itself is typically defined at the
+ # System level.
+ def attachIO(self, bus):
+ self.cchip.pio = bus.port
+ self.pchip.pio = bus.port
+ self.pciconfig.pio = bus.default
+ self.fake_sm_chip.pio = bus.port
+ self.fake_uart1.pio = bus.port
+ self.fake_uart2.pio = bus.port
+ self.fake_uart3.pio = bus.port
+ self.fake_uart4.pio = bus.port
+ self.fake_ppc.pio = bus.port
+ self.fake_OROM.pio = bus.port
+ self.fake_pnp_addr.pio = bus.port
+ self.fake_pnp_write.pio = bus.port
+ self.fake_pnp_read0.pio = bus.port
+ self.fake_pnp_read1.pio = bus.port
+ self.fake_pnp_read2.pio = bus.port
+ self.fake_pnp_read3.pio = bus.port
+ self.fake_pnp_read4.pio = bus.port
+ self.fake_pnp_read5.pio = bus.port
+ self.fake_pnp_read6.pio = bus.port
+ self.fake_pnp_read7.pio = bus.port
+ self.fake_ata0.pio = bus.port
+ self.fake_ata1.pio = bus.port
+ self.fb.pio = bus.port
+ self.io.pio = bus.port
+ self.uart.pio = bus.port
+ self.console.pio = bus.port
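+
+ # For example, a full-system config would typically do something
+ # like the following (hypothetical object names):
+ #
+ #     tsunami = Tsunami()
+ #     tsunami.attachIO(iobus)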
diff --git a/src/python/m5/objects/Uart.py b/src/python/m5/objects/Uart.py
new file mode 100644
index 000000000..62062c6b1
--- /dev/null
+++ b/src/python/m5/objects/Uart.py
@@ -0,0 +1,17 @@
+from m5.params import *
+from m5.proxy import *
+from m5 import build_env
+from Device import BasicPioDevice
+
+class Uart(BasicPioDevice):
+ type = 'Uart'
+ abstract = True
+ sim_console = Param.SimConsole(Parent.any, "The console")
+
+class Uart8250(Uart):
+ type = 'Uart8250'
+
+if build_env['ALPHA_TLASER']:
+ class Uart8530(Uart):
+ type = 'Uart8530'
+
diff --git a/src/python/m5/params.py b/src/python/m5/params.py
new file mode 100644
index 000000000..cbbd23004
--- /dev/null
+++ b/src/python/m5/params.py
@@ -0,0 +1,968 @@
+# Copyright (c) 2004-2006 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+# Nathan Binkert
+
+#####################################################################
+#
+# Parameter description classes
+#
+# The _params dictionary in each class maps parameter names to either
+# a Param or a VectorParam object. These objects contain the
+# parameter description string, the parameter type, and the default
+# value (if any). The convert() method on these objects is used to
+# force whatever value is assigned to the parameter to the appropriate
+# type.
+#
+# Note that the default values are loaded into the class's attribute
+# space when the parameter dictionary is initialized (in
+# MetaSimObject._new_param()); after that point they aren't used.
+#
+#####################################################################
+
+import sys, inspect, copy
+import convert
+from util import *
+
+# Dummy base class to identify types that are legitimate for SimObject
+# parameters.
+class ParamValue(object):
+
+ cxx_predecls = []
+ swig_predecls = []
+
+ # default for printing to .ini file is regular string conversion.
+ # will be overridden in some cases
+ def ini_str(self):
+ return str(self)
+
+ # allows us to blithely call unproxy() on things without checking
+ # if they're really proxies or not
+ def unproxy(self, base):
+ return self
+
+# Regular parameter description.
+class ParamDesc(object):
+ def __init__(self, ptype_str, ptype, *args, **kwargs):
+ self.ptype_str = ptype_str
+ # remember ptype only if it is provided
+ if ptype != None:
+ self.ptype = ptype
+
+ if args:
+ if len(args) == 1:
+ self.desc = args[0]
+ elif len(args) == 2:
+ self.default = args[0]
+ self.desc = args[1]
+ else:
+ raise TypeError, 'too many arguments'
+
+ if kwargs.has_key('desc'):
+ assert(not hasattr(self, 'desc'))
+ self.desc = kwargs['desc']
+ del kwargs['desc']
+
+ if kwargs.has_key('default'):
+ assert(not hasattr(self, 'default'))
+ self.default = kwargs['default']
+ del kwargs['default']
+
+ if kwargs:
+ raise TypeError, 'extra unknown kwargs %s' % kwargs
+
+ if not hasattr(self, 'desc'):
+ raise TypeError, 'desc attribute missing'
+
+ def __getattr__(self, attr):
+ if attr == 'ptype':
+ try:
+ ptype = eval(self.ptype_str, objects.__dict__)
+ if not isinstance(ptype, type):
+ raise NameError
+ self.ptype = ptype
+ return ptype
+ except NameError:
+ raise TypeError, \
+ "Param qualifier '%s' is not a type" % self.ptype_str
+ raise AttributeError, "'%s' object has no attribute '%s'" % \
+ (type(self).__name__, attr)
+
+ def convert(self, value):
+ if isinstance(value, proxy.BaseProxy):
+ value.set_param_desc(self)
+ return value
+ if not hasattr(self, 'ptype') and isNullPointer(value):
+ # deferred evaluation of SimObject; continue to defer if
+ # we're just assigning a null pointer
+ return value
+ if isinstance(value, self.ptype):
+ return value
+ if isNullPointer(value) and isSimObjectClass(self.ptype):
+ return value
+ return self.ptype(value)
+
+ def cxx_predecls(self):
+ return self.ptype.cxx_predecls
+
+ def swig_predecls(self):
+ return self.ptype.swig_predecls
+
+ def cxx_decl(self):
+ return '%s %s;' % (self.ptype.cxx_type, self.name)
+
+# Vector-valued parameter description. Just like ParamDesc, except
+# that the value is a vector (list) of the specified type instead of a
+# single value.
+
+class VectorParamValue(list):
+ def ini_str(self):
+ return ' '.join([v.ini_str() for v in self])
+
+ def unproxy(self, base):
+ return [v.unproxy(base) for v in self]
+
+class SimObjVector(VectorParamValue):
+ def print_ini(self):
+ for v in self:
+ v.print_ini()
+
+class VectorParamDesc(ParamDesc):
+ # Convert assigned value to appropriate type.  If the RHS is a list
+ # or tuple, convert each element; otherwise the scalar value is
+ # passed through unchanged (see the comment below).
+ def convert(self, value):
+ if isinstance(value, (list, tuple)):
+ # list: coerce each element into new list
+ tmp_list = [ ParamDesc.convert(self, v) for v in value ]
+ if isSimObjectSequence(tmp_list):
+ return SimObjVector(tmp_list)
+ else:
+ return VectorParamValue(tmp_list)
+ else:
+ # singleton: leave it be (could coerce to a single-element
+ # list here, but for some historical reason we don't)
+ return ParamDesc.convert(self, value)
+
+ def cxx_predecls(self):
+ return ['#include <vector>'] + self.ptype.cxx_predecls
+
+ def swig_predecls(self):
+ return ['%include "std_vector.i"'] + self.ptype.swig_predecls
+
+ def cxx_decl(self):
+ return 'std::vector< %s > %s;' % (self.ptype.cxx_type, self.name)
+
+class ParamFactory(object):
+ def __init__(self, param_desc_class, ptype_str = None):
+ self.param_desc_class = param_desc_class
+ self.ptype_str = ptype_str
+
+ def __getattr__(self, attr):
+ if self.ptype_str:
+ attr = self.ptype_str + '.' + attr
+ return ParamFactory(self.param_desc_class, attr)
+
+ # E.g., Param.Int(5, "number of widgets")
+ def __call__(self, *args, **kwargs):
+ caller_frame = inspect.currentframe().f_back
+ ptype = None
+ try:
+ ptype = eval(self.ptype_str,
+ caller_frame.f_globals, caller_frame.f_locals)
+ if not isinstance(ptype, type):
+ raise TypeError, \
+ "Param qualifier is not a type: %s" % ptype
+ except NameError:
+ # if name isn't defined yet, assume it's a SimObject, and
+ # try to resolve it later
+ pass
+ return self.param_desc_class(self.ptype_str, ptype, *args, **kwargs)
+
+Param = ParamFactory(ParamDesc)
+VectorParam = ParamFactory(VectorParamDesc)
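+
+# For example, a SimObject class might declare parameters like the
+# following (hypothetical names and types):
+#
+#     width = Param.Int(4, "issue width")
+#     clock = Param.Clock('1GHz', "clock speed")
+#     caches = VectorParam.BaseCache("per-cpu caches")
+#
+# Param.Foo first tries to resolve 'Foo' in the caller's namespace; if
+# that fails, Foo is assumed to be a SimObject class and is resolved
+# later (see ParamDesc.__getattr__ above).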
+
+#####################################################################
+#
+# Parameter Types
+#
+# Though native Python types could be used to specify parameter types
+# (the 'ptype' field of the Param and VectorParam classes), it's more
+# flexible to define our own set of types. This gives us more control
+# over how Python expressions are converted to values (via the
+# __init__() constructor) and how these values are printed out (via
+# the __str__() conversion method).
+#
+#####################################################################
+
+# String-valued parameter. Just mixin the ParamValue class with the
+# built-in str class.
+class String(ParamValue,str):
+ cxx_type = 'std::string'
+ cxx_predecls = ['#include <string>']
+ swig_predecls = ['%include "std_string.i"\n' +
+ '%apply const std::string& {std::string *};']
+
+# superclass for "numeric" parameter values, to emulate math
+# operations in a type-safe way. e.g., a Latency times an int returns
+# a new Latency object.
+class NumericParamValue(ParamValue):
+ def __str__(self):
+ return str(self.value)
+
+ def __float__(self):
+ return float(self.value)
+
+ # hook for bounds checking
+ def _check(self):
+ return
+
+ def __mul__(self, other):
+ newobj = self.__class__(self)
+ newobj.value *= other
+ newobj._check()
+ return newobj
+
+ __rmul__ = __mul__
+
+ def __div__(self, other):
+ newobj = self.__class__(self)
+ newobj.value /= other
+ newobj._check()
+ return newobj
+
+ def __sub__(self, other):
+ newobj = self.__class__(self)
+ newobj.value -= other
+ newobj._check()
+ return newobj
+
+# Metaclass for bounds-checked integer parameters. See CheckedInt.
+class CheckedIntType(type):
+ def __init__(cls, name, bases, dict):
+ super(CheckedIntType, cls).__init__(name, bases, dict)
+
+ # CheckedInt is an abstract base class, so we actually don't
+ # want to do any processing on it... the rest of this code is
+ # just for classes that derive from CheckedInt.
+ if name == 'CheckedInt':
+ return
+
+ if not cls.cxx_predecls:
+ # most derived types require this, so we just do it here once
+ cls.cxx_predecls = ['#include "sim/host.hh"']
+
+ if not cls.swig_predecls:
+ # most derived types require this, so we just do it here once
+ cls.swig_predecls = ['%import "python/m5/swig/stdint.i"\n' +
+ '%import "sim/host.hh"']
+
+ if not (hasattr(cls, 'min') and hasattr(cls, 'max')):
+ if not (hasattr(cls, 'size') and hasattr(cls, 'unsigned')):
+ panic("CheckedInt subclass %s must define either\n" \
+ " 'min' and 'max' or 'size' and 'unsigned'\n" \
+ % name);
+ if cls.unsigned:
+ cls.min = 0
+ cls.max = 2 ** cls.size - 1
+ else:
+ cls.min = -(2 ** (cls.size - 1))
+ cls.max = (2 ** (cls.size - 1)) - 1
+
+# Abstract superclass for bounds-checked integer parameters. This
+# class is subclassed to generate parameter classes with specific
+# bounds. Initialization of the min and max bounds is done in the
+# metaclass CheckedIntType.__init__.
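+# For example, a subclass that declares size = 8 ends up with min = 0
+# and max = 255 when unsigned is True, or min = -128 and max = 127
+# when it is False.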
+class CheckedInt(NumericParamValue):
+ __metaclass__ = CheckedIntType
+
+ def _check(self):
+ if not self.min <= self.value <= self.max:
+ raise TypeError, 'Integer param out of bounds %d < %d < %d' % \
+ (self.min, self.value, self.max)
+
+ def __init__(self, value):
+ if isinstance(value, str):
+ self.value = convert.toInteger(value)
+ elif isinstance(value, (int, long, float)):
+ self.value = long(value)
+ self._check()
+
+class Int(CheckedInt): cxx_type = 'int'; size = 32; unsigned = False
+class Unsigned(CheckedInt): cxx_type = 'unsigned'; size = 32; unsigned = True
+
+class Int8(CheckedInt): cxx_type = 'int8_t'; size = 8; unsigned = False
+class UInt8(CheckedInt): cxx_type = 'uint8_t'; size = 8; unsigned = True
+class Int16(CheckedInt): cxx_type = 'int16_t'; size = 16; unsigned = False
+class UInt16(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
+class Int32(CheckedInt): cxx_type = 'int32_t'; size = 32; unsigned = False
+class UInt32(CheckedInt): cxx_type = 'uint32_t'; size = 32; unsigned = True
+class Int64(CheckedInt): cxx_type = 'int64_t'; size = 64; unsigned = False
+class UInt64(CheckedInt): cxx_type = 'uint64_t'; size = 64; unsigned = True
+
+class Counter(CheckedInt): cxx_type = 'Counter'; size = 64; unsigned = True
+class Tick(CheckedInt): cxx_type = 'Tick'; size = 64; unsigned = True
+class TcpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
+class UdpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
+
+class Percent(CheckedInt): cxx_type = 'int'; min = 0; max = 100
+
+class Float(ParamValue, float):
+ pass
+
+class MemorySize(CheckedInt):
+ cxx_type = 'uint64_t'
+ size = 64
+ unsigned = True
+ def __init__(self, value):
+ if isinstance(value, MemorySize):
+ self.value = value.value
+ else:
+ self.value = convert.toMemorySize(value)
+ self._check()
+
+class MemorySize32(CheckedInt):
+ cxx_type = 'uint32_t'
+ size = 32
+ unsigned = True
+ def __init__(self, value):
+ if isinstance(value, MemorySize):
+ self.value = value.value
+ else:
+ self.value = convert.toMemorySize(value)
+ self._check()
+
+class Addr(CheckedInt):
+ cxx_type = 'Addr'
+ cxx_predecls = ['#include "targetarch/isa_traits.hh"']
+ size = 64
+ unsigned = True
+ def __init__(self, value):
+ if isinstance(value, Addr):
+ self.value = value.value
+ else:
+ try:
+ self.value = convert.toMemorySize(value)
+ except TypeError:
+ self.value = long(value)
+ self._check()
+
+
+class MetaRange(type):
+ def __init__(cls, name, bases, dict):
+ super(MetaRange, cls).__init__(name, bases, dict)
+ if name == 'Range':
+ return
+ cls.cxx_type = 'Range< %s >' % cls.type.cxx_type
+ cls.cxx_predecls = \
+ ['#include "base/range.hh"'] + cls.type.cxx_predecls
+
+class Range(ParamValue):
+ __metaclass__ = MetaRange
+ type = Int # default; can be overridden in subclasses
+ def __init__(self, *args, **kwargs):
+ def handle_kwargs(self, kwargs):
+ if 'end' in kwargs:
+ self.second = self.type(kwargs.pop('end'))
+ elif 'size' in kwargs:
+ self.second = self.first + self.type(kwargs.pop('size')) - 1
+ else:
+ raise TypeError, "Either end or size must be specified"
+
+ if len(args) == 0:
+ self.first = self.type(kwargs.pop('start'))
+ handle_kwargs(self, kwargs)
+
+ elif len(args) == 1:
+ if kwargs:
+ self.first = self.type(args[0])
+ handle_kwargs(self, kwargs)
+ elif isinstance(args[0], Range):
+ self.first = self.type(args[0].first)
+ self.second = self.type(args[0].second)
+ else:
+ self.first = self.type(0)
+ self.second = self.type(args[0]) - 1
+
+ elif len(args) == 2:
+ self.first = self.type(args[0])
+ self.second = self.type(args[1])
+ else:
+ raise TypeError, "Too many arguments specified"
+
+ if kwargs:
+ raise TypeError, "too many keywords: %s" % kwargs.keys()
+
+ def __str__(self):
+ return '%s:%s' % (self.first, self.second)
+
+class AddrRange(Range):
+ type = Addr
+
+class TickRange(Range):
+ type = Tick
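+
+# For example, the following all describe the same 4KB address range
+# (hypothetical values):
+#
+#     AddrRange(0x1000, 0x1fff)
+#     AddrRange(start=0x1000, end=0x1fff)
+#     AddrRange(start=0x1000, size=0x1000)
+#
+# while AddrRange(0x2000) covers addresses 0 through 0x1fff.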
+
+# Boolean parameter type. Python doesn't let you subclass bool, since
+# it doesn't want to let you create multiple instances of True and
+# False. Thus this is a little more complicated than String.
+class Bool(ParamValue):
+ cxx_type = 'bool'
+ def __init__(self, value):
+ try:
+ self.value = convert.toBool(value)
+ except TypeError:
+ self.value = bool(value)
+
+ def __str__(self):
+ return str(self.value)
+
+ def ini_str(self):
+ if self.value:
+ return 'true'
+ return 'false'
+
+def IncEthernetAddr(addr, val = 1):
+ bytes = map(lambda x: int(x, 16), addr.split(':'))
+ bytes[5] += val
+ for i in (5, 4, 3, 2, 1):
+ val,rem = divmod(bytes[i], 256)
+ bytes[i] = rem
+ if val == 0:
+ break
+ bytes[i - 1] += val
+ assert(bytes[0] <= 255)
+ return ':'.join(map(lambda x: '%02x' % x, bytes))
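+
+# For example, IncEthernetAddr("00:90:00:00:00:ff") returns
+# "00:90:00:00:01:00", carrying the overflow into the next byte.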
+
+class NextEthernetAddr(object):
+ addr = "00:90:00:00:00:01"
+
+ def __init__(self, inc = 1):
+ self.value = NextEthernetAddr.addr
+ NextEthernetAddr.addr = IncEthernetAddr(NextEthernetAddr.addr, inc)
+
+class EthernetAddr(ParamValue):
+ cxx_type = 'Net::EthAddr'
+ cxx_predecls = ['#include "base/inet.hh"']
+ swig_predecls = ['class Net::EthAddr;']
+ def __init__(self, value):
+ if value == NextEthernetAddr:
+ self.value = value
+ return
+
+ if not isinstance(value, str):
+ raise TypeError, "expected an ethernet address and didn't get one"
+
+ bytes = value.split(':')
+ if len(bytes) != 6:
+ raise TypeError, 'invalid ethernet address %s' % value
+
+ for byte in bytes:
+ if not 0 <= int(byte, 16) <= 0xff:
+ raise TypeError, 'invalid ethernet address %s' % value
+
+ self.value = value
+
+ def unproxy(self, base):
+ if self.value == NextEthernetAddr:
+ self.addr = self.value().value
+ return self
+
+ def __str__(self):
+ if self.value == NextEthernetAddr:
+ if hasattr(self, 'addr'):
+ return self.addr
+ else:
+ return "NextEthernetAddr (unresolved)"
+ else:
+ return self.value
+
+# Enumerated types are a little more complex. The user specifies the
+# type as Enum(foo) where foo is either a list or dictionary of
+# alternatives (typically strings, but not necessarily so). (In the
+# long run, the integer value of the parameter will be the list index
+# or the corresponding dictionary value. For now, since we only check
+# that the alternative is valid and then spit it into a .ini file,
+# there's not much point in using the dictionary.)
+
+# What Enum() must do is generate a new type encapsulating the
+# provided list/dictionary so that specific values of the parameter
+# can be instances of that type. We define two hidden internal
+# classes (_ListEnum and _DictEnum) to serve as base classes, then
+# derive the new type from the appropriate base class on the fly.
+
+
+# Metaclass for Enum types
+class MetaEnum(type):
+ def __init__(cls, name, bases, init_dict):
+ if init_dict.has_key('map'):
+ if not isinstance(cls.map, dict):
+ raise TypeError, "Enum-derived class attribute 'map' " \
+ "must be of type dict"
+ # build list of value strings from map
+ cls.vals = cls.map.keys()
+ cls.vals.sort()
+ elif init_dict.has_key('vals'):
+ if not isinstance(cls.vals, list):
+ raise TypeError, "Enum-derived class attribute 'vals' " \
+ "must be of type list"
+ # build string->value map from vals sequence
+ cls.map = {}
+ for idx,val in enumerate(cls.vals):
+ cls.map[val] = idx
+ else:
+ raise TypeError, "Enum-derived class must define "\
+ "attribute 'map' or 'vals'"
+
+ cls.cxx_type = name + '::Enum'
+
+ super(MetaEnum, cls).__init__(name, bases, init_dict)
+
+ # Generate C++ class declaration for this enum type.
+ # Note that we wrap the enum in a class/struct to act as a namespace,
+ # so that the enum strings can be brief w/o worrying about collisions.
+ def cxx_decl(cls):
+ s = 'struct %s {\n enum Enum {\n ' % cls.__name__
+ s += ',\n '.join(['%s = %d' % (v,cls.map[v]) for v in cls.vals])
+ s += '\n };\n};\n'
+ return s
+
+# Base class for enum types.
+class Enum(ParamValue):
+ __metaclass__ = MetaEnum
+ vals = []
+
+ def __init__(self, value):
+ if value not in self.map:
+ raise TypeError, "Enum param got bad value '%s' (not in %s)" \
+ % (value, self.vals)
+ self.value = value
+
+ def __str__(self):
+ return self.value
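+
+# For example, a concrete enum parameter type could be defined as
+# (hypothetical name and values):
+#
+#     class Direction(Enum): vals = ['ingress', 'egress']
+#
+# after which Param.Direction('ingress', "traffic direction") accepts
+# only the two listed strings.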
+
+ticks_per_sec = None
+
+# how big does a rounding error need to be before we warn about it?
+frequency_tolerance = 0.001 # 0.1%
+
+# Convert a floating-point number of ticks to an integer, and warn if
+# rounding discards too much precision.
+def tick_check(float_ticks):
+ if float_ticks == 0:
+ return 0
+ int_ticks = int(round(float_ticks))
+ err = abs(float_ticks - int_ticks) / float_ticks
+ if err > frequency_tolerance:
+ print >> sys.stderr, "Warning: rounding error > tolerance"
+ print >> sys.stderr, " %f rounded to %d" % (float_ticks, int_ticks)
+ #raise ValueError
+ return int_ticks
+
+def getLatency(value):
+ if isinstance(value, Latency) or isinstance(value, Clock):
+ return value.value
+ elif isinstance(value, Frequency) or isinstance(value, RootClock):
+ return 1 / value.value
+ elif isinstance(value, str):
+ try:
+ return convert.toLatency(value)
+ except ValueError:
+ try:
+ return 1 / convert.toFrequency(value)
+ except ValueError:
+ pass # fall through
+ raise ValueError, "Invalid Frequency/Latency value '%s'" % value
+
+
+class Latency(NumericParamValue):
+ cxx_type = 'Tick'
+ cxx_predecls = ['#include "sim/host.hh"']
+ swig_predecls = ['%import "python/m5/swig/stdint.i"\n' +
+ '%import "sim/host.hh"']
+ def __init__(self, value):
+ self.value = getLatency(value)
+
+ def __getattr__(self, attr):
+ if attr in ('latency', 'period'):
+ return self
+ if attr == 'frequency':
+ return Frequency(self)
+ raise AttributeError, "Latency object has no attribute '%s'" % attr
+
+ # convert latency to ticks
+ def ini_str(self):
+ return str(tick_check(self.value * ticks_per_sec))
+
+class Frequency(NumericParamValue):
+ cxx_type = 'Tick'
+ cxx_predecls = ['#include "sim/host.hh"']
+ swig_predecls = ['%import "python/m5/swig/stdint.i"\n' +
+ '%import "sim/host.hh"']
+ def __init__(self, value):
+ self.value = 1 / getLatency(value)
+
+ def __getattr__(self, attr):
+ if attr == 'frequency':
+ return self
+ if attr in ('latency', 'period'):
+ return Latency(self)
+ raise AttributeError, "Frequency object has no attribute '%s'" % attr
+
+ # convert frequency to ticks per period
+ def ini_str(self):
+ return self.period.ini_str()
+
+# Just like Frequency, except ini_str() is absolute # of ticks per sec (Hz).
+# We can't inherit from Frequency because we don't want it to be directly
+# assignable to a regular Frequency parameter.
+class RootClock(ParamValue):
+ cxx_type = 'Tick'
+ cxx_predecls = ['#include "sim/host.hh"']
+ swig_predecls = ['%import "python/m5/swig/stdint.i"\n' +
+ '%import "sim/host.hh"']
+ def __init__(self, value):
+ self.value = 1 / getLatency(value)
+
+ def __getattr__(self, attr):
+ if attr == 'frequency':
+ return Frequency(self)
+ if attr in ('latency', 'period'):
+ return Latency(self)
+ raise AttributeError, "RootClock object has no attribute '%s'" % attr
+
+ def ini_str(self):
+ return str(tick_check(self.value))
+
+# A generic Frequency and/or Latency value.  The value is stored as a
+# latency, but to avoid ambiguity this object does not support numeric
+# ops (* or /); an explicit conversion to a Latency or Frequency must
+# be made first.
+class Clock(ParamValue):
+ cxx_type = 'Tick'
+ cxx_predecls = ['#include "sim/host.hh"']
+ swig_predecls = ['%import "python/m5/swig/stdint.i"\n' +
+ '%import "sim/host.hh"']
+ def __init__(self, value):
+ self.value = getLatency(value)
+
+ def __getattr__(self, attr):
+ if attr == 'frequency':
+ return Frequency(self)
+ if attr in ('latency', 'period'):
+ return Latency(self)
+ raise AttributeError, "Clock object has no attribute '%s'" % attr
+
+ def ini_str(self):
+ return self.period.ini_str()
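+
+# For example, Latency('2ns') and Frequency('500MHz') describe the same
+# clock: Latency('2ns').frequency is a 500MHz Frequency, and
+# Frequency('500MHz').period is a 2ns Latency.  Both print to the .ini
+# file as a tick count computed from ticks_per_sec.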
+
+class NetworkBandwidth(float,ParamValue):
+ cxx_type = 'float'
+ def __new__(cls, value):
+ val = convert.toNetworkBandwidth(value) / 8.0
+ return super(NetworkBandwidth, cls).__new__(cls, val)
+
+ def __str__(self):
+ return str(float(self))
+
+ def ini_str(self):
+ return '%f' % (ticks_per_sec / float(self))
+
+class MemoryBandwidth(float,ParamValue):
+ cxx_type = 'float'
+ def __new__(cls, value):
+ val = convert.toMemoryBandwidth(value)
+ return super(MemoryBandwidth, cls).__new__(cls, val)
+
+ def __str__(self):
+ return str(float(self))
+
+ def ini_str(self):
+ return '%f' % (ticks_per_sec / float(self))
+
+#
+# "Constants"... handy aliases for various values.
+#
+
+# Special class for NULL pointers.  Note the special check in
+# ParamDesc.convert() above that lets these be assigned where a
+# SimObject is required.  Since the class uses the Singleton
+# metaclass, there is only one instance (NULL, below).
+class NullSimObject(object):
+ __metaclass__ = Singleton
+
+ def __call__(self):
+ return self
+
+ def _instantiate(self, parent = None, path = ''):
+ pass
+
+ def ini_str(self):
+ return 'Null'
+
+ def unproxy(self, base):
+ return self
+
+ def set_path(self, parent, name):
+ pass
+ def __str__(self):
+ return 'Null'
+
+# The only instance you'll ever need...
+NULL = NullSimObject()
+
+def isNullPointer(value):
+ return isinstance(value, NullSimObject)
+
+# Some memory range specifications use this as a default upper bound.
+MaxAddr = Addr.max
+MaxTick = Tick.max
+AllMemory = AddrRange(0, MaxAddr)
+
+
+#####################################################################
+#
+# Port objects
+#
+# Ports are used to interconnect objects in the memory system.
+#
+#####################################################################
+
+# Port reference: encapsulates a reference to a particular port on a
+# particular SimObject.
+class PortRef(object):
+ def __init__(self, simobj, name):
+ assert(isSimObject(simobj) or isSimObjectClass(simobj))
+ self.simobj = simobj
+ self.name = name
+ self.peer = None # not associated with another port yet
+ self.ccConnected = False # C++ port connection done?
+ self.index = -1 # always -1 for non-vector ports
+
+ def __str__(self):
+ return '%s.%s' % (self.simobj, self.name)
+
+ # for config.ini, print peer's name (not ours)
+ def ini_str(self):
+ return str(self.peer)
+
+ def __getattr__(self, attr):
+ if attr == 'peerObj':
+ # shorthand for proxies
+ return self.peer.simobj
+ raise AttributeError, "'%s' object has no attribute '%s'" % \
+ (self.__class__.__name__, attr)
+
+ # Full connection is symmetric (both ways). Called via
+ # SimObject.__setattr__ as a result of a port assignment, e.g.,
+ # "obj1.portA = obj2.portB", or via VectorPortElementRef.__setitem__,
+ # e.g., "obj1.portA[3] = obj2.portB".
+ def connect(self, other):
+ if isinstance(other, VectorPortRef):
+ # reference to plain VectorPort is implicit append
+ other = other._get_next()
+ if self.peer and not proxy.isproxy(self.peer):
+ print "warning: overwriting port", self, \
+ "value", self.peer, "with", other
+ self.peer = other
+ if proxy.isproxy(other):
+ other.set_param_desc(PortParamDesc())
+ elif isinstance(other, PortRef):
+ if other.peer is not self:
+ other.connect(self)
+ else:
+ raise TypeError, \
+ "assigning non-port reference '%s' to port '%s'" \
+ % (other, self)
+
+ def clone(self, simobj, memo):
+ if memo.has_key(self):
+ return memo[self]
+ newRef = copy.copy(self)
+ memo[self] = newRef
+ newRef.simobj = simobj
+ assert(isSimObject(newRef.simobj))
+ if self.peer and not proxy.isproxy(self.peer):
+ peerObj = memo[self.peer.simobj]
+ newRef.peer = self.peer.clone(peerObj, memo)
+ assert(not isinstance(newRef.peer, VectorPortRef))
+ return newRef
+
+ def unproxy(self, simobj):
+ assert(simobj is self.simobj)
+ if proxy.isproxy(self.peer):
+ try:
+ realPeer = self.peer.unproxy(self.simobj)
+ except:
+ print "Error in unproxying port '%s' of %s" % \
+ (self.name, self.simobj.path())
+ raise
+ self.connect(realPeer)
+
+ # Call C++ to create corresponding port connection between C++ objects
+ def ccConnect(self):
+ if self.ccConnected: # already done this
+ return
+ peer = self.peer
+ cc_main.connectPorts(self.simobj.getCCObject(), self.name, self.index,
+ peer.simobj.getCCObject(), peer.name, peer.index)
+ self.ccConnected = True
+ peer.ccConnected = True
+
+# A reference to an individual element of a VectorPort... much like a
+# PortRef, but has an index.
+class VectorPortElementRef(PortRef):
+ def __init__(self, simobj, name, index):
+ PortRef.__init__(self, simobj, name)
+ self.index = index
+
+ def __str__(self):
+ return '%s.%s[%d]' % (self.simobj, self.name, self.index)
+
+# A reference to a complete vector-valued port (not just a single element).
+# Can be indexed to retrieve individual VectorPortElementRef instances.
+class VectorPortRef(object):
+ def __init__(self, simobj, name):
+ assert(isSimObject(simobj) or isSimObjectClass(simobj))
+ self.simobj = simobj
+ self.name = name
+ self.elements = []
+
+ def __str__(self):
+ return '%s.%s[:]' % (self.simobj, self.name)
+
+ # for config.ini, print peer's name (not ours)
+ def ini_str(self):
+ return ' '.join([el.ini_str() for el in self.elements])
+
+ def __getitem__(self, key):
+ if not isinstance(key, int):
+ raise TypeError, "VectorPort index must be integer"
+ if key >= len(self.elements):
+ # need to extend list
+ ext = [VectorPortElementRef(self.simobj, self.name, i)
+ for i in range(len(self.elements), key+1)]
+ self.elements.extend(ext)
+ return self.elements[key]
+
+ def _get_next(self):
+ return self[len(self.elements)]
+
+ def __setitem__(self, key, value):
+ if not isinstance(key, int):
+ raise TypeError, "VectorPort index must be integer"
+ self[key].connect(value)
+
+ def connect(self, other):
+ if isinstance(other, (list, tuple)):
+ # Assign list of port refs to vector port.
+ # For now, append them... not sure if that's the right semantics
+ # or if it should replace the current vector.
+ for ref in other:
+ self._get_next().connect(ref)
+ else:
+ # scalar assignment to plain VectorPort is implicit append
+ self._get_next().connect(other)
+
+ def clone(self, simobj, memo):
+ if memo.has_key(self):
+ return memo[self]
+ newRef = copy.copy(self)
+ memo[self] = newRef
+ newRef.simobj = simobj
+ assert(isSimObject(newRef.simobj))
+ newRef.elements = [el.clone(simobj, memo) for el in self.elements]
+ return newRef
+
+ def unproxy(self, simobj):
+ [el.unproxy(simobj) for el in self.elements]
+
+ def ccConnect(self):
+ [el.ccConnect() for el in self.elements]
+
+# Port description object. Like a ParamDesc object, this represents a
+# logical port in the SimObject class, not a particular port on a
+# SimObject instance. The latter are represented by PortRef objects.
+class Port(object):
+ # Port("description") or Port(default, "description")
+ def __init__(self, *args):
+ if len(args) == 1:
+ self.desc = args[0]
+ elif len(args) == 2:
+ self.default = args[0]
+ self.desc = args[1]
+ else:
+ raise TypeError, 'wrong number of arguments'
+ # self.name is set by SimObject class on assignment
+ # e.g., pio_port = Port("blah") sets self.name to 'pio_port'
+
+ # Generate a PortRef for this port on the given SimObject with the
+ # given name
+ def makeRef(self, simobj):
+ return PortRef(simobj, self.name)
+
+ # Connect an instance of this port (on the given SimObject with
+ # the given name) with the port described by the supplied PortRef
+ def connect(self, simobj, ref):
+ self.makeRef(simobj).connect(ref)
+
+# VectorPort description object. Like Port, but represents a vector
+# of connections (e.g., as on a Bus).
+class VectorPort(Port):
+ def __init__(self, *args):
+ Port.__init__(self, *args)
+ self.isVec = True
+
+ def makeRef(self, simobj):
+ return VectorPortRef(simobj, self.name)
+
+# 'Fake' ParamDesc for Port references to assign to the _pdesc slot of
+# proxy objects (via set_param_desc()) so that proxy error messages
+# make sense.
+class PortParamDesc(object):
+ __metaclass__ = Singleton
+
+ ptype_str = 'Port'
+ ptype = Port
+
+
+__all__ = ['Param', 'VectorParam',
+ 'Enum', 'Bool', 'String', 'Float',
+ 'Int', 'Unsigned', 'Int8', 'UInt8', 'Int16', 'UInt16',
+ 'Int32', 'UInt32', 'Int64', 'UInt64',
+ 'Counter', 'Addr', 'Tick', 'Percent',
+ 'TcpPort', 'UdpPort', 'EthernetAddr',
+ 'MemorySize', 'MemorySize32',
+ 'Latency', 'Frequency', 'RootClock', 'Clock',
+ 'NetworkBandwidth', 'MemoryBandwidth',
+ 'Range', 'AddrRange', 'TickRange',
+ 'MaxAddr', 'MaxTick', 'AllMemory',
+ 'NextEthernetAddr', 'NULL',
+ 'Port', 'VectorPort']
+
+# see comment on imports at end of __init__.py.
+from SimObject import isSimObject, isSimObjectSequence, isSimObjectClass
+import proxy
+import objects
+import cc_main
diff --git a/src/python/m5/proxy.py b/src/python/m5/proxy.py
new file mode 100644
index 000000000..7ebc0ae19
--- /dev/null
+++ b/src/python/m5/proxy.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2004-2006 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+# Nathan Binkert
+
+#####################################################################
+#
+# Proxy object support.
+#
+#####################################################################
+
+class BaseProxy(object):
+ def __init__(self, search_self, search_up):
+ self._search_self = search_self
+ self._search_up = search_up
+ self._multiplier = None
+
+ def __str__(self):
+ if self._search_self and not self._search_up:
+ s = 'Self'
+ elif not self._search_self and self._search_up:
+ s = 'Parent'
+ else:
+ s = 'ConfusedProxy'
+ return s + '.' + self.path()
+
+ def __setattr__(self, attr, value):
+ if not attr.startswith('_'):
+ raise AttributeError, \
+ "cannot set attribute '%s' on proxy object" % attr
+ super(BaseProxy, self).__setattr__(attr, value)
+
+ # support multiplying proxies by constants
+ def __mul__(self, other):
+ if not isinstance(other, (int, long, float)):
+ raise TypeError, "Proxy multiplier must be a number"
+ if self._multiplier == None:
+ self._multiplier = other
+ else:
+ # support chained multipliers
+ self._multiplier *= other
+ return self
+
+ __rmul__ = __mul__
+
+ def _mulcheck(self, result):
+ if self._multiplier == None:
+ return result
+ return result * self._multiplier
+
+ def unproxy(self, base):
+ obj = base
+ done = False
+
+ if self._search_self:
+ result, done = self.find(obj)
+
+ if self._search_up:
+ while not done:
+ obj = obj._parent
+ if not obj:
+ break
+ result, done = self.find(obj)
+
+ if not done:
+ raise AttributeError, \
+ "Can't resolve proxy '%s' of type '%s' from '%s'" % \
+ (self.path(), self._pdesc.ptype_str, base.path())
+
+ if isinstance(result, BaseProxy):
+ if result == self:
+ raise RuntimeError, "Cycle in unproxy"
+ result = result.unproxy(obj)
+
+ return self._mulcheck(result)
+
+ def getindex(obj, index):
+ if index == None:
+ return obj
+ try:
+ obj = obj[index]
+ except TypeError:
+ if index != 0:
+ raise
+ # if index is 0 and item is not subscriptable, just
+ # use item itself (so cpu[0] works on uniprocessors)
+ return obj
+ getindex = staticmethod(getindex)
+
+ # This method should be called once the proxy is assigned to a
+ # particular parameter or port to set the expected type of the
+ # resolved proxy
+ def set_param_desc(self, pdesc):
+ self._pdesc = pdesc
+
+class AttrProxy(BaseProxy):
+ def __init__(self, search_self, search_up, attr):
+ super(AttrProxy, self).__init__(search_self, search_up)
+ self._attr = attr
+ self._modifiers = []
+
+ def __getattr__(self, attr):
+ # python uses __bases__ internally for inheritance
+ if attr.startswith('_'):
+ return super(AttrProxy, self).__getattr__(attr)
+ if hasattr(self, '_pdesc'):
+ raise AttributeError, "Attribute reference on bound proxy"
+ self._modifiers.append(attr)
+ return self
+
+ # support indexing on proxies (e.g., Self.cpu[0])
+ def __getitem__(self, key):
+ if not isinstance(key, int):
+ raise TypeError, "Proxy object requires integer index"
+ self._modifiers.append(key)
+ return self
+
+ def find(self, obj):
+ try:
+ val = getattr(obj, self._attr)
+ except:
+ return None, False
+ while isproxy(val):
+ val = val.unproxy(obj)
+ for m in self._modifiers:
+ if isinstance(m, str):
+ val = getattr(val, m)
+ elif isinstance(m, int):
+ val = val[m]
+ else:
+ assert False, "Item must be string or integer"
+ while isproxy(val):
+ val = val.unproxy(obj)
+ return val, True
+
+ def path(self):
+ p = self._attr
+ for m in self._modifiers:
+ if isinstance(m, str):
+ p += '.%s' % m
+ elif isinstance(m, int):
+ p += '[%d]' % m
+ else:
+ assert False, "Item must be string or integer"
+ return p
+
+class AnyProxy(BaseProxy):
+ def find(self, obj):
+ return obj.find_any(self._pdesc.ptype)
+
+ def path(self):
+ return 'any'
+
+def isproxy(obj):
+ if isinstance(obj, (BaseProxy, params.EthernetAddr)):
+ return True
+ elif isinstance(obj, (list, tuple)):
+ for v in obj:
+ if isproxy(v):
+ return True
+ return False
+
+class ProxyFactory(object):
+ def __init__(self, search_self, search_up):
+ self.search_self = search_self
+ self.search_up = search_up
+
+ def __getattr__(self, attr):
+ if attr == 'any':
+ return AnyProxy(self.search_self, self.search_up)
+ else:
+ return AttrProxy(self.search_self, self.search_up, attr)
+
+# global objects for handling proxies
+Parent = ProxyFactory(search_self = False, search_up = True)
+Self = ProxyFactory(search_self = True, search_up = False)
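+
+# For example, a parameter default of Parent.any resolves to the nearest
+# enclosing object that can supply a value of the parameter's type, while
+# Self.cpu[0].clock refers to an attribute of the object being configured
+# itself, e.g. (hypothetical parameter declaration):
+#
+#     system = Param.System(Parent.any, "system this device belongs to")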
+
+# limit exports on 'from proxy import *'
+__all__ = ['Parent', 'Self']
+
+# see comment on imports at end of __init__.py.
+import params # for EthernetAddr
diff --git a/src/python/m5/smartdict.py b/src/python/m5/smartdict.py
new file mode 100644
index 000000000..d85dbd517
--- /dev/null
+++ b/src/python/m5/smartdict.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2005 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+# The SmartDict class fixes a couple of issues with using the content
+# of os.environ or similar dicts of strings as Python variables:
+#
+# 1) Undefined variables should return False rather than raising KeyError.
+#
+# 2) String values of 'False', '0', etc., should evaluate to False
+# (not just the empty string).
+#
+# #1 is solved by overriding __getitem__, and #2 is solved by using a
+# proxy class for values and overriding __nonzero__ on the proxy.
+# Everything else is just to (a) make proxies behave like normal
+# values otherwise, (b) make sure any dict operation returns a proxy
+# rather than a normal value, and (c) coerce values written to the
+# dict to be strings.
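+#
+# For example, with env = SmartDict() and env['NCPUS'] = 4, the value is
+# stored as the string '4', but int(env['NCPUS']) yields 4; a key set to
+# 'False' or '0' tests False, and an undefined key such as env['MISSING']
+# also tests False rather than raising KeyError.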
+
+
+from convert import *
+
+class Variable(str):
+ """Intelligent proxy class for SmartDict. Variable will use the
+ various convert functions to attempt to convert values to useable
+ types"""
+ def __int__(self):
+ return toInteger(str(self))
+ def __long__(self):
+ return toLong(str(self))
+ def __float__(self):
+ return toFloat(str(self))
+ def __nonzero__(self):
+ return toBool(str(self))
+ def convert(self, other):
+ t = type(other)
+ if t == bool:
+ return bool(self)
+ if t == int:
+ return int(self)
+ if t == long:
+ return long(self)
+ if t == float:
+ return float(self)
+ return str(self)
+ def __lt__(self, other):
+ return self.convert(other) < other
+ def __le__(self, other):
+ return self.convert(other) <= other
+ def __eq__(self, other):
+ return self.convert(other) == other
+ def __ne__(self, other):
+ return self.convert(other) != other
+ def __gt__(self, other):
+ return self.convert(other) > other
+ def __ge__(self, other):
+ return self.convert(other) >= other
+
+ def __add__(self, other):
+ return self.convert(other) + other
+ def __sub__(self, other):
+ return self.convert(other) - other
+ def __mul__(self, other):
+ return self.convert(other) * other
+ def __div__(self, other):
+ return self.convert(other) / other
+ def __truediv__(self, other):
+ return self.convert(other) / other
+
+ def __radd__(self, other):
+ return other + self.convert(other)
+ def __rsub__(self, other):
+ return other - self.convert(other)
+ def __rmul__(self, other):
+ return other * self.convert(other)
+ def __rdiv__(self, other):
+ return other / self.convert(other)
+ def __rtruediv__(self, other):
+ return other / self.convert(other)
+
+class UndefinedVariable(object):
+ """Placeholder class to represent undefined variables. Will
+ generally cause an exception whenever it is used, but evaluates to
+ zero for boolean truth testing such as in an if statement"""
+ def __nonzero__(self):
+ return False
+
+class SmartDict(dict):
+ """Dictionary class that holds strings, but intelligently converts
+ those strings to other types depending on their usage"""
+
+ def __getitem__(self, key):
+ """returns a Variable proxy if the values exists in the database and
+ returns an UndefinedVariable otherwise"""
+
+ if key in self:
+ return Variable(dict.get(self, key))
+ else:
+ # Note that this does *not* change the contents of the dict,
+ # so that even after we call env['foo'] we still get a
+ # meaningful answer from "'foo' in env" (which
+ # calls dict.__contains__, which we do not override).
+ return UndefinedVariable()
+
+ def __setitem__(self, key, item):
+ """intercept the setting of any variable so that we always
+ store strings in the dict"""
+ dict.__setitem__(self, key, str(item))
+
+ def values(self):
+ return [ Variable(v) for v in dict.values(self) ]
+
+ def itervalues(self):
+ for value in dict.itervalues(self):
+ yield Variable(value)
+
+ def items(self):
+ return [ (k, Variable(v)) for k,v in dict.items(self) ]
+
+ def iteritems(self):
+ for key,value in dict.iteritems(self):
+ yield key, Variable(value)
+
+ def get(self, key, default='False'):
+ return Variable(dict.get(self, key, str(default)))
+
+ def setdefault(self, key, default='False'):
+ return Variable(dict.setdefault(self, key, str(default)))
+
+__all__ = [ 'SmartDict' ]
diff --git a/src/python/m5/util.py b/src/python/m5/util.py
new file mode 100644
index 000000000..28b8b1b94
--- /dev/null
+++ b/src/python/m5/util.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2004-2006 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+# Nathan Binkert
+
+#############################
+#
+# Utility classes & methods
+#
+#############################
+
+class Singleton(type):
+ def __call__(cls, *args, **kwargs):
+ if hasattr(cls, '_instance'):
+ return cls._instance
+
+ cls._instance = super(Singleton, cls).__call__(*args, **kwargs)
+ return cls._instance
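+
+# For example, any class that sets __metaclass__ = Singleton (as
+# NullSimObject in params.py does) hands back the same instance from
+# every constructor call:
+#
+#     class OnlyOne(object):          # hypothetical class
+#         __metaclass__ = Singleton
+#
+#     assert OnlyOne() is OnlyOne()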
+
+# Apply method to object.
+# applyMethod(obj, 'meth', <args>) is equivalent to obj.meth(<args>)
+def applyMethod(obj, meth, *args, **kwargs):
+ return getattr(obj, meth)(*args, **kwargs)
+
+# If the first argument is a (non-sequence) object, apply the named
+# method with the given arguments. If the first argument is a
+# sequence, apply the method to each element of the sequence (a la
+# 'map').
+def applyOrMap(objOrSeq, meth, *args, **kwargs):
+ if not isinstance(objOrSeq, (list, tuple)):
+ return applyMethod(objOrSeq, meth, *args, **kwargs)
+ else:
+ return [applyMethod(o, meth, *args, **kwargs) for o in objOrSeq]
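+
+# For example, applyOrMap(cpu, 'connectAll', bus) calls the method once,
+# while applyOrMap([cpu0, cpu1], 'connectAll', bus) returns a list with
+# one result per element (hypothetical objects and method name).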
+
+