path: root/ext/ply/ply
author    Nathan Binkert <nate@binkert.org>  2009-08-16 13:39:58 -0700
committer Nathan Binkert <nate@binkert.org>  2009-08-16 13:39:58 -0700
commit    e1270f81bdc81f5a575b34c2d2c294bdde3e6f4f (patch)
tree      b54af3469a338609faf04e67603c5264e79d59a5 /ext/ply/ply
parent    bcaf93d182f43bf72d52104bb909324945904120 (diff)
download  gem5-e1270f81bdc81f5a575b34c2d2c294bdde3e6f4f.tar.xz
ply: update PLY to version 3.2
Diffstat (limited to 'ext/ply/ply')
-rw-r--r--ext/ply/ply/cpp.py898
-rw-r--r--ext/ply/ply/ctokens.py133
-rw-r--r--ext/ply/ply/lex.py986
-rw-r--r--ext/ply/ply/yacc.py4412
4 files changed, 4351 insertions, 2078 deletions
diff --git a/ext/ply/ply/cpp.py b/ext/ply/ply/cpp.py
new file mode 100644
index 000000000..39f9d47f3
--- /dev/null
+++ b/ext/ply/ply/cpp.py
@@ -0,0 +1,898 @@
+# -----------------------------------------------------------------------------
+# cpp.py
+#
+# Author: David Beazley (http://www.dabeaz.com)
+# Copyright (C) 2007
+# All rights reserved
+#
+# This module implements an ANSI-C style lexical preprocessor for PLY.
+# -----------------------------------------------------------------------------
+from __future__ import generators
+
+# -----------------------------------------------------------------------------
+# Default preprocessor lexer definitions. These tokens are enough to get
+# a basic preprocessor working. Other modules may import these if they want to.
+# -----------------------------------------------------------------------------
+
+tokens = (
+ 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT', 'CPP_POUND','CPP_DPOUND'
+)
+
+literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
+
+# Whitespace
+def t_CPP_WS(t):
+ r'\s+'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+t_CPP_POUND = r'\#'
+t_CPP_DPOUND = r'\#\#'
+
+# Identifier
+t_CPP_ID = r'[A-Za-z_][\w_]*'
+
+# Integer literal
+def CPP_INTEGER(t):
+ r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU]|[lL]|[uU][lL]|[lL][uU])?)'
+ return t
+
+t_CPP_INTEGER = CPP_INTEGER
+
+# Floating literal
+t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+def t_CPP_STRING(t):
+ r'\"([^\\\n]|(\\(.|\n)))*?\"'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+# Character constant 'c' or L'c'
+def t_CPP_CHAR(t):
+ r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+# Comment
+def t_CPP_COMMENT(t):
+ r'(/\*(.|\n)*?\*/)|(//.*?\n)'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+def t_error(t):
+ t.type = t.value[0]
+ t.value = t.value[0]
+ t.lexer.skip(1)
+ return t
+
+import re
+import copy
+import time
+import os.path
+import sys # needed by error() and the __main__ driver below
+
+# -----------------------------------------------------------------------------
+# trigraph()
+#
+# Given an input string, this function replaces all trigraph sequences.
+# The following mapping is used:
+#
+# ??= #
+# ??/ \
+# ??' ^
+# ??( [
+# ??) ]
+# ??! |
+# ??< {
+# ??> }
+# ??- ~
+# -----------------------------------------------------------------------------
+
+_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
+_trigraph_rep = {
+ '=':'#',
+ '/':'\\',
+ "'":'^',
+ '(':'[',
+ ')':']',
+ '!':'|',
+ '<':'{',
+ '>':'}',
+ '-':'~'
+}
+
+def trigraph(input):
+ return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
+
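+# A quick doctest-style illustration:
+#
+# >>> trigraph("??=define ARR ??(10??)")
+# '#define ARR [10]'
+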
+# ------------------------------------------------------------------
+# Macro object
+#
+# This object holds information about preprocessor macros
+#
+# .name - Macro name (string)
+# .value - Macro value (a list of tokens)
+# .arglist - List of argument names
+# .variadic - Boolean indicating whether or not this is a variadic macro
+# .vararg - Name of the variadic parameter
+#
+# When a macro is created, the macro replacement token sequence is
+# pre-scanned and used to create patch lists that are later used
+# during macro expansion
+# ------------------------------------------------------------------
+
+class Macro(object):
+ def __init__(self,name,value,arglist=None,variadic=False):
+ self.name = name
+ self.value = value
+ self.arglist = arglist
+ self.variadic = variadic
+ if variadic:
+ self.vararg = arglist[-1]
+ self.source = None
+
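+# Example (illustrative): for a macro defined via define("SQUARE(x) ((x)*(x))")
+# below, the stored Macro has .name 'SQUARE', .arglist ['x'], .variadic False,
+# and .value holding the token sequence of "((x)*(x))".
+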
+# ------------------------------------------------------------------
+# Preprocessor object
+#
+# Object representing a preprocessor. Contains macro definitions,
+# include directories, and other information
+# ------------------------------------------------------------------
+
+class Preprocessor(object):
+ def __init__(self,lexer=None):
+ if lexer is None:
+ lexer = lex.lexer
+ self.lexer = lexer
+ self.macros = { }
+ self.path = []
+ self.temp_path = []
+
+ # Probe the lexer for selected tokens
+ self.lexprobe()
+
+ tm = time.localtime()
+ self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
+ self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
+ self.parser = None
+
+ # -----------------------------------------------------------------------------
+ # tokenize()
+ #
+ # Utility function. Given a string of text, tokenize it into a list of tokens
+ # -----------------------------------------------------------------------------
+
+ def tokenize(self,text):
+ tokens = []
+ self.lexer.input(text)
+ while True:
+ tok = self.lexer.token()
+ if not tok: break
+ tokens.append(tok)
+ return tokens
+
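+ # Example (illustrative): tokenize("a + 1") returns LexToken objects
+ # for CPP_ID 'a', CPP_WS ' ', literal '+', CPP_WS ' ', CPP_INTEGER '1'.
+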
+ # ---------------------------------------------------------------------
+ # error()
+ #
+ # Report a preprocessor error/warning of some kind
+ # ----------------------------------------------------------------------
+
+ def error(self,file,line,msg):
+ print >>sys.stderr,"%s:%d %s" % (file,line,msg)
+
+ # ----------------------------------------------------------------------
+ # lexprobe()
+ #
+ # This method probes the preprocessor lexer object to discover
+ # the token types of symbols that are important to the preprocessor.
+ # If this works right, the preprocessor will simply "work"
+ # with any suitable lexer regardless of how tokens have been named.
+ # ----------------------------------------------------------------------
+
+ def lexprobe(self):
+
+ # Determine the token type for identifiers
+ self.lexer.input("identifier")
+ tok = self.lexer.token()
+ if not tok or tok.value != "identifier":
+ print "Couldn't determine identifier type"
+ else:
+ self.t_ID = tok.type
+
+ # Determine the token type for integers
+ self.lexer.input("12345")
+ tok = self.lexer.token()
+ if not tok or int(tok.value) != 12345:
+ print "Couldn't determine integer type"
+ else:
+ self.t_INTEGER = tok.type
+ self.t_INTEGER_TYPE = type(tok.value)
+
+ # Determine the token type for strings enclosed in double quotes
+ self.lexer.input("\"filename\"")
+ tok = self.lexer.token()
+ if not tok or tok.value != "\"filename\"":
+ print "Couldn't determine string type"
+ else:
+ self.t_STRING = tok.type
+
+ # Determine the token type for whitespace--if any
+ self.lexer.input(" ")
+ tok = self.lexer.token()
+ if not tok or tok.value != " ":
+ self.t_SPACE = None
+ else:
+ self.t_SPACE = tok.type
+
+ # Determine the token type for newlines
+ self.lexer.input("\n")
+ tok = self.lexer.token()
+ if not tok or tok.value != "\n":
+ self.t_NEWLINE = None
+ print "Couldn't determine token for newlines"
+ else:
+ self.t_NEWLINE = tok.type
+
+ self.t_WS = (self.t_SPACE, self.t_NEWLINE)
+
+ # Check for other characters used by the preprocessor
+ chars = [ '<','>','#','##','\\','(',')',',','.']
+ for c in chars:
+ self.lexer.input(c)
+ tok = self.lexer.token()
+ if not tok or tok.value != c:
+ print "Unable to lex '%s' required for preprocessor" % c
+
+ # ----------------------------------------------------------------------
+ # add_path()
+ #
+ # Adds a search path to the preprocessor.
+ # ----------------------------------------------------------------------
+
+ def add_path(self,path):
+ self.path.append(path)
+
+ # ----------------------------------------------------------------------
+ # group_lines()
+ #
+ # Given an input string, this function splits it into lines. Trailing whitespace
+ # is removed. Any line ending with \ is grouped with the next line. This
+ # function forms the lowest level of the preprocessor---grouping input text into
+ # a line-by-line format.
+ # ----------------------------------------------------------------------
+
+ def group_lines(self,input):
+ lex = self.lexer.clone()
+ lines = [x.rstrip() for x in input.splitlines()]
+ for i in xrange(len(lines)):
+ j = i+1
+ while lines[i].endswith('\\') and (j < len(lines)):
+ lines[i] = lines[i][:-1]+lines[j]
+ lines[j] = ""
+ j += 1
+
+ input = "\n".join(lines)
+ lex.input(input)
+ lex.lineno = 1
+
+ current_line = []
+ while True:
+ tok = lex.token()
+ if not tok:
+ break
+ current_line.append(tok)
+ if tok.type in self.t_WS and '\n' in tok.value:
+ yield current_line
+ current_line = []
+
+ if current_line:
+ yield current_line
+
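+ # Example (illustrative): the two physical lines "#define X \" and "1"
+ # are spliced into one logical line whose tokens spell "#define X 1".
+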
+ # ----------------------------------------------------------------------
+ # tokenstrip()
+ #
+ # Remove leading/trailing whitespace tokens from a token list
+ # ----------------------------------------------------------------------
+
+ def tokenstrip(self,tokens):
+ i = 0
+ while i < len(tokens) and tokens[i].type in self.t_WS:
+ i += 1
+ del tokens[:i]
+ i = len(tokens)-1
+ while i >= 0 and tokens[i].type in self.t_WS:
+ i -= 1
+ del tokens[i+1:]
+ return tokens
+
+
+ # ----------------------------------------------------------------------
+ # collect_args()
+ #
+ # Collects comma separated arguments from a list of tokens. The arguments
+ # must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
+ # where tokencount is the number of tokens consumed, args is a list of arguments,
+ # and positions is a list of integers containing the starting index of each
+ # argument. Each argument is represented by a list of tokens.
+ #
+ # When collecting arguments, leading and trailing whitespace is removed
+ # from each argument.
+ #
+ # This function properly handles nested parentheses and commas---these do not
+ # define new arguments.
+ # ----------------------------------------------------------------------
+
+ def collect_args(self,tokenlist):
+ args = []
+ positions = []
+ current_arg = []
+ nesting = 1
+ tokenlen = len(tokenlist)
+
+ # Search for the opening '('.
+ i = 0
+ while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
+ i += 1
+
+ if (i < tokenlen) and (tokenlist[i].value == '('):
+ positions.append(i+1)
+ else:
+ self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
+ return 0, [], []
+
+ i += 1
+
+ while i < tokenlen:
+ t = tokenlist[i]
+ if t.value == '(':
+ current_arg.append(t)
+ nesting += 1
+ elif t.value == ')':
+ nesting -= 1
+ if nesting == 0:
+ if current_arg:
+ args.append(self.tokenstrip(current_arg))
+ positions.append(i)
+ return i+1,args,positions
+ current_arg.append(t)
+ elif t.value == ',' and nesting == 1:
+ args.append(self.tokenstrip(current_arg))
+ positions.append(i+1)
+ current_arg = []
+ else:
+ current_arg.append(t)
+ i += 1
+
+ # Missing end argument
+ self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
+ return 0, [],[]
+
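+ # Example (illustrative): for the token stream of "(a, f(b, c), d)",
+ # collect_args yields three arguments -- [a], [f ( b , c )] and [d] --
+ # because the comma inside the nested parentheses does not split.
+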
+ # ----------------------------------------------------------------------
+ # macro_prescan()
+ #
+ # Examine the macro value (token sequence) and identify patch points
+ # This is used to speed up macro expansion later on---we'll know
+ # right away where to apply patches to the value to form the expansion
+ # ----------------------------------------------------------------------
+
+ def macro_prescan(self,macro):
+ macro.patch = [] # Standard macro arguments
+ macro.str_patch = [] # String conversion expansion
+ macro.var_comma_patch = [] # Variadic macro comma patch
+ i = 0
+ while i < len(macro.value):
+ if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
+ argnum = macro.arglist.index(macro.value[i].value)
+ # Conversion of argument to a string
+ if i > 0 and macro.value[i-1].value == '#':
+ macro.value[i] = copy.copy(macro.value[i])
+ macro.value[i].type = self.t_STRING
+ del macro.value[i-1]
+ macro.str_patch.append((argnum,i-1))
+ continue
+ # Concatenation
+ elif (i > 0 and macro.value[i-1].value == '##'):
+ macro.patch.append(('c',argnum,i-1))
+ del macro.value[i-1]
+ continue
+ elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
+ macro.patch.append(('c',argnum,i))
+ i += 1
+ continue
+ # Standard expansion
+ else:
+ macro.patch.append(('e',argnum,i))
+ elif macro.value[i].value == '##':
+ if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
+ ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
+ (macro.value[i+1].value == macro.vararg):
+ macro.var_comma_patch.append(i-1)
+ i += 1
+ macro.patch.sort(key=lambda x: x[2],reverse=True)
+
+ # ----------------------------------------------------------------------
+ # macro_expand_args()
+ #
+ # Given a Macro and list of arguments (each a token list), this method
+ # returns an expanded version of a macro. The return value is a token sequence
+ # representing the replacement macro tokens
+ # ----------------------------------------------------------------------
+
+ def macro_expand_args(self,macro,args):
+ # Make a copy of the macro token sequence
+ rep = [copy.copy(_x) for _x in macro.value]
+
+ # Make string expansion patches. These do not alter the length of the replacement sequence
+
+ str_expansion = {}
+ for argnum, i in macro.str_patch:
+ if argnum not in str_expansion:
+ str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
+ rep[i] = copy.copy(rep[i])
+ rep[i].value = str_expansion[argnum]
+
+ # Make the variadic macro comma patch. If the variadic macro argument is
+ # empty, we get rid of the comma that precedes it in the expansion.
+ comma_patch = False
+ if macro.variadic and not args[-1]:
+ for i in macro.var_comma_patch:
+ rep[i] = None
+ comma_patch = True
+
+ # Make all other patches. The order of these matters. It is assumed that the patch list
+ # has been sorted in reverse order of patch location since replacements will cause the
+ # size of the replacement sequence to expand from the patch point.
+
+ expanded = { }
+ for ptype, argnum, i in macro.patch:
+ # Concatenation. Argument is left unexpanded
+ if ptype == 'c':
+ rep[i:i+1] = args[argnum]
+ # Normal expansion. Argument is macro expanded first
+ elif ptype == 'e':
+ if argnum not in expanded:
+ expanded[argnum] = self.expand_macros(args[argnum])
+ rep[i:i+1] = expanded[argnum]
+
+ # Get rid of removed comma if necessary
+ if comma_patch:
+ rep = [_i for _i in rep if _i]
+
+ return rep
+
+
+ # ----------------------------------------------------------------------
+ # expand_macros()
+ #
+ # Given a list of tokens, this function performs macro expansion.
+ # The expanded argument is a dictionary that contains macros already
+ # expanded. This is used to prevent infinite recursion.
+ # ----------------------------------------------------------------------
+
+ def expand_macros(self,tokens,expanded=None):
+ if expanded is None:
+ expanded = {}
+ i = 0
+ while i < len(tokens):
+ t = tokens[i]
+ if t.type == self.t_ID:
+ if t.value in self.macros and t.value not in expanded:
+ # Yes, we found a macro match
+ expanded[t.value] = True
+
+ m = self.macros[t.value]
+ if not m.arglist:
+ # A simple macro
+ ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
+ for e in ex:
+ e.lineno = t.lineno
+ tokens[i:i+1] = ex
+ i += len(ex)
+ else:
+ # A macro with arguments
+ j = i + 1
+ while j < len(tokens) and tokens[j].type in self.t_WS:
+ j += 1
+ if j < len(tokens) and tokens[j].value == '(':
+ tokcount,args,positions = self.collect_args(tokens[j:])
+ if not m.variadic and len(args) != len(m.arglist):
+ self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
+ i = j + tokcount
+ elif m.variadic and len(args) < len(m.arglist)-1:
+ if len(m.arglist) > 2:
+ self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
+ else:
+ self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
+ i = j + tokcount
+ else:
+ if m.variadic:
+ if len(args) == len(m.arglist)-1:
+ args.append([])
+ else:
+ args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
+ del args[len(m.arglist):]
+
+ # Get macro replacement text
+ rep = self.macro_expand_args(m,args)
+ rep = self.expand_macros(rep,expanded)
+ for r in rep:
+ r.lineno = t.lineno
+ tokens[i:j+tokcount] = rep
+ i += len(rep)
+ else:
+ # Function-like macro name not followed by '(';
+ # leave the identifier in place and move on
+ i = j
+ del expanded[t.value]
+ continue
+ elif t.value == '__LINE__':
+ t.type = self.t_INTEGER
+ t.value = self.t_INTEGER_TYPE(t.lineno)
+
+ i += 1
+ return tokens
+
+ # ----------------------------------------------------------------------
+ # evalexpr()
+ #
+ # Evaluate an expression token sequence for the purposes of evaluating
+ # integral expressions.
+ # ----------------------------------------------------------------------
+
+ def evalexpr(self,tokens):
+ # tokens = tokenize(line)
+ # Search for defined macros
+ i = 0
+ while i < len(tokens):
+ if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
+ j = i + 1
+ needparen = False
+ result = "0L"
+ while j < len(tokens):
+ if tokens[j].type in self.t_WS:
+ j += 1
+ continue
+ elif tokens[j].type == self.t_ID:
+ if tokens[j].value in self.macros:
+ result = "1L"
+ else:
+ result = "0L"
+ if not needparen: break
+ elif tokens[j].value == '(':
+ needparen = True
+ elif tokens[j].value == ')':
+ break
+ else:
+ self.error(self.source,tokens[i].lineno,"Malformed defined()")
+ j += 1
+ tokens[i].type = self.t_INTEGER
+ tokens[i].value = self.t_INTEGER_TYPE(result)
+ del tokens[i+1:j+1]
+ i += 1
+ tokens = self.expand_macros(tokens)
+ for i,t in enumerate(tokens):
+ if t.type == self.t_ID:
+ tokens[i] = copy.copy(t)
+ tokens[i].type = self.t_INTEGER
+ tokens[i].value = self.t_INTEGER_TYPE("0L")
+ elif t.type == self.t_INTEGER:
+ tokens[i] = copy.copy(t)
+ # Strip off any trailing suffixes
+ tokens[i].value = str(tokens[i].value)
+ while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
+ tokens[i].value = tokens[i].value[:-1]
+
+ expr = "".join([str(x.value) for x in tokens])
+ expr = expr.replace("&&"," and ")
+ expr = expr.replace("||"," or ")
+ expr = expr.replace("!="," __NE__ ") # protect != from the ! substitution below
+ expr = expr.replace("!"," not ")
+ expr = expr.replace(" __NE__ "," != ")
+ try:
+ result = eval(expr)
+ except StandardError:
+ self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
+ result = 0
+ return result
+
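+ # Example (illustrative): for "#if defined(FOO) && BAR > 1" with FOO
+ # defined and BAR undefined, the tokens reduce to the Python expression
+ # "1L and 0L > 1", which evaluates to a false value.
+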
+ # ----------------------------------------------------------------------
+ # parsegen()
+ #
+ # Parse an input string.
+ # ----------------------------------------------------------------------
+ def parsegen(self,input,source=None):
+
+ # Replace trigraph sequences
+ t = trigraph(input)
+ lines = self.group_lines(t)
+
+ if not source:
+ source = ""
+
+ self.define("__FILE__ \"%s\"" % source)
+
+ self.source = source
+ chunk = []
+ enable = True
+ iftrigger = False
+ ifstack = []
+
+ for x in lines:
+ for i,tok in enumerate(x):
+ if tok.type not in self.t_WS: break
+ if tok.value == '#':
+ # Preprocessor directive
+
+ for tok in x:
+ if tok.type in self.t_WS and '\n' in tok.value:
+ chunk.append(tok)
+
+ dirtokens = self.tokenstrip(x[i+1:])
+ if dirtokens:
+ name = dirtokens[0].value
+ args = self.tokenstrip(dirtokens[1:])
+ else:
+ name = ""
+ args = []
+
+ if name == 'define':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ self.define(args)
+ elif name == 'include':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ oldfile = self.macros['__FILE__']
+ for tok in self.include(args):
+ yield tok
+ self.macros['__FILE__'] = oldfile
+ self.source = source
+ elif name == 'undef':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ self.undef(args)
+ elif name == 'ifdef':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ if not args[0].value in self.macros:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'ifndef':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ if args[0].value in self.macros:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'if':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ result = self.evalexpr(args)
+ if not result:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'elif':
+ if ifstack:
+ if ifstack[-1][0]: # We only pay attention if outer "if" allows this
+ if enable: # If already true, we flip enable False
+ enable = False
+ elif not iftrigger: # If False, but not triggered yet, we'll check expression
+ result = self.evalexpr(args)
+ if result:
+ enable = True
+ iftrigger = True
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
+
+ elif name == 'else':
+ if ifstack:
+ if ifstack[-1][0]:
+ if enable:
+ enable = False
+ elif not iftrigger:
+ enable = True
+ iftrigger = True
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
+
+ elif name == 'endif':
+ if ifstack:
+ enable,iftrigger = ifstack.pop()
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
+ else:
+ # Unknown preprocessor directive
+ pass
+
+ else:
+ # Normal text
+ if enable:
+ chunk.extend(x)
+
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+
+ # ----------------------------------------------------------------------
+ # include()
+ #
+ # Implementation of file-inclusion
+ # ----------------------------------------------------------------------
+
+ def include(self,tokens):
+ # Try to extract the filename and then process an include file
+ if not tokens:
+ return
+ if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
+ tokens = self.expand_macros(tokens)
+
+ if tokens[0].value == '<':
+ # Include <...>
+ i = 1
+ while i < len(tokens):
+ if tokens[i].value == '>':
+ break
+ i += 1
+ else:
+ print "Malformed #include <...>"
+ return
+ filename = "".join([x.value for x in tokens[1:i]])
+ path = self.path + [""] + self.temp_path
+ elif tokens[0].type == self.t_STRING:
+ filename = tokens[0].value[1:-1]
+ path = self.temp_path + [""] + self.path
+ else:
+ print "Malformed #include statement"
+ return
+ for p in path:
+ iname = os.path.join(p,filename)
+ try:
+ data = open(iname,"r").read()
+ dname = os.path.dirname(iname)
+ if dname:
+ self.temp_path.insert(0,dname)
+ for tok in self.parsegen(data,filename):
+ yield tok
+ if dname:
+ del self.temp_path[0]
+ break
+ except IOError,e:
+ pass
+ else:
+ print "Couldn't find '%s'" % filename
+
+ # ----------------------------------------------------------------------
+ # define()
+ #
+ # Define a new macro
+ # ----------------------------------------------------------------------
+
+ def define(self,tokens):
+ if isinstance(tokens,(str,unicode)):
+ tokens = self.tokenize(tokens)
+
+ linetok = tokens
+ try:
+ name = linetok[0]
+ if len(linetok) > 1:
+ mtype = linetok[1]
+ else:
+ mtype = None
+ if not mtype:
+ m = Macro(name.value,[])
+ self.macros[name.value] = m
+ elif mtype.type in self.t_WS:
+ # A normal macro
+ m = Macro(name.value,self.tokenstrip(linetok[2:]))
+ self.macros[name.value] = m
+ elif mtype.value == '(':
+ # A macro with arguments
+ tokcount, args, positions = self.collect_args(linetok[1:])
+ variadic = False
+ for a in args:
+ if variadic:
+ print "No more arguments may follow a variadic argument"
+ break
+ astr = "".join([str(_i.value) for _i in a])
+ if astr == "...":
+ variadic = True
+ a[0].type = self.t_ID
+ a[0].value = '__VA_ARGS__'
+ del a[1:]
+ continue
+ elif astr[-3:] == "..." and a[0].type == self.t_ID:
+ variadic = True
+ del a[1:]
+ # If the name itself ends in "...", strip the trailing "..." so macro
+ # expansion sees the bare identifier
+ if a[0].value[-3:] == '...':
+ a[0].value = a[0].value[:-3]
+ continue
+ if len(a) > 1 or a[0].type != self.t_ID:
+ print "Invalid macro argument"
+ break
+ else:
+ mvalue = self.tokenstrip(linetok[1+tokcount:])
+ i = 0
+ while i < len(mvalue):
+ if i+1 < len(mvalue):
+ if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
+ del mvalue[i]
+ continue
+ elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
+ del mvalue[i+1]
+ i += 1
+ m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
+ self.macro_prescan(m)
+ self.macros[name.value] = m
+ else:
+ print "Bad macro definition"
+ except LookupError:
+ print "Bad macro definition"
+
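+ # Usage sketch (illustrative; assumes a lexer built from this module's
+ # own t_CPP_* rules):
+ #
+ # import ply.lex as lex
+ # import ply.cpp as cpp
+ # p = cpp.Preprocessor(lex.lex(module=cpp))
+ # p.define("ADD(a,b) ((a)+(b))")
+ # p.parse("ADD(1,2)\n")
+ # while True:
+ # tok = p.token()
+ # if not tok: break
+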
+ # ----------------------------------------------------------------------
+ # undef()
+ #
+ # Undefine a macro
+ # ----------------------------------------------------------------------
+
+ def undef(self,tokens):
+ id = tokens[0].value
+ try:
+ del self.macros[id]
+ except LookupError:
+ pass
+
+ # ----------------------------------------------------------------------
+ # parse()
+ #
+ # Parse input text.
+ # ----------------------------------------------------------------------
+ def parse(self,input,source=None,ignore={}):
+ self.ignore = ignore
+ self.parser = self.parsegen(input,source)
+
+ # ----------------------------------------------------------------------
+ # token()
+ #
+ # Method to return individual tokens
+ # ----------------------------------------------------------------------
+ def token(self):
+ try:
+ while True:
+ tok = self.parser.next()
+ if tok.type not in self.ignore: return tok
+ except StopIteration:
+ self.parser = None
+ return None
+
+if __name__ == '__main__':
+ import ply.lex as lex
+ lexer = lex.lex()
+
+ # Run a preprocessor
+ import sys
+ f = open(sys.argv[1])
+ input = f.read()
+
+ p = Preprocessor(lexer)
+ p.parse(input,sys.argv[1])
+ while True:
+ tok = p.token()
+ if not tok: break
+ print p.source, tok
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ext/ply/ply/ctokens.py b/ext/ply/ply/ctokens.py
new file mode 100644
index 000000000..dd5f102dc
--- /dev/null
+++ b/ext/ply/ply/ctokens.py
@@ -0,0 +1,133 @@
+# ----------------------------------------------------------------------
+# ctokens.py
+#
+# Token specifications for symbols in ANSI C and C++. This file is
+# meant to be used as a library in other tokenizers.
+# ----------------------------------------------------------------------
+
+# Reserved words
+
+tokens = [
+ # Literals (identifier, integer constant, float constant, string constant, char const)
+ 'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
+
+ # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
+ 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
+ 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
+ 'LOR', 'LAND', 'LNOT',
+ 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
+
+ # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
+ 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
+ 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
+
+ # Increment/decrement (++,--)
+ 'PLUSPLUS', 'MINUSMINUS',
+
+ # Structure dereference (->)
+ 'ARROW',
+
+ # Ternary operator (?)
+ 'TERNARY',
+
+ # Delimiters ( ) [ ] { } , . ; :
+ 'LPAREN', 'RPAREN',
+ 'LBRACKET', 'RBRACKET',
+ 'LBRACE', 'RBRACE',
+ 'COMMA', 'PERIOD', 'SEMI', 'COLON',
+
+ # Ellipsis (...)
+ 'ELLIPSIS',
+
+ # Comments (the rules below return these as tokens)
+ 'COMMENT', 'CPPCOMMENT',
+]
+
+# Operators
+t_PLUS = r'\+'
+t_MINUS = r'-'
+t_TIMES = r'\*'
+t_DIVIDE = r'/'
+t_MOD = r'%'
+t_OR = r'\|'
+t_AND = r'&'
+t_NOT = r'~'
+t_XOR = r'\^'
+t_LSHIFT = r'<<'
+t_RSHIFT = r'>>'
+t_LOR = r'\|\|'
+t_LAND = r'&&'
+t_LNOT = r'!'
+t_LT = r'<'
+t_GT = r'>'
+t_LE = r'<='
+t_GE = r'>='
+t_EQ = r'=='
+t_NE = r'!='
+
+# Assignment operators
+
+t_EQUALS = r'='
+t_TIMESEQUAL = r'\*='
+t_DIVEQUAL = r'/='
+t_MODEQUAL = r'%='
+t_PLUSEQUAL = r'\+='
+t_MINUSEQUAL = r'-='
+t_LSHIFTEQUAL = r'<<='
+t_RSHIFTEQUAL = r'>>='
+t_ANDEQUAL = r'&='
+t_OREQUAL = r'\|='
+t_XOREQUAL = r'\^='
+
+# Increment/decrement
+t_PLUSPLUS = r'\+\+'
+t_MINUSMINUS = r'--'
+
+# ->
+t_ARROW = r'->'
+
+# ?
+t_TERNARY = r'\?'
+
+# Delimiters
+t_LPAREN = r'\('
+t_RPAREN = r'\)'
+t_LBRACKET = r'\['
+t_RBRACKET = r'\]'
+t_LBRACE = r'\{'
+t_RBRACE = r'\}'
+t_COMMA = r','
+t_PERIOD = r'\.'
+t_SEMI = r';'
+t_COLON = r':'
+t_ELLIPSIS = r'\.\.\.'
+
+# Identifiers
+t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
+
+# Integer literal
+t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
+
+# Floating literal
+t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
+
+# Character constant 'c' or L'c'
+t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\''
+
+# Comment (C-Style)
+def t_COMMENT(t):
+ r'/\*(.|\n)*?\*/'
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+# Comment (C++-Style)
+def t_CPPCOMMENT(t):
+ r'//.*\n'
+ t.lexer.lineno += 1
+ return t
+
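+# Usage sketch (illustrative): a tokenizer module can pull these
+# definitions in wholesale and add only what it is missing:
+#
+# from ply.ctokens import *
+# import ply.lex as lex
+#
+# t_ignore = ' \t'
+#
+# def t_error(t):
+# t.lexer.skip(1)
+#
+# lexer = lex.lex()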
+
+
+
+
+
diff --git a/ext/ply/ply/lex.py b/ext/ply/ply/lex.py
index 782b29286..4759d1b7a 100644
--- a/ext/ply/ply/lex.py
+++ b/ext/ply/ply/lex.py
@@ -1,67 +1,114 @@
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# ply: lex.py
#
-# Author: David M. Beazley (dave@dabeaz.com)
-#
-# Copyright (C) 2001-2007, David M. Beazley
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
+# Copyright (C) 2001-2009,
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+# endorse or promote products derived from this software without
+# specific prior written permission.
#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See the file COPYING for a complete copy of the LGPL.
-#-----------------------------------------------------------------------------
-
-__version__ = "2.3"
-
-import re, sys, types
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
-# Regular expression used to match valid token names
-_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+__version__ = "3.2"
+__tabversion__ = "3.2" # Version of table file used
-# Available instance types. This is used when lexers are defined by a class.
-# It's a little funky because I want to preserve backwards compatibility
-# with Python 2.0 where types.ObjectType is undefined.
+import re, sys, types, copy, os
+# This tuple contains known string types
try:
- _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+ # Python 2.6
+ StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
- _INSTANCETYPE = types.InstanceType
- class object: pass # Note: needed if no new-style classes present
+ # Python 3.0
+ StringTypes = (str, bytes)
+
+# Extract the code attribute of a function. Different implementations
+# are for Python 2/3 compatibility.
+
+if sys.version_info[0] < 3:
+ def func_code(f):
+ return f.func_code
+else:
+ def func_code(f):
+ return f.__code__
+
+# This regular expression is used to match valid token names
+_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
+
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
-# Token class
+# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
- def skip(self,n):
- self.lexer.skip(n)
+
+# This object is a stand-in for a logging object created by the
+# logging module.
+
+class PlyLogger(object):
+ def __init__(self,f):
+ self.f = f
+ def critical(self,msg,*args,**kwargs):
+ self.f.write((msg % args) + "\n")
+
+ def warning(self,msg,*args,**kwargs):
+ self.f.write("WARNING: "+ (msg % args) + "\n")
+
+ def error(self,msg,*args,**kwargs):
+ self.f.write("ERROR: " + (msg % args) + "\n")
+
+ info = critical
+ debug = critical
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+ def __getattribute__(self,name):
+ return self
+ def __call__(self,*args,**kwargs):
+ return self
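+
+# Usage sketch (illustrative): any object with critical/warning/error/
+# info/debug methods can stand in for PlyLogger, e.g. a logging.Logger:
+#
+# import logging
+# lexer = lex(errorlog=logging.getLogger("ply.lex"))
+#
+# Passing errorlog=NullLogger() silences lexer messages entirely.
+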
# -----------------------------------------------------------------------------
-# Lexer class
+# === Lexing Engine ===
#
-# This class encapsulates all of the methods and data associated with a lexer.
+# The following Lexer class implements the lexer runtime. There are only
+# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
+# clone() - Clone the lexer
+#
+# lineno - Current line number
+# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
@@ -73,6 +120,7 @@ class Lexer:
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
+ self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
@@ -88,28 +136,10 @@ class Lexer:
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
- self.lexdebug = 0 # Debugging mode
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
- c = Lexer()
- c.lexstatere = self.lexstatere
- c.lexstateinfo = self.lexstateinfo
- c.lexstateretext = self.lexstateretext
- c.lexstate = self.lexstate
- c.lexstatestack = self.lexstatestack
- c.lexstateignore = self.lexstateignore
- c.lexstateerrorf = self.lexstateerrorf
- c.lexreflags = self.lexreflags
- c.lexdata = self.lexdata
- c.lexpos = self.lexpos
- c.lexlen = self.lexlen
- c.lextokens = self.lextokens
- c.lexdebug = self.lexdebug
- c.lineno = self.lineno
- c.lexoptimize = self.lexoptimize
- c.lexliterals = self.lexliterals
- c.lexmodule = self.lexmodule
+ c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
@@ -133,27 +163,37 @@ class Lexer:
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
-
- # Set up other attributes
- c.begin(c.lexstate)
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
- def writetab(self,tabfile):
- tf = open(tabfile+".py","w")
+ def writetab(self,tabfile,outputdir=""):
+ if isinstance(tabfile,types.ModuleType):
+ return
+ basetabfilename = tabfile.split(".")[-1]
+ filename = os.path.join(outputdir,basetabfilename)+".py"
+ tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
+ tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
+ # Collect all functions in the initial state
+ initial = self.lexstatere["INITIAL"]
+ initialfuncs = []
+ for part in initial:
+ for f in part[1]:
+ if f and f[0]:
+ initialfuncs.append(f)
+
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
- titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1])))
+ titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
@@ -172,7 +212,19 @@ class Lexer:
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
- exec "import %s as lextab" % tabfile
+ if isinstance(tabfile,types.ModuleType):
+ lextab = tabfile
+ else:
+ if sys.version_info[0] < 3:
+ exec("import %s as lextab" % tabfile)
+ else:
+ env = { }
+ exec("import %s as lextab" % tabfile, env,env)
+ lextab = env['lextab']
+
+ if getattr(lextab,"_tabversion","0.0") != __version__:
+ raise ImportError("Inconsistent PLY version")
+
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
@@ -197,8 +249,10 @@ class Lexer:
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
- if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
- raise ValueError, "Expected a string"
+ # Pull off the first character to see if s looks like a string
+ c = s[:1]
+ if not isinstance(c,StringTypes):
+ raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
@@ -207,8 +261,8 @@ class Lexer:
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
- if not self.lexstatere.has_key(state):
- raise ValueError, "Undefined state"
+ if not state in self.lexstatere:
+ raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
@@ -241,7 +295,7 @@ class Lexer:
self.lexpos += n
# ------------------------------------------------------------
- # token() - Return the next token from the Lexer
+ # opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
@@ -265,43 +319,45 @@ class Lexer:
m = lexre.match(lexdata,lexpos)
if not m: continue
- # Set last match in lexer so that rules can access it if they want
- self.lexmatch = m
-
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
- tok.lexer = self
- lexpos = m.end()
i = m.lastindex
func,tok.type = lexindexfunc[i]
- self.lexpos = lexpos
if not func:
# If no token type was set, it's an ignored token
- if tok.type: return tok
- break
+ if tok.type:
+ self.lexpos = m.end()
+ return tok
+ else:
+ lexpos = m.end()
+ break
- # if func not callable, it means it's an ignored token
- if not callable(func):
- break
+ lexpos = m.end()
# If token is processed by a function, call it
+
+ tok.lexer = self # Set additional attributes useful in token rules
+ self.lexmatch = m
+ self.lexpos = lexpos
+
newtok = func(tok)
# Every function must return a token, if nothing, we just move to next token
if not newtok:
- lexpos = self.lexpos # This is here in case user has updated lexpos.
+ lexpos = self.lexpos # This is here in case user has updated lexpos.
+ lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
- if not self.lextokens.has_key(newtok.type):
- raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
- func.func_code.co_filename, func.func_code.co_firstlineno,
+ if not newtok.type in self.lextokens:
+ raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+ func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
@@ -311,7 +367,6 @@ class Lexer:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
- tok.lexer = self
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
@@ -329,58 +384,60 @@ class Lexer:
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
- raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+ raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
- raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
+ raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
- raise RuntimeError, "No input string given with input()"
+ raise RuntimeError("No input string given with input()")
return None
+ # Iterator interface
+ def __iter__(self):
+ return self
+
+ def next(self):
+ t = self.token()
+ if t is None:
+ raise StopIteration
+ return t
+
+ __next__ = next
+
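+# Usage sketch (illustrative): with the iterator protocol in place,
+# tokens can be consumed with an ordinary for-loop:
+#
+# lexer.input("3 + 4 * 5")
+# for tok in lexer:
+# print tok
+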
# -----------------------------------------------------------------------------
-# _validate_file()
+# ==== Lex Builder ===
#
-# This checks to see if there are duplicated t_rulename() functions or strings
-# in the parser input file. This is done using a simple regular expression
-# match on each line in the filename.
+# The functions and classes below are used to collect lexing information
+# and build a Lexer object from it.
# -----------------------------------------------------------------------------
-def _validate_file(filename):
- import os.path
- base,ext = os.path.splitext(filename)
- if ext != '.py': return 1 # No idea what the file is. Return OK
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the lex() call if none was provided.
+# -----------------------------------------------------------------------------
+def get_caller_module_dict(levels):
try:
- f = open(filename)
- lines = f.readlines()
- f.close()
- except IOError:
- return 1 # Oh well
-
- fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
- sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
- counthash = { }
- linen = 1
- noerror = 1
- for l in lines:
- m = fre.match(l)
- if not m:
- m = sre.match(l)
- if m:
- name = m.group(1)
- prev = counthash.get(name)
- if not prev:
- counthash[name] = linen
- else:
- print >>sys.stderr, "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
- noerror = 0
- linen += 1
- return noerror
+ raise RuntimeError
+ except RuntimeError:
+ e,b,t = sys.exc_info()
+ f = t.tb_frame
+ while levels > 0:
+ f = f.f_back
+ levels -= 1
+ ldict = f.f_globals.copy()
+ if f.f_globals != f.f_locals:
+ ldict.update(f.f_locals)
+
+ return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
@@ -389,11 +446,11 @@ def _validate_file(filename):
# suitable for output to a table file
# -----------------------------------------------------------------------------
-def _funcs_to_names(funclist):
+def _funcs_to_names(funclist,namelist):
result = []
- for f in funclist:
+ for f,name in zip(funclist,namelist):
if f and f[0]:
- result.append((f[0].__name__,f[1]))
+ result.append((name, f[1]))
else:
result.append(f)
return result
@@ -430,25 +487,27 @@ def _form_master_re(relist,reflags,ldict,toknames):
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
+ lexindexnames = lexindexfunc[:]
+
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
- lexindexfunc[i] = (handle,toknames[handle.__name__])
+ lexindexfunc[i] = (handle,toknames[f])
+ lexindexnames[i] = f
elif handle is not None:
- # If rule was specified as a string, we build an anonymous
- # callback function to carry out the action
+ lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
-
- return [(lexre,lexindexfunc)],[regex]
- except Exception,e:
+
+ return [(lexre,lexindexfunc)],[regex],[lexindexnames]
+ except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
- llist, lre = _form_master_re(relist[:m],reflags,ldict,toknames)
- rlist, rre = _form_master_re(relist[m:],reflags,ldict,toknames)
- return llist+rlist, lre+rre
+ llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
+ rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
+ return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
@@ -463,61 +522,376 @@ def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
- if not names.has_key(parts[i]) and parts[i] != 'ANY': break
+ if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
- states = tuple(names.keys())
+ states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
+
+# -----------------------------------------------------------------------------
+# LexerReflect()
+#
+# This class represents information needed to build a lexer as extracted from a
+# user's input file.
+# -----------------------------------------------------------------------------
+class LexerReflect(object):
+ def __init__(self,ldict,log=None,reflags=0):
+ self.ldict = ldict
+ self.error_func = None
+ self.tokens = []
+ self.reflags = reflags
+ self.stateinfo = { 'INITIAL' : 'inclusive'}
+ self.files = {}
+ self.error = 0
+
+ if log is None:
+ self.log = PlyLogger(sys.stderr)
+ else:
+ self.log = log
+
+ # Get all of the basic information
+ def get_all(self):
+ self.get_tokens()
+ self.get_literals()
+ self.get_states()
+ self.get_rules()
+
+ # Validate all of the information
+ def validate_all(self):
+ self.validate_tokens()
+ self.validate_literals()
+ self.validate_rules()
+ return self.error
+
+ # Get the tokens map
+ def get_tokens(self):
+ tokens = self.ldict.get("tokens",None)
+ if not tokens:
+ self.log.error("No token list is defined")
+ self.error = 1
+ return
+
+ if not isinstance(tokens,(list, tuple)):
+ self.log.error("tokens must be a list or tuple")
+ self.error = 1
+ return
+
+ if not tokens:
+ self.log.error("tokens is empty")
+ self.error = 1
+ return
+
+ self.tokens = tokens
+
+ # Validate the tokens
+ def validate_tokens(self):
+ terminals = {}
+ for n in self.tokens:
+ if not _is_identifier.match(n):
+ self.log.error("Bad token name '%s'",n)
+ self.error = 1
+ if n in terminals:
+ self.log.warning("Token '%s' multiply defined", n)
+ terminals[n] = 1
+
+ # Get the literals specifier
+ def get_literals(self):
+ self.literals = self.ldict.get("literals","")
+
+ # Validate literals
+ def validate_literals(self):
+ try:
+ for c in self.literals:
+ if not isinstance(c,StringTypes) or len(c) > 1:
+ self.log.error("Invalid literal %s. Must be a single character", repr(c))
+ self.error = 1
+ continue
+
+ except TypeError:
+ self.log.error("Invalid literals specification. literals must be a sequence of characters")
+ self.error = 1
+
+ def get_states(self):
+ self.states = self.ldict.get("states",None)
+ # Build statemap
+ if self.states:
+ if not isinstance(self.states,(tuple,list)):
+ self.log.error("states must be defined as a tuple or list")
+ self.error = 1
+ else:
+ for s in self.states:
+ if not isinstance(s,tuple) or len(s) != 2:
+ self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
+ self.error = 1
+ continue
+ name, statetype = s
+ if not isinstance(name,StringTypes):
+ self.log.error("State name %s must be a string", repr(name))
+ self.error = 1
+ continue
+ if not (statetype == 'inclusive' or statetype == 'exclusive'):
+ self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
+ self.error = 1
+ continue
+ if name in self.stateinfo:
+ self.log.error("State '%s' already defined",name)
+ self.error = 1
+ continue
+ self.stateinfo[name] = statetype
+
+ # Get all of the symbols with a t_ prefix and sort them into various
+ # categories (functions, strings, error functions, and ignore characters)
+
+ def get_rules(self):
+ tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
+
+ # Now build up a list of functions and a list of strings
+
+ self.toknames = { } # Mapping of symbols to token names
+ self.funcsym = { } # Symbols defined as functions
+ self.strsym = { } # Symbols defined as strings
+ self.ignore = { } # Ignore strings by state
+ self.errorf = { } # Error functions by state
+
+ for s in self.stateinfo:
+ self.funcsym[s] = []
+ self.strsym[s] = []
+
+ if len(tsymbols) == 0:
+ self.log.error("No rules of the form t_rulename are defined")
+ self.error = 1
+ return
+
+ for f in tsymbols:
+ t = self.ldict[f]
+ states, tokname = _statetoken(f,self.stateinfo)
+ self.toknames[f] = tokname
+
+ if hasattr(t,"__call__"):
+ if tokname == 'error':
+ for s in states:
+ self.errorf[s] = t
+ elif tokname == 'ignore':
+ line = func_code(t).co_firstlineno
+ file = func_code(t).co_filename
+ self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
+ self.error = 1
+ else:
+ for s in states:
+ self.funcsym[s].append((f,t))
+ elif isinstance(t, StringTypes):
+ if tokname == 'ignore':
+ for s in states:
+ self.ignore[s] = t
+ if "\\" in t:
+ self.log.warning("%s contains a literal backslash '\\'",f)
+
+ elif tokname == 'error':
+ self.log.error("Rule '%s' must be defined as a function", f)
+ self.error = 1
+ else:
+ for s in states:
+ self.strsym[s].append((f,t))
+ else:
+ self.log.error("%s not defined as a function or string", f)
+ self.error = 1
+
+ # Sort the functions by line number
+ for f in self.funcsym.values():
+ if sys.version_info[0] < 3:
+ f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
+ else:
+ # Python 3.0
+ f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
+
+ # Sort the strings by regular expression length
+ for s in self.strsym.values():
+ if sys.version_info[0] < 3:
+ s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+ else:
+ # Python 3.0
+ s.sort(key=lambda x: len(x[1]),reverse=True)
+
+ # Validate all of the t_rules collected
+ def validate_rules(self):
+ for state in self.stateinfo:
+ # Validate all rules defined by functions
+
+
+
+ for fname, f in self.funcsym[state]:
+ line = func_code(f).co_firstlineno
+ file = func_code(f).co_filename
+ self.files[file] = 1
+
+ tokname = self.toknames[fname]
+ if isinstance(f, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ nargs = func_code(f).co_argcount
+ if nargs > reqargs:
+ self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
+ self.error = 1
+ continue
+
+ if nargs < reqargs:
+ self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
+ self.error = 1
+ continue
+
+ if not f.__doc__:
+ self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
+ self.error = 1
+ continue
+
+ try:
+ c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
+ if c.match(""):
+ self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
+ self.error = 1
+ except re.error:
+ _etype, e, _etrace = sys.exc_info()
+ self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
+ if '#' in f.__doc__:
+ self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
+ self.error = 1
+
+ # Validate all rules defined by strings
+ for name,r in self.strsym[state]:
+ tokname = self.toknames[name]
+ if tokname == 'error':
+ self.log.error("Rule '%s' must be defined as a function", name)
+ self.error = 1
+ continue
+
+ if not tokname in self.tokens and tokname.find("ignore_") < 0:
+ self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
+ self.error = 1
+ continue
+
+ try:
+ c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
+ if (c.match("")):
+ self.log.error("Regular expression for rule '%s' matches empty string",name)
+ self.error = 1
+ except re.error:
+ _etype, e, _etrace = sys.exc_info()
+ self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
+ if '#' in r:
+ self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
+ self.error = 1
+
+ if not self.funcsym[state] and not self.strsym[state]:
+ self.log.error("No rules defined for state '%s'",state)
+ self.error = 1
+
+ # Validate the error function
+ efunc = self.errorf.get(state,None)
+ if efunc:
+ f = efunc
+ line = func_code(f).co_firstlineno
+ file = func_code(f).co_filename
+ self.files[file] = 1
+
+ if isinstance(f, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ nargs = func_code(f).co_argcount
+ if nargs > reqargs:
+ self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
+ self.error = 1
+
+ if nargs < reqargs:
+ self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
+ self.error = 1
+
+ for f in self.files:
+ self.validate_file(f)
+
+
+ # -----------------------------------------------------------------------------
+ # validate_file()
+ #
+ # This checks to see if there are duplicated t_rulename() functions or strings
+ # in the parser input file. This is done using a simple regular expression
+ # match on each line in the given file.
+ # -----------------------------------------------------------------------------
+
+ def validate_file(self,filename):
+ import os.path
+ base,ext = os.path.splitext(filename)
+ if ext != '.py': return # No idea what the file is. Return OK
+
+ try:
+ f = open(filename)
+ lines = f.readlines()
+ f.close()
+ except IOError:
+ return # Couldn't find the file. Don't worry about it
+
+ fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+ sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+
+ counthash = { }
+ linen = 1
+ for l in lines:
+ m = fre.match(l)
+ if not m:
+ m = sre.match(l)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
+ self.error = 1
+ linen += 1
+
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
-def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0):
+def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
- error = 0
- files = { }
lexobj = Lexer()
- lexobj.lexdebug = debug
lexobj.lexoptimize = optimize
global token,input
- if nowarn: warn = 0
- else: warn = 1
+ if errorlog is None:
+ errorlog = PlyLogger(sys.stderr)
+
+ if debug:
+ if debuglog is None:
+ debuglog = PlyLogger(sys.stderr)
+ # Get the module dictionary used for the lexer
if object: module = object
if module:
- # User supplied a module object.
- if isinstance(module, types.ModuleType):
- ldict = module.__dict__
- elif isinstance(module, _INSTANCETYPE):
- _items = [(k,getattr(module,k)) for k in dir(module)]
- ldict = { }
- for (i,v) in _items:
- ldict[i] = v
- else:
- raise ValueError,"Expected a module or instance"
- lexobj.lexmodule = module
-
+ _items = [(k,getattr(module,k)) for k in dir(module)]
+ ldict = dict(_items)
else:
- # No module given. We might be able to get information from the caller.
- try:
- raise RuntimeError
- except RuntimeError:
- e,b,t = sys.exc_info()
- f = t.tb_frame
- f = f.f_back # Walk out to our calling function
- ldict = f.f_globals # Grab its globals dictionary
+ ldict = get_caller_module_dict(2)
+
+ # Collect lexer information from the dictionary
+ linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
+ linfo.get_all()
+ if not optimize:
+ if linfo.validate_all():
+ raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
@@ -530,280 +904,94 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
except ImportError:
pass
- # Get the tokens, states, and literals variables (if any)
- if (module and isinstance(module,_INSTANCETYPE)):
- tokens = getattr(module,"tokens",None)
- states = getattr(module,"states",None)
- literals = getattr(module,"literals","")
- else:
- tokens = ldict.get("tokens",None)
- states = ldict.get("states",None)
- literals = ldict.get("literals","")
-
- if not tokens:
- raise SyntaxError,"lex: module does not define 'tokens'"
- if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
- raise SyntaxError,"lex: tokens must be a list or tuple."
+ # Dump some basic debugging information
+ if debug:
+ debuglog.info("lex: tokens = %r", linfo.tokens)
+ debuglog.info("lex: literals = %r", linfo.literals)
+ debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
- if not optimize:
- for n in tokens:
- if not _is_identifier.match(n):
- print >>sys.stderr, "lex: Bad token name '%s'" % n
- error = 1
- if warn and lexobj.lextokens.has_key(n):
- print >>sys.stderr, "lex: Warning. Token '%s' multiply defined." % n
- lexobj.lextokens[n] = None
- else:
- for n in tokens: lexobj.lextokens[n] = None
-
- if debug:
- print "lex: tokens = '%s'" % lexobj.lextokens.keys()
+ for n in linfo.tokens:
+ lexobj.lextokens[n] = 1
- try:
- for c in literals:
- if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
- print >>sys.stderr, "lex: Invalid literal %s. Must be a single character" % repr(c)
- error = 1
- continue
-
- except TypeError:
- print >>sys.stderr, "lex: Invalid literals specification. literals must be a sequence of characters."
- error = 1
-
- lexobj.lexliterals = literals
-
- # Build statemap
- if states:
- if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
- print >>sys.stderr, "lex: states must be defined as a tuple or list."
- error = 1
- else:
- for s in states:
- if not isinstance(s,types.TupleType) or len(s) != 2:
- print >>sys.stderr, "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
- error = 1
- continue
- name, statetype = s
- if not isinstance(name,types.StringType):
- print >>sys.stderr, "lex: state name %s must be a string" % repr(name)
- error = 1
- continue
- if not (statetype == 'inclusive' or statetype == 'exclusive'):
- print >>sys.stderr, "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
- error = 1
- continue
- if stateinfo.has_key(name):
- print >>sys.stderr, "lex: state '%s' already defined." % name
- error = 1
- continue
- stateinfo[name] = statetype
-
- # Get a list of symbols with the t_ or s_ prefix
- tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
-
- # Now build up a list of functions and a list of strings
-
- funcsym = { } # Symbols defined as functions
- strsym = { } # Symbols defined as strings
- toknames = { } # Mapping of symbols to token names
-
- for s in stateinfo.keys():
- funcsym[s] = []
- strsym[s] = []
-
- ignore = { } # Ignore strings by state
- errorf = { } # Error functions by state
-
- if len(tsymbols) == 0:
- raise SyntaxError,"lex: no rules of the form t_rulename are defined."
-
- for f in tsymbols:
- t = ldict[f]
- states, tokname = _statetoken(f,stateinfo)
- toknames[f] = tokname
-
- if callable(t):
- for s in states: funcsym[s].append((f,t))
- elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
- for s in states: strsym[s].append((f,t))
- else:
- print >>sys.stderr, "lex: %s not defined as a function or string" % f
- error = 1
-
- # Sort the functions by line number
- for f in funcsym.values():
- f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
+ # Get literals specification
+ if isinstance(linfo.literals,(list,tuple)):
+ lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
+ else:
+ lexobj.lexliterals = linfo.literals
- # Sort the strings by regular expression length
- for s in strsym.values():
- s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+ # Get the stateinfo dictionary
+ stateinfo = linfo.stateinfo
regexs = { }
-
# Build the master regular expressions
- for state in stateinfo.keys():
+ for state in stateinfo:
regex_list = []
# Add rules defined by functions first
- for fname, f in funcsym[state]:
- line = f.func_code.co_firstlineno
- file = f.func_code.co_filename
- files[file] = None
- tokname = toknames[fname]
-
- ismethod = isinstance(f, types.MethodType)
-
- if not optimize:
- nargs = f.func_code.co_argcount
- if ismethod:
- reqargs = 2
- else:
- reqargs = 1
- if nargs > reqargs:
- print >>sys.stderr, "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
- error = 1
- continue
-
- if nargs < reqargs:
- print >>sys.stderr, "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
- error = 1
- continue
-
- if tokname == 'ignore':
- print >>sys.stderr, "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
- error = 1
- continue
-
- if tokname == 'error':
- errorf[state] = f
- continue
-
- if f.__doc__:
- if not optimize:
- try:
- c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
- if c.match(""):
- print >>sys.stderr, "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
- error = 1
- continue
- except re.error,e:
- print >>sys.stderr, "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
- if '#' in f.__doc__:
- print >>sys.stderr, "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
- error = 1
- continue
-
- if debug:
- print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
-
- # Okay. The regular expression seemed okay. Let's append it to the master regular
- # expression we're building
-
- regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
- else:
- print >>sys.stderr, "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+ for fname, f in linfo.funcsym[state]:
+ line = func_code(f).co_firstlineno
+ file = func_code(f).co_filename
+ regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
+ if debug:
+ debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
- for name,r in strsym[state]:
- tokname = toknames[name]
-
- if tokname == 'ignore':
- if "\\" in r:
- print >>sys.stderr, "lex: Warning. %s contains a literal backslash '\\'" % name
- ignore[state] = r
- continue
-
- if not optimize:
- if tokname == 'error':
- raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
- error = 1
- continue
-
- if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
- print >>sys.stderr, "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
- error = 1
- continue
- try:
- c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
- if (c.match("")):
- print >>sys.stderr, "lex: Regular expression for rule '%s' matches empty string." % name
- error = 1
- continue
- except re.error,e:
- print >>sys.stderr, "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
- if '#' in r:
- print >>sys.stderr, "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
-
- error = 1
- continue
- if debug:
- print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
-
+ for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
-
- if not regex_list:
- print >>sys.stderr, "lex: No rules defined for state '%s'" % state
- error = 1
+ if debug:
+ debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
-
- if not optimize:
- for f in files.keys():
- if not _validate_file(f):
- error = 1
-
- if error:
- raise SyntaxError,"lex: Unable to build lexer."
-
- # From this point forward, we're reasonably confident that we can build the lexer.
- # No more errors will be generated, but there might be some warning messages.
-
# Build the master regular expressions
- for state in regexs.keys():
- lexre, re_text = _form_master_re(regexs[state],reflags,ldict,toknames)
+ if debug:
+ debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
+
+ for state in regexs:
+ lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
+ lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
- print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
+ debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
- # For inclusive states, we need to add the INITIAL state
- for state,type in stateinfo.items():
- if state != "INITIAL" and type == 'inclusive':
+ # For inclusive states, we need to add the regular expressions from the INITIAL state
+ for state,stype in stateinfo.items():
+ if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+ lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
# Set up ignore variables
- lexobj.lexstateignore = ignore
+ lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
- lexobj.lexstateerrorf = errorf
- lexobj.lexerrorf = errorf.get("INITIAL",None)
- if warn and not lexobj.lexerrorf:
- print >>sys.stderr, "lex: Warning. no t_error rule is defined."
+ lexobj.lexstateerrorf = linfo.errorf
+ lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
+ if not lexobj.lexerrorf:
+ errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
- if warn and not errorf.has_key(s):
- print >>sys.stderr, "lex: Warning. no error rule is defined for exclusive state '%s'" % s
- if warn and not ignore.has_key(s) and lexobj.lexignore:
- print >>sys.stderr, "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
+ if not s in linfo.errorf:
+ errorlog.warning("No error rule is defined for exclusive state '%s'", s)
+ if not s in linfo.ignore and lexobj.lexignore:
+ errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
- if not errorf.has_key(s):
- errorf[s] = errorf.get("INITIAL",None)
- if not ignore.has_key(s):
- ignore[s] = ignore.get("INITIAL","")
-
+ if not s in linfo.errorf:
+ linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
+ if not s in linfo.ignore:
+ linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
@@ -812,7 +1000,7 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
# If in optimize mode, we write the lextab
if lextab and optimize:
- lexobj.writetab(lextab)
+ lexobj.writetab(lextab,outputdir)
return lexobj
@@ -830,7 +1018,7 @@ def runmain(lexer=None,data=None):
data = f.read()
f.close()
except IndexError:
- print "Reading from standard input (type EOF to end):"
+ sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
@@ -846,8 +1034,7 @@ def runmain(lexer=None,data=None):
while 1:
tok = _token()
if not tok: break
- print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
-
+ sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
@@ -858,7 +1045,10 @@ def runmain(lexer=None,data=None):
def TOKEN(r):
def set_doc(f):
- f.__doc__ = r
+ if hasattr(r,"__call__"):
+ f.__doc__ = r.__doc__
+ else:
+ f.__doc__ = r
return f
return set_doc
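
# Illustrative sketch, not part of the patch: typical @TOKEN usage, attaching
# a regex built at runtime to a rule function (identifier and t_ID are
# hypothetical names). With the change above, @TOKEN also accepts another
# function and copies its docstring.
identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'

@TOKEN(identifier)
def t_ID(t):
    return t
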
diff --git a/ext/ply/ply/yacc.py b/ext/ply/ply/yacc.py
index 39c17a9ed..3bf6e8e4d 100644
--- a/ext/ply/ply/yacc.py
+++ b/ext/ply/ply/yacc.py
@@ -1,26 +1,35 @@
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# ply: yacc.py
#
-# Author(s): David M. Beazley (dave@dabeaz.com)
-#
-# Copyright (C) 2001-2007, David M. Beazley
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See the file COPYING for a complete copy of the LGPL.
-#
+# Copyright (C) 2001-2009,
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+# endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
@@ -50,7 +59,8 @@
# own risk!
# ----------------------------------------------------------------------------
-__version__ = "2.3"
+__version__ = "3.2"
+__tabversion__ = "3.2" # Table version
#-----------------------------------------------------------------------------
# === User configurable parameters ===
@@ -67,20 +77,86 @@ default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
-import re, types, sys, cStringIO, md5, os.path
+yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
+ # implementations of certain functions.
-# Exception raised for yacc-related errors
-class YaccError(Exception): pass
+resultlimit = 40 # Size limit of results when running in debug mode.
+
+pickle_protocol = 0 # Protocol to use when writing pickle files
-# Available instance types. This is used when parsers are defined by a class.
-# it's a little funky because I want to preserve backwards compatibility
-# with Python 2.0 where types.ObjectType is undefined.
+import re, types, sys, os.path
+# Compatibility function for python 2.6/3.0
+if sys.version_info[0] < 3:
+ def func_code(f):
+ return f.func_code
+else:
+ def func_code(f):
+ return f.__code__
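
# Quick sketch (not in the patch): func_code() papers over the Python 2.x
# func_code / 3.x __code__ rename.
def sample(a, b): return a + b
assert func_code(sample).co_argcount == 2   # same result on both versions
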
+
+# Compatibility
try:
- _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+ MAXINT = sys.maxint
except AttributeError:
- _INSTANCETYPE = types.InstanceType
- class object: pass # Note: needed if no new-style classes present
+ MAXINT = sys.maxsize
+
+# Python 2.x/3.0 compatibility.
+def load_ply_lex():
+ if sys.version_info[0] < 3:
+ import lex
+ else:
+ import ply.lex as lex
+ return lex
+
+# This object is a stand-in for a logging object created by the
+# logging module. PLY will use this by default to create things
+# such as the parser.out file. If a user wants more detailed
+# information, they can create their own logging object and pass
+# it into PLY.
+
+class PlyLogger(object):
+ def __init__(self,f):
+ self.f = f
+ def debug(self,msg,*args,**kwargs):
+ self.f.write((msg % args) + "\n")
+ info = debug
+
+ def warning(self,msg,*args,**kwargs):
+ self.f.write("WARNING: "+ (msg % args) + "\n")
+
+ def error(self,msg,*args,**kwargs):
+ self.f.write("ERROR: " + (msg % args) + "\n")
+
+ critical = debug
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+ def __getattribute__(self,name):
+ return self
+ def __call__(self,*args,**kwargs):
+ return self
+
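
# Illustrative sketch, not part of the patch: diagnostics default to a
# PlyLogger on stderr, but any object with debug/info/warning/error methods
# (e.g. logging.getLogger('ply') passed as errorlog=) works the same way.
import sys
log = PlyLogger(sys.stderr)
log.warning("Token %r multiply defined", 'NUMBER')   # -> "WARNING: ..." on stderr
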
+# Exception raised for yacc-related errors
+class YaccError(Exception): pass
+
+# Format the result message that the parser produces when running in debug mode.
+def format_result(r):
+ repr_str = repr(r)
+ if '\n' in repr_str: repr_str = repr(repr_str)
+ if len(repr_str) > resultlimit:
+ repr_str = repr_str[:resultlimit]+" ..."
+ result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str)
+ return result
+
+
+# Format stack entries when the parser is running in debug mode
+def format_stack_entry(r):
+ repr_str = repr(r)
+ if '\n' in repr_str: repr_str = repr(repr_str)
+ if len(repr_str) < 16:
+ return repr_str
+ else:
+ return "<%s @ 0x%x>" % (type(r).__name__,id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
@@ -99,7 +175,7 @@ except AttributeError:
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
-class YaccSymbol(object):
+class YaccSymbol:
def __str__(self): return self.type
def __repr__(self): return str(self)
@@ -115,8 +191,9 @@ class YaccSymbol(object):
class YaccProduction:
def __init__(self,s,stack=None):
self.slice = s
- self.pbstack = []
self.stack = stack
+ self.lexer = None
+ self.parser = None
def __getitem__(self,n):
if n >= 0: return self.slice[n].value
else: return self.stack[n].value
@@ -133,6 +210,9 @@ class YaccProduction:
def lineno(self,n):
return getattr(self.slice[n],"lineno",0)
+ def set_lineno(self,n,lineno):
+ self.slice[n].lineno = lineno
+
def linespan(self,n):
startline = getattr(self.slice[n],"lineno",0)
endline = getattr(self.slice[n],"endlineno",startline)
@@ -146,35 +226,22 @@ class YaccProduction:
endpos = getattr(self.slice[n],"endlexpos",startpos)
return startpos,endpos
- def pushback(self,n):
- if n <= 0:
- raise ValueError, "Expected a positive value"
- if n > (len(self.slice)-1):
- raise ValueError, "Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1)
- for i in range(0,n):
- self.pbstack.append(self.slice[-i-1])
+ def error(self):
+ raise SyntaxError
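
# Illustrative sketch, not part of the patch: the PLY-3 replacement for
# pushback(). A rule that rejects its input calls p.error(); the resulting
# SyntaxError is caught by the parse loop, which then enters normal error
# recovery (rule and token names here are hypothetical).
def p_declaration(p):
    'declaration : TYPE ID SEMI'
    if len(p[2]) > 31:       # e.g. enforce an identifier length limit
        p.error()            # abandon this reduction, enter error recovery
    p[0] = ('decl', p[1], p[2])
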
-# The LR Parsing engine. This is defined as a class so that multiple parsers
-# can exist in the same process. A user never instantiates this directly.
-# Instead, the global yacc() function should be used to create a suitable Parser
-# object.
-class Parser:
- def __init__(self,magic=None):
-
- # This is a hack to keep users from trying to instantiate a Parser
- # object directly.
-
- if magic != "xyzzy":
- raise YaccError, "Can't instantiate Parser. Use yacc() instead."
+# -----------------------------------------------------------------------------
+# == LRParser ==
+#
+# The LR Parsing engine.
+# -----------------------------------------------------------------------------
- # Reset internal state
- self.productions = None # List of productions
- self.errorfunc = None # Error handling function
- self.action = { } # LR Action table
- self.goto = { } # LR goto table
- self.require = { } # Attribute require table
- self.method = "Unknown LR" # Table construction method used
+class LRParser:
+ def __init__(self,lrtab,errorf):
+ self.productions = lrtab.lr_productions
+ self.action = lrtab.lr_action
+ self.goto = lrtab.lr_goto
+ self.errorfunc = errorf
def errok(self):
self.errorok = 1
@@ -187,29 +254,64 @@ class Parser:
self.symstack.append(sym)
self.statestack.append(0)
- def parse(self,input=None,lexer=None,debug=0,tracking=0):
+ def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+ if debug or yaccdevel:
+ if isinstance(debug,int):
+ debug = PlyLogger(sys.stderr)
+ return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
+ elif tracking:
+ return self.parseopt(input,lexer,debug,tracking,tokenfunc)
+ else:
+ return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
+
+
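
# Sketch (not in the patch) of how the three implementations above are
# selected; parser and source are hypothetical names for a built parser and
# its input text.
result = parser.parse(source)                               # parseopt_notrack()
result = parser.parse(source, tracking=True)                # parseopt()
result = parser.parse(source, debug=PlyLogger(sys.stderr))  # parsedebug()
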
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parsedebug().
+ #
+ # This is the debugging enabled version of parse(). All changes made to the
+ # parsing engine should be made here. For the non-debugging version,
+ # copy this code to a method parseopt() and delete all of the sections
+ # enclosed in:
+ #
+ # #--! DEBUG
+ # statements
+ # #--! DEBUG
+ #
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
- actions = self.action # Local reference to action table
- goto = self.goto # Local reference to goto table
- prod = self.productions # Local reference to production list
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
- errorcount = 0 # Used during error recovery
+ errorcount = 0 # Used during error recovery
+
+ # --! DEBUG
+ debug.info("PLY: PARSE DEBUG START")
+ # --! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
- import lex
+ lex = load_ply_lex()
lexer = lex.lexer
+ # Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
- if input:
+ if input is not None:
lexer.input(input)
- # Tokenize function
- get_token = lexer.token
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
@@ -223,15 +325,19 @@ class Parser:
statestack.append(0)
sym = YaccSymbol()
- sym.type = '$end'
+ sym.type = "$end"
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
- if debug > 1:
- print 'state', state
+
+ # --! DEBUG
+ debug.debug('')
+ debug.debug('State : %s', state)
+ # --! DEBUG
+
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
@@ -239,27 +345,27 @@ class Parser:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
- lookahead.type = '$end'
- if debug:
- errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
+ lookahead.type = "$end"
+
+ # --! DEBUG
+ debug.debug('Stack : %s',
+ ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ # --! DEBUG
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
- if debug > 1:
- print 'action', t
if t is not None:
if t > 0:
# shift a symbol on the stack
- if ltype == '$end':
- # Error, end of input
- sys.stderr.write("yacc: Parse error. EOF\n")
- return
statestack.append(t)
state = t
- if debug > 1:
- sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
+
+ # --! DEBUG
+ debug.debug("Action : Shift and goto state %s", t)
+ # --! DEBUG
+
symstack.append(lookahead)
lookahead = None
@@ -277,12 +383,20 @@ class Parser:
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
- if debug > 1:
- sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
+
+ # --! DEBUG
+ if plen:
+ debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
+ else:
+ debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
+
+ # --! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
+
+ # --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
@@ -290,38 +404,368 @@ class Parser:
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
- del symstack[-plen:]
- del statestack[-plen:]
+
+ # --! TRACKING
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ del statestack[-plen:]
+ p.callable(pslice)
+ # --! DEBUG
+ debug.info("Result : %s", format_result(pslice[0]))
+ # --! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
else:
+
+ # --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
+ # --! TRACKING
+
targ = [ sym ]
- pslice.slice = targ
- # Call the grammar rule with our special slice object
- p.func(pslice)
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ p.callable(pslice)
+ # --! DEBUG
+ debug.info("Result : %s", format_result(pslice[0]))
+ # --! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ result = getattr(n,"value",None)
+ # --! DEBUG
+ debug.info("Done : Returning %s", format_result(result))
+ debug.info("PLY: PARSE DEBUG END")
+ # --! DEBUG
+ return result
+
+ if t == None:
+
+ # --! DEBUG
+ debug.error('Error : %s',
+ ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ # --! DEBUG
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the tokenstack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call
+ # the user defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = 0
+ errtoken = lookahead
+ if errtoken.type == "$end":
+ errtoken = None # End of file!
+ if self.errorfunc:
+ global errok,token,restart
+ errok = self.errok # Set some special functions available in error recovery
+ token = get_token
+ restart = self.restart
+ if errtoken and not hasattr(errtoken,'lexer'):
+ errtoken.lexer = lexer
+ tok = self.errorfunc(errtoken)
+ del errok, token, restart # Delete special functions
- # If there was a pushback, put that on the stack
- if pslice.pbstack:
- lookaheadstack.append(lookahead)
- for _t in pslice.pbstack:
- lookaheadstack.append(_t)
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+ else: lineno = 0
+ if lineno:
+ sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+ else:
+ sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+ else:
+ sys.stderr.write("yacc: Parse error in input. EOF\n")
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != "$end":
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == "$end":
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
lookahead = None
- pslice.pbstack = []
+ continue
+ t = YaccSymbol()
+ t.type = 'error'
+ if hasattr(lookahead,"lineno"):
+ t.lineno = lookahead.lineno
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1] # Potential bug fix
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError("yacc: internal parser error!!!\n")
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt().
+ #
+ # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
+ # Edit the debug version above, then copy any modifications to the method
+ # below while removing #--! DEBUG sections.
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+ def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [ ] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ lex = load_ply_lex()
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set up the state and symbol stacks
+
+ statestack = [ ] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [ ] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while 1:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount: errorcount -=1
continue
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ # --! TRACKING
+ if tracking:
+ t1 = targ[1]
+ sym.lineno = t1.lineno
+ sym.lexpos = t1.lexpos
+ t1 = targ[-1]
+ sym.endlineno = getattr(t1,"endlineno",t1.lineno)
+ sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
+
+ # --! TRACKING
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ del statestack[-plen:]
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ # --! TRACKING
+ if tracking:
+ sym.lineno = lexer.lineno
+ sym.lexpos = lexer.lexpos
+ # --! TRACKING
+
+ targ = [ sym ]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
- if debug:
- sys.stderr.write(errorlead + "\n")
+
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
@@ -343,6 +787,8 @@ class Parser:
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
+ if errtoken and not hasattr(errtoken,'lexer'):
+ errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
@@ -375,6 +821,7 @@ class Parser:
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
+ state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
@@ -404,1086 +851,1033 @@ class Parser:
else:
symstack.pop()
statestack.pop()
+ state = statestack[-1] # Potential bug fix
continue
# Call an error function here
- raise RuntimeError, "yacc: internal parser error!!!\n"
+ raise RuntimeError("yacc: internal parser error!!!\n")
-# -----------------------------------------------------------------------------
-# === Parser Construction ===
-#
-# The following functions and variables are used to implement the yacc() function
-# itself. This is pretty hairy stuff involving lots of error checking,
-# construction of LR items, kernels, and so forth. Although a lot of
-# this work is done using global variables, the resulting Parser object
-# is completely self contained--meaning that it is safe to repeatedly
-# call yacc() with different grammars in the same application.
-# -----------------------------------------------------------------------------
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt_notrack().
+ #
+ # Optimized version of parseopt() with line number tracking removed.
+ # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
+ # code in the #--! TRACKING sections
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# -----------------------------------------------------------------------------
-# validate_file()
-#
-# This function checks to see if there are duplicated p_rulename() functions
-# in the parser module file. Without this function, it is really easy for
-# users to make mistakes by cutting and pasting code fragments (and it's a real
-# bugger to try and figure out why the resulting parser doesn't work). Therefore,
-# we just do a little regular expression pattern matching of def statements
-# to try and detect duplicates.
-# -----------------------------------------------------------------------------
+ def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [ ] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ lex = load_ply_lex()
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
-def validate_file(filename):
- base,ext = os.path.splitext(filename)
- if ext != '.py': return 1 # No idea. Assume it's okay.
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
- try:
- f = open(filename)
- lines = f.readlines()
- f.close()
- except IOError:
- return 1 # Oh well
-
- # Match def p_funcname(
- fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
- counthash = { }
- linen = 1
- noerror = 1
- for l in lines:
- m = fre.match(l)
- if m:
- name = m.group(1)
- prev = counthash.get(name)
- if not prev:
- counthash[name] = linen
- else:
- sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,linen,name,prev))
- noerror = 0
- linen += 1
- return noerror
-
-# This function looks for functions that might be grammar rules, but which don't have the proper p_suffix.
-def validate_dict(d):
- for n,v in d.items():
- if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue
- if n[0:2] == 't_': continue
-
- if n[0:2] == 'p_':
- sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
- if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
- try:
- doc = v.__doc__.split(" ")
- if doc[1] == ':':
- sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
- except StandardError:
- pass
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
-# -----------------------------------------------------------------------------
-# === GRAMMAR FUNCTIONS ===
-#
-# The following global variables and functions are used to store, manipulate,
-# and verify the grammar rules specified by the user.
-# -----------------------------------------------------------------------------
+ # Set up the state and symbol stacks
-# Initialize all of the global variables used during grammar construction
-def initialize_vars():
- global Productions, Prodnames, Prodmap, Terminals
- global Nonterminals, First, Follow, Precedence, LRitems
- global Errorfunc, Signature, Requires
+ statestack = [ ] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [ ] # Stack of grammar symbols
+ self.symstack = symstack
- Productions = [None] # A list of all of the productions. The first
- # entry is always reserved for the purpose of
- # building an augmented grammar
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while 1:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount: errorcount -=1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ del statestack[-plen:]
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ targ = [ sym ]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ return getattr(n,"value",None)
+
+ if t == None:
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the tokenstack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call
+ # the user defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = 0
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ global errok,token,restart
+ errok = self.errok # Set some special functions available in error recovery
+ token = get_token
+ restart = self.restart
+ if errtoken and not hasattr(errtoken,'lexer'):
+ errtoken.lexer = lexer
+ tok = self.errorfunc(errtoken)
+ del errok, token, restart # Delete special functions
- Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
- # productions of that nonterminal.
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+ else: lineno = 0
+ if lineno:
+ sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+ else:
+ sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+ else:
+ sys.stderr.write("yacc: Parse error in input. EOF\n")
+ return
- Prodmap = { } # A dictionary that is only used to detect duplicate
- # productions.
+ else:
+ errorcount = error_count
- Terminals = { } # A dictionary mapping the names of terminal symbols to a
- # list of the rules where they are used.
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
- Nonterminals = { } # A dictionary mapping names of nonterminals to a list
- # of rule numbers where they are used.
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
- First = { } # A dictionary of precomputed FIRST(x) symbols
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
- Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
- Precedence = { } # Precedence rules for each terminal. Contains tuples of the
- # form ('right',level) or ('nonassoc', level) or ('left',level)
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ lookahead = None
+ continue
+ t = YaccSymbol()
+ t.type = 'error'
+ if hasattr(lookahead,"lineno"):
+ t.lineno = lookahead.lineno
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1] # Potential bug fix
- LRitems = [ ] # A list of all LR items for the grammar. These are the
- # productions with the "dot" like E -> E . PLUS E
+ continue
- Errorfunc = None # User defined error handler
+ # Call an error function here
+ raise RuntimeError("yacc: internal parser error!!!\n")
- Signature = md5.new() # Digital signature of the grammar rules, precedence
- # and other information. Used to determined when a
- # parsing table needs to be regenerated.
+# -----------------------------------------------------------------------------
+# === Grammar Representation ===
+#
+# The following functions, classes, and variables are used to represent and
+# manipulate the rules that make up a grammar.
+# -----------------------------------------------------------------------------
- Requires = { } # Requires list
+import re
- # File objects used when creating the parser.out debugging file
- global _vf, _vfc
- _vf = cStringIO.StringIO()
- _vfc = cStringIO.StringIO()
+# regex matching identifiers
+_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
-# It has a few required attributes:
+# A grammar rule refers to a specification such as this:
#
-# name - Name of the production (nonterminal)
-# prod - A list of symbols making up its production
+# expr : expr PLUS term
+#
+# Here are the basic attributes defined on all productions
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','PLUS','term']
+# prec - Production precedence level
# number - Production number.
+# func - Function that executes on reduce
+# file - File where production function is defined
+# lineno - Line number where production function is defined
#
-# In addition, a few additional attributes are used to help with debugging or
-# optimization of table generation.
+# The following additional attributes are also defined:
#
-# file - File where production action is defined.
-# lineno - Line number where action is defined
-# func - Action function
-# prec - Precedence level
-# lr_next - Next LR item. Example, if we are ' E -> E . PLUS E'
-# then lr_next refers to 'E -> E PLUS . E'
-# lr_index - LR item index (location of the ".") in the prod list.
-# lookaheads - LALR lookahead symbols for this item
-# len - Length of the production (number of symbols on right hand side)
+# len - Length of the production (number of symbols on right hand side)
+# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
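
# Illustrative sketch, not part of the patch: the rule function from which
# such a Production is built. Grammar construction records name='expr',
# prod=('expr','PLUS','term'), func='p_expr_plus', and the defining file/line.
def p_expr_plus(p):
    'expr : expr PLUS term'
    p[0] = p[1] + p[3]
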
-class Production:
- def __init__(self,**kw):
- for k,v in kw.items():
- setattr(self,k,v)
- self.lr_index = -1
- self.lr0_added = 0 # Flag indicating whether or not added to LR0 closure
- self.lr1_added = 0 # Flag indicating whether or not added to LR1
- self.usyms = [ ]
- self.lookaheads = { }
- self.lk_added = { }
- self.setnumbers = [ ]
-
- def __str__(self):
+class Production(object):
+ reduced = 0
+ def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
+ self.name = name
+ self.prod = tuple(prod)
+ self.number = number
+ self.func = func
+ self.callable = None
+ self.file = file
+ self.line = line
+ self.prec = precedence
+
+ # Internal settings used during table construction
+
+ self.len = len(self.prod) # Length of the production
+
+ # Create a list of unique production symbols used in the production
+ self.usyms = [ ]
+ for s in self.prod:
+ if s not in self.usyms:
+ self.usyms.append(s)
+
+ # List of all LR items for the production
+ self.lr_items = []
+ self.lr_next = None
+
+ # Create a string representation
if self.prod:
- s = "%s -> %s" % (self.name," ".join(self.prod))
+ self.str = "%s -> %s" % (self.name," ".join(self.prod))
else:
- s = "%s -> <empty>" % self.name
- return s
+ self.str = "%s -> <empty>" % self.name
+
+ def __str__(self):
+ return self.str
def __repr__(self):
- return str(self)
+ return "Production("+str(self)+")"
+
+ def __len__(self):
+ return len(self.prod)
+
+ def __nonzero__(self):
+ return 1
- # Compute lr_items from the production
+ def __getitem__(self,index):
+ return self.prod[index]
+
+ # Return the nth lr_item from the production (or None if at the end)
def lr_item(self,n):
if n > len(self.prod): return None
- p = Production()
- p.name = self.name
- p.prod = list(self.prod)
- p.number = self.number
- p.lr_index = n
- p.lookaheads = { }
- p.setnumbers = self.setnumbers
- p.prod.insert(n,".")
- p.prod = tuple(p.prod)
- p.len = len(p.prod)
- p.usyms = self.usyms
-
- # Precompute list of productions immediately following
+ p = LRItem(self,n)
+
+ # Precompute the list of productions immediately following. Hack. Remove later
try:
- p.lrafter = Prodnames[p.prod[n+1]]
- except (IndexError,KeyError),e:
- p.lrafter = []
+ p.lr_after = Prodnames[p.prod[n+1]]
+ except (IndexError,KeyError):
+ p.lr_after = []
try:
- p.lrbefore = p.prod[n-1]
+ p.lr_before = p.prod[n-1]
except IndexError:
- p.lrbefore = None
+ p.lr_before = None
return p
+
+ # Bind the production function name to a callable
+ def bind(self,pdict):
+ if self.func:
+ self.callable = pdict[self.func]
+
+# This class serves as a minimal stand-in for Production objects when
+# reading table data from files. It only contains information
+# actually used by the LR parsing engine, plus some additional
+# debugging information.
+class MiniProduction(object):
+ def __init__(self,str,name,len,func,file,line):
+ self.name = name
+ self.len = len
+ self.func = func
+ self.callable = None
+ self.file = file
+ self.line = line
+ self.str = str
+ def __str__(self):
+ return self.str
+ def __repr__(self):
+ return "MiniProduction(%s)" % self.str
-class MiniProduction:
- pass
+ # Bind the production function name to a callable
+ def bind(self,pdict):
+ if self.func:
+ self.callable = pdict[self.func]
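
# Sketch (not in the patch): a table file persists only the function *name*;
# bind() resolves it against the module dictionary when the tables are loaded
# (all names here are hypothetical).
mp = MiniProduction('expr -> expr PLUS term', 'expr', 3, 'p_expr_plus', 'calc.py', 12)
mp.bind({'p_expr_plus': lambda p: None})   # mp.callable is now the live function
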
-# regex matching identifiers
-_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
-# add_production()
+# class LRItem
#
-# Given an action function, this function assembles a production rule.
-# The production rule is assumed to be found in the function's docstring.
-# This rule has the general syntax:
+# This class represents a specific stage of parsing a production rule. For
+# example:
#
-# name1 ::= production1
-# | production2
-# | production3
-# ...
-# | productionn
-# name2 ::= production1
-# | production2
-# ...
+# expr : expr . PLUS term
+#
+# In the above, the "." represents the current location of the parse. Here
+# are its basic attributes:
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
+# number - Production number.
+#
+# lr_next - Next LR item. For example, if we are 'expr -> expr . PLUS term'
+# then lr_next refers to 'expr -> expr PLUS . term'
+# lr_index - LR item index (location of the ".") in the prod list.
+# lookaheads - LALR lookahead symbols for this item
+# len - Length of the production (number of symbols on right hand side)
+# lr_after - List of all productions that immediately follow
+# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
-def add_production(f,file,line,prodname,syms):
-
- if Terminals.has_key(prodname):
- sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
- return -1
- if prodname == 'error':
- sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
- return -1
-
- if not _is_identifier.match(prodname):
- sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
- return -1
-
- for x in range(len(syms)):
- s = syms[x]
- if s[0] in "'\"":
- try:
- c = eval(s)
- if (len(c) > 1):
- sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
- return -1
- if not Terminals.has_key(c):
- Terminals[c] = []
- syms[x] = c
- continue
- except SyntaxError:
- pass
- if not _is_identifier.match(s) and s != '%prec':
- sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
- return -1
-
- # See if the rule is already in the rulemap
- map = "%s -> %s" % (prodname,syms)
- if Prodmap.has_key(map):
- m = Prodmap[map]
- sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
- sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
- return -1
-
- p = Production()
- p.name = prodname
- p.prod = syms
- p.file = file
- p.line = line
- p.func = f
- p.number = len(Productions)
-
-
- Productions.append(p)
- Prodmap[map] = p
- if not Nonterminals.has_key(prodname):
- Nonterminals[prodname] = [ ]
-
- # Add all terminals to Terminals
- i = 0
- while i < len(p.prod):
- t = p.prod[i]
- if t == '%prec':
- try:
- precname = p.prod[i+1]
- except IndexError:
- sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
- return -1
-
- prec = Precedence.get(precname,None)
- if not prec:
- sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
- return -1
- else:
- p.prec = prec
- del p.prod[i]
- del p.prod[i]
- continue
-
- if Terminals.has_key(t):
- Terminals[t].append(p.number)
- # Is a terminal. We'll assign a precedence to p based on this
- if not hasattr(p,"prec"):
- p.prec = Precedence.get(t,('right',0))
- else:
- if not Nonterminals.has_key(t):
- Nonterminals[t] = [ ]
- Nonterminals[t].append(p.number)
- i += 1
+class LRItem(object):
+ def __init__(self,p,n):
+ self.name = p.name
+ self.prod = list(p.prod)
+ self.number = p.number
+ self.lr_index = n
+ self.lookaheads = { }
+ self.prod.insert(n,".")
+ self.prod = tuple(self.prod)
+ self.len = len(self.prod)
+ self.usyms = p.usyms
- if not hasattr(p,"prec"):
- p.prec = ('right',0)
+ def __str__(self):
+ if self.prod:
+ s = "%s -> %s" % (self.name," ".join(self.prod))
+ else:
+ s = "%s -> <empty>" % self.name
+ return s
- # Set final length of productions
- p.len = len(p.prod)
- p.prod = tuple(p.prod)
+ def __repr__(self):
+ return "LRItem("+str(self)+")"
- # Calculate unique syms in the production
- p.usyms = [ ]
- for s in p.prod:
- if s not in p.usyms:
- p.usyms.append(s)
+# -----------------------------------------------------------------------------
+# rightmost_terminal()
+#
+# Return the rightmost terminal from a list of symbols. Used in add_production()
+# -----------------------------------------------------------------------------
+def rightmost_terminal(symbols, terminals):
+ i = len(symbols) - 1
+ while i >= 0:
+ if symbols[i] in terminals:
+ return symbols[i]
+ i -= 1
+ return None
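
For example (a sketch; the dict below stands in for the grammar's Terminals
mapping, whose values are unused here):

    terminals = {'PLUS': [], 'NUMBER': []}
    rightmost_terminal(['expr', 'PLUS', 'NUMBER'], terminals)   # -> 'NUMBER'
    rightmost_terminal(['expr', 'term'], terminals)             # -> None
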
- # Add to the global productions list
- try:
- Prodnames[p.name].append(p)
- except KeyError:
- Prodnames[p.name] = [ p ]
- return 0
+# -----------------------------------------------------------------------------
+# === GRAMMAR CLASS ===
+#
+# The following class represents the contents of the specified grammar along
+# with various computed properties such as first sets, follow sets, LR items, etc.
+# This data is used for critical parts of the table generation process later.
+# -----------------------------------------------------------------------------
-# Given a raw rule function, this function rips out its doc string
-# and adds rules to the grammar
+class GrammarError(YaccError): pass
-def add_function(f):
- line = f.func_code.co_firstlineno
- file = f.func_code.co_filename
- error = 0
+class Grammar(object):
+ def __init__(self,terminals):
+ self.Productions = [None] # A list of all of the productions. The first
+ # entry is always reserved for the purpose of
+ # building an augmented grammar
- if isinstance(f,types.MethodType):
- reqdargs = 2
- else:
- reqdargs = 1
-
- if f.func_code.co_argcount > reqdargs:
- sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
- return -1
-
- if f.func_code.co_argcount < reqdargs:
- sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
- return -1
-
- if f.__doc__:
- # Split the doc string into lines
- pstrings = f.__doc__.splitlines()
- lastp = None
- dline = line
- for ps in pstrings:
- dline += 1
- p = ps.split()
- if not p: continue
- try:
- if p[0] == '|':
- # This is a continuation of a previous rule
- if not lastp:
- sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
- return -1
- prodname = lastp
- if len(p) > 1:
- syms = p[1:]
- else:
- syms = [ ]
- else:
- prodname = p[0]
- lastp = prodname
- assign = p[1]
- if len(p) > 2:
- syms = p[2:]
- else:
- syms = [ ]
- if assign != ':' and assign != '::=':
- sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline))
- return -1
+ self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
+ # productions of that nonterminal.
+ self.Prodmap = { } # A dictionary that is only used to detect duplicate
+ # productions.
- e = add_production(f,file,dline,prodname,syms)
- error += e
+ self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
+ # list of the rules where they are used.
+ for term in terminals:
+ self.Terminals[term] = []
- except StandardError:
- sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
- error -= 1
- else:
- sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
- return error
-
-
-# Cycle checking code (Michael Dyck)
-
-def compute_reachable():
- '''
- Find each symbol that can be reached from the start symbol.
- Print a warning for any nonterminals that can't be reached.
- (Unused terminals have already had their warning.)
- '''
- Reachable = { }
- for s in Terminals.keys() + Nonterminals.keys():
- Reachable[s] = 0
-
- mark_reachable_from( Productions[0].prod[0], Reachable )
-
- for s in Nonterminals.keys():
- if not Reachable[s]:
- sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s)
-
-def mark_reachable_from(s, Reachable):
- '''
- Mark all symbols that are reachable from symbol s.
- '''
- if Reachable[s]:
- # We've already reached symbol s.
- return
- Reachable[s] = 1
- for p in Prodnames.get(s,[]):
- for r in p.prod:
- mark_reachable_from(r, Reachable)
+ self.Terminals['error'] = []
-# -----------------------------------------------------------------------------
-# compute_terminates()
-#
-# This function looks at the various parsing rules and tries to detect
-# infinite recursion cycles (grammar rules where there is no possible way
-# to derive a string of only terminals).
-# -----------------------------------------------------------------------------
-def compute_terminates():
- '''
- Raise an error for any symbols that don't terminate.
- '''
- Terminates = {}
-
- # Terminals:
- for t in Terminals.keys():
- Terminates[t] = 1
-
- Terminates['$end'] = 1
-
- # Nonterminals:
-
- # Initialize to false:
- for n in Nonterminals.keys():
- Terminates[n] = 0
-
- # Then propagate termination until no change:
- while 1:
- some_change = 0
- for (n,pl) in Prodnames.items():
- # Nonterminal n terminates iff any of its productions terminates.
- for p in pl:
- # Production p terminates iff all of its rhs symbols terminate.
- for s in p.prod:
- if not Terminates[s]:
- # The symbol s does not terminate,
- # so production p does not terminate.
- p_terminates = 0
- break
- else:
- # didn't break from the loop,
- # so every symbol s terminates
- # so production p terminates.
- p_terminates = 1
-
- if p_terminates:
- # symbol n terminates!
- if not Terminates[n]:
- Terminates[n] = 1
- some_change = 1
- # Don't need to consider any more productions for this n.
- break
-
- if not some_change:
- break
-
- some_error = 0
- for (s,terminates) in Terminates.items():
- if not terminates:
- if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
- # s is used-but-not-defined, and we've already warned of that,
- # so it would be overkill to say that it's also non-terminating.
- pass
- else:
- sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
- some_error = 1
+ self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
+ # of rule numbers where they are used.
- return some_error
+ self.First = { } # A dictionary of precomputed FIRST(x) symbols
-# -----------------------------------------------------------------------------
-# verify_productions()
-#
-# This function examines all of the supplied rules to see if they seem valid.
-# -----------------------------------------------------------------------------
-def verify_productions(cycle_check=1):
- error = 0
- for p in Productions:
- if not p: continue
+ self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
- for s in p.prod:
- if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
- sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
- error = 1
- continue
+ self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
+ # form ('right',level) or ('nonassoc', level) or ('left',level)
- unused_tok = 0
- # Now verify all of the tokens
- if yaccdebug:
- _vf.write("Unused terminals:\n\n")
- for s,v in Terminals.items():
- if s != 'error' and not v:
- sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
- if yaccdebug: _vf.write(" %s\n"% s)
- unused_tok += 1
-
- # Print out all of the productions
- if yaccdebug:
- _vf.write("\nGrammar\n\n")
- for i in range(1,len(Productions)):
- _vf.write("Rule %-5d %s\n" % (i, Productions[i]))
-
- unused_prod = 0
- # Verify the use of all productions
- for s,v in Nonterminals.items():
- if not v:
- p = Prodnames[s][0]
- sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
- unused_prod += 1
-
-
- if unused_tok == 1:
- sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
- if unused_tok > 1:
- sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
-
- if unused_prod == 1:
- sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
- if unused_prod > 1:
- sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
-
- if yaccdebug:
- _vf.write("\nTerminals, with rules where they appear\n\n")
- ks = Terminals.keys()
- ks.sort()
- for k in ks:
- _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
- _vf.write("\nNonterminals, with rules where they appear\n\n")
- ks = Nonterminals.keys()
- ks.sort()
- for k in ks:
- _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
-
- if (cycle_check):
- compute_reachable()
- error += compute_terminates()
-# error += check_cycles()
- return error
+        self.UsedPrecedence = { } # Precedence rules that were actually used by the grammar.
+ # This is only used to provide error checking and to generate
+ # a warning about unused precedence rules.
-# -----------------------------------------------------------------------------
-# build_lritems()
-#
-# This function walks the list of productions and builds a complete set of the
-# LR items. The LR items are stored in two ways: First, they are uniquely
-# numbered and placed in the list _lritems. Second, a linked list of LR items
-# is built for each production. For example:
-#
-# E -> E PLUS E
-#
-# Creates the list
-#
-# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
-# -----------------------------------------------------------------------------
+ self.Start = None # Starting symbol for the grammar
-def build_lritems():
- for p in Productions:
- lastlri = p
- lri = p.lr_item(0)
- i = 0
- while 1:
- lri = p.lr_item(i)
- lastlri.lr_next = lri
- if not lri: break
- lri.lr_num = len(LRitems)
- LRitems.append(lri)
- lastlri = lri
- i += 1
- # In order for the rest of the parser generator to work, we need to
- # guarantee that no more lritems are generated. Therefore, we nuke
- # the p.lr_item method. (Only used in debugging)
- # Production.lr_item = None
+ def __len__(self):
+ return len(self.Productions)
+
+ def __getitem__(self,index):
+ return self.Productions[index]
+
+ # -----------------------------------------------------------------------------
+ # set_precedence()
+ #
+ # Sets the precedence for a given terminal. assoc is the associativity such as
+ # 'left','right', or 'nonassoc'. level is a numeric level.
+ #
+ # -----------------------------------------------------------------------------
+
+ def set_precedence(self,term,assoc,level):
+ assert self.Productions == [None],"Must call set_precedence() before add_production()"
+ if term in self.Precedence:
+ raise GrammarError("Precedence already specified for terminal '%s'" % term)
+ if assoc not in ['left','right','nonassoc']:
+ raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
+ self.Precedence[term] = (assoc,level)
+
+ # -----------------------------------------------------------------------------
+ # add_production()
+ #
+    # Given a production name, a list of symbols, and an optional action function,
+    # this method assembles a production rule and computes its precedence level.
+ #
+ # The production rule is supplied as a list of symbols. For example,
+ # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
+ # symbols ['expr','PLUS','term'].
+ #
+    # Precedence is determined by the precedence of the right-most terminal
+ # or the precedence of a terminal specified by %prec.
+ #
+ # A variety of error checks are performed to make sure production symbols
+ # are valid and that %prec is used correctly.
+ # -----------------------------------------------------------------------------
+
+ def add_production(self,prodname,syms,func=None,file='',line=0):
+
+ if prodname in self.Terminals:
+ raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
+ if prodname == 'error':
+ raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
+ if not _is_identifier.match(prodname):
+ raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
+
+ # Look for literal tokens
+ for n,s in enumerate(syms):
+ if s[0] in "'\"":
+ try:
+ c = eval(s)
+ if (len(c) > 1):
+ raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
+ if not c in self.Terminals:
+ self.Terminals[c] = []
+ syms[n] = c
+ continue
+ except SyntaxError:
+ pass
+ if not _is_identifier.match(s) and s != '%prec':
+ raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
+
+ # Determine the precedence level
+ if '%prec' in syms:
+ if syms[-1] == '%prec':
+ raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
+ if syms[-2] != '%prec':
+ raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
+ precname = syms[-1]
+ prodprec = self.Precedence.get(precname,None)
+ if not prodprec:
+ raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
+ else:
+ self.UsedPrecedence[precname] = 1
+ del syms[-2:] # Drop %prec from the rule
+ else:
+ # If no %prec, precedence is determined by the rightmost terminal symbol
+ precname = rightmost_terminal(syms,self.Terminals)
+ prodprec = self.Precedence.get(precname,('right',0))
+
+ # See if the rule is already in the rulemap
+ map = "%s -> %s" % (prodname,syms)
+ if map in self.Prodmap:
+ m = self.Prodmap[map]
+ raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
+ "Previous definition at %s:%d" % (m.file, m.line))
+
+ # From this point on, everything is valid. Create a new Production instance
+ pnumber = len(self.Productions)
+ if not prodname in self.Nonterminals:
+ self.Nonterminals[prodname] = [ ]
+
+ # Add the production number to Terminals and Nonterminals
+ for t in syms:
+ if t in self.Terminals:
+ self.Terminals[t].append(pnumber)
+ else:
+ if not t in self.Nonterminals:
+ self.Nonterminals[t] = [ ]
+ self.Nonterminals[t].append(pnumber)
-# -----------------------------------------------------------------------------
-# add_precedence()
-#
-# Given a list of precedence rules, add to the precedence table.
-# -----------------------------------------------------------------------------
+ # Create a production and add it to the list of productions
+ p = Production(pnumber,prodname,syms,prodprec,func,file,line)
+ self.Productions.append(p)
+ self.Prodmap[map] = p
-def add_precedence(plist):
- plevel = 0
- error = 0
- for p in plist:
- plevel += 1
+ # Add to the global productions list
try:
- prec = p[0]
- terms = p[1:]
- if prec != 'left' and prec != 'right' and prec != 'nonassoc':
- sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
- return -1
- for t in terms:
- if Precedence.has_key(t):
- sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
- error += 1
- continue
- Precedence[t] = (prec,plevel)
- except:
- sys.stderr.write("yacc: Invalid precedence table.\n")
- error += 1
-
- return error
-
-# -----------------------------------------------------------------------------
-# augment_grammar()
-#
-# Compute the augmented grammar. This is just a rule S' -> start where start
-# is the starting symbol.
-# -----------------------------------------------------------------------------
+ self.Prodnames[prodname].append(p)
+ except KeyError:
+ self.Prodnames[prodname] = [ p ]
+ return 0
-def augment_grammar(start=None):
- if not start:
- start = Productions[1].name
- Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None)
- Productions[0].usyms = [ start ]
- Nonterminals[start].append(0)
+ # -----------------------------------------------------------------------------
+ # set_start()
+ #
+ # Sets the starting symbol and creates the augmented grammar. Production
+ # rule 0 is S' -> start where start is the start symbol.
+ # -----------------------------------------------------------------------------
+
+ def set_start(self,start=None):
+ if not start:
+ start = self.Productions[1].name
+ if start not in self.Nonterminals:
+ raise GrammarError("start symbol %s undefined" % start)
+ self.Productions[0] = Production(0,"S'",[start])
+ self.Nonterminals[start].append(0)
+ self.Start = start
+
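
A minimal end-to-end sketch of the Grammar methods above (the token and rule
names are invented for illustration):

    g = Grammar(['NUMBER', 'PLUS'])
    g.set_precedence('PLUS', 'left', 1)
    g.add_production('expr', ['expr', 'PLUS', 'expr'], file='calc.py', line=10)
    g.add_production('expr', ['NUMBER'], file='calc.py', line=11)
    g.set_start('expr')    # installs S' -> expr as Production 0
    len(g)                 # -> 3: the S' rule plus the two expr rules
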
+ # -----------------------------------------------------------------------------
+ # find_unreachable()
+ #
+ # Find all of the nonterminal symbols that can't be reached from the starting
+ # symbol. Returns a list of nonterminals that can't be reached.
+ # -----------------------------------------------------------------------------
+
+ def find_unreachable(self):
+
+ # Mark all symbols that are reachable from a symbol s
+ def mark_reachable_from(s):
+ if reachable[s]:
+ # We've already reached symbol s.
+ return
+ reachable[s] = 1
+ for p in self.Prodnames.get(s,[]):
+ for r in p.prod:
+ mark_reachable_from(r)
+
+ reachable = { }
+ for s in list(self.Terminals) + list(self.Nonterminals):
+ reachable[s] = 0
+
+ mark_reachable_from( self.Productions[0].prod[0] )
+
+ return [s for s in list(self.Nonterminals)
+ if not reachable[s]]
+
+ # -----------------------------------------------------------------------------
+ # infinite_cycles()
+ #
+ # This function looks at the various parsing rules and tries to detect
+ # infinite recursion cycles (grammar rules where there is no possible way
+ # to derive a string of only terminals).
+ # -----------------------------------------------------------------------------
+
+ def infinite_cycles(self):
+ terminates = {}
+
+ # Terminals:
+ for t in self.Terminals:
+ terminates[t] = 1
+
+ terminates['$end'] = 1
+
+ # Nonterminals:
+
+ # Initialize to false:
+ for n in self.Nonterminals:
+ terminates[n] = 0
+
+ # Then propagate termination until no change:
+ while 1:
+ some_change = 0
+ for (n,pl) in self.Prodnames.items():
+ # Nonterminal n terminates iff any of its productions terminates.
+ for p in pl:
+ # Production p terminates iff all of its rhs symbols terminate.
+ for s in p.prod:
+ if not terminates[s]:
+ # The symbol s does not terminate,
+ # so production p does not terminate.
+ p_terminates = 0
+ break
+ else:
+ # didn't break from the loop,
+ # so every symbol s terminates
+ # so production p terminates.
+ p_terminates = 1
+
+ if p_terminates:
+ # symbol n terminates!
+ if not terminates[n]:
+ terminates[n] = 1
+ some_change = 1
+ # Don't need to consider any more productions for this n.
+ break
+ if not some_change:
+ break
-# -------------------------------------------------------------------------
-# first()
-#
-# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
-#
-# During execution of compute_first1, the result may be incomplete.
-# Afterward (e.g., when called from compute_follow()), it will be complete.
-# -------------------------------------------------------------------------
-def first(beta):
-
- # We are computing First(x1,x2,x3,...,xn)
- result = [ ]
- for x in beta:
- x_produces_empty = 0
-
- # Add all the non-<empty> symbols of First[x] to the result.
- for f in First[x]:
- if f == '<empty>':
- x_produces_empty = 1
- else:
- if f not in result: result.append(f)
+ infinite = []
+ for (s,term) in terminates.items():
+ if not term:
+ if not s in self.Prodnames and not s in self.Terminals and s != 'error':
+ # s is used-but-not-defined, and we've already warned of that,
+ # so it would be overkill to say that it's also non-terminating.
+ pass
+ else:
+ infinite.append(s)
- if x_produces_empty:
- # We have to consider the next x in beta,
- # i.e. stay in the loop.
- pass
- else:
- # We don't have to consider any further symbols in beta.
- break
- else:
- # There was no 'break' from the loop,
- # so x_produces_empty was true for all x in beta,
- # so beta produces empty as well.
- result.append('<empty>')
+ return infinite
- return result
+ # -----------------------------------------------------------------------------
+ # undefined_symbols()
+ #
+    # Find all symbols that were used in the grammar, but not defined as tokens or
+    # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
+ # and prod is the production where the symbol was used.
+ # -----------------------------------------------------------------------------
+ def undefined_symbols(self):
+ result = []
+ for p in self.Productions:
+ if not p: continue
-# FOLLOW(x)
-# Given a non-terminal. This function computes the set of all symbols
-# that might follow it. Dragon book, p. 189.
-
-def compute_follow(start=None):
- # Add '$end' to the follow list of the start symbol
- for k in Nonterminals.keys():
- Follow[k] = [ ]
-
- if not start:
- start = Productions[1].name
-
- Follow[start] = [ '$end' ]
-
- while 1:
- didadd = 0
- for p in Productions[1:]:
- # Here is the production set
- for i in range(len(p.prod)):
- B = p.prod[i]
- if Nonterminals.has_key(B):
- # Okay. We got a non-terminal in a production
- fst = first(p.prod[i+1:])
- hasempty = 0
- for f in fst:
- if f != '<empty>' and f not in Follow[B]:
- Follow[B].append(f)
- didadd = 1
- if f == '<empty>':
- hasempty = 1
- if hasempty or i == (len(p.prod)-1):
- # Add elements of follow(a) to follow(b)
- for f in Follow[p.name]:
- if f not in Follow[B]:
- Follow[B].append(f)
- didadd = 1
- if not didadd: break
+ for s in p.prod:
+ if not s in self.Prodnames and not s in self.Terminals and s != 'error':
+ result.append((s,p))
+ return result
+
+ # -----------------------------------------------------------------------------
+ # unused_terminals()
+ #
+ # Find all terminals that were defined, but not used by the grammar. Returns
+    # a list of the unused terminal names.
+ # -----------------------------------------------------------------------------
+ def unused_terminals(self):
+ unused_tok = []
+ for s,v in self.Terminals.items():
+ if s != 'error' and not v:
+ unused_tok.append(s)
+
+ return unused_tok
+
+ # ------------------------------------------------------------------------------
+ # unused_rules()
+ #
+    # Find all grammar rules that were defined, but not used (for example, because
+    # they are unreachable). Returns a list of productions.
+ # ------------------------------------------------------------------------------
+
+ def unused_rules(self):
+ unused_prod = []
+ for s,v in self.Nonterminals.items():
+ if not v:
+ p = self.Prodnames[s][0]
+ unused_prod.append(p)
+ return unused_prod
+
+ # -----------------------------------------------------------------------------
+ # unused_precedence()
+ #
+ # Returns a list of tuples (term,precedence) corresponding to precedence
+ # rules that were never used by the grammar. term is the name of the terminal
+ # on which precedence was applied and precedence is a string such as 'left' or
+ # 'right' corresponding to the type of precedence.
+ # -----------------------------------------------------------------------------
+
+ def unused_precedence(self):
+ unused = []
+ for termname in self.Precedence:
+ if not (termname in self.Terminals or termname in self.UsedPrecedence):
+ unused.append((termname,self.Precedence[termname][0]))
+
+ return unused
+
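
Continuing the sketch above, each of these diagnostic helpers returns a plain
list and leaves reporting to the caller:

    g.undefined_symbols()    # [] here; otherwise (sym, prod) pairs
    g.unused_terminals()     # [] here; names of terminals never used in a rule
    g.find_unreachable()     # nonterminals not reachable from the start symbol
    g.infinite_cycles()      # symbols that can never derive a terminal string
    g.unused_precedence()    # (term, assoc) pairs for unused precedence rules
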
+ # -------------------------------------------------------------------------
+ # _first()
+ #
+ # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+ #
+    # During execution of compute_first(), the result may be incomplete.
+ # Afterward (e.g., when called from compute_follow()), it will be complete.
+ # -------------------------------------------------------------------------
+ def _first(self,beta):
+
+ # We are computing First(x1,x2,x3,...,xn)
+ result = [ ]
+ for x in beta:
+ x_produces_empty = 0
+
+ # Add all the non-<empty> symbols of First[x] to the result.
+ for f in self.First[x]:
+ if f == '<empty>':
+ x_produces_empty = 1
+ else:
+ if f not in result: result.append(f)
- if 0 and yaccdebug:
- _vf.write('\nFollow:\n')
- for k in Nonterminals.keys():
- _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
+ if x_produces_empty:
+ # We have to consider the next x in beta,
+ # i.e. stay in the loop.
+ pass
+ else:
+ # We don't have to consider any further symbols in beta.
+ break
+ else:
+ # There was no 'break' from the loop,
+ # so x_produces_empty was true for all x in beta,
+ # so beta produces empty as well.
+ result.append('<empty>')
-# -------------------------------------------------------------------------
-# compute_first1()
-#
-# Compute the value of FIRST1(X) for all symbols
-# -------------------------------------------------------------------------
-def compute_first1():
-
- # Terminals:
- for t in Terminals.keys():
- First[t] = [t]
-
- First['$end'] = ['$end']
- First['#'] = ['#'] # what's this for?
-
- # Nonterminals:
-
- # Initialize to the empty set:
- for n in Nonterminals.keys():
- First[n] = []
-
- # Then propagate symbols until no change:
- while 1:
- some_change = 0
- for n in Nonterminals.keys():
- for p in Prodnames[n]:
- for f in first(p.prod):
- if f not in First[n]:
- First[n].append( f )
- some_change = 1
- if not some_change:
- break
-
- if 0 and yaccdebug:
- _vf.write('\nFirst:\n')
- for k in Nonterminals.keys():
- _vf.write("%-20s : %s\n" %
- (k, " ".join([str(s) for s in First[k]])))
+ return result
-# -----------------------------------------------------------------------------
-# === SLR Generation ===
-#
-# The following functions are used to construct SLR (Simple LR) parsing tables
-# as described on p.221-229 of the dragon book.
-# -----------------------------------------------------------------------------
+ # -------------------------------------------------------------------------
+ # compute_first()
+ #
+ # Compute the value of FIRST1(X) for all symbols
+ # -------------------------------------------------------------------------
+ def compute_first(self):
+ if self.First:
+ return self.First
-# Global variables for the LR parsing engine
-def lr_init_vars():
- global _lr_action, _lr_goto, _lr_method
- global _lr_goto_cache, _lr0_cidhash
-
- _lr_action = { } # Action table
- _lr_goto = { } # Goto table
- _lr_method = "Unknown" # LR method used
- _lr_goto_cache = { }
- _lr0_cidhash = { }
-
-
-# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
-# prodlist is a list of productions.
-
-_add_count = 0 # Counter used to detect cycles
-
-def lr0_closure(I):
- global _add_count
-
- _add_count += 1
- prodlist = Productions
-
- # Add everything in I to J
- J = I[:]
- didadd = 1
- while didadd:
- didadd = 0
- for j in J:
- for x in j.lrafter:
- if x.lr0_added == _add_count: continue
- # Add B --> .G to J
- J.append(x.lr_next)
- x.lr0_added = _add_count
- didadd = 1
-
- return J
-
-# Compute the LR(0) goto function goto(I,X) where I is a set
-# of LR(0) items and X is a grammar symbol. This function is written
-# in a way that guarantees uniqueness of the generated goto sets
-# (i.e. the same goto set will never be returned as two different Python
-# objects). With uniqueness, we can later do fast set comparisons using
-# id(obj) instead of element-wise comparison.
-
-def lr0_goto(I,x):
- # First we look for a previously cached entry
- g = _lr_goto_cache.get((id(I),x),None)
- if g: return g
-
- # Now we generate the goto set in a way that guarantees uniqueness
- # of the result
-
- s = _lr_goto_cache.get(x,None)
- if not s:
- s = { }
- _lr_goto_cache[x] = s
-
- gs = [ ]
- for p in I:
- n = p.lr_next
- if n and n.lrbefore == x:
- s1 = s.get(id(n),None)
- if not s1:
- s1 = { }
- s[id(n)] = s1
- gs.append(n)
- s = s1
- g = s.get('$end',None)
- if not g:
- if gs:
- g = lr0_closure(gs)
- s['$end'] = g
- else:
- s['$end'] = gs
- _lr_goto_cache[(id(I),x)] = g
- return g
-
-_lr0_cidhash = { }
-
-# Compute the LR(0) sets of item function
-def lr0_items():
-
- C = [ lr0_closure([Productions[0].lr_next]) ]
- i = 0
- for I in C:
- _lr0_cidhash[id(I)] = i
- i += 1
-
- # Loop over the items in C and each grammar symbols
- i = 0
- while i < len(C):
- I = C[i]
- i += 1
-
- # Collect all of the symbols that could possibly be in the goto(I,X) sets
- asyms = { }
- for ii in I:
- for s in ii.usyms:
- asyms[s] = None
-
- for x in asyms.keys():
- g = lr0_goto(I,x)
- if not g: continue
- if _lr0_cidhash.has_key(id(g)): continue
- _lr0_cidhash[id(g)] = len(C)
- C.append(g)
-
- return C
+ # Terminals:
+ for t in self.Terminals:
+ self.First[t] = [t]
-# -----------------------------------------------------------------------------
-# ==== LALR(1) Parsing ====
-#
-# LALR(1) parsing is almost exactly the same as SLR except that instead of
-# relying upon Follow() sets when performing reductions, a more selective
-# lookahead set that incorporates the state of the LR(0) machine is utilized.
-# Thus, we mainly just have to focus on calculating the lookahead sets.
-#
-# The method used here is due to DeRemer and Pennelo (1982).
-#
-# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
-# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
-# Vol. 4, No. 4, Oct. 1982, pp. 615-649
-#
-# Further details can also be found in:
-#
-# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
-# McGraw-Hill Book Company, (1985).
-#
-# Note: This implementation is a complete replacement of the LALR(1)
-# implementation in PLY-1.x releases. That version was based on
-# a less efficient algorithm and it had bugs in its implementation.
-# -----------------------------------------------------------------------------
+ self.First['$end'] = ['$end']
-# -----------------------------------------------------------------------------
-# compute_nullable_nonterminals()
-#
-# Creates a dictionary containing all of the non-terminals that might produce
-# an empty production.
-# -----------------------------------------------------------------------------
+ # Nonterminals:
-def compute_nullable_nonterminals():
- nullable = {}
- num_nullable = 0
- while 1:
- for p in Productions[1:]:
- if p.len == 0:
- nullable[p.name] = 1
- continue
- for t in p.prod:
- if not nullable.has_key(t): break
- else:
- nullable[p.name] = 1
- if len(nullable) == num_nullable: break
- num_nullable = len(nullable)
- return nullable
+ # Initialize to the empty set:
+ for n in self.Nonterminals:
+ self.First[n] = []
-# -----------------------------------------------------------------------------
-# find_nonterminal_trans(C)
-#
-# Given a set of LR(0) items, this functions finds all of the non-terminal
-# transitions. These are transitions in which a dot appears immediately before
-# a non-terminal. Returns a list of tuples of the form (state,N) where state
-# is the state number and N is the nonterminal symbol.
-#
-# The input C is the set of LR(0) items.
-# -----------------------------------------------------------------------------
+ # Then propagate symbols until no change:
+ while 1:
+ some_change = 0
+ for n in self.Nonterminals:
+ for p in self.Prodnames[n]:
+ for f in self._first(p.prod):
+ if f not in self.First[n]:
+ self.First[n].append( f )
+ some_change = 1
+ if not some_change:
+ break
+
+ return self.First
+
+ # ---------------------------------------------------------------------
+ # compute_follow()
+ #
+ # Computes all of the follow sets for every non-terminal symbol. The
+ # follow set is the set of all symbols that might follow a given
+ # non-terminal. See the Dragon book, 2nd Ed. p. 189.
+ # ---------------------------------------------------------------------
+ def compute_follow(self,start=None):
+ # If already computed, return the result
+ if self.Follow:
+ return self.Follow
+
+ # If first sets not computed yet, do that first.
+ if not self.First:
+ self.compute_first()
+
+ # Add '$end' to the follow list of the start symbol
+ for k in self.Nonterminals:
+ self.Follow[k] = [ ]
+
+ if not start:
+ start = self.Productions[1].name
+
+ self.Follow[start] = [ '$end' ]
-def find_nonterminal_transitions(C):
- trans = []
- for state in range(len(C)):
- for p in C[state]:
- if p.lr_index < p.len - 1:
- t = (state,p.prod[p.lr_index+1])
- if Nonterminals.has_key(t[1]):
- if t not in trans: trans.append(t)
- state = state + 1
- return trans
+ while 1:
+ didadd = 0
+ for p in self.Productions[1:]:
+ # Here is the production set
+ for i in range(len(p.prod)):
+ B = p.prod[i]
+ if B in self.Nonterminals:
+ # Okay. We got a non-terminal in a production
+ fst = self._first(p.prod[i+1:])
+ hasempty = 0
+ for f in fst:
+ if f != '<empty>' and f not in self.Follow[B]:
+ self.Follow[B].append(f)
+ didadd = 1
+ if f == '<empty>':
+ hasempty = 1
+ if hasempty or i == (len(p.prod)-1):
+                            # Add elements of Follow(p.name) to Follow(B)
+ for f in self.Follow[p.name]:
+ if f not in self.Follow[B]:
+ self.Follow[B].append(f)
+ didadd = 1
+ if not didadd: break
+ return self.Follow
+
+
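For the toy grammar sketched earlier, these methods yield the sets expected
from the Dragon-book definitions (exact list ordering may differ):

    g.compute_first()
    g.compute_follow()
    g.First['expr']     # -> ['NUMBER']
    g.Follow['expr']    # -> ['$end', 'PLUS']
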
+ # -----------------------------------------------------------------------------
+ # build_lritems()
+ #
+ # This function walks the list of productions and builds a complete set of the
+    # LR items. The LR items are stored in two ways: First, they are placed in
+    # the lr_items list attached to each production. Second, a linked list of LR
+    # items is built for each production via the lr_next attribute. For example:
+ #
+ # E -> E PLUS E
+ #
+ # Creates the list
+ #
+ # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+ # -----------------------------------------------------------------------------
+
+ def build_lritems(self):
+ for p in self.Productions:
+ lastlri = p
+ i = 0
+ lr_items = []
+ while 1:
+ if i > len(p):
+ lri = None
+ else:
+ lri = LRItem(p,i)
+ # Precompute the list of productions immediately following
+ try:
+ lri.lr_after = self.Prodnames[lri.prod[i+1]]
+ except (IndexError,KeyError):
+ lri.lr_after = []
+ try:
+ lri.lr_before = lri.prod[i-1]
+ except IndexError:
+ lri.lr_before = None
+
+ lastlri.lr_next = lri
+ if not lri: break
+ lr_items.append(lri)
+ lastlri = lri
+ i += 1
+ p.lr_items = lr_items
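
Walking the linked list built above for the toy grammar's first rule shows
the dot advancing one position at a time (a sketch):

    g.build_lritems()
    item = g.Productions[1].lr_next
    while item:
        print(item)
        item = item.lr_next
    # expr -> . expr PLUS expr
    # expr -> expr . PLUS expr
    # expr -> expr PLUS . expr
    # expr -> expr PLUS expr .
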
# -----------------------------------------------------------------------------
-# dr_relation()
-#
-# Computes the DR(p,A) relationships for non-terminal transitions. The input
-# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+# == Class LRTable ==
#
-# Returns a list of terminals.
+# This class represents a basic table of LR parsing information.
+# Methods for generating the tables are not defined here. They are defined
+# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
-def dr_relation(C,trans,nullable):
- dr_set = { }
- state,N = trans
- terms = []
+class VersionError(YaccError): pass
- g = lr0_goto(C[state],N)
- for p in g:
- if p.lr_index < p.len - 1:
- a = p.prod[p.lr_index+1]
- if Terminals.has_key(a):
- if a not in terms: terms.append(a)
+class LRTable(object):
+ def __init__(self):
+ self.lr_action = None
+ self.lr_goto = None
+ self.lr_productions = None
+ self.lr_method = None
- # This extra bit is to handle the start state
- if state == 0 and N == Productions[0].prod[0]:
- terms.append('$end')
+ def read_table(self,module):
+ if isinstance(module,types.ModuleType):
+ parsetab = module
+ else:
+ if sys.version_info[0] < 3:
+ exec("import %s as parsetab" % module)
+ else:
+ env = { }
+ exec("import %s as parsetab" % module, env, env)
+ parsetab = env['parsetab']
- return terms
+ if parsetab._tabversion != __tabversion__:
+ raise VersionError("yacc table file version is out of date")
-# -----------------------------------------------------------------------------
-# reads_relation()
-#
-# Computes the READS() relation (p,A) READS (t,C).
-# -----------------------------------------------------------------------------
-
-def reads_relation(C, trans, empty):
- # Look for empty transitions
- rel = []
- state, N = trans
+ self.lr_action = parsetab._lr_action
+ self.lr_goto = parsetab._lr_goto
- g = lr0_goto(C[state],N)
- j = _lr0_cidhash.get(id(g),-1)
- for p in g:
- if p.lr_index < p.len - 1:
- a = p.prod[p.lr_index + 1]
- if empty.has_key(a):
- rel.append((j,a))
+ self.lr_productions = []
+ for p in parsetab._lr_productions:
+ self.lr_productions.append(MiniProduction(*p))
- return rel
+ self.lr_method = parsetab._lr_method
+ return parsetab._lr_signature
+ def read_pickle(self,filename):
+ try:
+ import cPickle as pickle
+ except ImportError:
+ import pickle
+
+ in_f = open(filename,"rb")
+
+ tabversion = pickle.load(in_f)
+ if tabversion != __tabversion__:
+ raise VersionError("yacc table file version is out of date")
+ self.lr_method = pickle.load(in_f)
+ signature = pickle.load(in_f)
+ self.lr_action = pickle.load(in_f)
+ self.lr_goto = pickle.load(in_f)
+ productions = pickle.load(in_f)
+
+ self.lr_productions = []
+ for p in productions:
+ self.lr_productions.append(MiniProduction(*p))
+
+ in_f.close()
+ return signature
+
+ # Bind all production function names to callable objects in pdict
+ def bind_callables(self,pdict):
+ for p in self.lr_productions:
+ p.bind(pdict)
+
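A sketch of reloading a previously saved table (the filename is hypothetical
and assumes a pickled table was written earlier by the table generator):

    lrtab = LRTable()
    signature = lrtab.read_pickle('parser.out.p')
    lrtab.bind_callables(globals())    # reattach the p_* action functions
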
# -----------------------------------------------------------------------------
-# compute_lookback_includes()
-#
-# Determines the lookback and includes relations
-#
-# LOOKBACK:
-#
-# This relation is determined by running the LR(0) state machine forward.
-# For example, starting with a production "N : . A B C", we run it forward
-# to obtain "N : A B C ." We then build a relationship between this final
-# state and the starting state. These relationships are stored in a dictionary
-# lookdict.
-#
-# INCLUDES:
-#
-# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
-#
-# This relation is used to determine non-terminal transitions that occur
-# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
-# if the following holds:
-#
-# B -> LAT, where T -> epsilon and p' -L-> p
-#
-# L is essentially a prefix (which may be empty), T is a suffix that must be
-# able to derive an empty string. State p' must lead to state p with the string L.
+# === LR Generator ===
#
+# The following classes and functions are used to generate LR parsing tables
+# for a grammar.
# -----------------------------------------------------------------------------
-def compute_lookback_includes(C,trans,nullable):
-
- lookdict = {} # Dictionary of lookback relations
- includedict = {} # Dictionary of include relations
-
- # Make a dictionary of non-terminal transitions
- dtrans = {}
- for t in trans:
- dtrans[t] = 1
-
- # Loop over all transitions and compute lookbacks and includes
- for state,N in trans:
- lookb = []
- includes = []
- for p in C[state]:
- if p.name != N: continue
-
- # Okay, we have a name match. We now follow the production all the way
- # through the state machine until we get the . on the right hand side
-
- lr_index = p.lr_index
- j = state
- while lr_index < p.len - 1:
- lr_index = lr_index + 1
- t = p.prod[lr_index]
-
- # Check to see if this symbol and state are a non-terminal transition
- if dtrans.has_key((j,t)):
- # Yes. Okay, there is some chance that this is an includes relation
- # the only way to know for certain is whether the rest of the
- # production derives empty
-
- li = lr_index + 1
- while li < p.len:
- if Terminals.has_key(p.prod[li]): break # No forget it
- if not nullable.has_key(p.prod[li]): break
- li = li + 1
- else:
- # Appears to be a relation between (j,t) and (state,N)
- includes.append((j,t))
-
- g = lr0_goto(C[j],t) # Go to next set
- j = _lr0_cidhash.get(id(g),-1) # Go to next state
-
- # When we get here, j is the final state, now we have to locate the production
- for r in C[j]:
- if r.name != p.name: continue
- if r.len != p.len: continue
- i = 0
- # This look is comparing a production ". A B C" with "A B C ."
- while i < r.lr_index:
- if r.prod[i] != p.prod[i+1]: break
- i = i + 1
- else:
- lookb.append((j,r))
- for i in includes:
- if not includedict.has_key(i): includedict[i] = []
- includedict[i].append((state,N))
- lookdict[(state,N)] = lookb
-
- return lookdict,includedict
-
# -----------------------------------------------------------------------------
# digraph()
# traverse()
@@ -1525,700 +1919,1358 @@ def traverse(x,N,stack,F,X,R,FP):
for a in F.get(y,[]):
if a not in F[x]: F[x].append(a)
if N[x] == d:
- N[stack[-1]] = sys.maxint
+ N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
- N[stack[-1]] = sys.maxint
+ N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
-# -----------------------------------------------------------------------------
-# compute_read_sets()
-#
-# Given a set of LR(0) items, this function computes the read sets.
-#
-# Inputs: C = Set of LR(0) items
-# ntrans = Set of nonterminal transitions
-# nullable = Set of empty transitions
-#
-# Returns a set containing the read sets
-# -----------------------------------------------------------------------------
-
-def compute_read_sets(C, ntrans, nullable):
- FP = lambda x: dr_relation(C,x,nullable)
- R = lambda x: reads_relation(C,x,nullable)
- F = digraph(ntrans,R,FP)
- return F
-
-# -----------------------------------------------------------------------------
-# compute_follow_sets()
-#
-# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
-# and an include set, this function computes the follow sets
-#
-# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
-#
-# Inputs:
-# ntrans = Set of nonterminal transitions
-# readsets = Readset (previously computed)
-# inclsets = Include sets (previously computed)
-#
-# Returns a set containing the follow sets
-# -----------------------------------------------------------------------------
-
-def compute_follow_sets(ntrans,readsets,inclsets):
- FP = lambda x: readsets[x]
- R = lambda x: inclsets.get(x,[])
- F = digraph(ntrans,R,FP)
- return F
+class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
-# add_lookaheads()
-#
-# Attaches the lookahead symbols to grammar rules.
+# == LRGeneratedTable ==
#
-# Inputs: lookbacks - Set of lookback relations
-# followset - Computed follow set
-#
-# This function directly attaches the lookaheads to productions contained
-# in the lookbacks set
+# This class implements the LR table generation algorithm. There are no
+# public methods except for write()
# -----------------------------------------------------------------------------
-def add_lookaheads(lookbacks,followset):
- for trans,lb in lookbacks.items():
- # Loop over productions in lookback
- for state,p in lb:
- if not p.lookaheads.has_key(state):
- p.lookaheads[state] = []
- f = followset.get(trans,[])
- for a in f:
- if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
-
-# -----------------------------------------------------------------------------
-# add_lalr_lookaheads()
-#
-# This function does all of the work of adding lookahead information for use
-# with LALR parsing
-# -----------------------------------------------------------------------------
-
-def add_lalr_lookaheads(C):
- # Determine all of the nullable nonterminals
- nullable = compute_nullable_nonterminals()
-
- # Find all non-terminal transitions
- trans = find_nonterminal_transitions(C)
-
- # Compute read sets
- readsets = compute_read_sets(C,trans,nullable)
-
- # Compute lookback/includes relations
- lookd, included = compute_lookback_includes(C,trans,nullable)
-
- # Compute LALR FOLLOW sets
- followsets = compute_follow_sets(trans,readsets,included)
-
- # Add all of the lookaheads
- add_lookaheads(lookd,followsets)
-
-# -----------------------------------------------------------------------------
-# lr_parse_table()
-#
-# This function constructs the parse tables for SLR or LALR
-# -----------------------------------------------------------------------------
-def lr_parse_table(method):
- global _lr_method
- goto = _lr_goto # Goto array
- action = _lr_action # Action array
- actionp = { } # Action production array (temporary)
-
- _lr_method = method
-
- n_srconflict = 0
- n_rrconflict = 0
-
- if yaccdebug:
- sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
- _vf.write("\n\nParsing method: %s\n\n" % method)
-
- # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
- # This determines the number of states
+class LRGeneratedTable(LRTable):
+ def __init__(self,grammar,method='LALR',log=None):
+ if method not in ['SLR','LALR']:
+ raise LALRError("Unsupported method %s" % method)
+
+ self.grammar = grammar
+ self.lr_method = method
+
+ # Set up the logger
+ if not log:
+ log = NullLogger()
+ self.log = log
+
+ # Internal attributes
+ self.lr_action = {} # Action table
+ self.lr_goto = {} # Goto table
+ self.lr_productions = grammar.Productions # Copy of grammar Production array
+ self.lr_goto_cache = {} # Cache of computed gotos
+ self.lr0_cidhash = {} # Cache of closures
+
+ self._add_count = 0 # Internal counter used to detect cycles
+
+        # Diagnostic information filled in by the table generator
+ self.sr_conflict = 0
+ self.rr_conflict = 0
+ self.conflicts = [] # List of conflicts
+
+ self.sr_conflicts = []
+ self.rr_conflicts = []
+
+ # Build the tables
+ self.grammar.build_lritems()
+ self.grammar.compute_first()
+ self.grammar.compute_follow()
+ self.lr_parse_table()
+
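Building a table for the toy grammar sketched earlier is a single call, since
__init__ drives the whole pipeline (a sketch):

    tab = LRGeneratedTable(g, method='LALR')
    len(tab.lr_action)    # one action row per parser state
    tab.sr_conflicts      # any shift/reduce conflicts that were recorded
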
+ # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
+
+ def lr0_closure(self,I):
+ self._add_count += 1
+
+ # Add everything in I to J
+ J = I[:]
+ didadd = 1
+ while didadd:
+ didadd = 0
+ for j in J:
+ for x in j.lr_after:
+ if getattr(x,"lr0_added",0) == self._add_count: continue
+ # Add B --> .G to J
+ J.append(x.lr_next)
+ x.lr0_added = self._add_count
+ didadd = 1
+
+ return J
+
+ # Compute the LR(0) goto function goto(I,X) where I is a set
+ # of LR(0) items and X is a grammar symbol. This function is written
+ # in a way that guarantees uniqueness of the generated goto sets
+ # (i.e. the same goto set will never be returned as two different Python
+ # objects). With uniqueness, we can later do fast set comparisons using
+ # id(obj) instead of element-wise comparison.
+
+ def lr0_goto(self,I,x):
+ # First we look for a previously cached entry
+ g = self.lr_goto_cache.get((id(I),x),None)
+ if g: return g
+
+ # Now we generate the goto set in a way that guarantees uniqueness
+ # of the result
+
+ s = self.lr_goto_cache.get(x,None)
+ if not s:
+ s = { }
+ self.lr_goto_cache[x] = s
+
+ gs = [ ]
+ for p in I:
+ n = p.lr_next
+ if n and n.lr_before == x:
+ s1 = s.get(id(n),None)
+ if not s1:
+ s1 = { }
+ s[id(n)] = s1
+ gs.append(n)
+ s = s1
+ g = s.get('$end',None)
+ if not g:
+ if gs:
+ g = self.lr0_closure(gs)
+ s['$end'] = g
+ else:
+ s['$end'] = gs
+ self.lr_goto_cache[(id(I),x)] = g
+ return g
- C = lr0_items()
+    # Compute the LR(0) sets-of-items function
+ def lr0_items(self):
- if method == 'LALR':
- add_lalr_lookaheads(C)
+ C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
+ i = 0
+ for I in C:
+ self.lr0_cidhash[id(I)] = i
+ i += 1
+        # Loop over the items in C and each grammar symbol
+ i = 0
+ while i < len(C):
+ I = C[i]
+ i += 1
- # Build the parser table, state by state
- st = 0
- for I in C:
- # Loop over each production in I
- actlist = [ ] # List of actions
- st_action = { }
- st_actionp = { }
- st_goto = { }
- if yaccdebug:
- _vf.write("\nstate %d\n\n" % st)
+ # Collect all of the symbols that could possibly be in the goto(I,X) sets
+ asyms = { }
+ for ii in I:
+ for s in ii.usyms:
+ asyms[s] = None
+
+ for x in asyms:
+ g = self.lr0_goto(I,x)
+ if not g: continue
+ if id(g) in self.lr0_cidhash: continue
+ self.lr0_cidhash[id(g)] = len(C)
+ C.append(g)
+
+ return C
+
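Continuing the sketch, the LR(0) machinery can also be exercised directly on
a constructed table:

    I0 = tab.lr0_closure([g.Productions[0].lr_next])
    for item in I0:
        print(item)                    # S' -> . expr plus the closed expr items
    gto = tab.lr0_goto(I0, 'expr')     # the item set reached on an 'expr' transition
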
+ # -----------------------------------------------------------------------------
+ # ==== LALR(1) Parsing ====
+ #
+ # LALR(1) parsing is almost exactly the same as SLR except that instead of
+ # relying upon Follow() sets when performing reductions, a more selective
+ # lookahead set that incorporates the state of the LR(0) machine is utilized.
+ # Thus, we mainly just have to focus on calculating the lookahead sets.
+ #
+ # The method used here is due to DeRemer and Pennelo (1982).
+ #
+ # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
+ # Lookahead Sets", ACM Transactions on Programming Languages and Systems,
+ # Vol. 4, No. 4, Oct. 1982, pp. 615-649
+ #
+ # Further details can also be found in:
+ #
+ # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+ # McGraw-Hill Book Company, (1985).
+ #
+ # -----------------------------------------------------------------------------
+
+ # -----------------------------------------------------------------------------
+ # compute_nullable_nonterminals()
+ #
+ # Creates a dictionary containing all of the non-terminals that might produce
+ # an empty production.
+ # -----------------------------------------------------------------------------
+
+ def compute_nullable_nonterminals(self):
+ nullable = {}
+ num_nullable = 0
+ while 1:
+ for p in self.grammar.Productions[1:]:
+ if p.len == 0:
+ nullable[p.name] = 1
+ continue
+ for t in p.prod:
+ if not t in nullable: break
+ else:
+ nullable[p.name] = 1
+ if len(nullable) == num_nullable: break
+ num_nullable = len(nullable)
+ return nullable
+
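For instance, a grammar with an empty production marks its nonterminal as
nullable (a sketch reusing the classes above):

    g2 = Grammar(['NUMBER'])
    g2.add_production('opt', [])              # opt -> <empty>
    g2.add_production('opt', ['NUMBER'])
    g2.set_start('opt')
    tab2 = LRGeneratedTable(g2)
    tab2.compute_nullable_nonterminals()      # -> {'opt': 1}
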
+ # -----------------------------------------------------------------------------
+    # find_nonterminal_transitions(C)
+ #
+    # Given a set of LR(0) items, this function finds all of the non-terminal
+ # transitions. These are transitions in which a dot appears immediately before
+ # a non-terminal. Returns a list of tuples of the form (state,N) where state
+ # is the state number and N is the nonterminal symbol.
+ #
+ # The input C is the set of LR(0) items.
+ # -----------------------------------------------------------------------------
+
+ def find_nonterminal_transitions(self,C):
+ trans = []
+ for state in range(len(C)):
+ for p in C[state]:
+ if p.lr_index < p.len - 1:
+ t = (state,p.prod[p.lr_index+1])
+ if t[1] in self.grammar.Nonterminals:
+ if t not in trans: trans.append(t)
+ state = state + 1
+ return trans
+
+ # -----------------------------------------------------------------------------
+ # dr_relation()
+ #
+ # Computes the DR(p,A) relationships for non-terminal transitions. The input
+ # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+ #
+ # Returns a list of terminals.
+ # -----------------------------------------------------------------------------
+
+ def dr_relation(self,C,trans,nullable):
+ dr_set = { }
+ state,N = trans
+ terms = []
+
+ g = self.lr0_goto(C[state],N)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index+1]
+ if a in self.grammar.Terminals:
+ if a not in terms: terms.append(a)
+
+ # This extra bit is to handle the start state
+ if state == 0 and N == self.grammar.Productions[0].prod[0]:
+ terms.append('$end')
+
+ return terms
+
+ # -----------------------------------------------------------------------------
+ # reads_relation()
+ #
+ # Computes the READS() relation (p,A) READS (t,C).
+ # -----------------------------------------------------------------------------
+
+ def reads_relation(self,C, trans, empty):
+ # Look for empty transitions
+ rel = []
+ state, N = trans
+
+ g = self.lr0_goto(C[state],N)
+ j = self.lr0_cidhash.get(id(g),-1)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index + 1]
+ if a in empty:
+ rel.append((j,a))
+
+ return rel
+
+ # -----------------------------------------------------------------------------
+ # compute_lookback_includes()
+ #
+ # Determines the lookback and includes relations
+ #
+ # LOOKBACK:
+ #
+ # This relation is determined by running the LR(0) state machine forward.
+ # For example, starting with a production "N : . A B C", we run it forward
+ # to obtain "N : A B C ." We then build a relationship between this final
+ # state and the starting state. These relationships are stored in a dictionary
+ # lookdict.
+ #
+ # INCLUDES:
+ #
+ # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
+ #
+ # This relation is used to determine non-terminal transitions that occur
+ # inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
+ # if the following holds:
+ #
+ # B -> LAT, where T -> epsilon and p' -L-> p
+ #
+ # L is essentially a prefix (which may be empty), T is a suffix that must be
+ # able to derive an empty string. State p' must lead to state p with the string L.
+ #
+ # -----------------------------------------------------------------------------
+
+ def compute_lookback_includes(self,C,trans,nullable):
+
+ lookdict = {} # Dictionary of lookback relations
+ includedict = {} # Dictionary of include relations
+
+ # Make a dictionary of non-terminal transitions
+ dtrans = {}
+ for t in trans:
+ dtrans[t] = 1
+
+ # Loop over all transitions and compute lookbacks and includes
+ for state,N in trans:
+ lookb = []
+ includes = []
+ for p in C[state]:
+ if p.name != N: continue
+
+ # Okay, we have a name match. We now follow the production all the way
+ # through the state machine until we get the . on the right hand side
+
+ lr_index = p.lr_index
+ j = state
+ while lr_index < p.len - 1:
+ lr_index = lr_index + 1
+ t = p.prod[lr_index]
+
+ # Check to see if this symbol and state are a non-terminal transition
+ if (j,t) in dtrans:
+ # Yes. Okay, there is some chance that this is an includes relation
+ # the only way to know for certain is whether the rest of the
+ # production derives empty
+
+ li = lr_index + 1
+ while li < p.len:
+                        if p.prod[li] in self.grammar.Terminals: break # No, forget it
+ if not p.prod[li] in nullable: break
+ li = li + 1
+ else:
+ # Appears to be a relation between (j,t) and (state,N)
+ includes.append((j,t))
+
+ g = self.lr0_goto(C[j],t) # Go to next set
+ j = self.lr0_cidhash.get(id(g),-1) # Go to next state
+
+ # When we get here, j is the final state, now we have to locate the production
+ for r in C[j]:
+ if r.name != p.name: continue
+ if r.len != p.len: continue
+ i = 0
+                    # This loop is comparing a production ". A B C" with "A B C ."
+ while i < r.lr_index:
+ if r.prod[i] != p.prod[i+1]: break
+ i = i + 1
+ else:
+ lookb.append((j,r))
+ for i in includes:
+ if not i in includedict: includedict[i] = []
+ includedict[i].append((state,N))
+ lookdict[(state,N)] = lookb
+
+ return lookdict,includedict
+
+ # -----------------------------------------------------------------------------
+ # compute_read_sets()
+ #
+ # Given a set of LR(0) items, this function computes the read sets.
+ #
+ # Inputs: C = Set of LR(0) items
+ # ntrans = Set of nonterminal transitions
+ # nullable = Set of empty transitions
+ #
+ # Returns a set containing the read sets
+ # -----------------------------------------------------------------------------
+
+ def compute_read_sets(self,C, ntrans, nullable):
+ FP = lambda x: self.dr_relation(C,x,nullable)
+ R = lambda x: self.reads_relation(C,x,nullable)
+ F = digraph(ntrans,R,FP)
+ return F
+
+ # -----------------------------------------------------------------------------
+ # compute_follow_sets()
+ #
+ # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
+ # and an include set, this function computes the follow sets
+ #
+ # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
+ #
+ # Inputs:
+ # ntrans = Set of nonterminal transitions
+ # readsets = Readset (previously computed)
+ # inclsets = Include sets (previously computed)
+ #
+ # Returns a set containing the follow sets
+ # -----------------------------------------------------------------------------
+
+ def compute_follow_sets(self,ntrans,readsets,inclsets):
+ FP = lambda x: readsets[x]
+ R = lambda x: inclsets.get(x,[])
+ F = digraph(ntrans,R,FP)
+ return F
+
+ # -----------------------------------------------------------------------------
+ # add_lookaheads()
+ #
+ # Attaches the lookahead symbols to grammar rules.
+ #
+ # Inputs: lookbacks - Set of lookback relations
+ # followset - Computed follow set
+ #
+ # This function directly attaches the lookaheads to productions contained
+ # in the lookbacks set
+ # -----------------------------------------------------------------------------
+
+ def add_lookaheads(self,lookbacks,followset):
+ for trans,lb in lookbacks.items():
+ # Loop over productions in lookback
+ for state,p in lb:
+ if not state in p.lookaheads:
+ p.lookaheads[state] = []
+ f = followset.get(trans,[])
+ for a in f:
+ if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
+
+ # -----------------------------------------------------------------------------
+ # add_lalr_lookaheads()
+ #
+ # This function does all of the work of adding lookahead information for use
+ # with LALR parsing
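+    #
+    # The steps below implement the LALR(1) lookahead algorithm of DeRemer
+    # and Pennello ("Efficient Computation of LALR(1) Look-Ahead Sets",
+    # ACM TOPLAS 4(4), 1982), using the relations computed above.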
+ # -----------------------------------------------------------------------------
+
+ def add_lalr_lookaheads(self,C):
+ # Determine all of the nullable nonterminals
+ nullable = self.compute_nullable_nonterminals()
+
+ # Find all non-terminal transitions
+ trans = self.find_nonterminal_transitions(C)
+
+ # Compute read sets
+ readsets = self.compute_read_sets(C,trans,nullable)
+
+ # Compute lookback/includes relations
+ lookd, included = self.compute_lookback_includes(C,trans,nullable)
+
+ # Compute LALR FOLLOW sets
+ followsets = self.compute_follow_sets(trans,readsets,included)
+
+ # Add all of the lookaheads
+ self.add_lookaheads(lookd,followsets)
+
+ # -----------------------------------------------------------------------------
+ # lr_parse_table()
+ #
+ # This function constructs the parse tables for SLR or LALR
+ # -----------------------------------------------------------------------------
+ def lr_parse_table(self):
+ Productions = self.grammar.Productions
+ Precedence = self.grammar.Precedence
+ goto = self.lr_goto # Goto array
+ action = self.lr_action # Action array
+ log = self.log # Logger for output
+
+ actionp = { } # Action production array (temporary)
+
+ log.info("Parsing method: %s", self.lr_method)
+
+ # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
+ # This determines the number of states
+
+ C = self.lr0_items()
+
+ if self.lr_method == 'LALR':
+ self.add_lalr_lookaheads(C)
+
+ # Build the parser table, state by state
+ st = 0
+ for I in C:
+ # Loop over each production in I
+ actlist = [ ] # List of actions
+ st_action = { }
+ st_actionp = { }
+ st_goto = { }
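+            # Encoding used in st_action: a positive entry j means "shift and
+            # go to state j", a negative entry -n means "reduce using rule n",
+            # 0 means "accept", and None marks an error (nonassociative token).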
+ log.info("")
+ log.info("state %d", st)
+ log.info("")
for p in I:
- _vf.write(" (%d) %s\n" % (p.number, str(p)))
- _vf.write("\n")
+ log.info(" (%d) %s", p.number, str(p))
+ log.info("")
- for p in I:
- try:
- if p.len == p.lr_index + 1:
- if p.name == "S'":
- # Start symbol. Accept!
- st_action["$end"] = 0
- st_actionp["$end"] = p
- else:
- # We are at the end of a production. Reduce!
- if method == 'LALR':
- laheads = p.lookaheads[st]
+ for p in I:
+ if p.len == p.lr_index + 1:
+ if p.name == "S'":
+ # Start symbol. Accept!
+ st_action["$end"] = 0
+ st_actionp["$end"] = p
else:
- laheads = Follow[p.name]
- for a in laheads:
- actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
- r = st_action.get(a,None)
- if r is not None:
- # Whoa. Have a shift/reduce or reduce/reduce conflict
- if r > 0:
- # Need to decide on shift or reduce here
- # By default we favor shifting. Need to add
- # some precedence rules here.
- sprec,slevel = Productions[st_actionp[a].number].prec
- rprec,rlevel = Precedence.get(a,('right',0))
- if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
- # We really need to reduce here.
- st_action[a] = -p.number
- st_actionp[a] = p
- if not slevel and not rlevel:
- _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
- _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
- n_srconflict += 1
- elif (slevel == rlevel) and (rprec == 'nonassoc'):
- st_action[a] = None
- else:
- # Hmmm. Guess we'll keep the shift
- if not rlevel:
- _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
- _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
- n_srconflict +=1
- elif r < 0:
- # Reduce/reduce conflict. In this case, we favor the rule
- # that was defined first in the grammar file
- oldp = Productions[-r]
- pp = Productions[p.number]
- if oldp.line > pp.line:
- st_action[a] = -p.number
- st_actionp[a] = p
- # sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
- n_rrconflict += 1
- _vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, st_actionp[a].number, st_actionp[a]))
- _vf.write(" ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,st_actionp[a].number, st_actionp[a]))
- else:
- sys.stderr.write("Unknown conflict in state %d\n" % st)
+ # We are at the end of a production. Reduce!
+ if self.lr_method == 'LALR':
+ laheads = p.lookaheads[st]
else:
- st_action[a] = -p.number
- st_actionp[a] = p
- else:
- i = p.lr_index
- a = p.prod[i+1] # Get symbol right after the "."
- if Terminals.has_key(a):
- g = lr0_goto(I,a)
- j = _lr0_cidhash.get(id(g),-1)
- if j >= 0:
- # We are in a shift state
- actlist.append((a,p,"shift and go to state %d" % j))
- r = st_action.get(a,None)
- if r is not None:
- # Whoa have a shift/reduce or shift/shift conflict
- if r > 0:
- if r != j:
- sys.stderr.write("Shift/shift conflict in state %d\n" % st)
- elif r < 0:
- # Do a precedence check.
- # - if precedence of reduce rule is higher, we reduce.
- # - if precedence of reduce is same and left assoc, we reduce.
- # - otherwise we shift
- rprec,rlevel = Productions[st_actionp[a].number].prec
- sprec,slevel = Precedence.get(a,('right',0))
- if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')):
- # We decide to shift here... highest precedence to shift
- st_action[a] = j
- st_actionp[a] = p
- if not rlevel:
- n_srconflict += 1
- _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
- _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
- elif (slevel == rlevel) and (rprec == 'nonassoc'):
- st_action[a] = None
+ laheads = self.grammar.Follow[p.name]
+ for a in laheads:
+ actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
+ r = st_action.get(a,None)
+ if r is not None:
+ # Whoa. Have a shift/reduce or reduce/reduce conflict
+ if r > 0:
+                            # Need to decide on shift or reduce here.
+                            # By default we favor shifting, unless the
+                            # precedence rules below dictate otherwise.
+ sprec,slevel = Productions[st_actionp[a].number].prec
+ rprec,rlevel = Precedence.get(a,('right',0))
+ if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
+ # We really need to reduce here.
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ if not slevel and not rlevel:
+ log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
+ self.sr_conflicts.append((st,a,'reduce'))
+ Productions[p.number].reduced += 1
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ st_action[a] = None
+ else:
+ # Hmmm. Guess we'll keep the shift
+ if not rlevel:
+ log.info(" ! shift/reduce conflict for %s resolved as shift",a)
+ self.sr_conflicts.append((st,a,'shift'))
+ elif r < 0:
+ # Reduce/reduce conflict. In this case, we favor the rule
+ # that was defined first in the grammar file
+ oldp = Productions[-r]
+ pp = Productions[p.number]
+ if oldp.line > pp.line:
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ chosenp,rejectp = pp,oldp
+ Productions[p.number].reduced += 1
+ Productions[oldp.number].reduced -= 1
+ else:
+ chosenp,rejectp = oldp,pp
+ self.rr_conflicts.append((st,chosenp,rejectp))
+ log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
else:
- # Hmmm. Guess we'll keep the reduce
- if not slevel and not rlevel:
- n_srconflict +=1
- _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
- _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
+ raise LALRError("Unknown conflict in state %d" % st)
+ else:
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ Productions[p.number].reduced += 1
+ else:
+ i = p.lr_index
+ a = p.prod[i+1] # Get symbol right after the "."
+ if a in self.grammar.Terminals:
+ g = self.lr0_goto(I,a)
+ j = self.lr0_cidhash.get(id(g),-1)
+ if j >= 0:
+ # We are in a shift state
+ actlist.append((a,p,"shift and go to state %d" % j))
+ r = st_action.get(a,None)
+ if r is not None:
+                            # Whoa. Have a shift/reduce or shift/shift conflict
+ if r > 0:
+ if r != j:
+ raise LALRError("Shift/shift conflict in state %d" % st)
+ elif r < 0:
+ # Do a precedence check.
+ # - if precedence of reduce rule is higher, we reduce.
+ # - if precedence of reduce is same and left assoc, we reduce.
+ # - otherwise we shift
+ rprec,rlevel = Productions[st_actionp[a].number].prec
+ sprec,slevel = Precedence.get(a,('right',0))
+ if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
+ # We decide to shift here... highest precedence to shift
+ Productions[st_actionp[a].number].reduced -= 1
+ st_action[a] = j
+ st_actionp[a] = p
+ if not rlevel:
+ log.info(" ! shift/reduce conflict for %s resolved as shift",a)
+ self.sr_conflicts.append((st,a,'shift'))
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ st_action[a] = None
+ else:
+ # Hmmm. Guess we'll keep the reduce
+ if not slevel and not rlevel:
+ log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
+ self.sr_conflicts.append((st,a,'reduce'))
+ else:
+ raise LALRError("Unknown conflict in state %d" % st)
else:
- sys.stderr.write("Unknown conflict in state %d\n" % st)
- else:
- st_action[a] = j
- st_actionp[a] = p
-
- except StandardError,e:
- print sys.exc_info()
- raise YaccError, "Hosed in lr_parse_table"
-
- # Print the actions associated with each terminal
- if yaccdebug:
- _actprint = { }
- for a,p,m in actlist:
- if st_action.has_key(a):
- if p is st_actionp[a]:
- _vf.write(" %-15s %s\n" % (a,m))
- _actprint[(a,m)] = 1
- _vf.write("\n")
- for a,p,m in actlist:
- if st_action.has_key(a):
- if p is not st_actionp[a]:
- if not _actprint.has_key((a,m)):
- _vf.write(" ! %-15s [ %s ]\n" % (a,m))
+ st_action[a] = j
+ st_actionp[a] = p
+
+ # Print the actions associated with each terminal
+ _actprint = { }
+ for a,p,m in actlist:
+ if a in st_action:
+ if p is st_actionp[a]:
+ log.info(" %-15s %s",a,m)
_actprint[(a,m)] = 1
+ log.info("")
+ # Print the actions that were not used. (debugging)
+ not_used = 0
+ for a,p,m in actlist:
+ if a in st_action:
+ if p is not st_actionp[a]:
+ if not (a,m) in _actprint:
+ log.debug(" ! %-15s [ %s ]",a,m)
+ not_used = 1
+ _actprint[(a,m)] = 1
+ if not_used:
+ log.debug("")
+
+ # Construct the goto table for this state
+
+ nkeys = { }
+ for ii in I:
+ for s in ii.usyms:
+ if s in self.grammar.Nonterminals:
+ nkeys[s] = None
+ for n in nkeys:
+ g = self.lr0_goto(I,n)
+ j = self.lr0_cidhash.get(id(g),-1)
+ if j >= 0:
+ st_goto[n] = j
+ log.info(" %-30s shift and go to state %d",n,j)
+
+ action[st] = st_action
+ actionp[st] = st_actionp
+ goto[st] = st_goto
+ st += 1
+
+
+ # -----------------------------------------------------------------------------
+    # write_table()
+ #
+ # This function writes the LR parsing tables to a file
+ # -----------------------------------------------------------------------------
+
+ def write_table(self,modulename,outputdir='',signature=""):
+ basemodulename = modulename.split(".")[-1]
+ filename = os.path.join(outputdir,basemodulename) + ".py"
+ try:
+ f = open(filename,"w")
- # Construct the goto table for this state
- if yaccdebug:
- _vf.write("\n")
- nkeys = { }
- for ii in I:
- for s in ii.usyms:
- if Nonterminals.has_key(s):
- nkeys[s] = None
- for n in nkeys.keys():
- g = lr0_goto(I,n)
- j = _lr0_cidhash.get(id(g),-1)
- if j >= 0:
- st_goto[n] = j
- if yaccdebug:
- _vf.write(" %-30s shift and go to state %d\n" % (n,j))
-
- action[st] = st_action
- actionp[st] = st_actionp
- goto[st] = st_goto
-
- st += 1
-
- if yaccdebug:
- if n_srconflict == 1:
- sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
- if n_srconflict > 1:
- sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
- if n_rrconflict == 1:
- sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
- if n_rrconflict > 1:
- sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
-
-# -----------------------------------------------------------------------------
-# ==== LR Utility functions ====
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# _lr_write_tables()
-#
-# This function writes the LR parsing tables to a file
-# -----------------------------------------------------------------------------
-
-def lr_write_tables(modulename=tab_module,outputdir=''):
- filename = os.path.join(outputdir,modulename) + ".py"
- try:
- f = open(filename,"w")
-
- f.write("""
+ f.write("""
# %s
# This file is automatically generated. Do not edit.
+_tabversion = %r
-_lr_method = %s
-
-_lr_signature = %s
-""" % (filename, repr(_lr_method), repr(Signature.digest())))
-
- # Change smaller to 0 to go back to original tables
- smaller = 1
+_lr_method = %r
- # Factor out names to try and make smaller
- if smaller:
- items = { }
+_lr_signature = %r
+ """ % (filename, __tabversion__, self.lr_method, signature))
- for s,nd in _lr_action.items():
- for name,v in nd.items():
- i = items.get(name)
- if not i:
- i = ([],[])
- items[name] = i
- i[0].append(s)
- i[1].append(v)
+ # Change smaller to 0 to go back to original tables
+ smaller = 1
- f.write("\n_lr_action_items = {")
- for k,v in items.items():
- f.write("%r:([" % k)
- for i in v[0]:
- f.write("%r," % i)
- f.write("],[")
- for i in v[1]:
- f.write("%r," % i)
-
- f.write("]),")
- f.write("}\n")
-
- f.write("""
+ # Factor out names to try and make smaller
+ if smaller:
+ items = { }
+
+ for s,nd in self.lr_action.items():
+ for name,v in nd.items():
+ i = items.get(name)
+ if not i:
+ i = ([],[])
+ items[name] = i
+ i[0].append(s)
+ i[1].append(v)
+
+ f.write("\n_lr_action_items = {")
+ for k,v in items.items():
+ f.write("%r:([" % k)
+ for i in v[0]:
+ f.write("%r," % i)
+ f.write("],[")
+ for i in v[1]:
+ f.write("%r," % i)
+
+ f.write("]),")
+ f.write("}\n")
+
+ f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
- if not _lr_action.has_key(_x): _lr_action[_x] = { }
+ if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
""")
- else:
- f.write("\n_lr_action = { ");
- for k,v in _lr_action.items():
- f.write("(%r,%r):%r," % (k[0],k[1],v))
- f.write("}\n");
-
- if smaller:
- # Factor out names to try and make smaller
- items = { }
-
- for s,nd in _lr_goto.items():
- for name,v in nd.items():
- i = items.get(name)
- if not i:
- i = ([],[])
- items[name] = i
- i[0].append(s)
- i[1].append(v)
-
- f.write("\n_lr_goto_items = {")
- for k,v in items.items():
- f.write("%r:([" % k)
- for i in v[0]:
- f.write("%r," % i)
- f.write("],[")
- for i in v[1]:
- f.write("%r," % i)
-
- f.write("]),")
- f.write("}\n")
-
- f.write("""
+ else:
+ f.write("\n_lr_action = { ");
+ for k,v in self.lr_action.items():
+ f.write("(%r,%r):%r," % (k[0],k[1],v))
+ f.write("}\n");
+
+ if smaller:
+ # Factor out names to try and make smaller
+ items = { }
+
+ for s,nd in self.lr_goto.items():
+ for name,v in nd.items():
+ i = items.get(name)
+ if not i:
+ i = ([],[])
+ items[name] = i
+ i[0].append(s)
+ i[1].append(v)
+
+ f.write("\n_lr_goto_items = {")
+ for k,v in items.items():
+ f.write("%r:([" % k)
+ for i in v[0]:
+ f.write("%r," % i)
+ f.write("],[")
+ for i in v[1]:
+ f.write("%r," % i)
+
+ f.write("]),")
+ f.write("}\n")
+
+ f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
- if not _lr_goto.has_key(_x): _lr_goto[_x] = { }
+ if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
""")
- else:
- f.write("\n_lr_goto = { ");
- for k,v in _lr_goto.items():
- f.write("(%r,%r):%r," % (k[0],k[1],v))
- f.write("}\n");
-
- # Write production table
- f.write("_lr_productions = [\n")
- for p in Productions:
- if p:
- if (p.func):
- f.write(" (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
- else:
- f.write(" (%r,%d,None,None,None),\n" % (p.name, p.len))
else:
- f.write(" None,\n")
- f.write("]\n")
+ f.write("\n_lr_goto = { ");
+ for k,v in self.lr_goto.items():
+ f.write("(%r,%r):%r," % (k[0],k[1],v))
+ f.write("}\n");
+
+ # Write production table
+ f.write("_lr_productions = [\n")
+ for p in self.lr_productions:
+ if p.func:
+ f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
+ else:
+ f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
+ f.write("]\n")
+ f.close()
- f.close()
+ except IOError:
+ e = sys.exc_info()[1]
+ sys.stderr.write("Unable to create '%s'\n" % filename)
+ sys.stderr.write(str(e)+"\n")
+ return
- except IOError,e:
- print >>sys.stderr, "Unable to create '%s'" % filename
- print >>sys.stderr, e
- return
-def lr_read_tables(module=tab_module,optimize=0):
- global _lr_action, _lr_goto, _lr_productions, _lr_method
- try:
- exec "import %s as parsetab" % module
-
- if (optimize) or (Signature.digest() == parsetab._lr_signature):
- _lr_action = parsetab._lr_action
- _lr_goto = parsetab._lr_goto
- _lr_productions = parsetab._lr_productions
- _lr_method = parsetab._lr_method
- return 1
- else:
- return 0
-
- except (ImportError,AttributeError):
- return 0
+ # -----------------------------------------------------------------------------
+ # pickle_table()
+ #
+    # This function pickles the LR parsing tables to the file named by 'filename'
+ # -----------------------------------------------------------------------------
+ def pickle_table(self,filename,signature=""):
+ try:
+ import cPickle as pickle
+ except ImportError:
+ import pickle
+ outf = open(filename,"wb")
+ pickle.dump(__tabversion__,outf,pickle_protocol)
+ pickle.dump(self.lr_method,outf,pickle_protocol)
+ pickle.dump(signature,outf,pickle_protocol)
+ pickle.dump(self.lr_action,outf,pickle_protocol)
+ pickle.dump(self.lr_goto,outf,pickle_protocol)
+
+ outp = []
+ for p in self.lr_productions:
+ if p.func:
+ outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
+ else:
+ outp.append((str(p),p.name,p.len,None,None,None))
+ pickle.dump(outp,outf,pickle_protocol)
+ outf.close()
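+
+    # Hedged usage sketch (illustrative, not part of the library): a table
+    # pickled here is reloaded on a later run via LRTable.read_pickle(),
+    # which yacc() calls automatically when given a picklefile argument:
+    #
+    #     parser = yacc.yacc(picklefile="parser.p")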
# -----------------------------------------------------------------------------
-# yacc(module)
+# === INTROSPECTION ===
#
-# Build the parser module
+# The following functions and classes are used to implement the PLY
+# introspection features, followed by the yacc() function itself.
# -----------------------------------------------------------------------------
-def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
- global yaccdebug
- yaccdebug = debug
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
- initialize_vars()
- files = { }
- error = 0
+def get_caller_module_dict(levels):
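+    # A RuntimeError is raised and caught immediately purely to obtain a
+    # traceback frame (avoiding a dependency on the CPython-specific
+    # sys._getframe()); walking f_back 'levels' times reaches the frame
+    # of the desired caller.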
+ try:
+ raise RuntimeError
+ except RuntimeError:
+ e,b,t = sys.exc_info()
+ f = t.tb_frame
+ while levels > 0:
+ f = f.f_back
+ levels -= 1
+ ldict = f.f_globals.copy()
+ if f.f_globals != f.f_locals:
+ ldict.update(f.f_locals)
+
+ return ldict
+# -----------------------------------------------------------------------------
+# parse_grammar()
+#
+# This takes a raw grammar rule string and parses it into production data
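+#
+# Hedged illustration (the rule names and file are invented): a docstring
+# such as
+#
+#     expression : expression PLUS term
+#                | term
+#
+# handed in with file='calc.py' and line=10 yields
+#
+#     [('calc.py', 11, 'expression', ['expression', 'PLUS', 'term']),
+#      ('calc.py', 12, 'expression', ['term'])]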
+# -----------------------------------------------------------------------------
+def parse_grammar(doc,file,line):
+ grammar = []
+ # Split the doc string into lines
+ pstrings = doc.splitlines()
+ lastp = None
+ dline = line
+ for ps in pstrings:
+ dline += 1
+ p = ps.split()
+ if not p: continue
+ try:
+ if p[0] == '|':
+ # This is a continuation of a previous rule
+ if not lastp:
+ raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
+ prodname = lastp
+ syms = p[1:]
+ else:
+ prodname = p[0]
+ lastp = prodname
+ syms = p[2:]
+ assign = p[1]
+ if assign != ':' and assign != '::=':
+ raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))
- # Add parsing method to signature
- Signature.update(method)
+ grammar.append((file,dline,prodname,syms))
+ except SyntaxError:
+ raise
+ except Exception:
+ raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))
- # If a "module" parameter was supplied, extract its dictionary.
- # Note: a module may in fact be an instance as well.
+ return grammar
- if module:
- # User supplied a module object.
- if isinstance(module, types.ModuleType):
- ldict = module.__dict__
- elif isinstance(module, _INSTANCETYPE):
- _items = [(k,getattr(module,k)) for k in dir(module)]
- ldict = { }
- for i in _items:
- ldict[i[0]] = i[1]
+# -----------------------------------------------------------------------------
+# ParserReflect()
+#
+# This class represents information extracted for building a parser including
+# start symbol, error function, tokens, precedence list, action functions,
+# etc.
+# -----------------------------------------------------------------------------
+class ParserReflect(object):
+ def __init__(self,pdict,log=None):
+ self.pdict = pdict
+ self.start = None
+ self.error_func = None
+ self.tokens = None
+ self.files = {}
+ self.grammar = []
+ self.error = 0
+
+ if log is None:
+ self.log = PlyLogger(sys.stderr)
else:
- raise ValueError,"Expected a module"
-
- else:
- # No module given. We might be able to get information from the caller.
- # Throw an exception and unwind the traceback to get the globals
-
+ self.log = log
+
+ # Get all of the basic information
+ def get_all(self):
+ self.get_start()
+ self.get_error_func()
+ self.get_tokens()
+ self.get_precedence()
+ self.get_pfunctions()
+
+ # Validate all of the information
+ def validate_all(self):
+ self.validate_start()
+ self.validate_error_func()
+ self.validate_tokens()
+ self.validate_precedence()
+ self.validate_pfunctions()
+ self.validate_files()
+ return self.error
+
+ # Compute a signature over the grammar
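+    # (The digest covers the start symbol, the precedence table, the token
+    # list, and the rule docstrings; yacc() compares it against the signature
+    # stored in a previously written table file and regenerates the tables
+    # when they differ.)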
+ def signature(self):
try:
- raise RuntimeError
- except RuntimeError:
- e,b,t = sys.exc_info()
- f = t.tb_frame
- f = f.f_back # Walk out to our calling function
- ldict = f.f_globals # Grab its globals dictionary
-
- # Add starting symbol to signature
- if not start:
- start = ldict.get("start",None)
- if start:
- Signature.update(start)
-
- # If running in optimized mode. We're going to
-
- if (optimize and lr_read_tables(tabmodule,1)):
- # Read parse table
- del Productions[:]
- for p in _lr_productions:
- if not p:
- Productions.append(None)
- else:
- m = MiniProduction()
- m.name = p[0]
- m.len = p[1]
- m.file = p[3]
- m.line = p[4]
- if p[2]:
- m.func = ldict[p[2]]
- Productions.append(m)
-
- else:
- # Get the tokens map
- if (module and isinstance(module,_INSTANCETYPE)):
- tokens = getattr(module,"tokens",None)
- else:
- tokens = ldict.get("tokens",None)
-
- if not tokens:
- raise YaccError,"module does not define a list 'tokens'"
- if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
- raise YaccError,"tokens must be a list or tuple."
+ from hashlib import md5
+ except ImportError:
+ from md5 import md5
+ try:
+ sig = md5()
+ if self.start:
+ sig.update(self.start.encode('latin-1'))
+ if self.prec:
+ sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
+ if self.tokens:
+ sig.update(" ".join(self.tokens).encode('latin-1'))
+ for f in self.pfuncs:
+ if f[3]:
+ sig.update(f[3].encode('latin-1'))
+ except (TypeError,ValueError):
+ pass
+ return sig.digest()
+
+ # -----------------------------------------------------------------------------
+    # validate_files()
+ #
+ # This method checks to see if there are duplicated p_rulename() functions
+ # in the parser module file. Without this function, it is really easy for
+ # users to make mistakes by cutting and pasting code fragments (and it's a real
+ # bugger to try and figure out why the resulting parser doesn't work). Therefore,
+ # we just do a little regular expression pattern matching of def statements
+ # to try and detect duplicates.
+ # -----------------------------------------------------------------------------
+
+ def validate_files(self):
+ # Match def p_funcname(
+ fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
+
+ for filename in self.files.keys():
+ base,ext = os.path.splitext(filename)
+            if ext != '.py': continue # No idea. Assume it's okay.
- # Check to see if a requires dictionary is defined.
- requires = ldict.get("require",None)
- if requires:
- if not (isinstance(requires,types.DictType)):
- raise YaccError,"require must be a dictionary."
+ try:
+ f = open(filename)
+ lines = f.readlines()
+ f.close()
+ except IOError:
+ continue
- for r,v in requires.items():
- try:
- if not (isinstance(v,types.ListType)):
- raise TypeError
- v1 = [x.split(".") for x in v]
- Requires[r] = v1
- except StandardError:
- print >>sys.stderr, "Invalid specification for rule '%s' in require. Expected a list of strings" % r
-
-
- # Build the dictionary of terminals. We a record a 0 in the
- # dictionary to track whether or not a terminal is actually
- # used in the grammar
-
- if 'error' in tokens:
- print >>sys.stderr, "yacc: Illegal token 'error'. Is a reserved word."
- raise YaccError,"Illegal token name"
-
- for n in tokens:
- if Terminals.has_key(n):
- print >>sys.stderr, "yacc: Warning. Token '%s' multiply defined." % n
- Terminals[n] = [ ]
-
- Terminals['error'] = [ ]
-
- # Get the precedence map (if any)
- prec = ldict.get("precedence",None)
- if prec:
- if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
- raise YaccError,"precedence must be a list or tuple."
- add_precedence(prec)
- Signature.update(repr(prec))
-
- for n in tokens:
- if not Precedence.has_key(n):
- Precedence[n] = ('right',0) # Default, right associative, 0 precedence
-
- # Look for error handler
- ef = ldict.get('p_error',None)
- if ef:
- if isinstance(ef,types.FunctionType):
+ counthash = { }
+ for linen,l in enumerate(lines):
+ linen += 1
+ m = fre.match(l)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
+
+ # Get the start symbol
+ def get_start(self):
+ self.start = self.pdict.get('start')
+
+ # Validate the start symbol
+ def validate_start(self):
+ if self.start is not None:
+ if not isinstance(self.start,str):
+ self.log.error("'start' must be a string")
+
+ # Look for error handler
+ def get_error_func(self):
+ self.error_func = self.pdict.get('p_error')
+
+ # Validate the error function
+ def validate_error_func(self):
+ if self.error_func:
+ if isinstance(self.error_func,types.FunctionType):
ismethod = 0
- elif isinstance(ef, types.MethodType):
+ elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
- raise YaccError,"'p_error' defined, but is not a function or method."
- eline = ef.func_code.co_firstlineno
- efile = ef.func_code.co_filename
- files[efile] = None
-
- if (ef.func_code.co_argcount != 1+ismethod):
- raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
- global Errorfunc
- Errorfunc = ef
- else:
- print >>sys.stderr, "yacc: Warning. no p_error() function is defined."
+ self.log.error("'p_error' defined, but is not a function or method")
+ self.error = 1
+ return
- # Get the list of built-in functions with p_ prefix
- symbols = [ldict[f] for f in ldict.keys()
- if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
- and ldict[f].__name__ != 'p_error')]
+ eline = func_code(self.error_func).co_firstlineno
+ efile = func_code(self.error_func).co_filename
+ self.files[efile] = 1
- # Check for non-empty symbols
- if len(symbols) == 0:
- raise YaccError,"no rules of the form p_rulename are defined."
+ if (func_code(self.error_func).co_argcount != 1+ismethod):
+ self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
+ self.error = 1
- # Sort the symbols by line number
- symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
+ # Get the tokens map
+ def get_tokens(self):
+ tokens = self.pdict.get("tokens",None)
+ if not tokens:
+ self.log.error("No token list is defined")
+ self.error = 1
+ return
+
+ if not isinstance(tokens,(list, tuple)):
+ self.log.error("tokens must be a list or tuple")
+ self.error = 1
+ return
+
+ if not tokens:
+ self.log.error("tokens is empty")
+ self.error = 1
+ return
+
+ self.tokens = tokens
+
+ # Validate the tokens
+ def validate_tokens(self):
+ # Validate the tokens.
+ if 'error' in self.tokens:
+ self.log.error("Illegal token name 'error'. Is a reserved word")
+ self.error = 1
+ return
+
+ terminals = {}
+ for n in self.tokens:
+ if n in terminals:
+ self.log.warning("Token '%s' multiply defined", n)
+ terminals[n] = 1
+
+ # Get the precedence map (if any)
+ def get_precedence(self):
+ self.prec = self.pdict.get("precedence",None)
+
+ # Validate and parse the precedence map
+ def validate_precedence(self):
+ preclist = []
+ if self.prec:
+ if not isinstance(self.prec,(list,tuple)):
+ self.log.error("precedence must be a list or tuple")
+ self.error = 1
+ return
+ for level,p in enumerate(self.prec):
+ if not isinstance(p,(list,tuple)):
+ self.log.error("Bad precedence table")
+ self.error = 1
+ return
- # Add all of the symbols to the grammar
- for f in symbols:
- if (add_function(f)) < 0:
- error += 1
+ if len(p) < 2:
+ self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
+ self.error = 1
+ return
+ assoc = p[0]
+ if not isinstance(assoc,str):
+ self.log.error("precedence associativity must be a string")
+ self.error = 1
+ return
+ for term in p[1:]:
+ if not isinstance(term,str):
+ self.log.error("precedence items must be strings")
+ self.error = 1
+ return
+ preclist.append((term,assoc,level+1))
+ self.preclist = preclist
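+        # Illustrative example: precedence = (('left','PLUS','MINUS'),
+        # ('left','TIMES','DIVIDE')) produces preclist = [('PLUS','left',1),
+        # ('MINUS','left',1), ('TIMES','left',2), ('DIVIDE','left',2)]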
+
+ # Get all p_functions from the grammar
+ def get_pfunctions(self):
+ p_functions = []
+ for name, item in self.pdict.items():
+ if name[:2] != 'p_': continue
+ if name == 'p_error': continue
+ if isinstance(item,(types.FunctionType,types.MethodType)):
+ line = func_code(item).co_firstlineno
+ file = func_code(item).co_filename
+ p_functions.append((line,file,name,item.__doc__))
+
+ # Sort all of the actions by line number
+ p_functions.sort()
+ self.pfuncs = p_functions
+
+
+ # Validate all of the p_functions
+ def validate_pfunctions(self):
+ grammar = []
+ # Check for non-empty symbols
+ if len(self.pfuncs) == 0:
+ self.log.error("no rules of the form p_rulename are defined")
+ self.error = 1
+ return
+
+ for line, file, name, doc in self.pfuncs:
+ func = self.pdict[name]
+ if isinstance(func, types.MethodType):
+ reqargs = 2
else:
- files[f.func_code.co_filename] = None
-
- # Make a signature of the docstrings
- for f in symbols:
- if f.__doc__:
- Signature.update(f.__doc__)
-
- lr_init_vars()
-
- if error:
- raise YaccError,"Unable to construct parser."
-
- if not lr_read_tables(tabmodule):
-
- # Validate files
- for filename in files.keys():
- if not validate_file(filename):
- error = 1
-
- # Validate dictionary
- validate_dict(ldict)
-
- if start and not Prodnames.has_key(start):
- raise YaccError,"Bad starting symbol '%s'" % start
+ reqargs = 1
+ if func_code(func).co_argcount > reqargs:
+ self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
+ self.error = 1
+ elif func_code(func).co_argcount < reqargs:
+ self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
+ self.error = 1
+ elif not func.__doc__:
+ self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
+ else:
+ try:
+ parsed_g = parse_grammar(doc,file,line)
+ for g in parsed_g:
+ grammar.append((name, g))
+ except SyntaxError:
+ e = sys.exc_info()[1]
+ self.log.error(str(e))
+ self.error = 1
+
+ # Looks like a valid grammar rule
+            # Mark the file in which it was defined.
+ self.files[file] = 1
+
+ # Secondary validation step that looks for p_ definitions that are not functions
+ # or functions that look like they might be grammar rules.
+
+ for n,v in self.pdict.items():
+ if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
+ if n[0:2] == 't_': continue
+ if n[0:2] == 'p_' and n != 'p_error':
+ self.log.warning("'%s' not defined as a function", n)
+ if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
+ (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
+ try:
+ doc = v.__doc__.split(" ")
+ if doc[1] == ':':
+ self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
+ func_code(v).co_filename, func_code(v).co_firstlineno,n)
+ except Exception:
+ pass
- augment_grammar(start)
- error = verify_productions(cycle_check=check_recursion)
- otherfunc = [ldict[f] for f in ldict.keys()
- if (type(f) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]
+ self.grammar = grammar
- if error:
- raise YaccError,"Unable to construct parser."
+# -----------------------------------------------------------------------------
+# yacc(module)
+#
+# Build a parser
+# -----------------------------------------------------------------------------
- build_lritems()
- compute_first1()
- compute_follow(start)
+def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
+ check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
+ debuglog=None, errorlog = None, picklefile=None):
- if method in ['SLR','LALR']:
- lr_parse_table(method)
- else:
- raise YaccError, "Unknown parsing method '%s'" % method
+ global parse # Reference to the parsing method of the last built parser
- if write_tables:
- lr_write_tables(tabmodule,outputdir)
+ # If pickling is enabled, table files are not created
- if yaccdebug:
- try:
- f = open(os.path.join(outputdir,debugfile),"w")
- f.write(_vfc.getvalue())
- f.write("\n\n")
- f.write(_vf.getvalue())
- f.close()
- except IOError,e:
- print >>sys.stderr, "yacc: can't create '%s'" % debugfile,e
+ if picklefile:
+ write_tables = 0
- # Made it here. Create a parser object and set up its internal state.
- # Set global parse() method to bound method of parser object.
+ if errorlog is None:
+ errorlog = PlyLogger(sys.stderr)
- p = Parser("xyzzy")
- p.productions = Productions
- p.errorfunc = Errorfunc
- p.action = _lr_action
- p.goto = _lr_goto
- p.method = _lr_method
- p.require = Requires
+ # Get the module dictionary used for the parser
+ if module:
+ _items = [(k,getattr(module,k)) for k in dir(module)]
+ pdict = dict(_items)
+ else:
+ pdict = get_caller_module_dict(2)
- global parse
- parse = p.parse
+ # Collect parser information from the dictionary
+ pinfo = ParserReflect(pdict,log=errorlog)
+ pinfo.get_all()
- global parser
- parser = p
+ if pinfo.error:
+ raise YaccError("Unable to build parser")
- # Clean up all of the globals we created
- if (not optimize):
- yacc_cleanup()
- return p
+ # Check signature against table files (if any)
+ signature = pinfo.signature()
-# yacc_cleanup function. Delete all of the global variables
-# used during table construction
+ # Read the tables
+ try:
+ lr = LRTable()
+ if picklefile:
+ read_signature = lr.read_pickle(picklefile)
+ else:
+ read_signature = lr.read_table(tabmodule)
+ if optimize or (read_signature == signature):
+ try:
+ lr.bind_callables(pinfo.pdict)
+ parser = LRParser(lr,pinfo.error_func)
+ parse = parser.parse
+ return parser
+ except Exception:
+ e = sys.exc_info()[1]
+ errorlog.warning("There was a problem loading the table file: %s", repr(e))
+ except VersionError:
+        e = sys.exc_info()[1]
+ errorlog.warning(str(e))
+ except Exception:
+ pass
+
+ if debuglog is None:
+ if debug:
+ debuglog = PlyLogger(open(debugfile,"w"))
+ else:
+ debuglog = NullLogger()
-def yacc_cleanup():
- global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
- del _lr_action, _lr_goto, _lr_method, _lr_goto_cache
+ debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
- global Productions, Prodnames, Prodmap, Terminals
- global Nonterminals, First, Follow, Precedence, LRitems
- global Errorfunc, Signature, Requires
- del Productions, Prodnames, Prodmap, Terminals
- del Nonterminals, First, Follow, Precedence, LRitems
- del Errorfunc, Signature, Requires
+ errors = 0
- global _vf, _vfc
- del _vf, _vfc
+ # Validate the parser information
+ if pinfo.validate_all():
+ raise YaccError("Unable to build parser")
+
+ if not pinfo.error_func:
+ errorlog.warning("no p_error() function is defined")
+ # Create a grammar object
+ grammar = Grammar(pinfo.tokens)
-# Stub that raises an error if parsing is attempted without first calling yacc()
-def parse(*args,**kwargs):
- raise YaccError, "yacc: No parser built with yacc()"
+ # Set precedence level for terminals
+ for term, assoc, level in pinfo.preclist:
+ try:
+ grammar.set_precedence(term,assoc,level)
+ except GrammarError:
+ e = sys.exc_info()[1]
+ errorlog.warning("%s",str(e))
+
+ # Add productions to the grammar
+ for funcname, gram in pinfo.grammar:
+ file, line, prodname, syms = gram
+ try:
+ grammar.add_production(prodname,syms,funcname,file,line)
+ except GrammarError:
+ e = sys.exc_info()[1]
+ errorlog.error("%s",str(e))
+ errors = 1
+ # Set the grammar start symbols
+ try:
+ if start is None:
+ grammar.set_start(pinfo.start)
+ else:
+ grammar.set_start(start)
+ except GrammarError:
+ e = sys.exc_info()[1]
+ errorlog.error(str(e))
+ errors = 1
+
+ if errors:
+ raise YaccError("Unable to build parser")
+
+ # Verify the grammar structure
+ undefined_symbols = grammar.undefined_symbols()
+ for sym, prod in undefined_symbols:
+ errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
+ errors = 1
+
+ unused_terminals = grammar.unused_terminals()
+ if unused_terminals:
+ debuglog.info("")
+ debuglog.info("Unused terminals:")
+ debuglog.info("")
+ for term in unused_terminals:
+ errorlog.warning("Token '%s' defined, but not used", term)
+ debuglog.info(" %s", term)
+
+ # Print out all productions to the debug log
+ if debug:
+ debuglog.info("")
+ debuglog.info("Grammar")
+ debuglog.info("")
+ for n,p in enumerate(grammar.Productions):
+ debuglog.info("Rule %-5d %s", n, p)
+
+ # Find unused non-terminals
+ unused_rules = grammar.unused_rules()
+ for prod in unused_rules:
+ errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
+
+ if len(unused_terminals) == 1:
+ errorlog.warning("There is 1 unused token")
+ if len(unused_terminals) > 1:
+ errorlog.warning("There are %d unused tokens", len(unused_terminals))
+
+ if len(unused_rules) == 1:
+ errorlog.warning("There is 1 unused rule")
+ if len(unused_rules) > 1:
+ errorlog.warning("There are %d unused rules", len(unused_rules))
+
+ if debug:
+ debuglog.info("")
+ debuglog.info("Terminals, with rules where they appear")
+ debuglog.info("")
+ terms = list(grammar.Terminals)
+ terms.sort()
+ for term in terms:
+ debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
+
+ debuglog.info("")
+ debuglog.info("Nonterminals, with rules where they appear")
+ debuglog.info("")
+ nonterms = list(grammar.Nonterminals)
+ nonterms.sort()
+ for nonterm in nonterms:
+ debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
+ debuglog.info("")
+
+ if check_recursion:
+ unreachable = grammar.find_unreachable()
+ for u in unreachable:
+ errorlog.warning("Symbol '%s' is unreachable",u)
+
+ infinite = grammar.infinite_cycles()
+ for inf in infinite:
+ errorlog.error("Infinite recursion detected for symbol '%s'", inf)
+ errors = 1
+
+ unused_prec = grammar.unused_precedence()
+ for term, assoc in unused_prec:
+ errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
+ errors = 1
+
+ if errors:
+ raise YaccError("Unable to build parser")
+
+ # Run the LRGeneratedTable on the grammar
+ if debug:
+ errorlog.debug("Generating %s tables", method)
+
+ lr = LRGeneratedTable(grammar,method,debuglog)
+
+ if debug:
+ num_sr = len(lr.sr_conflicts)
+
+ # Report shift/reduce and reduce/reduce conflicts
+ if num_sr == 1:
+ errorlog.warning("1 shift/reduce conflict")
+ elif num_sr > 1:
+ errorlog.warning("%d shift/reduce conflicts", num_sr)
+
+ num_rr = len(lr.rr_conflicts)
+ if num_rr == 1:
+ errorlog.warning("1 reduce/reduce conflict")
+ elif num_rr > 1:
+ errorlog.warning("%d reduce/reduce conflicts", num_rr)
+
+ # Write out conflicts to the output file
+ if debug and (lr.sr_conflicts or lr.rr_conflicts):
+ debuglog.warning("")
+ debuglog.warning("Conflicts:")
+ debuglog.warning("")
+
+ for state, tok, resolution in lr.sr_conflicts:
+ debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
+
+ already_reported = {}
+ for state, rule, rejected in lr.rr_conflicts:
+ if (state,id(rule),id(rejected)) in already_reported:
+ continue
+ debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
+ debuglog.warning("rejected rule (%s) in state %d", rejected,state)
+ errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
+ errorlog.warning("rejected rule (%s) in state %d", rejected, state)
+ already_reported[state,id(rule),id(rejected)] = 1
+
+ warned_never = []
+ for state, rule, rejected in lr.rr_conflicts:
+ if not rejected.reduced and (rejected not in warned_never):
+ debuglog.warning("Rule (%s) is never reduced", rejected)
+ errorlog.warning("Rule (%s) is never reduced", rejected)
+ warned_never.append(rejected)
+
+ # Write the table file if requested
+ if write_tables:
+ lr.write_table(tabmodule,outputdir,signature)
+
+ # Write a pickled version of the tables
+ if picklefile:
+ lr.pickle_table(picklefile,signature)
+
+ # Build the parser
+ lr.bind_callables(pinfo.pdict)
+ parser = LRParser(lr,pinfo.error_func)
+
+ parse = parser.parse
+ return parser
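+
+# -----------------------------------------------------------------------------
+# Usage sketch (illustrative only; the token names and grammar below are
+# invented for this example and are not part of PLY itself):
+#
+#     import ply.yacc as yacc
+#
+#     tokens = ('NUMBER', 'PLUS')
+#
+#     def p_expr_plus(p):
+#         'expr : expr PLUS NUMBER'
+#         p[0] = p[1] + p[3]
+#
+#     def p_expr_number(p):
+#         'expr : NUMBER'
+#         p[0] = p[1]
+#
+#     def p_error(p):
+#         print("Syntax error at", p)
+#
+#     parser = yacc.yacc()   # builds parsetab.py (and parser.out when debugging)
+#     result = parser.parse(data, lexer=mylexer)   # mylexer: a ply.lex lexer
+# -----------------------------------------------------------------------------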