summaryrefslogtreecommitdiff
path: root/BaseTools/Source/Python/UPT/Library
diff options
context:
space:
mode:
authorlgao4 <lgao4@6f19259b-4bc3-4df7-8a09-765794883524>2011-08-26 07:46:26 +0000
committerlgao4 <lgao4@6f19259b-4bc3-4df7-8a09-765794883524>2011-08-26 07:46:26 +0000
commit4234283c3acb8c35014acc1546621fbc2621b095 (patch)
tree208a4f87b2820ec1f3a414508ca1c215c5deed18 /BaseTools/Source/Python/UPT/Library
parentba944801a988dddf3ed217c72c8d880d0f03d150 (diff)
downloadedk2-platforms-4234283c3acb8c35014acc1546621fbc2621b095.tar.xz
Sync BaseTools Branch (version r2271) to EDKII main trunk.
BaseTool Branch: https://edk2-buildtools.svn.sourceforge.net/svnroot/edk2-buildtools/branches/Releases/BaseTools_r2100 Signed-off-by: lgao4 Reviewed-by: hchen30 git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@12214 6f19259b-4bc3-4df7-8a09-765794883524
Diffstat (limited to 'BaseTools/Source/Python/UPT/Library')
-rw-r--r--BaseTools/Source/Python/UPT/Library/CommentGenerating.py217
-rw-r--r--BaseTools/Source/Python/UPT/Library/CommentParsing.py451
-rw-r--r--BaseTools/Source/Python/UPT/Library/DataType.py919
-rw-r--r--BaseTools/Source/Python/UPT/Library/ExpressionValidate.py489
-rw-r--r--BaseTools/Source/Python/UPT/Library/GlobalData.py94
-rw-r--r--BaseTools/Source/Python/UPT/Library/Misc.py921
-rw-r--r--BaseTools/Source/Python/UPT/Library/ParserValidate.py717
-rw-r--r--BaseTools/Source/Python/UPT/Library/Parsing.py993
-rw-r--r--BaseTools/Source/Python/UPT/Library/String.py968
-rw-r--r--BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py228
-rw-r--r--BaseTools/Source/Python/UPT/Library/Xml/__init__.py20
-rw-r--r--BaseTools/Source/Python/UPT/Library/__init__.py20
12 files changed, 6037 insertions, 0 deletions
diff --git a/BaseTools/Source/Python/UPT/Library/CommentGenerating.py b/BaseTools/Source/Python/UPT/Library/CommentGenerating.py
new file mode 100644
index 0000000000..06da61b3e9
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/CommentGenerating.py
@@ -0,0 +1,217 @@
+## @file
+# This file is used to define comment generating interface
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+
+'''
+CommentGenerating
+'''
+
+##
+# Import Modules
+#
+from Library.String import GetSplitValueList
+from Library.DataType import TAB_SPACE_SPLIT
+from Library.DataType import TAB_INF_GUIDTYPE_VAR
+from Library.DataType import USAGE_ITEM_NOTIFY
+from Library.DataType import ITEM_UNDEFINED
+from Library.DataType import LANGUAGE_EN_US
+
## GenTailCommentLines
#
# Format tail comment text: every line is given a " ## " prefix, and lines
# after the first are indented by LeadingSpaceNum spaces so they line up
# under the statement they annotate.
#
# @param TailCommentLines: the tail comment lines that need to be generated
# @param LeadingSpaceNum:  the number of leading spaces needed for non-first
#                          line tail comments
# @return the assembled tail comment string (no trailing newline)
#
def GenTailCommentLines (TailCommentLines, LeadingSpaceNum = 0):
    EndOfLine = "\n"
    # separator inserted between lines: newline + indent + comment marker
    Separator = EndOfLine + LeadingSpaceNum * TAB_SPACE_SPLIT + " ## "
    Lines = GetSplitValueList(TailCommentLines.rstrip(EndOfLine), EndOfLine)
    return " ## " + Separator.join(Lines)
+
## GenGenericComment
#
# Turn free text into a generic comment block: the first line is prefixed
# with '## ', following lines with '# ', and a single newline terminates
# the result.
#
# @param CommentLines: Generic comment Text, maybe Multiple Lines
# @return the generated comment string, or '' when there is no text
#
def GenGenericComment (CommentLines):
    if not CommentLines:
        return ''
    EndOfLine = "\n"
    Lines = GetSplitValueList(CommentLines.rstrip(EndOfLine), EndOfLine)
    return '## ' + (EndOfLine + '# ').join(Lines) + EndOfLine
+
## GenGenericCommentF
#
# Similar to GenGenericComment but removes one trailing <EOL> from the input
# first, and a line that is only <EOL> produces '#\n' instead of '# \n'.
#
# @param CommentLines: Generic comment Text, maybe Multiple Lines
# @param NumOfPound:   number of '#' characters used as the comment prefix
# @return CommentStr:  Generated comment lines, each terminated by '\n'
#
def GenGenericCommentF (CommentLines, NumOfPound=1):
    if not CommentLines:
        return ''
    EndOfLine = "\n"
    #
    # drop a single trailing <EOL> so no extra blank comment line is
    # generated later on
    #
    if CommentLines.endswith(EndOfLine):
        CommentLines = CommentLines[:-1]
    Pound = '#' * NumOfPound
    CommentStr = ''
    for Line in GetSplitValueList(CommentLines, EndOfLine):
        # an empty source line becomes a bare pound prefix with no space
        CommentStr += (Pound if Line == '' else Pound + ' ' + Line) + '\n'
    return CommentStr
+
+
## GenHeaderCommentSection
#
# Generate a file header comment section of the form:
#   ## @file
#   # <abstract>
#   #
#   # <description...>
#   #
#   # <copyright...>
#   #
#   # <license...>
#   #
#   ##
#
# @param Abstract    One line of abstract
# @param Description multiple lines of Description
# @param Copyright   possible multiple copyright lines
# @param License     possible multiple license lines
# @return the complete header comment string
#
def GenHeaderCommentSection(Abstract, Description, Copyright, License):
    EndOfLine = '\n'

    def _CommentedBlock(Text):
        # prefix every line of Text with '# ' and close the block with a
        # bare '#' line
        Lines = GetSplitValueList(Text.rstrip(EndOfLine), '\n')
        return '# ' + (EndOfLine + '# ').join(Lines) + EndOfLine + '#' + EndOfLine

    Content = '## @file' + EndOfLine
    if Abstract:
        # abstract is expected to be a single line, so it is not re-joined
        Content += '# ' + Abstract.rstrip(EndOfLine) + EndOfLine + '#' + EndOfLine
    else:
        Content += '#' + EndOfLine

    if Description:
        Content += _CommentedBlock(Description)

    #
    # There is no '#\n' line to separate multiple copyright lines in code base
    #
    if Copyright:
        Content += _CommentedBlock(Copyright)

    if License:
        Content += _CommentedBlock(License)

    return Content + '##' + EndOfLine
+
+
## GenInfPcdTailComment
# Generate a PCD tail comment for an INF file; the result is a single
# comment line of the form "<Usage> <HelpText>".
#
# @param Usage:           Usage type
# @param TailCommentText: Comment text for tail comment
# @return the formatted tail comment, or '' when there is nothing to say
#
def GenInfPcdTailComment (Usage, TailCommentText):
    # suppress the comment entirely when both parts carry no information
    if Usage == ITEM_UNDEFINED and not TailCommentText:
        return ''
    return GenTailCommentLines(TAB_SPACE_SPLIT.join([Usage, TailCommentText]))
+
## GenInfProtocolPPITailComment
# Generate a Protocol/PPI tail comment for an INF file. When Notify is set,
# the NOTIFY usage item is emitted first, followed by a second ' ## '
# separated part holding the usage and help text.
#
# @param Usage:           Usage type
# @param Notify:          whether this is a notification Protocol/PPI
# @param TailCommentText: Comment text for tail comment
# @return the formatted tail comment, or '' when there is nothing to say
#
def GenInfProtocolPPITailComment (Usage, Notify, TailCommentText):
    if not Notify and Usage == ITEM_UNDEFINED and not TailCommentText:
        return ''
    Prefix = USAGE_ITEM_NOTIFY + " ## " if Notify else ''
    return GenTailCommentLines(Prefix + TAB_SPACE_SPLIT.join([Usage, TailCommentText]))
+
## GenInfGuidTailComment
# Generate a GUID tail comment for an INF file, of the form
# "<Usage> ## <GuidType>[:<VariableName>] <HelpText>". The variable name is
# appended only for variable-type GUIDs.
#
# @param Usage:           Usage type
# @param GuidTypeList:    list of GUID types; only the first entry is used
# @param VariableName:    variable name, used when the GUID type is Variable
# @param TailCommentText: Comment text for tail comment
# @return the formatted tail comment, or '' when there is nothing to say
#
def GenInfGuidTailComment (Usage, GuidTypeList, VariableName, TailCommentText):
    GuidType = GuidTypeList[0]
    if Usage == ITEM_UNDEFINED and GuidType == ITEM_UNDEFINED and \
       not TailCommentText:
        return ''

    FirstLine = Usage + " ## " + GuidType
    if GuidType == TAB_INF_GUIDTYPE_VAR:
        FirstLine += ":" + VariableName
    return GenTailCommentLines(TAB_SPACE_SPLIT.join([FirstLine, TailCommentText]))
+
## GenDecTailComment
#
# Generate a tail comment listing the supported module types of a DEC item.
# (The previous header comment said 'GenDecGuidTailComment'; the function is
# actually named GenDecTailComment.)
#
# @param SupModuleList: Supported module type list
# @return the formatted tail comment
#
def GenDecTailComment (SupModuleList):
    return GenTailCommentLines(TAB_SPACE_SPLIT.join(SupModuleList))
+
+
## _GetHelpStr
# Get the help string from a list of HelpTextObject, choosing by language
# priority (refer to the related HLD): an exact 'en-US' match wins, then any
# language tag starting with 'en', then an entry with no language tag at all.
# Within one priority tier the first matching object in list order is used.
#
# Assumes HelpObj.GetLang() returns a string ('' when unset) -- TODO confirm
# against TextObject.
#
# @param HelpTextObjList: List of HelpTextObject
#
# @return HelpStr: the help text string found, '' means no help text found
#
def _GetHelpStr(HelpTextObjList):
    BestRank = 4            # 1 = en-US, 2 = en*, 3 = no language tag
    HelpStr = ''
    for HelpObj in HelpTextObjList:
        if not HelpObj:
            continue
        Lang = HelpObj.GetLang()
        if Lang == LANGUAGE_EN_US:
            Rank = 1
        elif Lang.startswith('en'):
            Rank = 2
        elif not Lang:
            Rank = 3
        else:
            continue
        if Rank < BestRank:
            BestRank = Rank
            HelpStr = HelpObj.GetString()
            if Rank == 1:
                # nothing can beat an exact en-US match
                break
    return HelpStr
diff --git a/BaseTools/Source/Python/UPT/Library/CommentParsing.py b/BaseTools/Source/Python/UPT/Library/CommentParsing.py
new file mode 100644
index 0000000000..5c07f34a74
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/CommentParsing.py
@@ -0,0 +1,451 @@
+## @file
+# This file is used to define comment parsing interface
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+
+'''
+CommentParsing
+'''
+
+##
+# Import Modules
+#
+import re
+
+from Library.String import GetSplitValueList
+from Library.String import CleanString2
+from Library.DataType import HEADER_COMMENT_NOT_STARTED
+from Library.DataType import TAB_COMMENT_SPLIT
+from Library.DataType import HEADER_COMMENT_LICENSE
+from Library.DataType import HEADER_COMMENT_ABSTRACT
+from Library.DataType import HEADER_COMMENT_COPYRIGHT
+from Library.DataType import HEADER_COMMENT_DESCRIPTION
+from Library.DataType import TAB_SPACE_SPLIT
+from Library.DataType import TAB_COMMA_SPLIT
+from Library.DataType import SUP_MODULE_LIST
+from Object.POM.CommonObject import TextObject
+from Object.POM.CommonObject import PcdErrorObject
+import Logger.Log as Logger
+from Logger.ToolError import FORMAT_INVALID
+from Logger.ToolError import FORMAT_NOT_SUPPORTED
+from Logger import StringTable as ST
+
## ParseHeaderCommentSection
#
# Parse header comment section lines, extract Abstract, Description,
# Copyright, License lines.
#
# The parser is a small state machine driven by HeaderCommentStage:
# text seen before the '@file' marker is accumulated as license, the line
# after '@file' is the abstract, then description, then copyright, then
# license.
#
# @param CommentList: List of (Comment, LineNumber)
# @param FileName:    FileName of the comment, used for diagnostics
# @return (Abstract, Description, Copyright, License), each whitespace-stripped
#
def ParseHeaderCommentSection(CommentList, FileName = None):
    Abstract = ''
    Description = ''
    Copyright = ''
    License = ''
    EndOfLine = "\n"
    STR_HEADER_COMMENT_START = "@file"
    HeaderCommentStage = HEADER_COMMENT_NOT_STARTED

    #
    # first find the last copyright line
    #
    # NOTE(review): xrange(len(CommentList)-1, 0, -1) never visits index 0,
    # so a copyright line in the very first comment entry is not considered
    # here -- confirm this is intended.
    #
    Last = 0
    for Index in xrange(len(CommentList)-1, 0, -1):
        Line = CommentList[Index][0]
        if _IsCopyrightLine(Line):
            Last = Index
            break

    for Item in CommentList:
        Line = Item[0]
        LineNo = Item[1]

        # every header line must itself be a '#' comment (or be empty)
        if not Line.startswith(TAB_COMMENT_SPLIT) and Line:
            Logger.Error("\nUPT", FORMAT_INVALID, ST.ERR_INVALID_COMMENT_FORMAT, FileName, Item[1])
        Comment = CleanString2(Line)[1]
        Comment = Comment.strip()
        #
        # if there are blank lines between License or Description, keep them as they would be
        # indication of different block; or in the position that Abstract should be, also keep it
        # as it indicates that no abstract
        #
        if not Comment and HeaderCommentStage not in [HEADER_COMMENT_LICENSE, \
            HEADER_COMMENT_DESCRIPTION, HEADER_COMMENT_ABSTRACT]:
            continue

        if HeaderCommentStage == HEADER_COMMENT_NOT_STARTED:
            if Comment.startswith(STR_HEADER_COMMENT_START):
                HeaderCommentStage = HEADER_COMMENT_ABSTRACT
            else:
                # text before '@file' is treated as license text
                License += Comment + EndOfLine
        else:
            if HeaderCommentStage == HEADER_COMMENT_ABSTRACT:
                #
                # in case there is no abstract and description
                #
                if not Comment:
                    Abstract = ''
                    HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
                elif _IsCopyrightLine(Comment):
                    Result, ErrMsg = _ValidateCopyright(Comment)
                    ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
                    Copyright += Comment + EndOfLine
                    HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
                else:
                    Abstract += Comment + EndOfLine
                    HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
            elif HeaderCommentStage == HEADER_COMMENT_DESCRIPTION:
                #
                # in case there is no description
                #
                if _IsCopyrightLine(Comment):
                    Result, ErrMsg = _ValidateCopyright(Comment)
                    ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
                    Copyright += Comment + EndOfLine
                    HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
                else:
                    Description += Comment + EndOfLine
            elif HeaderCommentStage == HEADER_COMMENT_COPYRIGHT:
                if _IsCopyrightLine(Comment):
                    Result, ErrMsg = _ValidateCopyright(Comment)
                    ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
                    Copyright += Comment + EndOfLine
                else:
                    #
                    # Contents after copyright line are license, those non-copyright lines in between
                    # copyright line will be discarded
                    #
                    if LineNo > Last:
                        if License:
                            License += EndOfLine
                        License += Comment + EndOfLine
                        HeaderCommentStage = HEADER_COMMENT_LICENSE
            else:
                # HEADER_COMMENT_LICENSE stage: accumulate remaining lines,
                # skipping leading blank lines before any license text
                if not Comment and not License:
                    continue
                License += Comment + EndOfLine

    if not Copyright:
        Logger.Error("\nUPT", FORMAT_INVALID, ST.ERR_COPYRIGHT_MISSING, \
                     FileName)

    if not License:
        Logger.Error("\nUPT", FORMAT_INVALID, ST.ERR_LICENSE_MISSING, FileName)

    return Abstract.strip(), Description.strip(), Copyright.strip(), License.strip()
+
+## _IsCopyrightLine
+# check whether current line is copyright line, the criteria is whether there is case insensitive keyword "Copyright"
+# followed by zero or more white space characters followed by a "(" character
+#
+# @param LineContent: the line need to be checked
+# @return: True if current line is copyright line, False else
+#
+def _IsCopyrightLine (LineContent):
+ LineContent = LineContent.upper()
+ Result = False
+
+ ReIsCopyrightRe = re.compile(r"""(^|\s)COPYRIGHT *\(""", re.DOTALL)
+ if ReIsCopyrightRe.search(LineContent):
+ Result = True
+
+ return Result
+
## ParseGenericComment
#
# Collect the help text from a generic comment block into a TextObject.
# A single trailing newline is stripped (unless the text is exactly one
# newline or ends in a blank line).
#
# @param GenericComment: Generic comment list, element of
#                        (CommentLine, LineNum)
# @param ContainerFile:  Input value for filename of Dec file (unused, kept
#                        for interface compatibility)
# @param SkipTag:        optional tag stripped once from the front of a line
# @return a TextObject holding the help text, or None when there is none
#
def ParseGenericComment (GenericComment, ContainerFile=None, SkipTag=None):
    if ContainerFile:
        pass
    Lines = []
    for Item in GenericComment:
        Comment = CleanString2(Item[0])[1]
        if SkipTag is not None and Comment.startswith(SkipTag):
            # drop only the first occurrence of the tag
            Comment = Comment.replace(SkipTag, '', 1)
        Lines.append(Comment + '\n')

    HelpStr = ''.join(Lines)
    if not HelpStr:
        return None

    # remove the single trailing EOL of a 'FOO\n' style help string
    if HelpStr.endswith('\n') and not HelpStr.endswith('\n\n') and HelpStr != '\n':
        HelpStr = HelpStr[:-1]
    HelpTxt = TextObject()
    HelpTxt.SetString(HelpStr)
    return HelpTxt
+
+
## ParseDecPcdGenericComment
#
# Parse a PCD's generic comment block: extract one @ValidRange, @ValidList or
# @Expression annotation into a PcdErrorObject and gather the remaining lines
# as help text.
#
# @param GenericComment: Generic comment list, element of (CommentLine,
#                        LineNum)
# @param ContainerFile:  Input value for filename of Dec file
# @return (HelpStr, PcdErr): help text and the PcdErrorObject (or None)
#
def ParseDecPcdGenericComment (GenericComment, ContainerFile):
    HelpStr = ''
    PcdErr = None

    def _ReportIfDuplicate(LineNum):
        # at most one @ValidRange/@ValidList/@Expression is supported per PCD
        if PcdErr:
            Logger.Error('Parser',
                         FORMAT_NOT_SUPPORTED,
                         ST.WRN_MULTI_PCD_RANGES,
                         File = ContainerFile,
                         Line = LineNum)

    for (CommentLine, LineNum) in GenericComment:
        Comment = CleanString2(CommentLine)[1]
        if Comment.startswith("@ValidRange"):
            _ReportIfDuplicate(LineNum)
            ValidRange = Comment.replace("@ValidRange", "", 1)
            if _CheckRangeExpression(ValidRange):
                PcdErr = PcdErrorObject()
                PcdErr.SetValidValueRange(ValidRange)
        elif Comment.startswith("@ValidList"):
            _ReportIfDuplicate(LineNum)
            # a comma-separated list is normalized to space-separated
            ValidValue = Comment.replace("@ValidList", "", 1).replace(TAB_COMMA_SPLIT, TAB_SPACE_SPLIT)
            PcdErr = PcdErrorObject()
            PcdErr.SetValidValue(ValidValue)
        elif Comment.startswith("@Expression"):
            _ReportIfDuplicate(LineNum)
            Expression = Comment.replace("@Expression", "", 1)
            if _CheckRangeExpression(Expression):
                PcdErr = PcdErrorObject()
                PcdErr.SetExpression(Expression)
        else:
            HelpStr += Comment + '\n'

    #
    # remove the last EOL if the comment is of format 'FOO\n'
    #
    if HelpStr.endswith('\n'):
        if HelpStr != '\n' and not HelpStr.endswith('\n\n'):
            HelpStr = HelpStr[:-1]

    return HelpStr, PcdErr
+
## ParseDecPcdTailComment
#
# Parse a PCD tail comment. When the first word is a supported module type,
# the part before '#' is read as a space-separated module type list and the
# remainder as help text; otherwise the whole comment is help text.
#
# @param TailCommentList: Tail comment list of Pcd, item of format
#                         (Comment, LineNum); exactly one item is expected
# @param ContainerFile:   Input value for filename of Dec file
# @retVal SupModuleList:  The supported module type list detected (None when
#                         the comment carries no module types)
# @retVal HelpStr:        The generic help text string detected
#
def ParseDecPcdTailComment (TailCommentList, ContainerFile):
    # NOTE(review): assert is stripped under 'python -O'; callers are
    # expected to guarantee a single tail comment entry
    assert(len(TailCommentList) == 1)
    TailComment = TailCommentList[0][0]
    LineNum = TailCommentList[0][1]

    Comment = TailComment.lstrip(" #")

    #
    # the first word decides whether this is a module-type list: compare it
    # with SUP_MODULE_LIST
    #
    FirstWordMatch = re.compile(r"""^([^ #]*)""", re.DOTALL).match(Comment)
    if not (FirstWordMatch and FirstWordMatch.group(1) in SUP_MODULE_LIST):
        return None, Comment

    #
    # parse line, it must have supported module type specified; append a '#'
    # so the split below always yields two parts
    #
    if Comment.find(TAB_COMMENT_SPLIT) == -1:
        Comment += TAB_COMMENT_SPLIT
    SupMode, HelpStr = GetSplitValueList(Comment, TAB_COMMENT_SPLIT, 1)
    SupModuleList = []
    for Mod in GetSplitValueList(SupMode, TAB_SPACE_SPLIT):
        if not Mod:
            continue
        if Mod not in SUP_MODULE_LIST:
            Logger.Error("UPT",
                         FORMAT_INVALID,
                         ST.WRN_INVALID_MODULE_TYPE%Mod,
                         ContainerFile,
                         LineNum)
        else:
            SupModuleList.append(Mod)

    return SupModuleList, HelpStr
+
+
+## _CheckRangeExpression
+#
+# @param Expression: Pcd range expression
+#
+def _CheckRangeExpression(Expression):
+ #
+ # check grammar for Pcd range expression is not required yet
+ #
+ if Expression:
+ pass
+ return True
+
## ValidateCopyright
#
# Emit a warning about an invalid copyright line when the preceding
# validation failed; do nothing when it succeeded.
#
# @param Result:   validation outcome (False triggers the warning)
# @param ErrType:  warning type/message identifier passed to the logger
# @param FileName: file the line came from
# @param LineNo:   line number of the copyright line
# @param ErrMsg:   detailed error description
#
def ValidateCopyright(Result, ErrType, FileName, LineNo, ErrMsg):
    if Result:
        return
    Logger.Warn("\nUPT", ErrType, FileName, LineNo, ErrMsg)
+
+## _ValidateCopyright
+#
+# @param Line: Line that contains copyright information, # stripped
+#
+# @retval Result: True if line is conformed to Spec format, False else
+# @retval ErrMsg: the detailed error description
+#
+def _ValidateCopyright(Line):
+ if Line:
+ pass
+ Result = True
+ ErrMsg = ''
+
+ return Result, ErrMsg
+
## GenerateTokenList
#
# Tokenize Comment, treating '#' and ' ' as token separators: separator runs
# are collapsed to a fixed point, leading/trailing separators stripped, and
# the result split on '#'.
#
# NOTE(review): the no-op replace(' ', ' ') below looks like a whitespace-
# mangled replace('  ', ' ') from the original source; the fixed point the
# loop converges to is the same either way -- confirm against upstream.
#
# @param Comment: the comment text to tokenize
# @return list of tokens (a single empty string for empty input)
#
def GenerateTokenList (Comment):
    Previous = None
    while Comment != Previous:
        Previous = Comment
        Comment = Comment.replace('##', '#').replace(' ', ' ').replace(' ', '#').strip('# ')
    return Comment.split('#')
+
+
## ParseComment
#
# Parse a usage comment into (Usage, Type, String, HelpText). Tokens are
# produced by GenerateTokenList; the first one or two tokens are matched
# against the usage/type dictionaries and removed from the help text.
#
# @param Comment      - Comment to parse, as a (text, ...) sequence; only
#                       element [0] is used
# @param UsageTokens  - A dictionary of usage token synonyms
# @param TypeTokens   - A dictionary of type token synonyms
# @param RemoveTokens - A list of tokens to remove from help text
# @param ParseVariable- True for parsing [Guids]. Otherwise False
# @return (Usage, Type, String, HelpText); Usage/Type default to
#         'UNDEFINED', String is only set for Type 'Variable', HelpText is
#         None when nothing remains
#
def ParseComment (Comment, UsageTokens, TypeTokens, RemoveTokens, ParseVariable):
    #
    # Initialize return values
    #
    Usage = None
    Type = None
    String = None
    HelpText = None

    Comment = Comment[0]

    NumTokens = 2
    if ParseVariable:
        #
        # Remove white space around first instance of ':' from Comment if 'Variable'
        # is in front of ':' and Variable is the 1st or 2nd token in Comment.
        #
        List = Comment.split(':', 1)
        if len(List) > 1:
            SubList = GenerateTokenList (List[0].strip())
            if len(SubList) in [1, 2] and SubList[-1] == 'Variable':
                if List[1].strip().find('L"') == 0:
                    Comment = List[0].strip() + ':' + List[1].strip()

        #
        # Remove first instance of L"<VariableName> from Comment and put into String
        # if and only if L"<VariableName>" is the 1st token, the 2nd token. Or
        # L"<VariableName>" is the third token immediately following 'Variable:'.
        #
        End = -1
        Start = Comment.find('Variable:L"')
        if Start >= 0:
            # skip the 'Variable:' prefix (9 chars) so String starts at L"
            String = Comment[Start + 9:]
            End = String[2:].find('"')
        else:
            Start = Comment.find('L"')
            if Start >= 0:
                String = Comment[Start:]
                End = String[2:].find('"')
        if End >= 0:
            SubList = GenerateTokenList (Comment[:Start])
            if len(SubList) < 2:
                # splice the variable literal out of the comment text;
                # End+3 accounts for the L" prefix and the closing quote
                Comment = Comment[:Start] + String[End + 3:]
                String = String[:End + 3]
                Type = 'Variable'
                NumTokens = 1

    #
    # Initialze HelpText to Comment.
    # Content will be remove from HelpText as matching tokens are found
    #
    HelpText = Comment

    #
    # Tokenize Comment using '#' and ' ' as token seperators
    #
    List = GenerateTokenList (Comment)

    #
    # Search first two tokens for Usage and Type and remove any matching tokens
    # from HelpText
    #
    for Token in List[0:NumTokens]:
        if Usage == None and Token in UsageTokens:
            Usage = UsageTokens[Token]
            HelpText = HelpText.replace(Token, '')
    if Usage != None or not ParseVariable:
        for Token in List[0:NumTokens]:
            if Type == None and Token in TypeTokens:
                Type = TypeTokens[Token]
                HelpText = HelpText.replace(Token, '')
        if Usage != None:
            for Token in List[0:NumTokens]:
                if Token in RemoveTokens:
                    HelpText = HelpText.replace(Token, '')

    #
    # If no Usage token is present and set Usage to UNDEFINED
    #
    if Usage == None:
        Usage = 'UNDEFINED'

    #
    # If no Type token is present and set Type to UNDEFINED
    #
    if Type == None:
        Type = 'UNDEFINED'

    #
    # If Type is not 'Variable:', then set String to None
    #
    if Type != 'Variable':
        String = None

    #
    # Strip ' ' and '#' from the beginning of HelpText
    # If HelpText is an empty string after all parsing is
    # complete then set HelpText to None
    #
    HelpText = HelpText.lstrip('# ')
    if HelpText == '':
        HelpText = None

    #
    # Return parsing results
    #
    return Usage, Type, String, HelpText
diff --git a/BaseTools/Source/Python/UPT/Library/DataType.py b/BaseTools/Source/Python/UPT/Library/DataType.py
new file mode 100644
index 0000000000..da6b69d82b
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/DataType.py
@@ -0,0 +1,919 @@
+## @file
+# This file is used to define class for data type structure
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+'''
+DataType
+'''
+
+##
+# Module List Items
+#
+MODULE_LIST = ["BASE",
+ "SEC",
+ "PEI_CORE",
+ "PEIM",
+ "DXE_CORE",
+ "DXE_DRIVER",
+ "SMM_CORE",
+ "DXE_RUNTIME_DRIVER",
+ "DXE_SAL_DRIVER",
+ "DXE_SMM_DRIVER",
+ "UEFI_DRIVER",
+ "UEFI_APPLICATION",
+ "USER_DEFINED"]
+
+VALID_DEPEX_MODULE_TYPE_LIST = ["PEIM",
+ "DXE_DRIVER",
+ "DXE_SMM_DRIVER",
+ "DXE_RUNTIME_DRIVER",
+ "DXE_SAL_DRIVER",
+ "UEFI_DRIVER",
+ ]
+##
+# Usage List Items
+#
+USAGE_LIST = ["CONSUMES",
+ "SOMETIMES_CONSUMES",
+ "PRODUCES",
+ "SOMETIMES_PRODUCES"]
+
+LANGUAGE_EN_US = 'en-US'
+
+USAGE_ITEM_PRODUCES = 'PRODUCES'
+USAGE_ITEM_SOMETIMES_PRODUCES = 'SOMETIMES_PRODUCES'
+USAGE_ITEM_CONSUMES = 'CONSUMES'
+USAGE_ITEM_SOMETIMES_CONSUMES = 'SOMETIMES_CONSUMES'
+USAGE_ITEM_TO_START = 'TO_START'
+USAGE_ITEM_BY_START = 'BY_START'
+USAGE_ITEM_NOTIFY = 'NOTIFY'
+USAGE_ITEM_UNDEFINED = 'UNDEFINED'
+
+USAGE_CONSUMES_LIST = [USAGE_ITEM_CONSUMES,
+ 'CONSUMED',
+ 'ALWAYS_CONSUMED',
+ 'ALWAYS_CONSUMES'
+ ]
+
+USAGE_PRODUCES_LIST = [USAGE_ITEM_PRODUCES,
+ 'PRODUCED',
+ 'ALWAYS_PRODUCED',
+ 'ALWAYS_PRODUCES'
+ ]
+
+USAGE_SOMETIMES_PRODUCES_LIST = [USAGE_ITEM_SOMETIMES_PRODUCES,
+ 'SOMETIMES_PRODUCED'
+ ]
+
+USAGE_SOMETIMES_CONSUMES_LIST = [USAGE_ITEM_SOMETIMES_CONSUMES,
+ 'SOMETIMES_CONSUMED'
+ ]
+
+ITEM_UNDEFINED = 'UNDEFINED'
+
+
+#
+# Dictionary of usage tokens and their synonmys
+#
+ALL_USAGE_TOKENS = {
+ "PRODUCES" : "PRODUCES",
+ "PRODUCED" : "PRODUCES",
+ "ALWAYS_PRODUCES" : "PRODUCES",
+ "ALWAYS_PRODUCED" : "PRODUCES",
+ "SOMETIMES_PRODUCES" : "SOMETIMES_PRODUCES",
+ "SOMETIMES_PRODUCED" : "SOMETIMES_PRODUCES",
+ "CONSUMES" : "CONSUMES",
+ "CONSUMED" : "CONSUMES",
+ "ALWAYS_CONSUMES" : "CONSUMES",
+ "ALWAYS_CONSUMED" : "CONSUMES",
+ "SOMETIMES_CONSUMES" : "SOMETIMES_CONSUMES",
+ "SOMETIMES_CONSUMED" : "SOMETIMES_CONSUMES",
+ "SOMETIME_CONSUMES" : "SOMETIMES_CONSUMES",
+ "UNDEFINED" : "UNDEFINED"
+ }
+
+PROTOCOL_USAGE_TOKENS = {
+ "TO_START" : "TO_START",
+ "BY_START" : "BY_START"
+ }
+
+PROTOCOL_USAGE_TOKENS.update (ALL_USAGE_TOKENS)
+
+#
+# Dictionary of GUID type tokens
+#
+GUID_TYPE_TOKENS = {
+ "Event" : "Event",
+ "File" : "File",
+ "FV" : "FV",
+ "GUID" : "GUID",
+ "Guid" : "GUID",
+ "HII" : "HII",
+ "HOB" : "HOB",
+ "Hob" : "HOB",
+ "Hob:" : "HOB",
+ "SystemTable" : "SystemTable",
+ "TokenSpaceGuid" : "TokenSpaceGuid",
+ "UNDEFINED" : "UNDEFINED"
+ }
+
+#
+# Dictionary of Protocol Notify tokens and their synonyms
+#
+PROTOCOL_NOTIFY_TOKENS = {
+ "NOTIFY" : "NOTIFY",
+ "PROTOCOL_NOTIFY" : "NOTIFY",
+ "UNDEFINED" : "UNDEFINED"
+ }
+
+#
+# Dictionary of PPI Notify tokens and their synonyms
+#
+PPI_NOTIFY_TOKENS = {
+ "NOTIFY" : "NOTIFY",
+ "PPI_NOTIFY" : "NOTIFY",
+ "UNDEFINED" : "UNDEFINED"
+ }
+
+EVENT_TOKENS = {
+ "EVENT_TYPE_PERIODIC_TIMER" : "EVENT_TYPE_PERIODIC_TIMER",
+ "EVENT_TYPE_RELATIVE_TIMER" : "EVENT_TYPE_RELATIVE_TIMER",
+ "UNDEFINED" : "UNDEFINED"
+ }
+
+BOOTMODE_TOKENS = {
+ "FULL" : "FULL",
+ "MINIMAL" : "MINIMAL",
+ "NO_CHANGE" : "NO_CHANGE",
+ "DIAGNOSTICS" : "DIAGNOSTICS",
+ "DEFAULT" : "DEFAULT",
+ "S2_RESUME" : "S2_RESUME",
+ "S3_RESUME" : "S3_RESUME",
+ "S4_RESUME" : "S4_RESUME",
+ "S5_RESUME" : "S5_RESUME",
+ "FLASH_UPDATE" : "FLASH_UPDATE",
+ "RECOVERY_FULL" : "RECOVERY_FULL",
+ "RECOVERY_MINIMAL" : "RECOVERY_MINIMAL",
+ "RECOVERY_NO_CHANGE" : "RECOVERY_NO_CHANGE",
+ "RECOVERY_DIAGNOSTICS" : "RECOVERY_DIAGNOSTICS",
+ "RECOVERY_DEFAULT" : "RECOVERY_DEFAULT",
+ "RECOVERY_S2_RESUME" : "RECOVERY_S2_RESUME",
+ "RECOVERY_S3_RESUME" : "RECOVERY_S3_RESUME",
+ "RECOVERY_S4_RESUME" : "RECOVERY_S4_RESUME",
+ "RECOVERY_S5_RESUME" : "RECOVERY_S5_RESUME",
+ "RECOVERY_FLASH_UPDATE" : "RECOVERY_FLASH_UPDATE",
+ "UNDEFINED" : "UNDEFINED"
+ }
+
+HOB_TOKENS = {
+ "PHIT" : "PHIT",
+ "MEMORY_ALLOCATION" : "MEMORY_ALLOCATION",
+ "LOAD_PEIM" : "LOAD_PEIM",
+ "RESOURCE_DESCRIPTOR" : "RESOURCE_DESCRIPTOR",
+ "FIRMWARE_VOLUME" : "FIRMWARE_VOLUME",
+ "UNDEFINED" : "UNDEFINED"
+ }
+
+##
+# Usage List Items for Protocol
+#
+PROTOCOL_USAGE_LIST = USAGE_LIST + ["TO_START", "BY_START"]
+
+##
+# End of Line
+# Use this but not os.linesep for os.linesep has bug in it.
+#
+END_OF_LINE = '\n'
+
+##
+# Arch List Items
+#
+ARCH_LIST = ["IA32",
+ "X64",
+ "IPF",
+ "EBC",
+ "COMMON"]
+##
+# PCD driver type list items
+#
+PCD_DIRVER_TYPE_LIST = ["PEI_PCD_DRIVER", "DXE_PCD_DRIVER"]
+
+##
+# Boot Mode List Items
+#
+BOOT_MODE_LIST = ["FULL",
+ "MINIMAL",
+ "NO_CHANGE",
+ "DIAGNOSTICS",
+ "DEFAULT",
+ "S2_RESUME",
+ "S3_RESUME",
+ "S4_RESUME",
+ "S5_RESUME",
+ "FLASH_UPDATE",
+ "RECOVERY_FULL",
+ "RECOVERY_MINIMAL",
+ "RECOVERY_NO_CHANGE",
+ "RECOVERY_DIAGNOSTICS",
+ "RECOVERY_DEFAULT",
+ "RECOVERY_S2_RESUME",
+ "RECOVERY_S3_RESUME",
+ "RECOVERY_S4_RESUME",
+ "RECOVERY_S5_RESUME",
+ "RECOVERY_FLASH_UPDATE"]
+
+##
+# Event Type List Items
+#
+EVENT_TYPE_LIST = ["EVENT_TYPE_PERIODIC_TIMER",
+ "EVENT_TYPE_RELATIVE_TIMER"]
+
+##
+# Hob Type List Items
+#
+HOB_TYPE_LIST = ["PHIT",
+ "MEMORY_ALLOCATION",
+ "RESOURCE_DESCRIPTOR",
+ "FIRMWARE_VOLUME",
+ "LOAD_PEIM"]
+
+##
+# GUID_TYPE_LIST
+#
+GUID_TYPE_LIST = ["Event", "File", "FV", "GUID", "HII", "HOB",
+ "SystemTable", "TokenSpaceGuid", "Variable"]
+##
+# PCD Usage Type List of Package
+#
+PCD_USAGE_TYPE_LIST_OF_PACKAGE = ["FeatureFlag", "PatchableInModule",
+ "FixedAtBuild", "Dynamic", "DynamicEx"]
+
+##
+# PCD Usage Type List of Module
+#
+PCD_USAGE_TYPE_LIST_OF_MODULE = ["FEATUREPCD", "PATCHPCD", "FIXEDPCD", "PCD", "PCDEX"]
+##
+# PCD Usage Type List of UPT
+#
+PCD_USAGE_TYPE_LIST_OF_UPT = PCD_USAGE_TYPE_LIST_OF_MODULE
+
+##
+# Binary File Type List
+#
+BINARY_FILE_TYPE_LIST = ["GUID", "PE32", "PIC", "TE", "DXE_DEPEX", "VER", "UI", "COMPAT16", "FV", "BIN", "RAW",
+ "ACPI", "ASL",
+ "PEI_DEPEX",
+ "SMM_DEPEX",
+ "SUBTYPE_GUID"
+ ]
+BINARY_FILE_TYPE_LIST_IN_UDP = \
+ ["GUID", "FREEFORM",
+ "UEFI_IMAGE", "PE32", "PIC",
+ "PEI_DEPEX",
+ "DXE_DEPEX",
+ "SMM_DEPEX",
+ "FV", "TE",
+ "BIN", "VER", "UI"
+ ]
+
+##
+# Possible values for COMPONENT_TYPE, and their descriptions, are listed in
+# the table,
+# "Component (module) Types." For each component, the BASE_NAME and
+# COMPONENT_TYPE
+# are required. The COMPONENT_TYPE definition is case sensitive.
+#
+COMPONENT_TYPE_LIST = [
+ "APPLICATION",
+ "ACPITABLE",
+ "APRIORI",
+ "BINARY",
+ "BS_DRIVER",
+ "CONFIG",
+ "FILE",
+ "FVIMAGEFILE",
+ "LIBRARY",
+ "LOGO",
+ "LEGACY16",
+ "MICROCODE",
+ "PE32_PEIM",
+ "PEI_CORE",
+ "RAWFILE",
+ "RT_DRIVER",
+ "SAL_RT_DRIVER",
+ "SECURITY_CORE",
+ "COMBINED_PEIM_DRIVER",
+ "PIC_PEIM",
+ "RELOCATABLE_PEIM"
+ ]
+
+##
+# Common Definitions
+#
+TAB_SPLIT = '.'
+TAB_COMMENT_EDK1_START = '/*'
+TAB_COMMENT_EDK1_END = '*/'
+TAB_COMMENT_EDK1_SPLIT = '//'
+TAB_COMMENT_SPLIT = '#'
+TAB_EQUAL_SPLIT = '='
+TAB_DEQUAL_SPLIT = '=='
+TAB_VALUE_SPLIT = '|'
+TAB_COMMA_SPLIT = ','
+TAB_SPACE_SPLIT = ' '
+TAB_UNDERLINE_SPLIT = '_'
+TAB_SEMI_COLON_SPLIT = ';'
+TAB_COLON_SPLIT = ':'
+TAB_SECTION_START = '['
+TAB_SECTION_END = ']'
+TAB_OPTION_START = '<'
+TAB_OPTION_END = '>'
+TAB_SLASH = '\\'
+TAB_BACK_SLASH = '/'
+TAB_SPECIAL_COMMENT = '##'
+TAB_HEADER_COMMENT = '@file'
+TAB_STAR = "*"
+
+TAB_EDK_SOURCE = '$(EDK_SOURCE)'
+TAB_EFI_SOURCE = '$(EFI_SOURCE)'
+TAB_WORKSPACE = '$(WORKSPACE)'
+
+TAB_ARCH_NULL = ''
+TAB_ARCH_COMMON = 'COMMON'
+TAB_ARCH_IA32 = 'IA32'
+TAB_ARCH_X64 = 'X64'
+TAB_ARCH_IPF = 'IPF'
+TAB_ARCH_ARM = 'ARM'
+TAB_ARCH_EBC = 'EBC'
+
+ARCH_LIST = \
+[TAB_ARCH_IA32, TAB_ARCH_X64, TAB_ARCH_IPF, TAB_ARCH_ARM, TAB_ARCH_EBC]
+
+SUP_MODULE_BASE = 'BASE'
+SUP_MODULE_SEC = 'SEC'
+SUP_MODULE_PEI_CORE = 'PEI_CORE'
+SUP_MODULE_PEIM = 'PEIM'
+SUP_MODULE_DXE_CORE = 'DXE_CORE'
+SUP_MODULE_DXE_DRIVER = 'DXE_DRIVER'
+SUP_MODULE_DXE_RUNTIME_DRIVER = 'DXE_RUNTIME_DRIVER'
+SUP_MODULE_DXE_SAL_DRIVER = 'DXE_SAL_DRIVER'
+SUP_MODULE_DXE_SMM_DRIVER = 'DXE_SMM_DRIVER'
+SUP_MODULE_UEFI_DRIVER = 'UEFI_DRIVER'
+SUP_MODULE_UEFI_APPLICATION = 'UEFI_APPLICATION'
+SUP_MODULE_USER_DEFINED = 'USER_DEFINED'
+SUP_MODULE_SMM_CORE = 'SMM_CORE'
+
+SUP_MODULE_LIST = \
+[SUP_MODULE_BASE, SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, \
+SUP_MODULE_DXE_CORE, SUP_MODULE_DXE_DRIVER, \
+ SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, \
+ SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_UEFI_DRIVER, \
+ SUP_MODULE_UEFI_APPLICATION, SUP_MODULE_USER_DEFINED, \
+ SUP_MODULE_SMM_CORE]
+SUP_MODULE_LIST_STRING = TAB_VALUE_SPLIT.join(l for l in SUP_MODULE_LIST)
+
#
# EDK (EDK1-style) COMPONENT_TYPE values recognized in INF [Defines].
#
EDK_COMPONENT_TYPE_LIBRARY = 'LIBRARY'
# NOTE(review): 'SECUARITY' is a long-standing misspelling of 'SECURITY';
# the value is kept verbatim for compatibility with existing meta-data
# files -- audit all consumers before ever correcting it.
EDK_COMPONENT_TYPE_SECUARITY_CORE = 'SECUARITY_CORE'
EDK_COMPONENT_TYPE_PEI_CORE = 'PEI_CORE'
EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER = 'COMBINED_PEIM_DRIVER'
EDK_COMPONENT_TYPE_PIC_PEIM = 'PIC_PEIM'
EDK_COMPONENT_TYPE_RELOCATABLE_PEIM = 'RELOCATABLE_PEIM'
EDK_COMPONENT_TYPE_BS_DRIVER = 'BS_DRIVER'
EDK_COMPONENT_TYPE_RT_DRIVER = 'RT_DRIVER'
EDK_COMPONENT_TYPE_SAL_RT_DRIVER = 'SAL_RT_DRIVER'
EDK_COMPONENT_TYPE_APPLICATION = 'APPLICATION'
EDK_NAME = 'EDK'
EDKII_NAME = 'EDKII'

#
# Binary file types that may appear in INF [Binaries] sections.
#
BINARY_FILE_TYPE_FW = 'FW'
BINARY_FILE_TYPE_GUID = 'GUID'
# NOTE(review): 'PREEFORM' looks misspelled (FREEFORM?); value preserved
# as-is -- verify against the UPT/INF specification before changing.
BINARY_FILE_TYPE_PREEFORM = 'PREEFORM'
BINARY_FILE_TYPE_UEFI_APP = 'UEFI_APP'
BINARY_FILE_TYPE_UNI_UI = 'UNI_UI'
BINARY_FILE_TYPE_SEC_UI = 'SEC_UI'
BINARY_FILE_TYPE_UNI_VER = 'UNI_VER'
BINARY_FILE_TYPE_SEC_VER = 'SEC_VER'
BINARY_FILE_TYPE_LIB = 'LIB'
BINARY_FILE_TYPE_PE32 = 'PE32'
BINARY_FILE_TYPE_PIC = 'PIC'
BINARY_FILE_TYPE_PEI_DEPEX = 'PEI_DEPEX'
BINARY_FILE_TYPE_DXE_DEPEX = 'DXE_DEPEX'
BINARY_FILE_TYPE_SMM_DEPEX = 'SMM_DEPEX'
BINARY_FILE_TYPE_TE = 'TE'
BINARY_FILE_TYPE_VER = 'VER'
BINARY_FILE_TYPE_UI = 'UI'
BINARY_FILE_TYPE_BIN = 'BIN'
BINARY_FILE_TYPE_FV = 'FV'
# Binary types that carry a UI (name) string / a version string.
BINARY_FILE_TYPE_UI_LIST = [BINARY_FILE_TYPE_UNI_UI,
                            BINARY_FILE_TYPE_SEC_UI,
                            BINARY_FILE_TYPE_UI
                            ]
BINARY_FILE_TYPE_VER_LIST = [BINARY_FILE_TYPE_UNI_VER,
                             BINARY_FILE_TYPE_SEC_VER,
                             BINARY_FILE_TYPE_VER
                             ]

#
# Dependency-expression section headers used in binary INF files.
#
DEPEX_SECTION_LIST = ['<PEI_DEPEX>',
                      '<DXE_DEPEX>',
                      '<SMM_DEPEX>'
                      ]
+
#
# Component types used in DSC [Components] sections.
#
PLATFORM_COMPONENT_TYPE_LIBRARY = 'LIBRARY'
PLATFORM_COMPONENT_TYPE_LIBRARY_CLASS = 'LIBRARY_CLASS'
PLATFORM_COMPONENT_TYPE_MODULE = 'MODULE'

TAB_LIBRARIES = 'Libraries'

#
# Per-architecture section names are composed as
# '<Section>' + TAB_SPLIT + '<Arch>', e.g. 'Sources.IA32'.
#
TAB_SOURCE = 'Source'
TAB_SOURCES = 'Sources'
TAB_SOURCES_COMMON = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_SOURCES_IA32 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IA32
TAB_SOURCES_X64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_X64
TAB_SOURCES_IPF = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IPF
TAB_SOURCES_ARM = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_ARM
TAB_SOURCES_EBC = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_EBC

TAB_BINARIES = 'Binaries'
TAB_BINARIES_COMMON = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_BINARIES_IA32 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IA32
TAB_BINARIES_X64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_X64
TAB_BINARIES_IPF = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IPF
TAB_BINARIES_ARM = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_ARM
TAB_BINARIES_EBC = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_EBC

TAB_INCLUDES = 'Includes'
TAB_INCLUDES_COMMON = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_INCLUDES_IA32 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IA32
TAB_INCLUDES_X64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_X64
TAB_INCLUDES_IPF = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IPF
TAB_INCLUDES_ARM = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_ARM
TAB_INCLUDES_EBC = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_EBC

TAB_GUIDS = 'Guids'
TAB_GUIDS_COMMON = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_GUIDS_IA32 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IA32
TAB_GUIDS_X64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_X64
TAB_GUIDS_IPF = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IPF
TAB_GUIDS_ARM = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_ARM
TAB_GUIDS_EBC = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_EBC

TAB_PROTOCOLS = 'Protocols'
TAB_PROTOCOLS_COMMON = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PROTOCOLS_IA32 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IA32
TAB_PROTOCOLS_X64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_X64
TAB_PROTOCOLS_IPF = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IPF
TAB_PROTOCOLS_ARM = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_ARM
TAB_PROTOCOLS_EBC = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_EBC

TAB_PPIS = 'Ppis'
TAB_PPIS_COMMON = TAB_PPIS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PPIS_IA32 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IA32
TAB_PPIS_X64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_X64
TAB_PPIS_IPF = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IPF
TAB_PPIS_ARM = TAB_PPIS + TAB_SPLIT + TAB_ARCH_ARM
TAB_PPIS_EBC = TAB_PPIS + TAB_SPLIT + TAB_ARCH_EBC

TAB_LIBRARY_CLASSES = 'LibraryClasses'
TAB_LIBRARY_CLASSES_COMMON = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_LIBRARY_CLASSES_IA32 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IA32
TAB_LIBRARY_CLASSES_X64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_X64
TAB_LIBRARY_CLASSES_IPF = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IPF
TAB_LIBRARY_CLASSES_ARM = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_ARM
TAB_LIBRARY_CLASSES_EBC = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_EBC

TAB_PACKAGES = 'Packages'
TAB_PACKAGES_COMMON = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PACKAGES_IA32 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IA32
TAB_PACKAGES_X64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_X64
TAB_PACKAGES_IPF = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IPF
TAB_PACKAGES_ARM = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_ARM
TAB_PACKAGES_EBC = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_EBC

#
# PCD access-type names; composed with TAB_PCDS to form section names
# such as 'PcdsFixedAtBuild'.
#
TAB_PCDS = 'Pcds'
TAB_PCDS_FIXED_AT_BUILD = 'FixedAtBuild'
TAB_PCDS_PATCHABLE_IN_MODULE = 'PatchableInModule'
TAB_PCDS_FEATURE_FLAG = 'FeatureFlag'
TAB_PCDS_DYNAMIC_EX = 'DynamicEx'
TAB_PCDS_DYNAMIC_EX_DEFAULT = 'DynamicExDefault'
TAB_PCDS_DYNAMIC_EX_VPD = 'DynamicExVpd'
TAB_PCDS_DYNAMIC_EX_HII = 'DynamicExHii'
TAB_PCDS_DYNAMIC = 'Dynamic'
TAB_PCDS_DYNAMIC_DEFAULT = 'DynamicDefault'
TAB_PCDS_DYNAMIC_VPD = 'DynamicVpd'
TAB_PCDS_DYNAMIC_HII = 'DynamicHii'

# Datum type name for pointer-typed PCDs.
TAB_PTR_TYPE_PCD = 'VOID*'

PCD_DYNAMIC_TYPE_LIST = [TAB_PCDS_DYNAMIC, TAB_PCDS_DYNAMIC_DEFAULT, \
                         TAB_PCDS_DYNAMIC_VPD, TAB_PCDS_DYNAMIC_HII]
PCD_DYNAMIC_EX_TYPE_LIST = [TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, \
                            TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII]
## Dynamic-ex PCD types
#
gDYNAMIC_EX_PCD = [TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, \
                   TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII]

#
# Fully composed PCD section names.  The *_NULL variants have no
# architecture suffix (e.g. 'PcdsFixedAtBuild'); the others append
# TAB_SPLIT + arch.
#
TAB_PCDS_FIXED_AT_BUILD_NULL = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD
TAB_PCDS_FIXED_AT_BUILD_COMMON = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_COMMON
TAB_PCDS_FIXED_AT_BUILD_IA32 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_FIXED_AT_BUILD_X64 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_FIXED_AT_BUILD_IPF = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_IPF
TAB_PCDS_FIXED_AT_BUILD_ARM = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_FIXED_AT_BUILD_EBC = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_EBC

TAB_PCDS_PATCHABLE_IN_MODULE_NULL = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE
TAB_PCDS_PATCHABLE_IN_MODULE_COMMON = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE \
+ TAB_SPLIT + TAB_ARCH_COMMON
TAB_PCDS_PATCHABLE_IN_MODULE_IA32 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_PATCHABLE_IN_MODULE_X64 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_PATCHABLE_IN_MODULE_IPF = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_IPF
TAB_PCDS_PATCHABLE_IN_MODULE_ARM = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_PATCHABLE_IN_MODULE_EBC = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_EBC

TAB_PCDS_FEATURE_FLAG_NULL = TAB_PCDS + TAB_PCDS_FEATURE_FLAG
TAB_PCDS_FEATURE_FLAG_COMMON = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT \
+ TAB_ARCH_COMMON
TAB_PCDS_FEATURE_FLAG_IA32 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_IA32
TAB_PCDS_FEATURE_FLAG_X64 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_X64
TAB_PCDS_FEATURE_FLAG_IPF = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_IPF
TAB_PCDS_FEATURE_FLAG_ARM = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_ARM
TAB_PCDS_FEATURE_FLAG_EBC = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_EBC

TAB_PCDS_DYNAMIC_EX_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX
TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_DEFAULT
TAB_PCDS_DYNAMIC_EX_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_HII
TAB_PCDS_DYNAMIC_EX_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_VPD
TAB_PCDS_DYNAMIC_EX_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_COMMON
TAB_PCDS_DYNAMIC_EX_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_IA32
TAB_PCDS_DYNAMIC_EX_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_X64
TAB_PCDS_DYNAMIC_EX_IPF = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_IPF
TAB_PCDS_DYNAMIC_EX_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_ARM
TAB_PCDS_DYNAMIC_EX_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_EBC

TAB_PCDS_DYNAMIC_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC
TAB_PCDS_DYNAMIC_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_DEFAULT
TAB_PCDS_DYNAMIC_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_HII
TAB_PCDS_DYNAMIC_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_VPD
TAB_PCDS_DYNAMIC_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + \
TAB_ARCH_COMMON
TAB_PCDS_DYNAMIC_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_DYNAMIC_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_DYNAMIC_IPF = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IPF
TAB_PCDS_DYNAMIC_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_DYNAMIC_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_EBC

TAB_PCD_DYNAMIC_TYPE_LIST = [TAB_PCDS_DYNAMIC_DEFAULT_NULL, \
                             TAB_PCDS_DYNAMIC_VPD_NULL, \
                             TAB_PCDS_DYNAMIC_HII_NULL]
TAB_PCD_DYNAMIC_EX_TYPE_LIST = [TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, \
                                TAB_PCDS_DYNAMIC_EX_VPD_NULL, \
                                TAB_PCDS_DYNAMIC_EX_HII_NULL]

#
# Well-known PCD names (and their datum types) used by the fixed-address
# loading feature.
#
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE = \
'PcdLoadFixAddressPeiCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE = \
'PcdLoadFixAddressBootTimeCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE = \
'PcdLoadFixAddressRuntimeCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE = \
'PcdLoadFixAddressSmmCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST = \
[TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE, \
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE, \
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE, \
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE]
# Upper-cased section-name lists used for case-insensitive matching
# while parsing DEC/INF files.
PCD_SECTION_LIST = [TAB_PCDS_FIXED_AT_BUILD_NULL.upper(), \
                    TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper(), \
                    TAB_PCDS_FEATURE_FLAG_NULL.upper(), \
                    TAB_PCDS_DYNAMIC_EX_NULL.upper(), \
                    TAB_PCDS_DYNAMIC_NULL.upper()]
INF_PCD_SECTION_LIST = ["FixedPcd".upper(), "FeaturePcd".upper(), \
                        "PatchPcd".upper(), "Pcd".upper(), "PcdEx".upper()]
#
# [Depex] section names, per architecture.
#
TAB_DEPEX = 'Depex'
TAB_DEPEX_COMMON = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_COMMON
TAB_DEPEX_IA32 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IA32
TAB_DEPEX_X64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_X64
TAB_DEPEX_IPF = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IPF
TAB_DEPEX_ARM = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_ARM
TAB_DEPEX_EBC = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_EBC

TAB_SKUIDS = 'SkuIds'

# NOTE(review): TAB_LIBRARIES is also assigned (to the same value)
# earlier in this file; this re-definition is redundant but harmless.
TAB_LIBRARIES = 'Libraries'
TAB_LIBRARIES_COMMON = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_LIBRARIES_IA32 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IA32
TAB_LIBRARIES_X64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_X64
TAB_LIBRARIES_IPF = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IPF
TAB_LIBRARIES_ARM = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_ARM
TAB_LIBRARIES_EBC = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_EBC

TAB_COMPONENTS = 'Components'
TAB_COMPONENTS_COMMON = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_COMPONENTS_IA32 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IA32
TAB_COMPONENTS_X64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_X64
TAB_COMPONENTS_IPF = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IPF
TAB_COMPONENTS_ARM = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_ARM
TAB_COMPONENTS_EBC = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_EBC

TAB_COMPONENTS_SOURCE_OVERRIDE_PATH = 'SOURCE_OVERRIDE_PATH'

TAB_BUILD_OPTIONS = 'BuildOptions'

TAB_DEFINE = 'DEFINE'
TAB_NMAKE = 'Nmake'
TAB_USER_EXTENSIONS = 'UserExtensions'
TAB_INCLUDE = '!include'

#
# Common Define
#
TAB_COMMON_DEFINES = 'Defines'

#
# Inf Definitions: key names accepted in an INF [Defines] section.
#
TAB_INF_DEFINES = TAB_COMMON_DEFINES
TAB_INF_DEFINES_INF_VERSION = 'INF_VERSION'
TAB_INF_DEFINES_BASE_NAME = 'BASE_NAME'
TAB_INF_DEFINES_FILE_GUID = 'FILE_GUID'
TAB_INF_DEFINES_MODULE_TYPE = 'MODULE_TYPE'
TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION = 'EFI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION = 'UEFI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_PI_SPECIFICATION_VERSION = 'PI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_EDK_RELEASE_VERSION = 'EDK_RELEASE_VERSION'
TAB_INF_DEFINES_BINARY_MODULE = 'BINARY_MODULE'
TAB_INF_DEFINES_LIBRARY_CLASS = 'LIBRARY_CLASS'
TAB_INF_DEFINES_COMPONENT_TYPE = 'COMPONENT_TYPE'
TAB_INF_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
TAB_INF_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
TAB_INF_DEFINES_BUILD_TYPE = 'BUILD_TYPE'
TAB_INF_DEFINES_FFS_EXT = 'FFS_EXT'
TAB_INF_DEFINES_FV_EXT = 'FV_EXT'
TAB_INF_DEFINES_SOURCE_FV = 'SOURCE_FV'
TAB_INF_DEFINES_PACKAGE = 'PACKAGE'
TAB_INF_DEFINES_VERSION_NUMBER = 'VERSION_NUMBER'
TAB_INF_DEFINES_VERSION = 'VERSION'
TAB_INF_DEFINES_VERSION_STRING = 'VERSION_STRING'
TAB_INF_DEFINES_PCD_IS_DRIVER = 'PCD_IS_DRIVER'
TAB_INF_DEFINES_TIANO_EDK1_FLASHMAP_H = 'TIANO_EDK1_FLASHMAP_H'
TAB_INF_DEFINES_ENTRY_POINT = 'ENTRY_POINT'
TAB_INF_DEFINES_UNLOAD_IMAGE = 'UNLOAD_IMAGE'
TAB_INF_DEFINES_CONSTRUCTOR = 'CONSTRUCTOR'
TAB_INF_DEFINES_DESTRUCTOR = 'DESTRUCTOR'
TAB_INF_DEFINES_PCI_VENDOR_ID = 'PCI_VENDOR_ID'
TAB_INF_DEFINES_PCI_DEVICE_ID = 'PCI_DEVICE_ID'
TAB_INF_DEFINES_PCI_CLASS_CODE = 'PCI_CLASS_CODE'
TAB_INF_DEFINES_PCI_REVISION = 'PCI_REVISION'
TAB_INF_DEFINES_PCI_COMPRESS = 'PCI_COMPRESS'
TAB_INF_DEFINES_DEFINE = 'DEFINE'
TAB_INF_DEFINES_SPEC = 'SPEC'
TAB_INF_DEFINES_UEFI_HII_RESOURCE_SECTION = 'UEFI_HII_RESOURCE_SECTION'
TAB_INF_DEFINES_CUSTOM_MAKEFILE = 'CUSTOM_MAKEFILE'
TAB_INF_DEFINES_MACRO = '__MACROS__'
TAB_INF_DEFINES_SHADOW = 'SHADOW'
TAB_INF_DEFINES_DPX_SOURCE = 'DPX_SOURCE'
# INF PCD section base names (without access-type composition).
TAB_INF_FIXED_PCD = 'FixedPcd'
TAB_INF_FEATURE_PCD = 'FeaturePcd'
TAB_INF_PATCH_PCD = 'PatchPcd'
TAB_INF_PCD = 'Pcd'
TAB_INF_PCD_EX = 'PcdEx'
TAB_INF_GUIDTYPE_VAR = 'Variable'
+
#
# Dec Definitions: key names accepted in a DEC [Defines] section.
#
TAB_DEC_DEFINES = TAB_COMMON_DEFINES
TAB_DEC_DEFINES_DEC_SPECIFICATION = 'DEC_SPECIFICATION'
TAB_DEC_DEFINES_PACKAGE_NAME = 'PACKAGE_NAME'
TAB_DEC_DEFINES_PACKAGE_GUID = 'PACKAGE_GUID'
TAB_DEC_DEFINES_PACKAGE_VERSION = 'PACKAGE_VERSION'
TAB_DEC_DEFINES_PKG_UNI_FILE = 'PKG_UNI_FILE'

#
# Dsc Definitions: key names accepted in a DSC [Defines] section.
#
TAB_DSC_DEFINES = TAB_COMMON_DEFINES
TAB_DSC_DEFINES_PLATFORM_NAME = 'PLATFORM_NAME'
TAB_DSC_DEFINES_PLATFORM_GUID = 'PLATFORM_GUID'
TAB_DSC_DEFINES_PLATFORM_VERSION = 'PLATFORM_VERSION'
TAB_DSC_DEFINES_DSC_SPECIFICATION = 'DSC_SPECIFICATION'
TAB_DSC_DEFINES_OUTPUT_DIRECTORY = 'OUTPUT_DIRECTORY'
TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES = 'SUPPORTED_ARCHITECTURES'
TAB_DSC_DEFINES_BUILD_TARGETS = 'BUILD_TARGETS'
TAB_DSC_DEFINES_SKUID_IDENTIFIER = 'SKUID_IDENTIFIER'
TAB_DSC_DEFINES_FLASH_DEFINITION = 'FLASH_DEFINITION'
TAB_DSC_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
TAB_DSC_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
TAB_DSC_DEFINES_BS_BASE_ADDRESS = 'BsBaseAddress'
TAB_DSC_DEFINES_RT_BASE_ADDRESS = 'RtBaseAddress'
TAB_DSC_DEFINES_DEFINE = 'DEFINE'
TAB_FIX_LOAD_TOP_MEMORY_ADDRESS = 'FIX_LOAD_TOP_MEMORY_ADDRESS'

#
# TargetTxt Definitions: keys recognized in Conf/target.txt.
#
TAB_TAT_DEFINES_ACTIVE_PLATFORM = 'ACTIVE_PLATFORM'
TAB_TAT_DEFINES_ACTIVE_MODULE = 'ACTIVE_MODULE'
TAB_TAT_DEFINES_TOOL_CHAIN_CONF = 'TOOL_CHAIN_CONF'
TAB_TAT_DEFINES_MULTIPLE_THREAD = 'MULTIPLE_THREAD'
TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER = 'MAX_CONCURRENT_THREAD_NUMBER'
TAB_TAT_DEFINES_TARGET = 'TARGET'
TAB_TAT_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
TAB_TAT_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
TAB_TAT_DEFINES_BUILD_RULE_CONF = "BUILD_RULE_CONF"

#
# ToolDef Definitions: key fields of Conf/tools_def.txt entries.
#
TAB_TOD_DEFINES_TARGET = 'TARGET'
TAB_TOD_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
TAB_TOD_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
TAB_TOD_DEFINES_COMMAND_TYPE = 'COMMAND_TYPE'
TAB_TOD_DEFINES_FAMILY = 'FAMILY'
TAB_TOD_DEFINES_BUILDRULEFAMILY = 'BUILDRULEFAMILY'

#
# Conditional Statements
#
TAB_IF = '!if'
TAB_END_IF = '!endif'
TAB_ELSE_IF = '!elseif'
TAB_ELSE = '!else'
TAB_IF_DEF = '!ifdef'
TAB_IF_N_DEF = '!ifndef'
TAB_IF_EXIST = '!if exist'

#
# Unknown section
#
TAB_UNKNOWN = 'UNKNOWN'

#
# Header section (virtual section for abstract, description, copyright,
# license)
#
TAB_HEADER = 'Header'
TAB_HEADER_ABSTRACT = 'Abstract'
TAB_HEADER_DESCRIPTION = 'Description'
TAB_HEADER_COPYRIGHT = 'Copyright'
TAB_HEADER_LICENSE = 'License'
#
# Build database path (SQLite in-memory database)
#
DATABASE_PATH = ":memory:"
#
# used by ECC
#
MODIFIER_LIST = ['IN', 'OUT', 'OPTIONAL', 'UNALIGNED', 'EFI_RUNTIMESERVICE', \
                 'EFI_BOOTSERVICE', 'EFIAPI']
#
# Dependency Expression
#
DEPEX_SUPPORTED_OPCODE = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", \
                          "END", "SOR", "TRUE", "FALSE", '(', ')']

#
# File-type tags for source/binary files.
#
TAB_STATIC_LIBRARY = "STATIC-LIBRARY-FILE"
TAB_DYNAMIC_LIBRARY = "DYNAMIC-LIBRARY-FILE"
TAB_FRAMEWORK_IMAGE = "EFI-IMAGE-FILE"
TAB_C_CODE_FILE = "C-CODE-FILE"
TAB_C_HEADER_FILE = "C-HEADER-FILE"
TAB_UNICODE_FILE = "UNICODE-TEXT-FILE"
TAB_DEPENDENCY_EXPRESSION_FILE = "DEPENDENCY-EXPRESSION-FILE"
TAB_UNKNOWN_FILE = "UNKNOWN-TYPE-FILE"
TAB_DEFAULT_BINARY_FILE = "_BINARY_FILE_"
#
# used to indicate the state of processing header comment section of dec,
# inf files
#
HEADER_COMMENT_NOT_STARTED = -1
HEADER_COMMENT_STARTED = 0
HEADER_COMMENT_FILE = 1
HEADER_COMMENT_ABSTRACT = 2
HEADER_COMMENT_DESCRIPTION = 3
HEADER_COMMENT_COPYRIGHT = 4
HEADER_COMMENT_LICENSE = 5
HEADER_COMMENT_END = 6
+
#
# Static values for data models.  IDs are grouped by thousands:
# 1xxx files, 2xxx source identifiers, 3xxx EFI elements, 4xxx PCDs,
# 5xxx meta-data statements.
#
MODEL_UNKNOWN = 0

# File-type models (1xxx).
MODEL_FILE_C = 1001
MODEL_FILE_H = 1002
MODEL_FILE_ASM = 1003
MODEL_FILE_INF = 1011
MODEL_FILE_DEC = 1012
MODEL_FILE_DSC = 1013
MODEL_FILE_FDF = 1014
MODEL_FILE_INC = 1015
MODEL_FILE_CIF = 1016

# Source-code identifier models (2xxx).
MODEL_IDENTIFIER_FILE_HEADER = 2001
MODEL_IDENTIFIER_FUNCTION_HEADER = 2002
MODEL_IDENTIFIER_COMMENT = 2003
MODEL_IDENTIFIER_PARAMETER = 2004
MODEL_IDENTIFIER_STRUCTURE = 2005
MODEL_IDENTIFIER_VARIABLE = 2006
MODEL_IDENTIFIER_INCLUDE = 2007
MODEL_IDENTIFIER_PREDICATE_EXPRESSION = 2008
MODEL_IDENTIFIER_ENUMERATE = 2009
MODEL_IDENTIFIER_PCD = 2010
MODEL_IDENTIFIER_UNION = 2011
MODEL_IDENTIFIER_MACRO_IFDEF = 2012
MODEL_IDENTIFIER_MACRO_IFNDEF = 2013
MODEL_IDENTIFIER_MACRO_DEFINE = 2014
MODEL_IDENTIFIER_MACRO_ENDIF = 2015
MODEL_IDENTIFIER_MACRO_PROGMA = 2016
MODEL_IDENTIFIER_FUNCTION_CALLING = 2018
MODEL_IDENTIFIER_TYPEDEF = 2017
MODEL_IDENTIFIER_FUNCTION_DECLARATION = 2019
MODEL_IDENTIFIER_ASSIGNMENT_EXPRESSION = 2020

# EFI meta-data element models (3xxx).
MODEL_EFI_PROTOCOL = 3001
MODEL_EFI_PPI = 3002
MODEL_EFI_GUID = 3003
MODEL_EFI_LIBRARY_CLASS = 3004
MODEL_EFI_LIBRARY_INSTANCE = 3005
MODEL_EFI_PCD = 3006
MODEL_EFI_SOURCE_FILE = 3007
MODEL_EFI_BINARY_FILE = 3008
MODEL_EFI_SKU_ID = 3009
MODEL_EFI_INCLUDE = 3010
MODEL_EFI_DEPEX = 3011

# PCD models (4xxx), one per PCD access type.
MODEL_PCD = 4000
MODEL_PCD_FIXED_AT_BUILD = 4001
MODEL_PCD_PATCHABLE_IN_MODULE = 4002
MODEL_PCD_FEATURE_FLAG = 4003
MODEL_PCD_DYNAMIC_EX = 4004
MODEL_PCD_DYNAMIC_EX_DEFAULT = 4005
MODEL_PCD_DYNAMIC_EX_VPD = 4006
MODEL_PCD_DYNAMIC_EX_HII = 4007
MODEL_PCD_DYNAMIC = 4008
MODEL_PCD_DYNAMIC_DEFAULT = 4009
MODEL_PCD_DYNAMIC_VPD = 4010
MODEL_PCD_DYNAMIC_HII = 4011

# Meta-data statement models (5xxx).
MODEL_META_DATA_FILE_HEADER = 5000
MODEL_META_DATA_HEADER = 5001
MODEL_META_DATA_INCLUDE = 5002
MODEL_META_DATA_DEFINE = 5003
MODEL_META_DATA_CONDITIONAL_STATEMENT_IF = 5004
MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE = 5005
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF = 5006
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF = 5007
MODEL_META_DATA_BUILD_OPTION = 5008
MODEL_META_DATA_COMPONENT = 5009
MODEL_META_DATA_USER_EXTENSION = 5010
MODEL_META_DATA_PACKAGE = 5011
MODEL_META_DATA_NMAKE = 5012
# Fixed: this was 50013, an obvious typo that broke the otherwise
# contiguous 5012/5013/5014 run of meta-data model IDs.
MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSEIF = 5013
MODEL_META_DATA_CONDITIONAL_STATEMENT_ENDIF = 5014
MODEL_META_DATA_COMPONENT_SOURCE_OVERRIDE_PATH = 5015

# Toolchain families recognized in build options.
TOOL_FAMILY_LIST = ["MSFT",
                    "INTEL",
                    "GCC",
                    "RVCT"
                    ]

# Special comment section types parsed from source files.
TYPE_HOB_SECTION = 'HOB'
TYPE_EVENT_SECTION = 'EVENT'
TYPE_BOOTMODE_SECTION = 'BOOTMODE'
diff --git a/BaseTools/Source/Python/UPT/Library/ExpressionValidate.py b/BaseTools/Source/Python/UPT/Library/ExpressionValidate.py
new file mode 100644
index 0000000000..91041c7a64
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/ExpressionValidate.py
@@ -0,0 +1,489 @@
+## @file
+# This file is used to check PCD logical expression
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+'''
+ExpressionValidate
+'''
+
+##
+# Import Modules
+#
+import re
+from Logger import StringTable as ST
+
## IsValidBareCString
#
# Check that every character of String is space (0x20), tab (0x09),
# '!' (0x21), in the printable range 0x23-0x7E, or part of one of the
# escape sequences '\n', '\t', '\f', '\r', '\b', '\0', '\\', '\"'.
# A trailing unescaped backslash makes the string invalid.
#
# @param String: string to be checked
#
def IsValidBareCString(String):
    ValidEscapes = ('n', 't', 'f', 'r', 'b', '0', '\\', '"')
    Previous = ''
    Current = ''
    for Current in String:
        if Previous == '\\':
            # We are inside an escape sequence: only a recognized
            # escape character may follow the backslash.
            if Current not in ValidEscapes:
                return False
            if Current == '\\':
                # '\\' completes the escape; it must not open a new one.
                Previous = ''
                continue
        else:
            Code = ord(Current)
            InPrintableRange = 0x23 <= Code <= 0x7e
            if Code not in (0x20, 0x09, 0x21) and not InPrintableRange:
                return False
        Previous = Current
    # Reject a lone backslash at the end of the string.
    return not (Current == '\\' and Previous == Current)
+
def _ValidateToken(Token):
    # Strip the token; when it contains a double quote, validate the
    # characters between the first '"' and the token's final character
    # as a bare C string.  Tokens without quotes are accepted as-is.
    Stripped = Token.strip()
    QuotePos = Stripped.find("\"")
    if QuotePos == -1:
        return True
    return IsValidBareCString(Stripped[QuotePos + 1:-1])
+
+## _ExprError
+#
+# @param Exception: Exception
+#
+class _ExprError(Exception):
+ def __init__(self, Error = ''):
+ Exception.__init__(self)
+ self.Error = Error
+
+## _ExprBase
+#
+class _ExprBase:
+ HEX_PATTERN = '[\t\s]*0[xX][a-fA-F0-9]+'
+ INT_PATTERN = '[\t\s]*[0-9]+'
+ MACRO_PATTERN = '[\t\s]*\$\(([A-Z][_A-Z0-9]*)\)'
+ PCD_PATTERN = \
+ '[\t\s]*[_a-zA-Z][a-zA-Z0-9_]*[\t\s]*\.[\t\s]*[_a-zA-Z][a-zA-Z0-9_]*'
+ QUOTED_PATTERN = '[\t\s]*L?"[^"]*"'
+ BOOL_PATTERN = '[\t\s]*(true|True|TRUE|false|False|FALSE)'
+ def __init__(self, Token):
+ self.Token = Token
+ self.Index = 0
+ self.Len = len(Token)
+
+ ## SkipWhitespace
+ #
+ def SkipWhitespace(self):
+ for Char in self.Token[self.Index:]:
+ if Char not in ' \t':
+ break
+ self.Index += 1
+
+ ## IsCurrentOp
+ #
+ # @param OpList: option list
+ #
+ def IsCurrentOp(self, OpList):
+ self.SkipWhitespace()
+ LetterOp = ["EQ", "NE", "GE", "LE", "GT", "LT", "NOT", "and", "AND",
+ "or", "OR", "XOR"]
+ OpMap = {
+ '|' : '|',
+ '&' : '&',
+ '!' : '=',
+ '>' : '=',
+ '<' : '='
+ }
+ for Operator in OpList:
+ if not self.Token[self.Index:].startswith(Operator):
+ continue
+ self.Index += len(Operator)
+ Char = self.Token[self.Index : self.Index + 1]
+ if (Operator in LetterOp and (Char == '_' or Char.isalnum())) \
+ or (Operator in OpMap and OpMap[Operator] == Char):
+ self.Index -= len(Operator)
+ break
+ return True
+ return False
+
## _LogicalExpressionParser
#
# Recursive-descent syntax checker for PCD logical expressions.  Each
# parse method consumes text from self.Token through the cursor in
# self.Index and returns one of the classification constants below so
# callers can distinguish logical from arithmetic sub-expressions.
#
# @param _ExprBase: _ExprBase object
#
class _LogicalExpressionParser(_ExprBase):
    #
    # STRINGITEM can only be logical field according to spec
    #
    STRINGITEM = -1

    #
    # Evaluate to True or False
    #
    LOGICAL = 0
    REALLOGICAL = 2

    #
    # Just arithmetic expression
    #
    ARITH = 1

    def __init__(self, Token):
        _ExprBase.__init__(self, Token)
        # Depth of currently-open parentheses; '|' operators are only
        # legal inside parentheses (see LogicalExpression and Expr).
        self.Parens = 0

    ## _CheckToken
    #
    # Try each regex match in MatchList at the cursor and consume the
    # first acceptable one, returning True.  A match is rejected (cursor
    # restored, False returned) when its quoted part is not a valid C
    # string, when it is immediately followed by an identifier character
    # (it was only a prefix of a longer word), or when the matched text
    # is itself an operator keyword.
    #
    # @param MatchList: candidate re match objects, tried in order
    #
    def _CheckToken(self, MatchList):
        for Match in MatchList:
            if Match and Match.start() == 0:
                if not _ValidateToken(
                    self.Token[self.Index:self.Index+Match.end()]
                    ):
                    return False

                self.Index += Match.end()
                # A quoted string is always acceptable as matched.
                if self.Token[self.Index - 1] == '"':
                    return True
                if self.Token[self.Index:self.Index+1] == '_' or \
                    self.Token[self.Index:self.Index+1].isalnum():
                    self.Index -= Match.end()
                    return False

                Token = self.Token[self.Index - Match.end():self.Index]
                if Token.strip() in ["EQ", "NE", "GE", "LE", "GT", "LT",
                                     "NOT", "and", "AND", "or", "OR", "XOR"]:
                    self.Index -= Match.end()
                    return False

                return True
        return False

    ## IsAtomicNumVal
    #
    # Consume one numeric atom at the cursor: hex number, decimal
    # number, $(MACRO) or PcdName.  Returns True when one was consumed.
    #
    def IsAtomicNumVal(self):
        #
        # Hex number
        #
        Match1 = re.compile(self.HEX_PATTERN).match(self.Token[self.Index:])

        #
        # Number
        #
        Match2 = re.compile(self.INT_PATTERN).match(self.Token[self.Index:])

        #
        # Macro
        #
        Match3 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])

        #
        # PcdName
        #
        Match4 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])

        return self._CheckToken([Match1, Match2, Match3, Match4])


    ## IsAtomicItem
    #
    # Consume one string-valued atom at the cursor: $(MACRO), PcdName or
    # quoted string.  Escaped backslashes/quotes are masked before the
    # quoted-string match so they cannot terminate the string early.
    #
    def IsAtomicItem(self):
        #
        # Macro
        #
        Match1 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])

        #
        # PcdName
        #
        Match2 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])

        #
        # Quoted string
        #
        Match3 = re.compile(self.QUOTED_PATTERN).\
        match(self.Token[self.Index:].replace('\\\\', '//').\
              replace('\\\"', '\\\''))

        return self._CheckToken([Match1, Match2, Match3])

    ## A || B
    #
    # Top-level rule: SpecNot (op SpecNot)* where op is one of the
    # logical connectives.  A bare '|'/'||' is rejected outside
    # parentheses, and both operands must themselves be logical.
    #
    def LogicalExpression(self):
        Ret = self.SpecNot()
        while self.IsCurrentOp(['||', 'OR', 'or', '&&', 'AND', 'and', 'XOR']):
            if self.Token[self.Index-1] == '|' and self.Parens <= 0:
                raise _ExprError(ST.ERR_EXPR_OR)
            if Ret == self.ARITH:
                raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
            Ret = self.SpecNot()
            if Ret == self.ARITH:
                raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
            Ret = self.REALLOGICAL
        return Ret

    ## SpecNot
    #
    # Zero or more NOT/'!' prefixes followed by a relational expression.
    #
    def SpecNot(self):
        if self.IsCurrentOp(["NOT", "!"]):
            return self.SpecNot()
        return self.Rel()

    ## A < B, A > B, A <= B, A >= b
    #
    # Comparison of two arithmetic expressions; the result is logical,
    # so neither operand may already be a string item or logical value.
    #
    def Rel(self):
        Ret = self.Expr()
        if self.IsCurrentOp(["<=", ">=", ">", "<", "GT", "LT", "GE", "LE",
                             "==", "EQ", "!=", "NE"]):
            if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
                raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
            Ret = self.Expr()
            if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
                raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
            Ret = self.REALLOGICAL
        return Ret

    ## A + B, A - B
    #
    # Arithmetic/bitwise combination of factors; a bare '|' is again
    # only allowed inside parentheses.
    #
    def Expr(self):
        Ret = self.Factor()
        while self.IsCurrentOp(["+", "-", "&", "|", "^"]):
            if self.Token[self.Index-1] == '|' and self.Parens <= 0:
                raise _ExprError(ST.ERR_EXPR_OR)
            if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
                raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
            Ret = self.Factor()
            if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
                raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
            Ret = self.ARITH
        return Ret

    ## Factor
    #
    # A parenthesized sub-expression, a string/macro/PCD item, or a
    # numeric atom.  Note that IsAtomicItem is tried before
    # IsAtomicNumVal, so macros and PCD names classify as LOGICAL.
    #
    def Factor(self):
        if self.IsCurrentOp(["("]):
            self.Parens += 1
            Ret = self.LogicalExpression()
            if not self.IsCurrentOp([")"]):
                raise _ExprError(ST.ERR_EXPR_RIGHT_PAREN % \
                                 (self.Token, self.Token[self.Index:]))
            self.Parens -= 1
            return Ret

        if self.IsAtomicItem():
            if self.Token[self.Index - 1] == '"':
                return self.STRINGITEM
            return self.LOGICAL
        elif self.IsAtomicNumVal():
            return self.ARITH
        else:
            raise _ExprError(ST.ERR_EXPR_FACTOR % \
                             (self.Token, self.Token[self.Index:]))

    ## IsValidLogicalExpression
    #
    # Entry point: returns (True, '') when the whole token parses as a
    # boolean expression, otherwise (False, reason).  A purely
    # arithmetic result or trailing unparsed text is an error.
    #
    def IsValidLogicalExpression(self):
        if self.Len == 0:
            return False, ST.ERR_EXPR_EMPTY
        try:
            if self.LogicalExpression() == self.ARITH:
                return False, ST.ERR_EXPR_LOGICAL % self.Token
        except _ExprError, XExcept:
            return False, XExcept.Error
        self.SkipWhitespace()
        if self.Index != self.Len:
            return False, (ST.ERR_EXPR_BOOLEAN % \
                           (self.Token[self.Index:], self.Token))
        return True, ''
+
## _ValidRangeExpressionParser
#
# Recursive-descent checker for PCD ValidRange expressions: "low - high"
# range literals (decimal or hex) or comparison bounds (LT/GT/LE/GE/EQ
# number), combined with OR/AND/XOR, optional NOT/'-' prefixes, and
# parentheses.
#
class _ValidRangeExpressionParser(_ExprBase):
    # "low - high" range literals, decimal and hexadecimal forms.
    INT_RANGE_PATTERN = '[\t\s]*[0-9]+[\t\s]*-[\t\s]*[0-9]+'
    HEX_RANGE_PATTERN = \
    '[\t\s]*0[xX][a-fA-F0-9]+[\t\s]*-[\t\s]*0[xX][a-fA-F0-9]+'
    def __init__(self, Token):
        _ExprBase.__init__(self, Token)

    ## IsValidRangeExpression
    #
    # Entry point: True when the whole token parses as a range
    # expression with no trailing text, False otherwise.
    #
    def IsValidRangeExpression(self):
        if self.Len == 0:
            return False
        try:
            self.RangeExpression()
        except _ExprError:
            return False
        self.SkipWhitespace()
        # The expression must consume the entire token.
        if self.Index != self.Len:
            return False
        return True

    ## RangeExpression
    #
    # unary (OR|AND|XOR unary)*
    #
    def RangeExpression(self):
        self.Unary()
        while self.IsCurrentOp(['OR', 'AND', 'XOR']):
            self.Unary()

    ## Unary
    #
    # (NOT|-)* valid-range
    #
    def Unary(self):
        if self.IsCurrentOp(["NOT", "-"]):
            return self.Unary()
        return self.ValidRange()

    ## ValidRange
    #
    # Either a parenthesized sub-expression, a comparison bound
    # followed by a number, or a "low - high" range literal; in each
    # case hex is tried before decimal, and the matched number must not
    # run directly into an identifier character.
    #
    def ValidRange(self):
        if self.IsCurrentOp(["("]):
            self.RangeExpression()
            if not self.IsCurrentOp([")"]):
                raise _ExprError('')
            return

        if self.IsCurrentOp(["LT", "GT", "LE", "GE", "EQ"]):
            IntMatch = \
            re.compile(self.INT_PATTERN).match(self.Token[self.Index:])
            HexMatch = \
            re.compile(self.HEX_PATTERN).match(self.Token[self.Index:])
            if HexMatch and HexMatch.start() == 0:
                self.Index += HexMatch.end()
            elif IntMatch and IntMatch.start() == 0:
                self.Index += IntMatch.end()
            else:
                raise _ExprError('')
        else:
            IntRangeMatch = re.compile(
                self.INT_RANGE_PATTERN).match(self.Token[self.Index:]
                )
            HexRangeMatch = re.compile(
                self.HEX_RANGE_PATTERN).match(self.Token[self.Index:]
                )
            if HexRangeMatch and HexRangeMatch.start() == 0:
                self.Index += HexRangeMatch.end()
            elif IntRangeMatch and IntRangeMatch.start() == 0:
                self.Index += IntRangeMatch.end()
            else:
                raise _ExprError('')

        # Reject numbers that run into an identifier, e.g. "10abc".
        if self.Token[self.Index:self.Index+1] == '_' or \
            self.Token[self.Index:self.Index+1].isalnum():
            raise _ExprError('')
+
## _StringTestParser
#
# Checker for string comparison expressions of the form
# <string-item> (== | EQ | != | NE) <string-item>, where a string item
# is a quoted string, a $(MACRO) or a PcdName.
#
class _StringTestParser(_ExprBase):
    def __init__(self, Token):
        _ExprBase.__init__(self, Token)

    ## IsValidStringTest
    #
    # @return (True, '') when the token parses as a string test,
    #         otherwise (False, reason).
    #
    def IsValidStringTest(self):
        if self.Len == 0:
            return False, ST.ERR_EXPR_EMPTY
        try:
            self.StringTest()
        except _ExprError as XExcept:
            return False, XExcept.Error
        return True, ''

    ## StringItem
    #
    # Consume one string item at the cursor; raise _ExprError when none
    # is present, when its quoted part is not a valid C string, or when
    # the matched word is an equality operator keyword.
    #
    def StringItem(self):
        # Mask escaped backslashes and escaped quotes first so they
        # cannot terminate the quoted-string match early.
        Masked = self.Token[self.Index:].replace('\\\\', '//') \
                                        .replace('\\\"', '\\\'')
        Candidates = [
            re.compile(self.QUOTED_PATTERN).match(Masked),
            re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:]),
            re.compile(self.PCD_PATTERN).match(self.Token[self.Index:]),
        ]
        for Match in Candidates:
            if not Match or Match.start() != 0:
                continue
            if not _ValidateToken(
                    self.Token[self.Index:self.Index + Match.end()]):
                raise _ExprError(ST.ERR_EXPR_STRING_ITEM % \
                                 (self.Token, self.Token[self.Index:]))
            self.Index += Match.end()
            Consumed = self.Token[self.Index - Match.end():self.Index]
            if Consumed.strip() in ["EQ", "NE"]:
                raise _ExprError(ST.ERR_EXPR_STRING_ITEM % \
                                 (self.Token, self.Token[self.Index:]))
            return
        raise _ExprError(ST.ERR_EXPR_STRING_ITEM % \
                         (self.Token, self.Token[self.Index:]))

    ## StringTest
    #
    # <string-item> <equality-op> <string-item>, consuming the whole
    # token; trailing text is an error.
    #
    def StringTest(self):
        self.StringItem()
        if not self.IsCurrentOp(["==", "EQ", "!=", "NE"]):
            raise _ExprError(ST.ERR_EXPR_EQUALITY % \
                             (self.Token, self.Token[self.Index:]))
        self.StringItem()
        if self.Index != self.Len:
            raise _ExprError(ST.ERR_EXPR_BOOLEAN % \
                             (self.Token[self.Index:], self.Token))
+
##
# Check syntax of logical expression
#
# @param Token: expression token
# @param Flag: True to actually run the syntax check; the default False
#              skips validation and reports success.
#
def IsValidLogicalExpr(Token, Flag=False):
    if Flag:
        return _LogicalExpressionParser(Token).IsValidLogicalExpression()
    #
    # Validation is intentionally disabled by default; the parser is
    # kept for future enhancement.
    #
    return True, ""
+
##
# Check syntax of string test
#
# @param Token: string test token
# @param Flag: True to actually run the syntax check; the default False
#              skips validation and reports success.
#
def IsValidStringTest(Token, Flag=False):
    if Flag:
        return _StringTestParser(Token).IsValidStringTest()
    #
    # Validation is intentionally disabled by default; the parser is
    # kept for future enhancement.
    #
    return True, ""
+
##
# Check syntax of range expression
#
# @param Token: range expression token
#
def IsValidRangeExpr(Token):
    # Unlike the other wrappers, range checking is always performed.
    Parser = _ValidRangeExpressionParser(Token)
    return Parser.IsValidRangeExpression()
+
##
# Check whether the feature flag expression is valid or not
#
# @param Token: feature flag expression
# @param Flag: True to actually run the syntax check; the default False
#              accepts any token.
#
def IsValidFeatureFlagExp(Token, Flag=False):
    if not Flag:
        #
        # Validation is intentionally disabled by default; kept for
        # future enhancement.  NOTE(review): this branch returns a
        # 3-tuple while the validating branch below returns 2-tuples;
        # preserved as-is for compatibility with existing callers.
        #
        return True, "", Token
    TrivialValues = ('TRUE', 'FALSE', 'true', 'false', 'True', 'False',
                     '0x1', '0x01', '0x0', '0x00')
    if Token in TrivialValues:
        return True, ""
    # Accept the token if it parses as either a string test or a
    # logical expression; report the last failure cause otherwise.
    Valid, Cause = IsValidStringTest(Token, Flag)
    if not Valid:
        Valid, Cause = IsValidLogicalExpr(Token, Flag)
    if not Valid:
        return False, Cause
    return True, ""
+
if __name__ == '__main__':
    # Ad-hoc smoke test for manual runs of this module.
    print(_LogicalExpressionParser('a ^ b > a + b').IsValidLogicalExpression())
diff --git a/BaseTools/Source/Python/UPT/Library/GlobalData.py b/BaseTools/Source/Python/UPT/Library/GlobalData.py
new file mode 100644
index 0000000000..fedd981529
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/GlobalData.py
@@ -0,0 +1,94 @@
+## @file
+# This file is used to define common static strings and global data used by UPT
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
'''
GlobalData

Mutable module-level state and constant strings shared across UPT.
'''

#
# The workspace directory
#
gWORKSPACE = '.'

#
# Directory and file name of the INF module currently being processed
#
gINF_MODULE_DIR = "."
gINF_MODULE_NAME = ''

#
# The directory that holds UPT related files
#
gUPT_DIR = r"Conf/upt/"

#
# Log file for invalid meta-data files during force removing
#
gINVALID_MODULE_FILE = gUPT_DIR + r"Invalid_Modules.log"

#
# File name for content zip file in the distribution
#
gCONTENT_FILE = "dist.content"

#
# File name for XML file in the distribution
#
gDESC_FILE = 'dist.pkg'

#
# Case insensitive flag (empty string means case sensitive)
#
gCASE_INSENSITIVE = ''

#
# Dictionary of all files known to the workspace
#
gALL_FILES = {}

#
# Database instance (set at startup)
#
gDB = None

#
# List for files that are found in module level but not in INF files,
# items are (File, ModulePath), all these should be relative to $(WORKSPACE)
#
gMISS_FILE_IN_MODLIST = []

#
# Line of the INF file currently being parsed (for error reporting)
#
gINF_CURRENT_LINE = None

#
# List of packages found in the workspace
#
gWSPKG_LIST = []

#
# Flag used to take WARN as ERROR.
# By default, only ERROR message will break the tools execution.
#
gWARNING_AS_ERROR = False

#
# Used to specify the temp directory to hold the unpacked distribution files
#
gUNPACK_DIR = None

#
# Flag used to mark whether the INF file is Binary INF or not.
#
gIS_BINARY_INF = False
diff --git a/BaseTools/Source/Python/UPT/Library/Misc.py b/BaseTools/Source/Python/UPT/Library/Misc.py
new file mode 100644
index 0000000000..658c4e0cfb
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/Misc.py
@@ -0,0 +1,921 @@
+## @file
+# Common routines used by all tools
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+
+'''
+Misc
+'''
+
+##
+# Import Modules
+#
+import os.path
+from os import access
+from os import F_OK
+from os import makedirs
+from os import getcwd
+from os import chdir
+from os import listdir
+from os import remove
+from os import rmdir
+from os import linesep
+from os import walk
+from os import environ
+import re
+from UserDict import IterableUserDict
+
+import Logger.Log as Logger
+from Logger import StringTable as ST
+from Logger import ToolError
+from Library import GlobalData
+from Library.DataType import SUP_MODULE_LIST
+from Library.DataType import END_OF_LINE
+from Library.DataType import TAB_SPLIT
+from Library.DataType import LANGUAGE_EN_US
+from Library.String import GetSplitValueList
+from Library.ParserValidate import IsValidHexVersion
+from Library.ParserValidate import IsValidPath
+from Object.POM.CommonObject import TextObject
+
## Convert a registry-format GUID string (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)
# to the C structure initializer style
#
# @param Guid: The GUID string
#
def GuidStringToGuidStructureString(Guid):
    Fields = Guid.split('-')
    #
    # The first three dash-separated fields map directly to the first
    # three C structure members.
    #
    Head = ''.join('0x%s, ' % Fields[Idx] for Idx in range(3))
    #
    # The 2-byte fourth field and the 6-byte fifth field are emitted as
    # eight individual bytes inside the nested brace list.
    #
    Bytes = '{0x%s, 0x%s' % (Fields[3][0:2], Fields[3][2:4])
    for Idx in range(0, 12, 2):
        Bytes += ', 0x%s' % Fields[4][Idx:Idx + 2]
    return '{' + Head + Bytes + '}}'
+
## Check whether GUID string is of format xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue: The GUID value
#
def CheckGuidRegFormat(GuidValue):
    #
    # Registry format: five dash-separated hex fields of 8-4-4-4-12
    # digits, optionally surrounded by whitespace.
    #
    RegFormatPattern = re.compile(r"^\s*([0-9a-fA-F]){8}-"
                                  r"([0-9a-fA-F]){4}-"
                                  r"([0-9a-fA-F]){4}-"
                                  r"([0-9a-fA-F]){4}-"
                                  r"([0-9a-fA-F]){12}\s*$")
    return RegFormatPattern.match(GuidValue) is not None
+
+
## Convert GUID string in C structure style to
# xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# Returns '' when the input does not have exactly 11 members or a member
# is not a valid hex number.
#
# @param GuidValue: The GUID value in C structure format
#
def GuidStructureStringToGuidString(GuidValue):
    #
    # Strip the C punctuation so only the comma-separated hex numbers
    # remain.
    #
    Cleaned = GuidValue.lower()
    for Char in ('{', '}', ' ', ';'):
        Cleaned = Cleaned.replace(Char, '')
    Fields = Cleaned.split(',')
    #
    # A C-style GUID initializer has exactly 11 numeric members.
    #
    if len(Fields) != 11:
        return ''
    try:
        Numbers = [int(Field, 16) for Field in Fields]
    except BaseException:
        return ''
    return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % tuple(Numbers)
+
## Create directories
#
# Create Directory (including intermediate directories) if it does not
# already exist.  An empty/None name is a successful no-op.
#
# @param Directory: The directory name
#
def CreateDirectory(Directory):
    if Directory is None or not Directory.strip():
        return True
    try:
        if not access(Directory, F_OK):
            makedirs(Directory)
    except BaseException:
        # Creation failed (permissions, race, bad name, ...)
        return False
    return True
+
## Remove directories, including files and sub-directories in it
#
# @param Directory:   The directory name
# @param Recursively: Also delete the directory's contents first
#
def RemoveDirectory(Directory, Recursively=False):
    #
    # Silently ignore empty names and directories that do not exist.
    #
    if Directory is None or Directory.strip() == '' or \
       not os.path.exists(Directory):
        return
    if Recursively:
        #
        # Temporarily enter the directory so entries can be handled by
        # their relative names, then restore the working directory.
        #
        SavedCwd = getcwd()
        chdir(Directory)
        for Entry in listdir("."):
            if os.path.isdir(Entry):
                RemoveDirectory(Entry, Recursively)
            else:
                remove(Entry)
        chdir(SavedCwd)
    rmdir(Directory)
+
## Store content in file
#
# Write File only when Content differs from what is already on disk, so
# that timestamps stay stable for "make"-style dependency checking.
#
# @param File:         The path of file
# @param Content:      The new content of the file
# @param IsBinaryFile: The flag indicating if the file is binary file
#                      or not
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
    #
    # Text content is stored with the platform line separator.
    #
    if not IsBinaryFile:
        Content = Content.replace("\n", linesep)

    if os.path.exists(File):
        try:
            if open(File, "rb").read() == Content:
                # Unchanged content: leave the file untouched.
                return False
        except BaseException:
            Logger.Error(None, ToolError.FILE_OPEN_FAILURE, ExtraData=File)

    CreateDirectory(os.path.dirname(File))
    try:
        Output = open(File, "wb")
        Output.write(Content)
        Output.close()
    except BaseException:
        Logger.Error(None, ToolError.FILE_CREATE_FAILURE, ExtraData=File)

    return True
+
## Get all files of a directory
#
# Walk the tree below Root and collect file paths.  Hidden entries (names
# starting with '.') and any directory named in SkipList are pruned from
# the walk.
#
# @param Root:     Root dir
# @param SkipList: The files need be skipped
# @param FullPath: Return full paths when True, paths relative to Root
#                  otherwise
#
def GetFiles(Root, SkipList=None, FullPath=True):
    OriPath = os.path.normpath(Root)
    FileList = []
    for DirPath, Dirs, Files in walk(Root):
        #
        # Prune skipped and hidden directories.  The slice assignment
        # mutates the list in place so os.walk does not descend into
        # them.  (The original removed items while iterating the same
        # list, which skips the element following each removed one, so
        # adjacent hidden directories could survive pruning.)
        #
        Dirs[:] = [Dir for Dir in Dirs
                   if not Dir.startswith('.')
                   and not (SkipList and Dir in SkipList)]

        for File in Files:
            #
            # Hidden files are ignored as well.
            #
            if File.startswith('.'):
                continue
            File = os.path.normpath(os.path.join(DirPath, File))
            if not FullPath:
                # Strip the root prefix plus its trailing separator.
                File = File[len(OriPath) + 1:]
            FileList.append(File)

    return FileList
+
## Get all non-metadata files of a directory
#
# @param Root:       Root Dir
# @param SkipList:   List of path need be skipped
# @param FullPath:   True if the returned file should be full path
# @param PrefixPath: the path that need to be added to the files found
# @return: the list of files found
#
def GetNonMetaDataFiles(Root, SkipList, FullPath, PrefixPath):
    #
    # EDK II meta-data files ('.dec', '.inf', '.dsc', '.fdf') are
    # excluded from the result.
    #
    MetaDataExts = ('.dec', '.inf', '.dsc', '.fdf')
    Result = []
    for Found in GetFiles(Root, SkipList, FullPath):
        if os.path.splitext(Found)[1].lower() in MetaDataExts:
            continue
        Result.append(os.path.normpath(os.path.join(PrefixPath, Found)))
    return Result
+
## Check if given file exists or not
#
# @param File: File name or path to be checked
# @param Ext:  Expected file extension (case-insensitive), or None to
#              accept any extension
#
def ValidFile(File, Ext=None):
    # Normalize Windows separators before any path handling.
    File = File.replace('\\', '/')
    #
    # When an expected extension is given it must match, compared
    # case-insensitively.
    #
    if Ext is not None and os.path.splitext(File)[1].lower() != Ext.lower():
        return False
    return os.path.exists(File)
+
## RealPath
#
# Look the normalized Dir/File path up in the global file table; fall
# back to OverrideDir when the first lookup yields nothing.
#
# @param File:        File name or path to be checked
# @param Dir:         The directory the file is relative to
# @param OverrideDir: The override directory
#
def RealPath(File, Dir='', OverrideDir=''):
    Candidate = GlobalData.gALL_FILES[os.path.normpath(os.path.join(Dir, File))]
    if not Candidate and OverrideDir:
        Candidate = GlobalData.gALL_FILES[
            os.path.normpath(os.path.join(OverrideDir, File))]
    return Candidate
+
## RealPath2
#
# Resolve File against OverrideDir first, then Dir, using the global
# file table.  Returns a (relative file, root) pair, or (None, None)
# when no table entry matches.
#
# @param File:        File name or path to be checked
# @param Dir:         The directory the file is relative to
# @param OverrideDir: The override directory
#
def RealPath2(File, Dir='', OverrideDir=''):
    if OverrideDir:
        NewFile = GlobalData.gALL_FILES[os.path.normpath(os.path.join\
                                                    (OverrideDir, File))]
        if NewFile:
            # Split off the override root; account for a root that may or
            # may not already end with the path separator.
            if OverrideDir[-1] == os.path.sep:
                return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
            else:
                return NewFile[len(OverrideDir) + 1:], \
                       NewFile[0:len(OverrideDir)]

    NewFile = GlobalData.gALL_FILES[os.path.normpath(os.path.join(Dir, File))]
    if NewFile:
        if Dir:
            # Same separator handling as above, for the regular root.
            if Dir[-1] == os.path.sep:
                return NewFile[len(Dir):], NewFile[0:len(Dir)]
            else:
                return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
        else:
            # No root given: the table entry itself is the answer.
            return NewFile, ''

    return None, None
+
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict which its keys or values can be
# accessed in the order they are added into the dict. It guarantees the order
# by making use of an internal list to keep a copy of keys.
#
# NOTE(review): IterableUserDict comes from the Python-2-only UserDict
# module; this class is not importable under Python 3 as-is.
#
class Sdict(IterableUserDict):
    ## Constructor
    #
    def __init__(self):
        IterableUserDict.__init__(self)
        # Keys in insertion order; single source of truth for ordering.
        self._key_list = []

    ## [] operator
    #
    # Re-assigning an existing key keeps its original position.
    #
    def __setitem__(self, Key, Value):
        if Key not in self._key_list:
            self._key_list.append(Key)
        IterableUserDict.__setitem__(self, Key, Value)

    ## del operator
    #
    def __delitem__(self, Key):
        self._key_list.remove(Key)
        IterableUserDict.__delitem__(self, Key)

    ## used in "for k in dict" loop to ensure the correct order
    #
    def __iter__(self):
        return self.iterkeys()

    ## len() support
    #
    def __len__(self):
        return len(self._key_list)

    ## "in" test support
    #
    def __contains__(self, Key):
        return Key in self._key_list

    ## indexof support
    #
    # Position of Key in insertion order.
    #
    def index(self, Key):
        return self._key_list.index(Key)

    ## insert support
    #
    # Insert (Newkey, Newvalue) immediately 'BEFORE' or 'AFTER' the
    # existing Key; any other Order value is silently ignored.
    #
    def insert(self, Key, Newkey, Newvalue, Order):
        Index = self._key_list.index(Key)
        if Order == 'BEFORE':
            self._key_list.insert(Index, Newkey)
            IterableUserDict.__setitem__(self, Newkey, Newvalue)
        elif Order == 'AFTER':
            self._key_list.insert(Index + 1, Newkey)
            IterableUserDict.__setitem__(self, Newkey, Newvalue)

    ## append support
    #
    # Merge another dict; keys not seen before are appended at the end.
    #
    def append(self, Sdict2):
        for Key in Sdict2:
            if Key not in self._key_list:
                self._key_list.append(Key)
            IterableUserDict.__setitem__(self, Key, Sdict2[Key])
    ## hash key
    #
    # Membership test kept for Python-2 era dict compatibility.
    #
    def has_key(self, Key):
        return Key in self._key_list

    ## Empty the dict
    #
    def clear(self):
        self._key_list = []
        IterableUserDict.clear(self)

    ## Return a copy of keys
    #
    def keys(self):
        Keys = []
        for Key in self._key_list:
            Keys.append(Key)
        return Keys

    ## Return a copy of values
    #
    def values(self):
        Values = []
        for Key in self._key_list:
            Values.append(self[Key])
        return Values

    ## Return a copy of (key, value) list
    #
    def items(self):
        Items = []
        for Key in self._key_list:
            Items.append((Key, self[Key]))
        return Items

    ## Iteration support
    #
    def iteritems(self):
        return iter(self.items())

    ## Keys interation support
    #
    def iterkeys(self):
        return iter(self.keys())

    ## Values interation support
    #
    def itervalues(self):
        return iter(self.values())

    ## Return value related to a key, and remove the (key, value) from the dict
    #
    # An optional single default argument is returned when Key is absent;
    # with no default an absent Key yields None (unlike dict.pop, which
    # would raise KeyError).
    #
    def pop(self, Key, *Dv):
        Value = None
        if Key in self._key_list:
            Value = self[Key]
            self.__delitem__(Key)
        elif len(Dv) != 0 :
            Value = Dv[0]
        return Value

    ## Return (key, value) pair, and remove the (key, value) from the dict
    #
    # Removes the most recently appended key.
    #
    def popitem(self):
        Key = self._key_list[-1]
        Value = self[Key]
        self.__delitem__(Key)
        return Key, Value
    ## update method
    #
    # Merge a dict and/or keyword arguments via __setitem__ so ordering
    # is maintained.
    #
    def update(self, Dict=None, **Kwargs):
        if Dict != None:
            for Key1, Val1 in Dict.items():
                self[Key1] = Val1
        if len(Kwargs):
            for Key1, Val1 in Kwargs.items():
                self[Key1] = Val1
+
## CommonPath
#
# Return the longest common leading path of the paths in PathList.
#
# @param PathList: list of path strings
#
def CommonPath(PathList):
    #
    # The lexicographic minimum and maximum bound every other entry, so
    # comparing just those two is sufficient.
    #
    Path1 = min(PathList).split(os.path.sep)
    Path2 = max(PathList).split(os.path.sep)
    # range() instead of the Python-2-only xrange(); behavior identical.
    for Index in range(min(len(Path1), len(Path2))):
        if Path1[Index] != Path2[Index]:
            return os.path.sep.join(Path1[:Index])
    return os.path.sep.join(Path1)
+
## PathClass
#
# Wrapper for a file path split into a root directory and a file part,
# with helpers to validate the path against the global file table.
#
class PathClass(object):
    ## Constructor
    #
    # @param File:      file name or path (absolute paths ignore Root)
    # @param Root:      root directory the file is relative to
    # @param AlterRoot: alternative root used as a fallback in Validate()
    # @param Type:      explicit file type, used only when IsBinary
    # @param IsBinary:  binary file flag; when False the type is derived
    #                   from the extension
    #
    def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
                 Arch='COMMON', ToolChainFamily='', Target='', TagName='', \
                 ToolCode=''):
        self.Arch = Arch
        self.File = str(File)
        # An absolute file path makes both roots irrelevant.
        if os.path.isabs(self.File):
            self.Root = ''
            self.AlterRoot = ''
        else:
            self.Root = str(Root)
            self.AlterRoot = str(AlterRoot)

        #
        # Remove any '.' and '..' in path
        #
        if self.Root:
            self.Path = os.path.normpath(os.path.join(self.Root, self.File))
            self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
            #
            # eliminate the side-effect of 'C:'
            #
            if self.Root[-1] == ':':
                self.Root += os.path.sep
            #
            # file path should not start with path separator
            #
            if self.Root[-1] == os.path.sep:
                self.File = self.Path[len(self.Root):]
            else:
                self.File = self.Path[len(self.Root) + 1:]
        else:
            self.Path = os.path.normpath(self.File)

        # Cache the directory / base-name / extension decomposition.
        self.SubDir, self.Name = os.path.split(self.File)
        self.BaseName, self.Ext = os.path.splitext(self.Name)

        if self.Root:
            if self.SubDir:
                self.Dir = os.path.join(self.Root, self.SubDir)
            else:
                self.Dir = self.Root
        else:
            self.Dir = self.SubDir

        # Binary files carry an explicit type; others use the extension.
        if IsBinary:
            self.Type = Type
        else:
            self.Type = self.Ext.lower()

        self.IsBinary = IsBinary
        self.Target = Target
        self.TagName = TagName
        self.ToolCode = ToolCode
        self.ToolChainFamily = ToolChainFamily

        # Lazily computed upper-cased path, see _GetFileKey().
        self._Key = None

    ## Convert the object of this class to a string
    #
    # Convert member Path of the class to a string
    #
    def __str__(self):
        return self.Path

    ## Override __eq__ function
    #
    # Check whether PathClass are the same; any non-PathClass operand is
    # compared by its string form.
    #
    def __eq__(self, Other):
        if type(Other) == type(self):
            return self.Path == Other.Path
        else:
            return self.Path == str(Other)

    ## Override __hash__ function
    #
    # Use Path as key in hash table
    #
    def __hash__(self):
        return hash(self.Path)

    ## _GetFileKey
    #
    # Case-insensitive key for the path, computed once on first use.
    #
    def _GetFileKey(self):
        if self._Key == None:
            self._Key = self.Path.upper()
        return self._Key
    ## Validate
    #
    # Check the file against the file system (via RealPath2) and the
    # expected Type.  On success the cached path members are refreshed
    # from the real file-system spelling.
    #
    # @param Type:          expected file type ('' to skip the check)
    # @param CaseSensitive: report case mismatches as errors; forced off
    #                       by GlobalData.gCASE_INSENSITIVE
    #
    # @return (ErrorCode, ErrorInfo); ErrorCode 0 means success
    #
    def Validate(self, Type='', CaseSensitive=True):
        if GlobalData.gCASE_INSENSITIVE:
            CaseSensitive = False
        if Type and Type.lower() != self.Type:
            return ToolError.FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % \
            (self.File, Type, self.Type)

        RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
        if not RealRoot and not RealFile:
            # Not found through the file table: report against whichever
            # root was configured.
            RealFile = self.File
            if self.AlterRoot:
                RealFile = os.path.join(self.AlterRoot, self.File)
            elif self.Root:
                RealFile = os.path.join(self.Root, self.File)
            return ToolError.FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)

        ErrorCode = 0
        ErrorInfo = ''
        if RealRoot != self.Root or RealFile != self.File:
            # The on-disk spelling differs; flag a case mismatch when
            # case sensitivity is requested.
            if CaseSensitive and (RealFile != self.File or \
                                  (RealRoot != self.Root and RealRoot != \
                                   self.AlterRoot)):
                ErrorCode = ToolError.FILE_CASE_MISMATCH
                ErrorInfo = self.File + '\n\t' + RealFile + \
                            " [in file system]"

            self.SubDir, self.Name = os.path.split(RealFile)
            self.BaseName, self.Ext = os.path.splitext(self.Name)
            if self.SubDir:
                self.Dir = os.path.join(RealRoot, self.SubDir)
            else:
                self.Dir = RealRoot
            self.File = RealFile
            self.Root = RealRoot
            self.Path = os.path.join(RealRoot, RealFile)
        return ErrorCode, ErrorInfo

    Key = property(_GetFileKey)
+
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
#    WORKSPACE           The directory all packages/platforms start from
#    EDK_TOOLS_PATH      The directory contains all tools needed by the build
#    PATH                $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
    #
    # WORKSPACE must be defined ...
    #
    if "WORKSPACE" not in environ:
        Logger.Error("UPT", ToolError.UPT_ENVIRON_MISSING_ERROR,
                     ST.ERR_NOT_FOUND_ENVIRONMENT, ExtraData="WORKSPACE")

    #
    # ... and must name an existing, space-free directory.
    #
    WorkspaceDir = os.path.normpath(environ["WORKSPACE"])
    if not os.path.exists(WorkspaceDir):
        Logger.Error("UPT", ToolError.UPT_ENVIRON_MISSING_ERROR,
                     ST.ERR_WORKSPACE_NOTEXIST, ExtraData="%s" % WorkspaceDir)
    elif ' ' in WorkspaceDir:
        Logger.Error("UPT", ToolError.FORMAT_NOT_SUPPORTED,
                     ST.ERR_SPACE_NOTALLOWED, ExtraData=WorkspaceDir)
+
## Check whether all module types are in list
#
# check whether all module types (SUP_MODULE_LIST) are in list
#
# @param ModuleList: a list of ModuleType
#
def IsAllModuleList(ModuleList):
    #
    # Compare case-insensitively: every supported module type must be
    # present in the given list.
    #
    Given = [Module.upper() for Module in ModuleList]
    for Supported in SUP_MODULE_LIST:
        if Supported not in Given:
            return False
    return True
+
## Dictionary that use comment(GenericComment, TailComment) as value,
# if a new comment which key already in the dic is inserted, then the
# comment will be merged.
# Key is (Statement, SupArch), when TailComment is added, it will ident
# according to Statement
#
class MergeCommentDict(dict):
    ## []= operator: store, or merge with the comment already under Key.
    #
    def __setitem__(self, Key, CommentVal):
        GenericComment, TailComment = CommentVal
        if Key not in self:
            dict.__setitem__(self, Key, (GenericComment, TailComment))
            return
        OldGeneric, OldTail = dict.__getitem__(self, Key)
        #
        # Indent the appended tail comment by the statement width
        # (Key[0]) so it lines up under the previous one.
        #
        Statement = Key[0]
        Merged = (OldGeneric + GenericComment,
                  OldTail + len(Statement) * ' ' + TailComment)
        dict.__setitem__(self, Key, Merged)

    ## =[] operator
    #
    def __getitem__(self, Key):
        return dict.__getitem__(self, Key)
+
+
## GenDummyHelpTextObj
#
# Build a placeholder help-text object (en-US, single space).
#
# @retval HelpTxt: Generated dummy help text object
#
def GenDummyHelpTextObj():
    DummyHelp = TextObject()
    DummyHelp.SetLang(LANGUAGE_EN_US)
    DummyHelp.SetString(' ')
    return DummyHelp
+
## ConvertVersionToDecimal, the minor version should be within 0 - 99
#   <HexVersion>    ::=  "0x" <Major> <Minor>
#   <Major>         ::=  (a-fA-F0-9){4}
#   <Minor>         ::=  (a-fA-F0-9){4}
#   <DecVersion>    ::=  (0-65535) ["." (0-99)]
#
# @param StringIn: The string contains version defined in INF file.
#                  It can be Decimal or Hex
#
def ConvertVersionToDecimal(StringIn):
    if IsValidHexVersion(StringIn):
        #
        # 0xMMMMmmmm: high 16 bits are the major number, low 16 bits the
        # minor; the minor part is rendered with at least two digits.
        #
        Value = int(StringIn, 16)
        return '%d.%02d' % (Value >> 16, Value & 0xFFFF)
    #
    # Decimal input: already dotted (or empty) is returned unchanged,
    # otherwise a '.0' minor part is appended.
    #
    if StringIn.find(TAB_SPLIT) != -1 or not StringIn:
        return StringIn
    return StringIn + '.0'
+
## GetHelpStringByRemoveHashKey
#
# Remove hash key at the header of string and return the remain.
#
# @param String: The string need to be processed.
#
def GetHelpStringByRemoveHashKey(String):
    #
    # Leading '#', '+' and whitespace characters act as comment markers.
    #
    HashKeyPattern = re.compile(r"^[#+\s]+", re.DOTALL)
    String = String.strip()
    if not String:
        return String

    Result = ''
    for Line in GetSplitValueList(String, END_OF_LINE):
        Pieces = HashKeyPattern.split(Line)
        #
        # split() yields ['', remainder] when the marker matched at the
        # start, or the whole line as a single element when it did not;
        # the last element is the wanted text either way.
        #
        Result += Pieces[-1] + END_OF_LINE

    # Drop the final artificial newline, but keep a deliberate trailing
    # blank line and a lone newline intact.
    if Result.endswith('\n') and not Result.endswith('\n\n') and Result != '\n':
        Result = Result[:-1]

    return Result
+
## ConvPathFromAbsToRel
#
# Get relative file path from absolute path.
#
# @param Path: The string contain file absolute path.
# @param Root: The string contain the parent path of Path in.
#
def ConvPathFromAbsToRel(Path, Root):
    Path = os.path.normpath(Path)
    Root = os.path.normpath(Root)
    #
    # A relative Path is returned as-is (normalized); an absolute Path is
    # expected to live under Root and is stripped down to the part below
    # it.
    #
    if not os.path.isabs(Path):
        return Path
    FullPath = os.path.normpath(os.path.join(Root, Path))
    return FullPath[FullPath.find(Root) + len(Root) + 1:]
+
## ConvertPath
#
# Convert special characters to '_', '\' to '/'
# return converted path: Test!1.inf -> Test_1.inf
#
# @param Path: Path to be converted
#
def ConvertPath(Path):
    Converted = []
    for Char in Path.strip():
        if Char == '\\':
            Converted.append('/')
        elif Char.isalnum() or Char in '.-_/':
            # Alphanumerics and a small set of punctuation pass through.
            Converted.append(Char)
        else:
            Converted.append('_')
    return ''.join(Converted)
+
## ConvertSpec
#
# during install, convert the Spec string extract from UPD into INF
# allowable definition, the difference is period is allowed in the former
# (not the first letter) but not in the latter.
# return converted Spec string
#
# @param SpecStr: SpecStr to be converted
#
def ConvertSpec(SpecStr):
    # Keep alphanumerics and underscores; everything else becomes '_'.
    return ''.join(Char if Char.isalnum() or Char == '_' else '_'
                   for Char in SpecStr)
+
+
## IsEqualList
#
# Judge two lists are identical(contain same item).
# The rule is elements in List A are in List B and elements in List B are
# in List A.
#
# @param ListA, ListB  Lists need to be judged.
#
# @return True   ListA and ListB are identical
# @return False  ListA and ListB are different with each other
#
def IsEqualList(ListA, ListB):
    # Fast path: literally equal lists.
    if ListA == ListB:
        return True
    #
    # Mutual containment check, tolerant of duplicates and ordering.
    #
    return all(Item in ListB for Item in ListA) and \
           all(Item in ListA for Item in ListB)
+
## ConvertArchList
#
# Convert item in ArchList if the start character is lower case.
# In UDP spec, Arch is only allowed as: [A-Z]([a-zA-Z0-9])*
#
# @param ArchList  The ArchList need to be converted. May be a list of
#                  arch strings, a single string, or a false value.
#
# @return NewList  The ArchList been converted (empty for false input).
#
def ConvertArchList(ArchList):
    NewArchList = []
    if not ArchList:
        return NewArchList

    #
    # isinstance() (rather than the original exact type() comparison)
    # also accepts str/list subclasses.
    #
    if isinstance(ArchList, str):
        NewArchList.append(ArchList.upper())
    elif isinstance(ArchList, list):
        NewArchList = [Arch.upper() for Arch in ArchList]

    return NewArchList
+
## ProcessLineExtender
#
# Process the LineExtender of Line in LineList.
# If one line ends with a line extender, then it will be combined together
# with next line.
#
# NOTE(review): the [:-2] slice drops the backslash AND the character
# before it — presumably the expected form is "... \" with a space before
# the extender; kept exactly as the original. TODO confirm with callers.
#
# @param LineList  The LineList need to be processed.
#
# @return NewList  The LineList been processed.
#
def ProcessLineExtender(LineList):
    Merged = []
    Index = 0
    Total = len(LineList)
    while Index < Total:
        Current = LineList[Index]
        if Current.strip().endswith("\\") and Index + 1 < Total:
            # Join with the following line and consume both.
            Merged.append(Current.strip()[:-2] + LineList[Index + 1])
            Index += 2
        else:
            Merged.append(Current)
            Index += 1
    return Merged
+
## GetLibInstanceInfo
#
# Get the FILE_GUID and VERSION_STRING values from a library instance INF
# file referenced by a comment line.
#
# @param String:    A string start with # and followed by INF file path
# @param WorkSpace: The WorkSpace directory used to combined with INF
#                   file path.
# @param LineNo:    Line number used for error reporting.
#
# @return (FileGuidString, VerString) on success; (None, None) for an
#         empty input string.
#
def GetLibInstanceInfo(String, WorkSpace, LineNo):

    FileGuidString = ""
    VerString = ""

    OrignalString = String
    String = String.strip()
    if not String:
        return None, None
    #
    # Remove "#" characters at the beginning
    #
    String = GetHelpStringByRemoveHashKey(String)
    String = String.strip()

    #
    # Validate file name exist.
    #
    FullFileName = os.path.normpath(os.path.realpath(os.path.join(WorkSpace, String)))
    if not (ValidFile(FullFileName)):
        # Logger.Error is presumably fatal here — TODO confirm; if it
        # returns, execution falls through to the path check below.
        Logger.Error("InfParser",
                     ToolError.FORMAT_INVALID,
                     ST.ERR_FILELIST_EXIST % (String),
                     File=GlobalData.gINF_MODULE_NAME,
                     Line=LineNo,
                     ExtraData=OrignalString)

    #
    # Validate file exist/format.
    #
    if IsValidPath(String, WorkSpace):
        IsValidFileFlag = True
    else:
        Logger.Error("InfParser",
                     ToolError.FORMAT_INVALID,
                     ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID % (String),
                     File=GlobalData.gINF_MODULE_NAME,
                     Line=LineNo,
                     ExtraData=OrignalString)
        # NOTE(review): this path returns a bare False while every other
        # exit returns a 2-tuple; it is reachable only if Logger.Error
        # returns.
        return False
    if IsValidFileFlag:
        FileLinesList = []

        try:
            FInputfile = open(FullFileName, "rb", 0)
            try:
                FileLinesList = FInputfile.readlines()
            except BaseException:
                Logger.Error("InfParser",
                             ToolError.FILE_READ_FAILURE,
                             ST.ERR_FILE_OPEN_FAILURE,
                             File=FullFileName)
            finally:
                FInputfile.close()
        except BaseException:
            Logger.Error("InfParser",
                         ToolError.FILE_READ_FAILURE,
                         ST.ERR_FILE_OPEN_FAILURE,
                         File=FullFileName)

        # Match whole "FILE_GUID = ..." / "VERSION_STRING = ..." lines.
        ReFileGuidPattern = re.compile("^\s*FILE_GUID\s*=.*$")
        ReVerStringPattern = re.compile("^\s*VERSION_STRING\s*=.*$")

        # Join any '\'-extended lines before scanning.
        FileLinesList = ProcessLineExtender(FileLinesList)

        # The LAST matching line wins if either key appears repeatedly.
        for Line in FileLinesList:
            if ReFileGuidPattern.match(Line):
                FileGuidString = Line
            if ReVerStringPattern.match(Line):
                VerString = Line

        # Reduce the matched lines to the value right of the first '='.
        if FileGuidString:
            FileGuidString = GetSplitValueList(FileGuidString, '=', 1)[1]
        if VerString:
            VerString = GetSplitValueList(VerString, '=', 1)[1]

        return FileGuidString, VerString
diff --git a/BaseTools/Source/Python/UPT/Library/ParserValidate.py b/BaseTools/Source/Python/UPT/Library/ParserValidate.py
new file mode 100644
index 0000000000..d6b9a096c7
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/ParserValidate.py
@@ -0,0 +1,717 @@
+## @file ParserValidate.py
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+
+'''
+PaserValidate
+'''
+
+import os.path
+import re
+
+from Library.DataType import MODULE_LIST
+from Library.DataType import COMPONENT_TYPE_LIST
+from Library.DataType import PCD_USAGE_TYPE_LIST_OF_MODULE
+from Library.DataType import TAB_SPACE_SPLIT
+from Library.String import GetSplitValueList
+from Library.ExpressionValidate import IsValidBareCString
+from Library.ExpressionValidate import IsValidFeatureFlagExp
+
## __HexDigit() method
#
# Whether char input is a Hex data bit
#
# @param TempChar: The char to test
#
def __HexDigit(TempChar):
    # Chained comparisons cover the three hex-digit ranges.
    return ('0' <= TempChar <= '9') or \
           ('a' <= TempChar <= 'f') or \
           ('A' <= TempChar <= 'F')
+
## IsValidHex() method
#
# Whether the string is a hex number: a 0x/0X prefix followed only by
# hexadecimal digits (an empty digit part is accepted, as before).
#
# @param HexStr: The string to test
#
def IsValidHex(HexStr):
    if not HexStr.upper().startswith("0X"):
        return False
    for Char in HexStr[2:]:
        if not (('0' <= Char <= '9') or ('a' <= Char <= 'f')
                or ('A' <= Char <= 'F')):
            return False
    return True
+
## Judge the input string is valid bool type or not.
#
# <TRUE>     ::= {"TRUE"} {"true"} {"True"} {"0x1"} {"0x01"}
# <FALSE>    ::= {"FALSE"} {"false"} {"False"} {"0x0"} {"0x00"}
# <BoolType> ::= {<TRUE>} {<FALSE>}
#
# @param BoolString: A string contained the value need to be judged.
#
def IsValidBoolType(BoolString):
    # Exact membership in the closed set of accepted spellings.
    return BoolString in ('TRUE', 'True', 'true', '0x1', '0x01',
                          'FALSE', 'False', 'false', '0x0', '0x00')
+
## Is Valid Module Type List or not
#
# Every entry must be a valid module type.  (The original returned after
# checking only the first entry; an empty list is now considered valid.)
#
# @param ModuleTypeList: A list contain ModuleType strings need to be
#                        judged.
#
def IsValidInfMoudleTypeList(ModuleTypeList):
    for ModuleType in ModuleTypeList:
        if not IsValidInfMoudleType(ModuleType):
            return False
    return True
+
## Is Valid Module Type or not
#
# @param ModuleType: A string contain ModuleType need to be judged.
#
def IsValidInfMoudleType(ModuleType):
    # Membership in the supported module-type list (case-sensitive).
    return ModuleType in MODULE_LIST
+
## Is Valid Component Type or not
#
# @param ComponentType: A string contain ComponentType need to be judged.
#
def IsValidInfComponentType(ComponentType):
    # Component types are compared case-insensitively.
    return ComponentType.upper() in COMPONENT_TYPE_LIST
+
+
## Is valid Tool Family or not
#
# @param ToolFamily: A string contain Tool Family need to be judged.
# Family := [A-Z]([a-zA-Z0-9])*
#
def IsValidToolFamily(ToolFamily):
    # One leading upper-case letter, then any alphanumerics.
    return re.match(r"^[A-Z]+[A-Za-z0-9]{0,}$", ToolFamily, re.DOTALL) is not None
+
## Is valid Tool TagName or not
#
# The TagName sample is MYTOOLS and VS2005.
#
# @param TagName: A string contain Tool TagName need to be judged.
#
def IsValidToolTagName(TagName):
    #
    # Empty (after stripping) and the '*' wildcard are always accepted;
    # anything else must be a valid word.
    #
    if TagName.strip() in ('', '*'):
        return True
    if IsValidWord(TagName):
        return True
    return False
+
## Is valid arch or not
#
# <OA>   ::= (a-zA-Z)(A-Za-z0-9){0,}
# <arch> ::= {"IA32"} {"X64"} {"IPF"} {"EBC"} {<OA>} {"common"}
#
# @param Arch: Input arch
#
def IsValidArch(Arch):
    # 'common' is accepted verbatim; everything else must be a letter
    # followed by alphanumerics.
    if Arch == 'common':
        return True
    return re.match(r"^[a-zA-Z]+[a-zA-Z0-9]{0,}$", Arch, re.DOTALL) is not None
+
## Is valid family or not
#
# <Family> ::= {"MSFT"} {"GCC"} {"INTEL"} {<Usr>} {"*"}
# <Usr>    ::= [A-Z][A-Za-z0-9]{0,}
#
# @param Family: The family string need to be validated
#
def IsValidFamily(Family):
    Family = Family.strip()
    #
    # The '*' wildcard and the empty string are both accepted.
    #
    if Family in ('*', ''):
        return True
    return re.match(r"^[A-Z]+[A-Za-z0-9]{0,}$", Family, re.DOTALL) is not None
+
## Is valid build option name or not
#
# A build option name has five '_'-separated parts:
# TARGET_TOOLCHAIN_ARCH_TOOLCODE_ATTRIBUTE
#
# @param BuildOptionName: The BuildOptionName string need to be validated
#
def IsValidBuildOptionName(BuildOptionName):
    if not BuildOptionName:
        return False

    Parts = GetSplitValueList(BuildOptionName, '_', 4)
    if len(Parts) != 5:
        return False

    # NOTE(review): in these patterns the '|' alternation makes each
    # anchor apply to only one branch ("^\s*(\*)" or
    # "([A-Z][a-zA-Z0-9]*)$"); the patterns are kept byte-identical to
    # the original. TODO confirm intended anchoring.
    UpperPattern = re.compile(r"^\s*(\*)|([A-Z][a-zA-Z0-9]*)$")
    AnyCasePattern = re.compile(r"^\s*(\*)|([a-zA-Z][a-zA-Z0-9]*)$")

    if UpperPattern.match(Parts[0]) is None:
        return False
    if UpperPattern.match(Parts[1]) is None:
        return False
    if AnyCasePattern.match(Parts[2]) is None:
        return False
    #
    # A wildcard tool code is only allowed for these attributes.
    #
    if Parts[3] == "*" and Parts[4] not in ['FAMILY', 'DLL', 'DPATH']:
        return False

    return True
+
## IsValidToken
#
# Check if pattern string matches total token
#
# @param ReString: regular string
# @param Token:    Token to be matched
#
def IsValidToken(ReString, Token):
    #
    # The pattern must consume the token from its first character to its
    # last.
    #
    Matched = re.match(ReString, Token)
    return Matched and Matched.start() == 0 and Matched.end() == len(Token)
+
## IsValidPath
#
# Check if path exist
#
# A path is valid when it exists under Root and, if relative, uses no
# leading separator, no '.'/'..' components, and only valid words.
#
# @param Path: Absolute path or relative path to be checked
# @param Root: Root path
#
def IsValidPath(Path, Root):
    Path = Path.strip()
    # Keep the pre-normalization spelling for the character checks below.
    OrigPath = Path.replace('\\', '/')

    Path = os.path.normpath(Path).replace('\\', '/')
    Root = os.path.normpath(Root).replace('\\', '/')
    FullPath = os.path.normpath(os.path.join(Root, Path)).replace('\\', '/')

    if not os.path.exists(FullPath):
        return False

    #
    # If Path is absolute path.
    # It should be in Root.
    #
    if os.path.isabs(Path):
        if not Path.startswith(Root):
            return False
        return True

    #
    # Check illegal character
    #
    # A relative path must not start with a separator or dot component...
    for Rel in ['/', './', '../']:
        if OrigPath.startswith(Rel):
            return False
    # ...must not contain empty or dot components in the middle...
    for Rel in ['//', '/./', '/../']:
        if Rel in OrigPath:
            return False
    # ...and must not end with a separator or dot component.
    for Rel in ['/.', '/..', '/']:
        if OrigPath.endswith(Rel):
            return False

    Path = Path.rstrip('/')

    #
    # Check relative path
    #
    # Every remaining component must be a valid word.
    for Word in Path.split('/'):
        if not IsValidWord(Word):
            return False

    return True
+
## IsValidInstallPath
#
# Check if an install path valid or not.
#
# Absolute path or path starts with '.' or path contains '..' are invalid.
#
# @param Path: path to be checked
#
def IsValidInstallPath(Path):
    if os.path.isabs(Path):
        return False
    if Path.startswith('.'):
        return False
    return '..' not in Path
+

## IsValidCFormatGuid
#
# Check if GUID format has the form of {8,4,4,{2,2,2,2,2,2,2,2}}
#
# @param Guid: Guid to be checked
#
def IsValidCFormatGuid(Guid):
    #
    # Valid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
    # 0xaf, 0x48, 0xce }}
    # Invalid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
    # 0xaf, 0x48, 0xce }} 0x123
    # Invalid: { 0xf0b1 1735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
    # 0xaf, 0x48, 0xce }}
    #
    # List interleaves the expected separator tokens with the maximum
    # length (including the '0x' prefix) of each hex field.
    List = ['{', 10, ',', 6, ',', 6, ',{', 4, ',', 4, ',', 4,
            ',', 4, ',', 4, ',', 4, ',', 4, ',', 4, '}}']
    Index = 0
    Value = ''
    SepValue = ''
    # Walk the string char by char, accumulating hex digits in Value and
    # separator characters in SepValue, validating each completed field.
    for Char in Guid:
        if Char not in '{},\t ':
            Value += Char
            continue
        if Value:
            try:
                #
                # Index may out of bound
                #
                if not SepValue or SepValue != List[Index]:
                    return False
                Index += 1
                SepValue = ''

                if not Value.startswith('0x') and not Value.startswith('0X'):
                    return False

                #
                # Index may out of bound
                #
                if type(List[Index]) != type(1) or \
                    len(Value) > List[Index] or len(Value) < 3:
                    return False

                #
                # Check if string can be converted to integer
                # Throw exception if not
                #
                int(Value, 16)
            except BaseException:
                #
                # Exception caught means invalid format
                #
                return False
            Value = ''
            Index += 1
        if Char in '{},':
            SepValue += Char

    # Valid only if the final separator is the closing '}}' and no field
    # is left unterminated.
    return SepValue == '}}' and Value == ''
+
## IsValidPcdType
#
# Check whether the PCD type is valid (case-insensitive membership in
# PCD_USAGE_TYPE_LIST_OF_MODULE).
#
# @param PcdTypeString: The PcdType string need to be checked.
#
def IsValidPcdType(PcdTypeString):
    return PcdTypeString.upper() in PCD_USAGE_TYPE_LIST_OF_MODULE
+
## IsValidWord
#
# Check whether the word is valid.
# <Word> ::= (a-zA-Z0-9_)(a-zA-Z0-9_-){0,} Alphanumeric characters with
# optional dash "-" and/or underscore "_" characters; '.' is accepted
# inside the word but never twice in a row. No whitespace characters
# are permitted.
#
# @param Word: The word string need to be checked.
#
def IsValidWord(Word):
    if not Word:
        return False
    #
    # The first char should be alphanumeric or '_'.
    #
    Leading = Word[0]
    if not (Leading.isalnum() or Leading == '_'):
        return False

    Previous = ''
    for Current in Word[1:]:
        if not (Current.isalpha() or Current.isdigit() or
                Current in '-_.'):
            return False
        # Two '.' characters may not be adjacent.
        if Current == '.' and Previous == '.':
            return False
        Previous = Current

    return True
+

## IsValidSimpleWord
#
# Check whether the SimpleWord is valid.
# <SimpleWord> ::= (a-zA-Z0-9)(a-zA-Z0-9_-){0,}
# A word that cannot contain a period character.
#
# @param Word: The word string need to be checked.
#
def IsValidSimpleWord(Word):
    Candidate = Word.strip()
    if not Candidate:
        return False
    Pattern = re.compile(r"^[0-9A-Za-z][0-9A-Za-z\-_]*$", re.DOTALL)
    return Pattern.match(Candidate) is not None
+
## IsValidDecVersion
#
# Check whether the decimal version is valid.
# <DecVersion> ::= (0-9){1,} ["." (0-9){1,}]
#
# @param Word: The word string need to be checked.
#
def IsValidDecVersion(Word):
    # Pick the pattern according to whether a '.' is present at all.
    if '.' in Word:
        Pattern = r"[0-9]+\.?[0-9]+$"
    else:
        Pattern = r"[0-9]+$"
    return re.match(Pattern, Word) is not None
+
## IsValidHexVersion
#
# Check whether the hex version is valid.
# <HexVersion> ::= "0x" <Major> <Minor>
# <Major> ::= <HexDigit>{4}
# <Minor> ::= <HexDigit>{4}
#
# @param Word: The word string need to be checked.
#
def IsValidHexVersion(Word):
    # Exactly '0x'/'0X' followed by eight hex digits.
    return re.match(r"[0][xX][0-9A-Fa-f]{8}$", Word, re.DOTALL) is not None
+
## IsValidBuildNumber
#
# Check whether the BUILD_NUMBER is valid.
# ["BUILD_NUMBER" "=" <Integer>{1,4} <EOL>]
#
# @param Word: The BUILD_NUMBER string need to be checked.
#
def IsValidBuildNumber(Word):
    # One to four decimal digits and nothing else.
    return re.match(r"[0-9]{1,4}$", Word, re.DOTALL) is not None
+
## IsValidDepex
#
# Check whether the Depex is valid: either a PUSH statement followed by
# a C-format GUID, or a C-name expression (letters, digits, '_',
# whitespace and '.').
#
# @param Word: The Depex string need to be checked.
#
def IsValidDepex(Word):
    Location = Word.upper().find("PUSH")
    if Location > -1:
        # Everything after the PUSH keyword must be a C-format GUID.
        return IsValidCFormatGuid(Word[Location + 4:].strip())

    Pattern = re.compile(r"^[A-Za-z_][0-9A-Za-z_\s\.]*$", re.DOTALL)
    return Pattern.match(Word) is not None
+
## IsValidNormalizedString
#
# Check
# <NormalizedString> ::= <DblQuote> [{<Word>} {<Space>}]{1,} <DblQuote>
# <Space> ::= 0x20
#
# @param String: string to be checked
#
def IsValidNormalizedString(String):
    if String == '':
        return True

    # Tab characters are never allowed.
    if '\t' in String:
        return False

    # Every space-separated, non-empty piece must be a valid <Word>.
    for Piece in GetSplitValueList(String, TAB_SPACE_SPLIT):
        if Piece and not IsValidWord(Piece):
            return False

    return True
+
## IsValidIdString
#
# Check whether the IdString is valid: either a simple word, or a
# double-quoted (possibly empty) normalized string.
#
# @param String: The IdString need to be checked.
#
def IsValidIdString(String):
    if IsValidSimpleWord(String.strip()):
        return True

    # BUGFIX: the quotes must be removed from the *stripped* string; the
    # former code sliced the raw string, so surrounding whitespace made
    # it drop the wrong characters and keep the quotes.
    Stripped = String.strip()
    if Stripped.startswith('"') and \
       Stripped.endswith('"'):
        Inner = Stripped[1:-1]
        if Inner.strip() == "":
            return True
        if IsValidNormalizedString(Inner):
            return True

    return False
+
## IsValidVersionString
#
# Check whether the VersionString is valid.
# <AsciiString> ::= [ [<WhiteSpace>]{0,} [<AsciiChars>]{0,} ] {0,}
# <WhiteSpace> ::= {<Tab>} {<Space>}
# <Tab> ::= 0x09
# <Space> ::= 0x20
# <AsciiChars> ::= (0x21 - 0x7E)
#
# @param VersionString: The VersionString need to be checked.
#
def IsValidVersionString(VersionString):
    VersionString = VersionString.strip()
    for Char in VersionString:
        # BUGFIX: the former code compared Char (a str) against the
        # integers 0x21/0x7E directly, which can never validate; compare
        # ord(Char) instead. Tab/space are permitted per <WhiteSpace>.
        if Char not in '\t ' and not (0x21 <= ord(Char) <= 0x7E):
            return False

    return True
+
## IsValidPcdValue
#
# Check whether the PcdValue is valid: a boolean expression, a decimal
# or hex number, a quoted (unicode) string, a C-format GUID, a hex-byte
# array or a hex-byte list.
#
# @param PcdValue: The PcdValue need to be checked.
#
def IsValidPcdValue(PcdValue):
    # Embedded newline, tab or form-feed characters are never allowed.
    for Char in PcdValue:
        if Char == '\n' or Char == '\t' or Char == '\f':
            return False

    #
    # <Boolean>
    #
    if IsValidFeatureFlagExp(PcdValue, True)[0]:
        return True

    #
    # <Number> ::= {<Integer>} {<HexNumber>}
    # <Integer> ::= {(0-9)} {(1-9)(0-9){1,}}
    # <HexNumber> ::= "0x" <HexDigit>{1,}
    # <HexDigit> ::= (a-fA-F0-9)
    #
    if IsValidHex(PcdValue):
        return True

    ReIsValidIntegerSingle = re.compile(r"^\s*[0-9]\s*$", re.DOTALL)
    if ReIsValidIntegerSingle.match(PcdValue) != None:
        return True

    # Multi-digit integers may not carry a leading zero.
    ReIsValidIntegerMulti = re.compile(r"^\s*[1-9][0-9]+\s*$", re.DOTALL)
    if ReIsValidIntegerMulti.match(PcdValue) != None:
        return True


    #
    # <StringVal> ::= {<StringType>} {<Array>} {"$(" <MACRO> ")"}
    # <StringType> ::= {<UnicodeString>} {<CString>}
    #
    ReIsValidStringType = re.compile(r"^\s*[\"L].*[\"]\s*$")
    if ReIsValidStringType.match(PcdValue):
        IsTrue = False
        if PcdValue.strip().startswith('L\"'):
            # Unicode string: strip the L prefix and quotes, then
            # validate the bare C string content.
            StringValue = PcdValue.strip().lstrip('L\"').rstrip('\"')
            if IsValidBareCString(StringValue):
                IsTrue = True
        elif PcdValue.strip().startswith('\"'):
            StringValue = PcdValue.strip().lstrip('\"').rstrip('\"')
            if IsValidBareCString(StringValue):
                IsTrue = True
        if IsTrue:
            return IsTrue

    #
    # <Array> ::= {<CArray>} {<NList>} {<CFormatGUID>}
    # <CArray> ::= "{" [<NList>] <CArray>{0,} "}"
    # <NList> ::= <HexByte> ["," <HexByte>]{0,}
    # <HexDigit> ::= (a-fA-F0-9)
    # <HexByte> ::= "0x" <HexDigit>{1,2}
    #
    if IsValidCFormatGuid(PcdValue):
        return True

    ReIsValidByteHex = re.compile(r"^\s*0x[0-9a-fA-F]{1,2}\s*$", re.DOTALL)
    if PcdValue.strip().startswith('{') and PcdValue.strip().endswith('}') :
        # Brace-enclosed array: every comma-separated item must be a
        # single hex byte.
        StringValue = PcdValue.strip().lstrip('{').rstrip('}')
        ValueList = StringValue.split(',')
        AllValidFlag = True
        for ValueItem in ValueList:
            if not ReIsValidByteHex.match(ValueItem.strip()):
                AllValidFlag = False

        if AllValidFlag:
            return True

    #
    # NList
    #
    AllValidFlag = True
    ValueList = PcdValue.split(',')
    for ValueItem in ValueList:
        if not ReIsValidByteHex.match(ValueItem.strip()):
            AllValidFlag = False

    if AllValidFlag:
        return True

    return False
+
## IsValidCVariableName
#
# Check whether CName is a valid C variable name: a letter or
# underscore followed by letters, digits or underscores.
#
# @param CName: The C variable name need to be checked.
#
def IsValidCVariableName(CName):
    Pattern = re.compile(r"^[A-Za-z_][0-9A-Za-z_]*$", re.DOTALL)
    return Pattern.match(CName) is not None
+
## IsValidIdentifier
#
# <Identifier> ::= <NonDigit> <Chars>{0,}
# <Chars> ::= (a-zA-Z0-9_)
# <NonDigit> ::= (a-zA-Z_)
#
# @param Ident: identifier to be checked
#
def IsValidIdentifier(Ident):
    Pattern = re.compile(r"^[A-Za-z_][0-9A-Za-z_]*$", re.DOTALL)
    return Pattern.match(Ident) is not None
+
## IsValidDecVersionVal
#
# {(0-9){1,} "." (0-99)}
# One or more digits, a mandatory '.', then one or two digits.
#
# @param Ver: version to be checked
#
def IsValidDecVersionVal(Ver):
    return re.match(r"[0-9]+(\.[0-9]{1,2})$", Ver) is not None
+


## IsValidLibName
#
# (A-Z)(a-zA-Z0-9){0,} and could not be "NULL"
#
def IsValidLibName(LibName):
    if LibName == 'NULL':
        return False
    return re.match("^[A-Z]+[a-zA-Z0-9]*$", LibName) is not None
+
# IsValidUserId
#
# <UserId> ::= (a-zA-Z)(a-zA-Z0-9_.){0,}
# Words that contain period "." must be encapsulated in double quotation marks.
#
def IsValidUserId(UserId):
    UserId = UserId.strip()
    IsQuoted = UserId.startswith('"') and UserId.endswith('"')
    if IsQuoted:
        UserId = UserId[1:-1]
    # The id must be non-empty and start with a letter.
    if not UserId or not UserId[0].isalpha():
        return False
    for Char in UserId[1:]:
        if not (Char.isalnum() or Char in '_.'):
            return False
        # '.' is only permitted inside a quoted id.
        if Char == '.' and not IsQuoted:
            return False
    return True
+
diff --git a/BaseTools/Source/Python/UPT/Library/Parsing.py b/BaseTools/Source/Python/UPT/Library/Parsing.py
new file mode 100644
index 0000000000..95c51406b2
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/Parsing.py
@@ -0,0 +1,993 @@
+## @file
+# This file is used to define common parsing related functions used in parsing
+# INF/DEC/DSC process
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+
+'''
+Parsing
+'''
+
+##
+# Import Modules
+#
+import os.path
+import re
+
+from Library.String import RaiseParserError
+from Library.String import GetSplitValueList
+from Library.String import CheckFileType
+from Library.String import CheckFileExist
+from Library.String import CleanString
+from Library.String import NormPath
+
+from Logger.ToolError import FILE_NOT_FOUND
+from Logger.ToolError import FatalError
+from Logger.ToolError import FORMAT_INVALID
+
+from Library import DataType
+
+from Library.Misc import GuidStructureStringToGuidString
+from Library.Misc import CheckGuidRegFormat
+from Logger import StringTable as ST
+import Logger.Log as Logger
+
+from Parser.DecParser import Dec
+
+gPKG_INFO_DICT = {}
+
## GetBuildOption
#
# Parse a string with format "[<Family>:]<ToolFlag>=Flag"
# Return (Family, ToolFlag, Flag)
#
# @param String: String with BuildOption statement
# @param File: The file which defines build option, used in error report
#
def GetBuildOption(String, File, LineNo=-1):
    Family = ''
    ToolChain = ''
    Flag = ''
    if String.find(DataType.TAB_EQUAL_SPLIT) < 0:
        RaiseParserError(String, 'BuildOptions', File, \
                         '[<Family>:]<ToolFlag>=Flag', LineNo)
    else:
        List = GetSplitValueList(String, DataType.TAB_EQUAL_SPLIT, MaxSplit=1)
        Colon = List[0].find(':')
        if Colon > -1:
            # The optional family precedes the first ':'.
            Family = List[0][:Colon].strip()
            ToolChain = List[0][Colon + 1:].strip()
        else:
            ToolChain = List[0].strip()
        Flag = List[1].strip()
    return (Family, ToolChain, Flag)
+
## Get Library Class
#
# Get Library of Dsc as <LibraryClassKeyWord>|<LibraryInstance>
#
# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo=-1):
    ValueList = GetSplitValueList(Item[0])
    SupMod = DataType.SUP_MODULE_LIST_STRING
    if len(ValueList) != 2:
        RaiseParserError(Item[0], 'LibraryClasses', ContainerFile, \
                         '<LibraryClassKeyWord>|<LibraryInstance>')
    else:
        # The instance must be an existing .inf under the workspace.
        CheckFileType(ValueList[1], '.Inf', ContainerFile, \
                      'library class instance', Item[0], LineNo)
        CheckFileExist(WorkspaceDir, ValueList[1], ContainerFile, \
                       'LibraryClasses', Item[0], LineNo)
        if Item[1] != '':
            SupMod = Item[1]

    return (ValueList[0], ValueList[1], SupMod)
+
## Get Library Class
#
# Get Library of Dsc as <LibraryClassKeyWord>[|<LibraryInstance>]
# [|<TokenSpaceGuidCName>.<PcdCName>]
#
# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo = -1):
    # Pad with '|' so the optional fields are always present.
    ValueList = GetSplitValueList((Item[0] + DataType.TAB_VALUE_SPLIT * 2))
    SupMod = DataType.SUP_MODULE_LIST_STRING

    if len(ValueList) > 5:
        RaiseParserError\
            (Item[0], 'LibraryClasses', ContainerFile, \
            '<LibraryClassKeyWord>[|<LibraryInstance>]\
            [|<TokenSpaceGuidCName>.<PcdCName>]')
    else:
        CheckFileType(ValueList[1], '.Inf', ContainerFile, 'LibraryClasses', \
                      Item[0], LineNo)
        CheckFileExist(WorkspaceDir, ValueList[1], ContainerFile, \
                       'LibraryClasses', Item[0], LineNo)
        if ValueList[2] != '':
            # The optional third field is a PCD token reference.
            CheckPcdTokenInfo(ValueList[2], 'LibraryClasses', \
                              ContainerFile, LineNo)
        if Item[1] != '':
            SupMod = Item[1]

    return (ValueList[0], ValueList[1], ValueList[2], SupMod)
+
## CheckPcdTokenInfo
#
# Check if PcdTokenInfo is following <TokenSpaceGuidCName>.<PcdCName>;
# returns True on success, raises a parser error otherwise.
#
# @param TokenInfoString: String to be checked
# @param Section: Used for error report
# @param File: Used for error report
#
def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo=-1):
    Format = '<TokenSpaceGuidCName>.<PcdCName>'
    # Truthiness covers both '' and None (replaces the former explicit
    # "!= '' and != None" pair).
    if TokenInfoString:
        TokenInfoList = GetSplitValueList(TokenInfoString, DataType.TAB_SPLIT)
        if len(TokenInfoList) == 2:
            return True

    RaiseParserError(TokenInfoString, Section, File, Format, LineNo)
+
## Get Pcd
#
# Get Pcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>
# [|<Type>|<MaximumDatumSize>]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# <Value>[|<Type>|<MaximumDatumSize>]
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetPcd(Item, Type, ContainerFile, LineNo=-1):
    TokenGuid = TokenName = Value = MaximumDatumSize = Token = ''
    # Pad with '|' so the optional fields are always present.
    ValueList = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)

    if len(ValueList) < 4 or len(ValueList) > 6:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
                         [|<Type>|<MaximumDatumSize>]', LineNo)
    else:
        Value = ValueList[1]
        MaximumDatumSize = ValueList[2]
        Token = ValueList[3]

    if CheckPcdTokenInfo(ValueList[0], 'Pcds' + Type, ContainerFile, LineNo):
        (TokenGuid, TokenName) = GetSplitValueList(ValueList[0], \
                                                   DataType.TAB_SPLIT)

    return (TokenName, TokenGuid, Value, MaximumDatumSize, Token, Type)
+
## Get FeatureFlagPcd
#
# Get FeatureFlagPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
#
# @param Item: String as <PcdTokenSpaceGuidCName>
# .<TokenCName>|TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo=-1):
    TokenGuid = TokenName = Value = ''
    ValueList = GetSplitValueList(Item)
    if len(ValueList) != 2:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE', \
                         LineNo)
    else:
        Value = ValueList[1]
    if CheckPcdTokenInfo(ValueList[0], 'Pcds' + Type, ContainerFile, LineNo):
        (TokenGuid, TokenName) = GetSplitValueList(ValueList[0], \
                                                   DataType.TAB_SPLIT)

    return (TokenName, TokenGuid, Value, Type)
+
## Get DynamicDefaultPcd
#
# Get DynamicDefaultPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>
# |<Value>[|<DatumTyp>[|<MaxDatumSize>]]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo=-1):
    TokenGuid = TokenName = Value = DatumTyp = MaxDatumSize = ''
    # Pad with '|' so the optional fields are always present.
    ValueList = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
    if len(ValueList) < 4 or len(ValueList) > 8:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
                         [|<DatumTyp>[|<MaxDatumSize>]]', LineNo)
    else:
        Value, DatumTyp, MaxDatumSize = ValueList[1], ValueList[2], \
                                        ValueList[3]
    if CheckPcdTokenInfo(ValueList[0], 'Pcds' + Type, ContainerFile, LineNo):
        (TokenGuid, TokenName) = GetSplitValueList(ValueList[0], \
                                                   DataType.TAB_SPLIT)

    return (TokenName, TokenGuid, Value, DatumTyp, MaxDatumSize, Type)
+
## Get DynamicHiiPcd
#
# Get DynamicHiiPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<String>|
# <VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo = -1):
    TokenGuid = TokenName = ''
    List1 = List2 = List3 = List4 = List5 = ''
    # Pad with '|' so the optional fields are always present.
    List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
    if len(List) < 6 or len(List) > 8:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<String>|\
                         <VariableGuidCName>|<VariableOffset>[|<DefaultValue>\
                         [|<MaximumDatumSize>]]', LineNo)
    else:
        List1, List2, List3, List4, List5 = List[1:6]
    if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
        (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)

    return (TokenName, TokenGuid, List1, List2, List3, List4, List5, Type)
+
## Get DynamicVpdPcd
#
# Get DynamicVpdPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|
# <VpdOffset>[|<MaximumDatumSize>]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>
# |TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo=-1):
    TokenGuid = TokenName = List1 = List2 = ''
    # Pad with '|' so the optional field is always present.
    List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    if len(List) < 3 or len(List) > 4:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>\
                         [|<MaximumDatumSize>]', LineNo)
    else:
        List1, List2 = List[1], List[2]
    if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
        (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)

    return (TokenName, TokenGuid, List1, List2, Type)
+
## GetComponent
#
# Parse block of the components defined in dsc file
# Set KeyValues as [ ['component name', [lib1, lib2, lib3],
# [bo1, bo2, bo3], [pcd1, pcd2, pcd3]], ...]
#
# @param Lines: The content to be parsed; each entry is indexable and
#               element 0 holds the line text (other elements unused here)
# @param KeyValues: To store data after parsing
#
def GetComponent(Lines, KeyValues):
    # Parser state: FindBlock is set while inside a '{...}' component
    # block; the other flags track which <...> sub-section is current.
    (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
    FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
    FindPcdsDynamicEx) = (False, False, False, False, False, False, False, \
    False)
    ListItem = None
    LibraryClassItem = []
    BuildOption = []
    Pcd = []

    for Line in Lines:
        Line = Line[0]
        #
        # Ignore !include statement
        #
        if Line.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1 or \
            Line.upper().find(DataType.TAB_DEFINE + ' ') > -1:
            continue

        if FindBlock == False:
            ListItem = Line
            #
            # find '{' at line tail
            #
            if Line.endswith('{'):
                FindBlock = True
                ListItem = CleanString(Line.rsplit('{', 1)[0], \
                                       DataType.TAB_COMMENT_SPLIT)

        #
        # Parse a block content
        # Each <...> marker switches the active sub-section flag.
        #
        if FindBlock:
            if Line.find('<LibraryClasses>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (True, False, False, False, False, False, False)
                continue
            if Line.find('<BuildOptions>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, True, False, False, False, False, False)
                continue
            if Line.find('<PcdsFeatureFlag>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, True, False, False, False, False)
                continue
            if Line.find('<PcdsPatchableInModule>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, True, False, False, False)
                continue
            if Line.find('<PcdsFixedAtBuild>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, True, False, False)
                continue
            if Line.find('<PcdsDynamic>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, True, False)
                continue
            if Line.find('<PcdsDynamicEx>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, True)
                continue
            if Line.endswith('}'):
                #
                # find '}' at line tail: block is complete, flush the
                # collected data and reset all state.
                #
                KeyValues.append([ListItem, LibraryClassItem, \
                                  BuildOption, Pcd])
                (FindBlock, FindLibraryClass, FindBuildOption, \
                FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
                FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, False, False)
                LibraryClassItem, BuildOption, Pcd = [], [], []
                continue

        if FindBlock:
            if FindLibraryClass:
                LibraryClassItem.append(Line)
            elif FindBuildOption:
                BuildOption.append(Line)
            elif FindPcdsFeatureFlag:
                Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG_NULL, Line))
            elif FindPcdsPatchableInModule:
                Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE_NULL, Line))
            elif FindPcdsFixedAtBuild:
                Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD_NULL, Line))
            elif FindPcdsDynamic:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC_DEFAULT_NULL, Line))
            elif FindPcdsDynamicEx:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, Line))
        else:
            # Line outside any '{...}' block: a component with no
            # sub-sections.
            KeyValues.append([ListItem, [], [], []])

    return True
+
## GetExec
#
# Parse a string with format "InfFilename [EXEC = ExecFilename]"
# Return (InfFilename, ExecFilename); everything after the EXEC keyword
# (including a leading '=') is returned stripped as ExecFilename.
#
# @param String: String with EXEC statement
#
def GetExec(String):
    Position = String.find('EXEC')
    if Position > -1:
        return (String[:Position].strip(),
                String[Position + len('EXEC'):].strip())
    return (String.strip(), '')
+
## GetComponents
#
# Parse block of the components defined in dsc file
# Set KeyValues as [ ['component name', [lib1, lib2, lib3], [bo1, bo2, bo3],
# [pcd1, pcd2, pcd3]], ...]
#
# @param Lines: The content to be parsed (a single string; split on '\n')
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Get component successfully
#
def GetComponents(Lines, KeyValues, CommentCharacter):
    # Skip everything up to (and including) the section-end marker.
    if Lines.find(DataType.TAB_SECTION_END) > -1:
        Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    # Parser state: FindBlock is set while inside a '{...}' component
    # block; the other flags track which <...> sub-section is current.
    (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
    FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
    FindPcdsDynamicEx) = \
    (False, False, False, False, False, False, False, False)
    ListItem = None
    LibraryClassItem = []
    BuildOption = []
    Pcd = []

    LineList = Lines.split('\n')
    for Line in LineList:
        Line = CleanString(Line, CommentCharacter)
        if Line == None or Line == '':
            continue

        if FindBlock == False:
            ListItem = Line
            #
            # find '{' at line tail
            #
            if Line.endswith('{'):
                FindBlock = True
                ListItem = CleanString(Line.rsplit('{', 1)[0], CommentCharacter)

        #
        # Parse a block content
        # Each <...> marker switches the active sub-section flag.
        #
        if FindBlock:
            if Line.find('<LibraryClasses>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (True, False, False, False, False, False, False)
                continue
            if Line.find('<BuildOptions>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, True, False, False, False, False, False)
                continue
            if Line.find('<PcdsFeatureFlag>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, True, False, False, False, False)
                continue
            if Line.find('<PcdsPatchableInModule>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, True, False, False, False)
                continue
            if Line.find('<PcdsFixedAtBuild>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, True, False, False)
                continue
            if Line.find('<PcdsDynamic>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, True, False)
                continue
            if Line.find('<PcdsDynamicEx>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, True)
                continue
            if Line.endswith('}'):
                #
                # find '}' at line tail: block is complete, flush the
                # collected data and reset all state.
                #
                KeyValues.append([ListItem, LibraryClassItem, BuildOption, \
                                  Pcd])
                (FindBlock, FindLibraryClass, FindBuildOption, \
                FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
                FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, False, False)
                LibraryClassItem, BuildOption, Pcd = [], [], []
                continue

        if FindBlock:
            if FindLibraryClass:
                LibraryClassItem.append(Line)
            elif FindBuildOption:
                BuildOption.append(Line)
            elif FindPcdsFeatureFlag:
                Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG, Line))
            elif FindPcdsPatchableInModule:
                Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE, Line))
            elif FindPcdsFixedAtBuild:
                Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD, Line))
            elif FindPcdsDynamic:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC, Line))
            elif FindPcdsDynamicEx:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX, Line))
        else:
            # Line outside any '{...}' block: a component with no
            # sub-sections.
            KeyValues.append([ListItem, [], [], []])

    return True
+
## Get Source
#
# Get Source of Inf as <Filename>[|<Family>[|<TagName>[|<ToolCode>
# [|<PcdFeatureFlag>]]]]
#
# @param Item: String as <Filename>[|<Family>[|<TagName>[|<ToolCode>
# [|<PcdFeatureFlag>]]]]
# @param ContainerFile: The file which describes the library class, used
# for error report
#
def GetSource(Item, ContainerFile, FileRelativePath, LineNo=-1):
    # Pad with '|' so the optional fields are always present.
    ValueList = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 4)
    if len(ValueList) < 5 or len(ValueList) > 9:
        RaiseParserError(Item, 'Sources', ContainerFile, \
                         '<Filename>[|<Family>[|<TagName>[|<ToolCode>\
                         [|<PcdFeatureFlag>]]]]', LineNo)
    ValueList[0] = NormPath(ValueList[0])
    CheckFileExist(FileRelativePath, ValueList[0], ContainerFile, 'Sources', \
                   Item, LineNo)
    if ValueList[4] != '':
        # The optional fifth field is a PCD feature-flag token reference.
        CheckPcdTokenInfo(ValueList[4], 'Sources', ContainerFile, LineNo)

    return (ValueList[0], ValueList[1], ValueList[2], ValueList[3], \
            ValueList[4])
+
## Get Binary
#
# Get Binary of Inf as <FileType>|<Filename>[|<Target>
# [|<TokenSpaceGuidCName>.<PcdCName>]]
#
# @param Item: String as <FileType>|<Filename>[|<Target>
# [|<TokenSpaceGuidCName>.<PcdCName>]]
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetBinary(Item, ContainerFile, LineNo=-1):
    # Pad with '|' so the optional fields are always present.
    ValueList = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    if len(ValueList) < 3 or len(ValueList) > 5:
        RaiseParserError(Item, 'Binaries', ContainerFile, \
                         "<FileType>|<Filename>[|<Target>\
                         [|<TokenSpaceGuidCName>.<PcdCName>]]", LineNo)

    if len(ValueList) >= 4:
        if ValueList[3] != '':
            # The optional fourth field is a PCD token reference.
            CheckPcdTokenInfo(ValueList[3], 'Binaries', ContainerFile, LineNo)
        return (ValueList[0], ValueList[1], ValueList[2], ValueList[3])
    elif len(ValueList) == 3:
        return (ValueList[0], ValueList[1], ValueList[2], '')
+
## Get Guids/Protocols/Ppis
#
# Get Guids/Protocols/Ppis of Inf as <GuidCName>[|<PcdFeatureFlag>]
#
# @param Item: String as <GuidCName>[|<PcdFeatureFlag>]
#
def GetGuidsProtocolsPpisOfInf(Item):
    # Pad with '|' so the optional feature flag is always present.
    ValueList = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    return (ValueList[0], ValueList[1])
+
## Get Guids/Protocols/Ppis
#
# Get Guids/Protocols/Ppis of Dec as <GuidCName>=<GuidValue>; the value
# is returned in registry format (C-format values are converted).
#
# @param Item: String as <GuidCName>=<GuidValue>
# @param Type: Type of parsing string
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo=-1):
    List = GetSplitValueList(Item, DataType.TAB_EQUAL_SPLIT)
    if len(List) != 2:
        RaiseParserError(Item, Type, ContainerFile, '<CName>=<GuidValue>', \
                         LineNo)
    #
    # convert C-Format Guid to Register Format
    # BUGFIX: use startswith/endswith instead of List[1][0]/List[1][-1]
    # so an empty value cannot raise IndexError; it now falls through to
    # the registry-format check and reports a parser error instead.
    #
    if List[1].startswith('{') and List[1].endswith('}'):
        RegisterFormatGuid = GuidStructureStringToGuidString(List[1])
        if RegisterFormatGuid == '':
            RaiseParserError(Item, Type, ContainerFile, \
                             'CFormat or RegisterFormat', LineNo)
    else:
        if CheckGuidRegFormat(List[1]):
            RegisterFormatGuid = List[1]
        else:
            RaiseParserError(Item, Type, ContainerFile, \
                             'CFormat or RegisterFormat', LineNo)

    return (List[0], RegisterFormatGuid)
+
## GetPackage
#
# Get Package of Inf as <PackagePath>[|<PcdFeatureFlag>]
#
# @param Item: String as <PackagePath>[|<PcdFeatureFlag>]
# @param ContainerFile: The file which describes the library class,
#                       used for error report
# @param FileRelativePath: Base path used to check the package file exists
# @param LineNo: Line number in ContainerFile, used for error report
#
def GetPackage(Item, ContainerFile, FileRelativePath, LineNo=-1):
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    PackagePath = Fields[0]
    FeatureFlag = Fields[1]
    #
    # The package file must be a .dec file and must exist on disk.
    #
    CheckFileType(PackagePath, '.Dec', ContainerFile, 'package', \
                  PackagePath, LineNo)
    CheckFileExist(FileRelativePath, PackagePath, ContainerFile, 'Packages', \
                   PackagePath, LineNo)
    if FeatureFlag != '':
        CheckPcdTokenInfo(FeatureFlag, 'Packages', ContainerFile, LineNo)

    return (PackagePath, FeatureFlag)
+
## Get Pcd Values of Inf
#
# Get Pcd of Inf as <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
#
# @param Item: The string describes pcd
# @param Type: The type of Pcd
# @param File: The file which describes the pcd, used for error report
# @param LineNo: Line number in File, used for error report
#
def GetPcdOfInf(Item, Type, File, LineNo):
    Format = '<TokenSpaceGuidCName>.<PcdCName>[|<Value>]'
    TokenGuid = ''
    TokenName = ''
    #
    # Map the DEC-style PCD section type onto the matching INF section type;
    # unknown types leave InfType empty.
    #
    TypeMap = {
        DataType.TAB_PCDS_FIXED_AT_BUILD: DataType.TAB_INF_FIXED_PCD,
        DataType.TAB_PCDS_PATCHABLE_IN_MODULE: DataType.TAB_INF_PATCH_PCD,
        DataType.TAB_PCDS_FEATURE_FLAG: DataType.TAB_INF_FEATURE_PCD,
        DataType.TAB_PCDS_DYNAMIC_EX: DataType.TAB_INF_PCD_EX,
        DataType.TAB_PCDS_DYNAMIC: DataType.TAB_INF_PCD,
    }
    InfType = TypeMap.get(Type, '')

    Fields = GetSplitValueList(Item, DataType.TAB_VALUE_SPLIT, 1)
    TokenInfo = GetSplitValueList(Fields[0], DataType.TAB_SPLIT)
    if len(TokenInfo) != 2:
        RaiseParserError(Item, InfType, File, Format, LineNo)
    else:
        TokenGuid = TokenInfo[0]
        TokenName = TokenInfo[1]

    #
    # The value part is optional; None signals "no value given".
    #
    Value = Fields[1] if len(Fields) > 1 else None
    return (TokenGuid, TokenName, Value, InfType)
+
+
## Get Pcd Values of Dec
#
# Get Pcd of Dec as <TokenSpcCName>.<TokenCName>|<Value>|<DatumType>|<Token>
#
# @param Item: Pcd item
# @param Type: Pcd type
# @param File: Dec file
# @param LineNo: Line number
#
def GetPcdOfDec(Item, Type, File, LineNo=-1):
    Format = '<TokenSpaceGuidCName>.<PcdCName>|<Value>|<DatumType>|<Token>'
    TokenGuid = TokenName = Value = DatumType = Token = ''
    Fields = GetSplitValueList(Item)
    if len(Fields) != 4:
        RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
    else:
        Value, DatumType, Token = Fields[1], Fields[2], Fields[3]
    #
    # First field must be <TokenSpaceGuidCName>.<PcdCName>
    #
    TokenInfo = GetSplitValueList(Fields[0], DataType.TAB_SPLIT)
    if len(TokenInfo) != 2:
        RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
    else:
        TokenGuid, TokenName = TokenInfo[0], TokenInfo[1]

    return (TokenGuid, TokenName, Value, DatumType, Token, Type)
+
## Parse DEFINE statement
#
# Get DEFINE macros
#
# Parse a "DEFINE <Name> = <Value>" line found inside a section and record
# the pair in the given table.
#
# @param LineValue: A DEFINE line value
# @param StartLine: A DEFINE start line
# @param Table: A table object providing Insert()
# @param FileID: File ID
# @param SectionName: DEFINE section name (used for the debug message only)
# @param SectionModel: DEFINE section model
# @param Arch: DEFINE arch
#
def ParseDefine(LineValue, StartLine, Table, FileID, SectionName, \
                SectionModel, Arch):
    Logger.Debug(Logger.DEBUG_2, ST.MSG_DEFINE_STATEMENT_FOUND % (LineValue, \
                 SectionName))
    #
    # Locate the "DEFINE " keyword case-insensitively, slice off everything
    # up to and including it (original casing of the remainder preserved),
    # clean comments/whitespace, then split once on '=' into name and value.
    #
    Define = \
    GetSplitValueList(CleanString\
                      (LineValue[LineValue.upper().\
                                 find(DataType.TAB_DEFINE.upper() + ' ') + \
                                 len(DataType.TAB_DEFINE + ' ') : ]), \
                      DataType.TAB_EQUAL_SPLIT, 1)
    # Record the macro as a MODEL_META_DATA_DEFINE row scoped to this
    # section/arch; both start and end positions use StartLine.
    Table.Insert(DataType.MODEL_META_DATA_DEFINE, Define[0], Define[1], '', \
                 '', '', Arch, SectionModel, FileID, StartLine, -1, \
                 StartLine, -1, 0)
+
## InsertSectionItems
#
# Insert item data of a section to a dict
#
# @param Model: Key into RecordSet whose record list receives the items
# @param SectionItemList: Section item list; each entry is
#                         [LineValue, StartLine, Comment]
# @param ArchList: Arch list, parallel to ThirdList
# @param ThirdList: Third list (extra per-arch data), parallel to ArchList
# @param RecordSet: Record set (dict of Model -> record list), updated
#                   in place
#
def InsertSectionItems(Model, SectionItemList, ArchList, \
                       ThirdList, RecordSet):
    #
    # Insert each item data of a section, once per (Arch, Third) pair.
    # Note: Records aliases RecordSet[Model], so appends accumulate across
    # arch iterations; the final assignment is effectively a no-op.
    #
    for Index in range(0, len(ArchList)):
        Arch = ArchList[Index]
        Third = ThirdList[Index]
        if Arch == '':
            # An unspecified arch means the item applies to all arches
            Arch = DataType.TAB_ARCH_COMMON

        Records = RecordSet[Model]
        for SectionItem in SectionItemList:
            LineValue, StartLine, Comment = SectionItem[0], \
            SectionItem[1], SectionItem[2]

            Logger.Debug(4, ST.MSG_PARSING %LineValue)
            #
            # DEFINE statements are handled elsewhere (ParseDefine), skip
            #
            if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1:
                continue
            #
            # At last parse other sections; IdNum -1 marks "no database id"
            #
            IdNum = -1
            Records.append([LineValue, Arch, StartLine, IdNum, Third, Comment])

    if RecordSet != {}:
        RecordSet[Model] = Records
+
## GenMetaDatSectionItem
#
# Append Value to the list stored under Key, creating the entry on first use.
#
# @param Key: A key
# @param Value: A value
# @param List: The target dictionary (note: despite the name, a dict
#              mapping Key -> list of values)
#
def GenMetaDatSectionItem(Key, Value, List):
    List.setdefault(Key, []).append(Value)
+
## GetPkgInfoFromDec
#
# get package name, guid, version info from dec files
#
# Results are memoized in the module-level gPKG_INFO_DICT cache keyed by
# the (forward-slash-normalized) path.
#
# @param Path: File path of the DEC file
# @return: (PkgName, PkgGuid, PkgVersion) tuple; (None, None, None) when
#          the DEC parser raises FatalError
#
def GetPkgInfoFromDec(Path):
    PkgName = None
    PkgGuid = None
    PkgVersion = None

    # Normalize separators so cache keys are consistent across platforms
    Path = Path.replace('\\', '/')

    if not os.path.exists(Path):
        Logger.Error("\nUPT", FILE_NOT_FOUND, File = Path)

    # Serve repeated queries from the cache
    if Path in gPKG_INFO_DICT:
        return gPKG_INFO_DICT[Path]

    try:
        DecParser = Dec(Path)
        PkgName = DecParser.GetPackageName()
        PkgGuid = DecParser.GetPackageGuid()
        PkgVersion = DecParser.GetPackageVersion()
        gPKG_INFO_DICT[Path] = (PkgName, PkgGuid, PkgVersion)
        return PkgName, PkgGuid, PkgVersion
    except FatalError:
        # Unparsable DEC files are reported as "no info" rather than fatal
        return None, None, None
+
+
## GetWorkspacePackage
#
# Get a list of workspace package information.
#
# Walk the workspace (root taken from the WORKSPACE environment variable),
# collect every .dec file while skipping version-control and hidden
# directories and hidden files, then extract package info from each DEC.
#
# @return: list of (PkgName, PkgGuid, PkgVersion, DecFilePath) tuples
#
def GetWorkspacePackage():
    DecFileList = []
    WorkspaceDir = os.environ["WORKSPACE"]
    for Root, Dirs, Files in os.walk(WorkspaceDir):
        #
        # Prune version-control and hidden directories in place so os.walk
        # does not descend into them. The original removed entries from
        # Dirs while iterating over it, which skips the element following
        # each removed one (e.g. one of two adjacent hidden directories
        # could still be traversed); the slice assignment filters correctly
        # while keeping the in-place mutation os.walk requires.
        #
        Dirs[:] = [Dir for Dir in Dirs
                   if Dir != 'CVS' and not Dir.startswith('.')]
        for FileSp in Files:
            if FileSp.startswith('.'):
                continue
            Ext = os.path.splitext(FileSp)[1]
            if Ext.lower() in ['.dec']:
                DecFileList.append\
                (os.path.normpath(os.path.join(Root, FileSp)))
    #
    # abstract package guid, version info from DecFile List
    #
    PkgList = []
    for DecFile in DecFileList:
        (PkgName, PkgGuid, PkgVersion) = GetPkgInfoFromDec(DecFile)
        if PkgName and PkgGuid and PkgVersion:
            PkgList.append((PkgName, PkgGuid, PkgVersion, DecFile))

    return PkgList
+
## GetWorkspaceModule
#
# Get a list of workspace modules.
#
# Walk the workspace (root taken from the WORKSPACE environment variable)
# and collect every .inf file, skipping version-control, hidden, and Build
# directories as well as hidden files.
#
# @return: list of normalized .inf file paths
#
def GetWorkspaceModule():
    InfFileList = []
    WorkspaceDir = os.environ["WORKSPACE"]
    for Root, Dirs, Files in os.walk(WorkspaceDir):
        #
        # Prune unwanted directories in place so os.walk skips them.
        # The original removed entries from Dirs while iterating over it,
        # which skips the element following each removed one, so adjacent
        # hidden directories could still be traversed; the slice assignment
        # filters correctly while keeping the in-place mutation os.walk
        # requires.
        #
        Dirs[:] = [Dir for Dir in Dirs
                   if Dir not in ('CVS', 'Build') and not Dir.startswith('.')]
        for FileSp in Files:
            if FileSp.startswith('.'):
                continue
            Ext = os.path.splitext(FileSp)[1]
            if Ext.lower() in ['.inf']:
                InfFileList.append\
                (os.path.normpath(os.path.join(Root, FileSp)))

    return InfFileList
+
## MacroParser used to parse macro definition
#
# @param Line: The content contain linestring and line number
#              (a (LineContent, LineNo) pair)
# @param FileName: The meta-file file name, used for error report
# @param SectionType: Section for the Line belong to
# @param FileLocalMacros: A dict of macros defined in the [Defines] section,
#                         updated in place for header-section DEFINEs
# @return: (Name, Value) of the macro, or (None, None) when the line is not
#          a DEFINE statement
#
def MacroParser(Line, FileName, SectionType, FileLocalMacros):
    MacroDefPattern = re.compile("^(DEFINE)[ \t]+")
    LineContent = Line[0]
    LineNo = Line[1]
    Match = MacroDefPattern.match(LineContent)
    if not Match:
        #
        # Not 'DEFINE/EDK_GLOBAL' statement, call decorated method
        #
        return None, None

    # Split the remainder after "DEFINE" once on '=' into name and value
    TokenList = GetSplitValueList(LineContent[Match.end(1):], \
                                  DataType.TAB_EQUAL_SPLIT, 1)
    #
    # Syntax check: both a macro name and a value must be present
    #
    if not TokenList[0]:
        Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACRONAME_NOGIVEN,
                     ExtraData=LineContent, File=FileName, Line=LineNo)
    if len(TokenList) < 2:
        Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACROVALUE_NOGIVEN,
                     ExtraData=LineContent, File=FileName, Line=LineNo)

    Name, Value = TokenList

    #
    # Macros DEFINEd in the header ([Defines]) section are file-local
    #
    if SectionType == DataType.MODEL_META_DATA_HEADER:
        FileLocalMacros[Name] = Value

    # Macro names must start with an uppercase letter, followed by
    # uppercase letters, digits or underscores
    ReIsValidMacroName = re.compile(r"^[A-Z][A-Z0-9_]*$", re.DOTALL)
    if ReIsValidMacroName.match(Name) == None:
        Logger.Error('Parser',
                     FORMAT_INVALID,
                     ST.ERR_MACRONAME_INVALID%(Name),
                     ExtraData=LineContent,
                     File=FileName,
                     Line=LineNo)

    # Validate MACRO Value
    #
    # <MacroDefinition> ::= [<Comments>]{0,}
    #                       "DEFINE" <MACRO> "=" [{<PATH>} {<VALUE>}] <EOL>
    # <Value>           ::= {<NumVal>} {<Boolean>} {<AsciiString>} {<GUID>}
    #                       {<CString>} {<UnicodeString>} {<CArray>}
    #
    # The definition of <NumVal>, <PATH>, <Boolean>, <GUID>, <CString>,
    # <UnicodeString>, <CArray> are subset of <AsciiString>, so only
    # printable ASCII (0x20-0x7e) is accepted here.
    #
    ReIsValidMacroValue = re.compile(r"^[\x20-\x7e]*$", re.DOTALL)
    if ReIsValidMacroValue.match(Value) == None:
        Logger.Error('Parser',
                     FORMAT_INVALID,
                     ST.ERR_MACROVALUE_INVALID%(Value),
                     ExtraData=LineContent,
                     File=FileName,
                     Line=LineNo)

    return Name, Value
+
## GenSection
#
# generate section contents
#
# @param SectionName: indicate the name of the section, details refer to
#                     INF, DEC specs
# @param SectionDict: section statement dict, key is SectionAttrs (arch,
#                     moduletype or platform may exist as needed) list
#                     seperated by space, value is list of statements
# @param SplitArch: when True, SectionAttrs is split on spaces into arches;
#                   when False it is split on the comment character instead
#                   (except for UserExtensions, which is kept whole)
# @return: the generated section text, e.g. "[Name.IA32]\n<statements>"
#
def GenSection(SectionName, SectionDict, SplitArch=True):
    Content = ''
    for SectionAttrs in SectionDict:
        StatementList = SectionDict[SectionAttrs]
        # A non-common, non-Defines attribute string produces a
        # "[Name.ARCH1, Name.ARCH2]" style header; otherwise a plain "[Name]"
        if SectionAttrs and SectionName != 'Defines' and SectionAttrs.strip().upper() != DataType.TAB_ARCH_COMMON:
            if SplitArch:
                ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_SPACE_SPLIT)
            else:
                if SectionName != 'UserExtensions':
                    ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_COMMENT_SPLIT)
                else:
                    ArchList = [SectionAttrs]
            # Canonicalize arch casing (known arches upper, common lower)
            for Index in xrange(0, len(ArchList)):
                ArchList[Index] = ConvertArchForInstall(ArchList[Index])
            Section = '[' + SectionName + '.' + (', ' + SectionName + '.').join(ArchList) + ']'
        else:
            Section = '[' + SectionName + ']'
        Content += '\n\n' + Section + '\n'
        if StatementList != None:
            for Statement in StatementList:
                Content += Statement + '\n'

    return Content
+
## ConvertArchForInstall
# if Arch.upper() is in "IA32", "X64", "IPF", and "EBC", it must be upper
# case. "common" must be lower case. Anything else, the case must be
# preserved
#
# @param Arch: the arch string that need to be converted, it should be
#              stripped before pass in
# @return: the arch string that get converted
#
def ConvertArchForInstall(Arch):
    UpperArch = Arch.upper()
    if UpperArch in (DataType.TAB_ARCH_IA32, DataType.TAB_ARCH_X64,
                     DataType.TAB_ARCH_IPF, DataType.TAB_ARCH_EBC):
        return UpperArch
    if UpperArch == DataType.TAB_ARCH_COMMON:
        return Arch.lower()
    # Unknown arch: preserve caller's casing
    return Arch
diff --git a/BaseTools/Source/Python/UPT/Library/String.py b/BaseTools/Source/Python/UPT/Library/String.py
new file mode 100644
index 0000000000..47301aebb0
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/String.py
@@ -0,0 +1,968 @@
+## @file
+# This file is used to define common string related functions used in parsing
+# process
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+'''
+String
+'''
+##
+# Import Modules
+#
+import re
+import os.path
+from string import strip
+import Logger.Log as Logger
+import Library.DataType as DataType
+from Logger.ToolError import FORMAT_INVALID
+from Logger.ToolError import PARSER_ERROR
+from Logger import StringTable as ST
+
+#
+# Regular expression for matching macro used in DSC/DEC/INF file inclusion
+#
+gMACRO_PATTERN = re.compile("\$\(([_A-Z][_A-Z0-9]*)\)", re.UNICODE)
+
## GetSplitValueList
#
# Get a value list from a string with multiple values splited with SplitTag
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# Each piece is whitespace-stripped. Returns a list under Python 2
# (map returns a list there).
#
# @param String: The input string to be splitted
# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#                  (no limit)
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit=-1):
    return map(lambda l: l.strip(), String.split(SplitTag, MaxSplit))
+
## MergeArches
#
# Find a key's all arches in dict, add the new arch to the list
# If not exist any arch, set the arch directly
#
# @param Dict: The input value for Dict, updated in place
# @param Key: The input value for Key
# @param Arch: The Arch to be added or merged
#
def MergeArches(Dict, Key, Arch):
    Existing = Dict.get(Key)
    if Existing is None:
        # First arch for this key: split() yields a fresh one-element list
        # for a single arch string
        Dict[Key] = Arch.split()
    else:
        Existing.append(Arch)
+
## GenDefines
#
# Parse a string with format "DEFINE <VarName> = <PATH>"
# Generate a map Defines[VarName] = PATH
#
# @param String: String with DEFINE statement
# @param Arch: Supportted Arch
# @param Defines: Dict to receive the parsed macro, keyed by (name, arch)
# @return: 0 on success, -1 on malformed DEFINE, 1 when the string is not
#          a DEFINE statement at all
#
def GenDefines(String, Arch, Defines):
    if String.find(DataType.TAB_DEFINE + ' ') < 0:
        # Not a DEFINE line
        return 1
    Parts = String.replace(DataType.TAB_DEFINE + ' ', '').\
            split(DataType.TAB_EQUAL_SPLIT)
    if len(Parts) != 2:
        # Invalid format: must be exactly <name> = <value>
        return -1
    Defines[(CleanString(Parts[0]), Arch)] = CleanString(Parts[1])
    return 0
+
## GetLibraryClassesWithModuleType
#
# Get Library Class definition when no module type defined
#
# @param Lines: The content to be parsed
# @param Key: The section header, parsed for the module-type part
# @param KeyValues: To store data after parsing, updated in place
# @param CommentCharacter: Comment char, used to ignore comment content
# @return: always True
#
def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
    NewKey = SplitModuleType(Key)
    # Drop everything up to and including the section header terminator
    Body = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    for RawLine in Body.splitlines():
        Cleaned = CleanString(RawLine, CommentCharacter)
        if Cleaned and Cleaned[0] != CommentCharacter:
            KeyValues.append([Cleaned, NewKey[1]])

    return True
+
## GetDynamics
#
# Get Dynamic Pcds
#
# @param Lines: The content to be parsed
# @param Key: The section header, parsed for the SkuId name part
# @param KeyValues: To store data after parsing, updated in place
# @param CommentCharacter: Comment char, used to ignore comment content
# @return: always True
#
def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
    #
    # Get SkuId Name List from the section header
    #
    SkuIdNameList = SplitModuleType(Key)

    # Drop everything up to and including the section header terminator
    Body = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    for RawLine in Body.splitlines():
        Cleaned = CleanString(RawLine, CommentCharacter)
        if Cleaned and Cleaned[0] != CommentCharacter:
            KeyValues.append([Cleaned, SkuIdNameList[1]])

    return True
+
## SplitModuleType
#
# Split ModuleType out of section defien to get key
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [
# 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
# @return: [<name-and-arch>, <module type list>]
#
def SplitModuleType(Key):
    KeyList = Key.split(DataType.TAB_SPLIT)
    #
    # Pad with empty strings so indexes 1 (arch) and 2 (module type)
    # always exist
    #
    KeyList += ['', '']

    KeyValue = KeyList[0]
    if KeyList[1]:
        # Re-attach the arch part to the class name
        KeyValue = KeyValue + DataType.TAB_SPLIT + KeyList[1]

    return [KeyValue, GetSplitValueList(KeyList[2])]
+
## Replace macro in string
#
# This method replace macros used in given string. The macros are given in a
# dictionary.
#
# @param String            String to be processed
# @param MacroDefinitions  The macro definitions in the form of dictionary
# @param SelfReplacement   To decide whether replace un-defined macro to ''
# @param Line:             The content contain line string and line number,
#                          used for the debug message when SelfReplacement
#                          removes an undefined macro
# @param FileName:         The meta-file file name (debug message only)
# @param Flag:             When True, only macros OUTSIDE double-quoted
#                          substrings are replaced
#
def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line = None, FileName = None, Flag = False):
    LastString = String
    if MacroDefinitions == None:
        MacroDefinitions = {}
    # Repeat until no replacement happens, so macros that expand to other
    # macros are resolved too
    while MacroDefinitions:
        QuotedStringList = []
        HaveQuotedMacroFlag = False
        if not Flag:
            MacroUsed = gMACRO_PATTERN.findall(String)
        else:
            #
            # Split on '"': even-indexed pieces are outside quotes,
            # odd-indexed pieces are inside. Only the outside pieces are
            # scanned for macros.
            #
            ReQuotedString = re.compile('\"')
            QuotedStringList = ReQuotedString.split(String)
            if len(QuotedStringList) >= 3:
                HaveQuotedMacroFlag = True
            Count = 0
            MacroString = ""
            for QuotedStringItem in QuotedStringList:
                Count += 1
                if Count % 2 != 0:
                    MacroString += QuotedStringItem

                # A trailing unmatched piece after the last quote is also
                # outside the quotes
                if Count == len(QuotedStringList) and Count%2 == 0:
                    MacroString += QuotedStringItem

            MacroUsed = gMACRO_PATTERN.findall(MacroString)
        #
        # no macro found in String, stop replacing
        #
        if len(MacroUsed) == 0:
            break
        for Macro in MacroUsed:
            if Macro not in MacroDefinitions:
                # Undefined macro: optionally delete it from the string
                if SelfReplacement:
                    String = String.replace("$(%s)" % Macro, '')
                    Logger.Debug(5, "Delete undefined MACROs in file %s line %d: %s!" %(FileName, Line[1], Line[0]))
                continue
            if not HaveQuotedMacroFlag:
                String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
            else:
                # Replace only in the pieces that lie outside quotes
                Count = 0
                for QuotedStringItem in QuotedStringList:
                    Count += 1
                    if Count % 2 != 0:
                        QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
                                                                                      MacroDefinitions[Macro])
                    elif Count == len(QuotedStringList) and Count%2 == 0:
                        QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
                                                                                      MacroDefinitions[Macro])

        # Reassemble the string from the quote-split pieces, restoring the
        # '"' separators consumed by the split
        RetString = ''
        if HaveQuotedMacroFlag:
            Count = 0
            for QuotedStringItem in QuotedStringList:
                Count += 1
                if Count != len(QuotedStringList):
                    RetString += QuotedStringList[Count-1] + "\""
                else:
                    RetString += QuotedStringList[Count-1]

            String = RetString

        #
        # in case there's macro not defined
        #
        if String == LastString:
            break
        LastString = String

    return String
+
## NormPath
#
# Create a normal path
# And replace DEFINE macros in the path
#
# @param Path: The input value for Path to be converted
# @param Defines: A dict of DEFINE statement macros; empty/None means no
#                 macro replacement
# @return: the normalized path; a path that started with '.' keeps its
#          leading './'
#
def NormPath(Path, Defines = None):
    if not Path:
        # Empty/None path is returned untouched
        return Path

    WasRelative = Path[0] == '.'
    #
    # Replace with Define
    #
    if Defines:
        Path = ReplaceMacro(Path, Defines)
    #
    # To local path format
    #
    Path = os.path.normpath(Path)

    # normpath drops a leading './'; restore it for relative paths
    if WasRelative and Path[0] != '.':
        Path = os.path.join('.', Path)
    return Path
+
## CleanString
#
# Remove comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
#                          default is DataType.TAB_COMMENT_SPLIT
# @param AllowCppStyleComment: When True, treat EDK1 '//' comments as
#                              ordinary comments
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
    #
    # remove whitespace
    #
    Line = Line.strip()
    #
    # Replace EDK1's comment character
    #
    if AllowCppStyleComment:
        Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
    #
    # Cut the line at the first comment character that is not inside a
    # double-quoted string
    #
    InString = False
    for Index, Char in enumerate(Line):
        if Char == '"':
            InString = not InString
        elif Char == CommentCharacter and not InString:
            Line = Line[:Index]
            break
    #
    # remove whitespace again
    #
    return Line.strip()
+
## CleanString2
#
# Split comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
#                          default is DataType.TAB_COMMENT_SPLIT
# @param AllowCppStyleComment: When True, treat EDK1 '//' comments as
#                              ordinary comments
# @return: (statement part, comment part) tuple, both stripped
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
    #
    # remove whitespace
    #
    Line = Line.strip()
    #
    # Replace EDK1's comment character
    #
    if AllowCppStyleComment:
        Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
    #
    # separate comments and statements (split only on the first comment
    # character; the rest stays in the comment part)
    #
    LineParts = Line.split(CommentCharacter, 1)
    #
    # remove whitespace again
    #
    Line = LineParts[0].strip()
    if len(LineParts) > 1:
        Comment = LineParts[1].strip()
        #
        # Remove prefixed and trailing comment characters by narrowing a
        # [Start, End) window over the comment text
        #
        Start = 0
        End = len(Comment)
        while Start < End and Comment.startswith(CommentCharacter, Start, End):
            Start += 1
        while End >= 0 and Comment.endswith(CommentCharacter, Start, End):
            End -= 1
        Comment = Comment[Start:End]
        Comment = Comment.strip()
    else:
        Comment = ''

    return Line, Comment
+
## GetMultipleValuesOfKeyFromLines
#
# Parse multiple strings to clean comment and spaces
# The result is saved to KeyValues
#
# @param Lines: The content to be parsed
# @param Key: Reserved (unused)
# @param KeyValues: To store data after parsing, updated in place
# @param CommentCharacter: Comment char, used to ignore comment content
# @return: always True
#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
    # Key/KeyValues touched only to silence unused-argument warnings
    if Key:
        pass
    if KeyValues:
        pass
    # Drop everything up to and including the section header terminator
    Body = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    for RawLine in Body.split('\n'):
        Cleaned = CleanString(RawLine, CommentCharacter)
        if Cleaned and Cleaned[0] != CommentCharacter:
            KeyValues += [Cleaned]
    return True
+
## GetDefineValue
#
# Parse a DEFINE statement to get defined value
# DEFINE Key Value
#
# @param String: The content to be parsed
# @param Key: The key of DEFINE statement
# @param CommentCharacter: Reserved (unused)
#
def GetDefineValue(String, Key, CommentCharacter):
    # CommentCharacter touched only to silence unused-argument warnings
    if CommentCharacter:
        pass
    Cleaned = CleanString(String)
    Prefix = Key + ' '
    # Everything after the first occurrence of "<Key> " is the value
    return Cleaned[Cleaned.find(Prefix) + len(Prefix):]
+
## GetSingleValueOfKeyFromLines
#
# Parse multiple strings as below to get value of each definition line
#   Key1 = Value1
#   Key2 = Value2
# The result is saved to Dictionary
#
# @param Lines: The content to be parsed
# @param Dictionary: To store data after parsing; must already contain the
#                    keys the caller wants collected, plus a
#                    TAB_INF_DEFINES_MACRO sub-dict for unknown keys
# @param CommentCharacter: Comment char, be used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value.
#                           Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, be used to decide if has
#                        multiple values
# @param ValueSplitCharacter: Value split char, be used to split multiple
#                             values. Key1 = Value1|Value2, '|' is the value
#                             split char
# @return: always True
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, \
                                 ValueSplitFlag, ValueSplitCharacter):
    Lines = Lines.split('\n')
    Keys = []
    Value = ''
    DefineValues = ['']
    SpecValues = ['']

    for Line in Lines:
        #
        # Handle DEFINE and SPEC lines first; they are accumulated into
        # their own value lists, replacing the initial '' placeholder
        #
        if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
            if '' in DefineValues:
                DefineValues.remove('')
            DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
            continue
        if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
            if '' in SpecValues:
                SpecValues.remove('')
            SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
            continue

        #
        # Handle Others: "Key <split> Value" lines
        #
        LineList = Line.split(KeySplitCharacter, 1)
        if len(LineList) >= 2:
            Key = LineList[0].split()
            # Only single-token, non-comment keys are processed
            if len(Key) == 1 and Key[0][0] != CommentCharacter:
                #
                # Remove comments and white spaces
                #
                LineList[1] = CleanString(LineList[1], CommentCharacter)
                if ValueSplitFlag:
                    Value = map(strip, LineList[1].split(ValueSplitCharacter))
                else:
                    Value = CleanString(LineList[1], CommentCharacter).splitlines()

                if Key[0] in Dictionary:
                    # First sighting replaces the preset value; later
                    # sightings of the same key extend it
                    if Key[0] not in Keys:
                        Dictionary[Key[0]] = Value
                        Keys.append(Key[0])
                    else:
                        Dictionary[Key[0]].extend(Value)
                else:
                    # Keys not pre-seeded in Dictionary are treated as
                    # macro definitions (first value only)
                    Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]

    if DefineValues == []:
        DefineValues = ['']
    if SpecValues == []:
        SpecValues = ['']
    Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
    Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues

    return True
+
## PreCheck
#
# Do pre-check for a file before it is parsed
# Check $()
# Check []
#
# @param FileName: Used for error report
# @param FileContent: File content to be parsed
# @param SupSectionTag: Reserved (unused), kept for interface compatibility
# @return: the cleaned file content, lines joined with '\r\n'
#
def PreCheck(FileName, FileContent, SupSectionTag):
    # SupSectionTag touched only to silence unused-argument warnings
    if SupSectionTag:
        pass
    LineNo = 0
    # NOTE(review): IsFailed is never set to True anywhere below, so the
    # final error branch is dead code — confirm before removing.
    IsFailed = False
    NewFileContent = ''
    for Line in FileContent.splitlines():
        LineNo = LineNo + 1
        #
        # Clean current line (strip whitespace and '#' comments)
        #
        Line = CleanString(Line)
        #
        # Remove commented line
        # NOTE(review): this tests TAB_COMMA_SPLIT (','), which blanks lines
        # starting with a comma; TAB_COMMENT_SPLIT ('#') looks intended —
        # confirm against DataType before changing.
        #
        if Line.find(DataType.TAB_COMMA_SPLIT) == 0:
            Line = ''
        #
        # Check $(): a '$' must be part of a complete "$(...)" macro
        #
        if Line.find('$') > -1:
            if Line.find('$(') < 0 or Line.find(')') < 0:
                Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
        #
        # Check []: brackets must appear in pairs on a line
        #
        if Line.find('[') > -1 or Line.find(']') > -1:
            #
            # Only get one '[' or one ']'
            #
            if not (Line.find('[') > -1 and Line.find(']') > -1):
                Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
        #
        # Regenerate FileContent
        #
        NewFileContent = NewFileContent + Line + '\r\n'

    if IsFailed:
        Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)

    return NewFileContent
+
## CheckFileType
#
# Check if the Filename is including ExtName
# Return True if it exists
# Raise a error message if it not exists
#
# @param CheckFilename: Name of the file to be checked
# @param ExtName: Ext name of the file to be checked
# @param ContainerFilename: The container file which describes the file to be
#                           checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file
#              to be checked
# @param LineNo: Known line number; -1 means "look it up in the container"
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo=-1):
    if CheckFilename != '' and CheckFilename != None:
        (Root, Ext) = os.path.splitext(CheckFilename)
        if Ext.upper() != ExtName.upper() and Root:
            #
            # Read the container with try/finally so the handle is closed
            # even if read() fails (the original leaked the file handle).
            #
            ContainerFileObj = open(ContainerFilename, 'r')
            try:
                ContainerFile = ContainerFileObj.read()
            finally:
                ContainerFileObj.close()
            if LineNo == -1:
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = ST.ERR_SECTIONNAME_INVALID % (SectionName, CheckFilename, ExtName)
            Logger.Error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo, \
                         File=ContainerFilename, RaiseError=Logger.IS_RAISE_ERROR)

    return True
+
## CheckFileExist
#
# Check if the file exists
# Return the resolved path if it exists
# Raise a error message if it not exists
#
# @param WorkspaceDir: Current workspace dir
# @param CheckFilename: Name of the file to be checked
# @param ContainerFilename: The container file which describes the file to
#                           be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the
#              file to be checked
# @param LineNo: Known line number; -1 means "look it up in the container"
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo=-1):
    CheckFile = ''
    if CheckFilename != '' and CheckFilename != None:
        CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
        if not os.path.isfile(CheckFile):
            #
            # Read the container with try/finally so the handle is closed
            # even if read() fails (the original leaked the file handle).
            #
            ContainerFileObj = open(ContainerFilename, 'r')
            try:
                ContainerFile = ContainerFileObj.read()
            finally:
                ContainerFileObj.close()
            if LineNo == -1:
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = ST.ERR_CHECKFILE_NOTFOUND % (CheckFile, SectionName)
            Logger.Error("Parser", PARSER_ERROR, ErrorMsg,
                         File=ContainerFilename, Line = LineNo, RaiseError=Logger.IS_RAISE_ERROR)
    return CheckFile
+
## GetLineNo
#
# Find the index of a line in a file
#
# @param FileContent: Search scope
# @param Line: Search key (matched as a substring of each line)
# @param IsIgnoreComment: When True, lines whose first non-blank character
#                         is the comment character are skipped
# @return: 1-based line number of the first match, or -1 if not found
#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
    LineList = FileContent.splitlines()
    for Index, Content in enumerate(LineList):
        if Content.find(Line) > -1:
            #
            # Ignore statement in comment. startswith avoids the IndexError
            # the original raised via strip()[0] when the stripped line is
            # empty (possible when Line itself is empty or whitespace).
            #
            if IsIgnoreComment:
                if Content.strip().startswith(DataType.TAB_COMMENT_SPLIT):
                    continue
            return Index + 1

    return -1
+
## RaiseParserError
#
# Raise a parser error
#
# @param Line: String which has error
# @param Section: Used for error report
# @param File: File which has the string
# @param Format: Correct format, appended to the error as extra data
# @param LineNo: Known line number; -1 means "look it up in File"
#
def RaiseParserError(Line, Section, File, Format='', LineNo=-1):
    if LineNo == -1:
        #
        # Read the file with try/finally so the handle is closed even if
        # read() fails (the original leaked the file handle).
        #
        FileObj = open(os.path.normpath(File), 'r')
        try:
            FileContent = FileObj.read()
        finally:
            FileObj.close()
        LineNo = GetLineNo(FileContent, Line)
    ErrorMsg = ST.ERR_INVALID_NOTFOUND % (Line, Section)
    if Format != '':
        Format = "Correct format is " + Format
    Logger.Error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, \
                 ExtraData=Format, RaiseError=Logger.IS_RAISE_ERROR)
+
## WorkspaceFile
#
# Return a full path with workspace dir
#
# @param WorkspaceDir: Workspace dir
# @param Filename: Relative file name
#
def WorkspaceFile(WorkspaceDir, Filename):
    NormalizedDir = NormPath(WorkspaceDir)
    NormalizedName = NormPath(Filename)
    return os.path.join(NormalizedDir, NormalizedName)
+
## Split string
#
# Remove one leading and one trailing '"' from the string, if present
#
# @param String: The string need to be splited
#
def SplitString(String):
    if String[:1] == '\"':
        String = String[1:]
    if String[-1:] == '\"':
        String = String[:-1]
    return String
+
## Convert To Sql String
#
# Replace "'" with "''" in each item of StringList
#
# @param StringList: A list for strings to be converted
#
def ConvertToSqlString(StringList):
    def _EscapeQuote(Text):
        return Text.replace("'", "''")
    # map keeps the original (Python 2) list return type
    return map(_EscapeQuote, StringList)
+
## Convert To Sql String
#
# Replace "'" with "''" in the String
#
# @param String: A String to be converted
#
def ConvertToSqlString2(String):
    # split/join doubles every single-quote, same as replace("'", "''")
    return "''".join(String.split("'"))
+
## RemoveBlockComment
#
# Remove EDK1-style block comments from a list of lines. Lines that fall
# entirely inside a block comment are replaced with empty strings (so line
# numbering is preserved); the text before the opening marker and after the
# closing marker is joined onto the closing line.
#
# @param Lines: Block Comment Lines
# @return: a new list of lines with block comments removed
#
def RemoveBlockComment(Lines):
    IsFindBlockComment = False
    ReservedLine = ''
    NewLines = []

    for Line in Lines:
        Line = Line.strip()
        #
        # Remove comment block: remember the text preceding the start
        # marker, then splice it together with the text following the end
        # marker (both markers may appear on the same line)
        #
        if Line.find(DataType.TAB_COMMENT_EDK1_START) > -1:
            ReservedLine = GetSplitValueList(Line, DataType.TAB_COMMENT_EDK1_START, 1)[0]
            IsFindBlockComment = True
        if Line.find(DataType.TAB_COMMENT_EDK1_END) > -1:
            Line = ReservedLine + GetSplitValueList(Line, DataType.TAB_COMMENT_EDK1_END, 1)[1]
            ReservedLine = ''
            IsFindBlockComment = False
        if IsFindBlockComment:
            # Inside an open block comment: emit a blank placeholder line
            NewLines.append('')
            continue
        NewLines.append(Line)
    return NewLines
+
## GetStringOfList
#
# Get String of a List: join items with Split appended after each one,
# then strip surrounding whitespace (a non-whitespace Split therefore
# remains after the last item)
#
# @param List: string list (any non-list input is returned unchanged)
# @param Split: split character
#
def GetStringOfList(List, Split = ' '):
    if type(List) != type([]):
        return List
    Joined = ''.join(Item + Split for Item in List)
    return Joined.strip()
+
## Get HelpTextList
#
# Flatten HelpTextClassList into a list of text lines. One trailing
# newline is stripped from each item's String (the item object is updated
# in place), then the text is split on '\n'.
#
# @param HelpTextClassList: Help Text Class List (items expose a .String
#                           attribute)
#
def GetHelpTextList(HelpTextClassList):
    Result = []
    if HelpTextClassList:
        for HelpText in HelpTextClassList:
            Text = HelpText.String
            if Text.endswith('\n'):
                Text = Text[0: len(Text) - len('\n')]
                HelpText.String = Text
            Result.extend(Text.split('\n'))
    return Result
+
## Get String Array Length
#
# Get String Array Length: the byte count of the C array the string would
# occupy — UCS-2 (2 bytes per char plus terminator) for unicode and L"..."
# literals, 1 byte per char plus terminator for "..." literals, otherwise
# the number of whitespace-separated tokens plus one.
#
# NOTE(review): `unicode` is the Python 2 builtin; this function cannot run
# unchanged under Python 3.
#
# @param String: the source string
#
def StringArrayLength(String):
    if isinstance(String, unicode):
        # UCS-2: two bytes per character plus a terminator
        return (len(String) + 1) * 2 + 1
    elif String.startswith('L"'):
        # L"..." literal: exclude L and both quotes, two bytes per char
        return (len(String) - 3 + 1) * 2
    elif String.startswith('"'):
        # "..." literal: exclude both quotes, one byte per char
        return (len(String) - 2 + 1)
    else:
        # Fall back to token count plus one
        return len(String.split()) + 1
+
## RemoveDupOption
#
# Remove duplicated options that start with the Which prefix; repeats are
# blanked in place (so the rejoined string may contain double spaces).
#
# @param OptionString: the option string
# @param Which: prefix flag identifying the options to de-duplicate
# @param Against: extra values treated as already seen
#
def RemoveDupOption(OptionString, Which="/I", Against=None):
    Tokens = OptionString.split()
    SeenValues = []
    if Against:
        SeenValues += Against
    for Pos, Token in enumerate(Tokens):
        if not Token.startswith(Which):
            continue
        # Value is whatever follows the prefix ('' when nothing does)
        Val = Token[len(Which):]
        if Val in SeenValues:
            Tokens[Pos] = ""
        else:
            SeenValues.append(Val)
    return " ".join(Tokens)
+
+## Check if the string is a decimal digit string or a hex number
+#
+# Return true if all characters in the string are digits and there is at
+# least one character
+# or valid Hexs (started with 0x, following by hexdigit letters)
+# , false otherwise.
+# @param string: input string
+#
def IsHexDigit(Str):
    # True if Str parses as a decimal integer, or as a hex integer with a
    # mandatory 0x/0X prefix and at least one hex digit; False otherwise.
    try:
        int(Str, 10)
        return True
    except ValueError:
        pass
    if len(Str) > 2 and Str.upper().startswith('0X'):
        try:
            int(Str, 16)
            return True
        except ValueError:
            return False
    return False
+
+## Check if the string is a decimal digit string or a hex number and its integer value is within the limit of UINT32
+#
+# Return true if all characters in the string are digits and there is at
+# least one character
+# or valid Hexs (started with 0x, following by hexdigit letters)
+# , false otherwise.
+# @param string: input string
+#
def IsHexDigitUINT32(Str):
    # Like IsHexDigit, but additionally requires the parsed value to fit an
    # unsigned 32-bit integer (0 .. 0xFFFFFFFF).
    try:
        Value = int(Str, 10)
        # Out-of-range decimal values never fall through to the hex branch
        return 0 <= Value <= 0xFFFFFFFF
    except ValueError:
        pass
    if len(Str) > 2 and Str.upper().startswith('0X'):
        try:
            Value = int(Str, 16)
            return 0 <= Value <= 0xFFFFFFFF
        except ValueError:
            return False
    return False
+
+## ConvertSpecialChar
+#
+# The ASCII text files of type INF, DEC, INI are edited by developers,
+# and may contain characters that cannot be directly translated to strings that
+# are conformant with the UDP XML Schema. Any characters in this category
+# (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x80-0xFF)
+# must be converted to a space character[0x20] as part of the parsing process.
+#
def ConvertSpecialChar(Lines):
    # Replace characters that the UDP XML Schema cannot represent with a
    # space (0x20): 0x00-0x08, TAB (0x09), 0x0B, 0x0C, 0x0E-0x1F and
    # 0x7F-0xFF.
    # NOTE(review): the pattern also covers DEL (0x7F), one code point more
    # than the 0x80-0xFF range quoted in the header comment — preserved.
    #
    # Fix: the regex was recompiled for every input line; compile it once
    # before the loop instead. Output is unchanged.
    ReMatchSpecialChar = re.compile(r"[\x00-\x08]|\x09|\x0b|\x0c|[\x0e-\x1f]|[\x7f-\xff]")
    return [ReMatchSpecialChar.sub(' ', Line) for Line in Lines]
+
+## __GetTokenList
+#
+# Assume Str is a valid feature flag expression.
+# Return a list which contains tokens: alpha numeric token and other token
+# Whitespace are not stripped
+#
def __GetTokenList(Str):
    # Tokenize a feature-flag expression into a flat list of tokens:
    # runs of [A-Za-z0-9_] characters, runs of operator/punctuation
    # characters (whitespace included, NOT stripped), and whole quoted
    # string literals. A leading 'L' is kept attached to the quoted
    # literal that follows it (wide-string form L"...").
    InQuote = False    # True while scanning inside a double-quoted literal
    Token = ''         # pending word token (or quoted literal being built)
    TokenOP = ''       # pending run of operator/punctuation characters
    PreChar = ''       # previous character, used to detect \" escapes
    List = []
    for Char in Str:
        if InQuote:
            Token += Char
            # A quote closes the literal unless escaped by a backslash
            if Char == '"' and PreChar != '\\':
                InQuote = not InQuote
                List.append(Token)
                Token = ''
            continue
        if Char == '"':
            # Opening quote: flush pending tokens, but keep a bare 'L'
            # prefix attached to the literal that starts here
            if Token and Token != 'L':
                List.append(Token)
                Token = ''
            if TokenOP:
                List.append(TokenOP)
                TokenOP = ''
            InQuote = not InQuote
            Token += Char
            continue

        if not (Char.isalnum() or Char in '_'):
            # Operator/punctuation character: flush any pending word token
            TokenOP += Char
            if Token:
                List.append(Token)
                Token = ''
        else:
            # Word character: flush any pending operator token
            Token += Char
            if TokenOP:
                List.append(TokenOP)
                TokenOP = ''

        # Two consecutive backslashes cancel, so '\\' does not escape a
        # following quote
        if PreChar == '\\' and Char == '\\':
            PreChar = ''
        else:
            PreChar = Char
    # Flush whatever is still pending at end of input
    if Token:
        List.append(Token)
    if TokenOP:
        List.append(TokenOP)
    return List
+
+## ConvertNEToNOTEQ
+#
+# Convert NE operator to NOT EQ
+# For example: 1 NE 2 -> 1 NOT EQ 2
+#
+# @param Expr: Feature flag expression to be converted
+#
def ConvertNEToNOTEQ(Expr):
    # Rewrite every standalone 'NE' token of a feature-flag expression as
    # 'NOT EQ', leaving all other tokens (including whitespace) untouched.
    Tokens = __GetTokenList(Expr)
    return ''.join('NOT EQ' if Token == 'NE' else Token for Token in Tokens)
+
+## ConvertNOTEQToNE
+#
+# Convert NOT EQ operator to NE
+# For example: 1 NOT EQ 2 -> 1 NE 2
+#
+# @param Expr: Feature flag expression to be converted
+#
def ConvertNOTEQToNE(Expr):
    # Rewrite 'NOT EQ' (possibly separated by whitespace tokens) back to a
    # single 'NE' token in a feature-flag expression.
    PendingNot = False
    Result = []
    for Token in __GetTokenList(Expr):
        if PendingNot and Token == 'EQ':
            # Discard whitespace tokens back to the 'NOT' we saw earlier,
            # then collapse 'NOT' + 'EQ' into 'NE'
            while not Result[-1].strip():
                Result.pop()
            Result[-1] = 'NE'
            PendingNot = False
            continue
        if Token == 'NOT':
            PendingNot = True
        elif Token.strip():
            # Any other non-whitespace token cancels a pending 'NOT'
            PendingNot = False
        Result.append(Token)

    return ''.join(Result)
+
+## SplitPcdEntry
+#
+# Split a PCD entry string into Token.CName, PCD value, and FFE.
+# NOTE: PCD Value and FFE can contain "|" in it's expression. And in INF specification, have below rule.
+# When using the characters "|" or "||" in an expression, the expression must be encapsulated in
+# open "(" and close ")" parenthesis.
+#
+# @param String An PCD entry string need to be split.
+#
+# @return List [PcdTokenCName, Value, FFE]
+#
def SplitPcdEntry(String):
    # Split a PCD entry string into [TokenCName, Value, FFE] plus a success
    # flag. '|' inside balanced "(...)" does not split, per the INF spec
    # rule quoted in the header comment.
    if not String:
        return ['', '',''], False

    PcdTokenCName = ''
    PcdValue = ''
    PcdFeatureFlagExp = ''

    # First split: TokenCName | <rest>
    ValueList = GetSplitValueList(String, "|", 1)

    #
    # Only contain TokenCName
    #
    if len(ValueList) == 1:
        return [ValueList[0]], True

    NewValueList = []

    if len(ValueList) == 2:
        PcdTokenCName = ValueList[0]
        # Re-split the remainder on every '|'; pieces are re-joined below
        # when an unbalanced '(' carries over
        ValueList = GetSplitValueList(ValueList[1], "|")

    # RemainCount tracks '(' not yet closed across pieces
    RemainCount = 0
    for Item in ValueList:
        ParenthesisCount = 0
        for Char in Item:
            if Char == "(":
                ParenthesisCount += 1
            if Char == ")":
                ParenthesisCount -= 1

        #
        # An individual item
        #
        if RemainCount == 0 and ParenthesisCount >= 0:
            NewValueList.append(Item)
            RemainCount = ParenthesisCount
        elif RemainCount > 0 and RemainCount + ParenthesisCount >= 0:
            # Continuation of a parenthesized expression: glue back the '|'
            NewValueList[-1] = NewValueList[-1] + '|' + Item
            RemainCount = RemainCount + ParenthesisCount
        elif RemainCount > 0 and RemainCount + ParenthesisCount < 0:
            #
            # ERROR, return
            #
            return ['', '', ''], False
        # NOTE(review): an item with surplus ')' while RemainCount == 0
        # matches none of the branches above and is silently dropped —
        # confirm whether that should be an error instead.

    if len(NewValueList) == 1:
        # TokenCName | Value
        PcdValue = NewValueList[0]
        return [PcdTokenCName, PcdValue], True
    elif len(NewValueList) == 2:
        # TokenCName | Value | FeatureFlagExpression
        PcdValue = NewValueList[0]
        PcdFeatureFlagExp = NewValueList[1]
        return [PcdTokenCName, PcdValue, PcdFeatureFlagExp], True
    else:
        return ['', '', ''], False

    # Unreachable: every branch of the if/elif/else above returns
    return ['', '', ''], False
diff --git a/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py b/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py
new file mode 100644
index 0000000000..7029e59889
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py
@@ -0,0 +1,228 @@
+## @file
+# This is an XML API that uses a syntax similar to XPath, but it is written in
+# standard python so that no extra python packages are required to use it.
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+
+'''
+XmlRoutines
+'''
+
+##
+# Import Modules
+#
+import xml.dom.minidom
+import re
+from Logger.ToolError import PARSER_ERROR
+import Logger.Log as Logger
+
+## Create a element of XML
+#
+# @param Name
+# @param String
+# @param NodeList
+# @param AttributeList
+#
def CreateXmlElement(Name, String, NodeList, AttributeList):
    # Build a DOM element named Name with optional text content String,
    # children from NodeList ([tag, text] pairs or ready-made nodes), and
    # attributes from AttributeList ([key, value] pairs). Pairs with an
    # empty/None tag, key or value are skipped.
    Document = xml.dom.minidom.Document()
    Root = Document.createElement(Name)
    if String not in ('', None):
        Root.appendChild(Document.createTextNode(String))

    for Node in NodeList:
        if type(Node) is list:
            Tag = Node[0]
            Text = Node[1]
            if Tag not in ('', None) and Text not in ('', None):
                Child = Document.createElement(Tag)
                Child.appendChild(Document.createTextNode(Text))
                Root.appendChild(Child)
        else:
            # Already a DOM node: attach as-is
            Root.appendChild(Node)

    for Attr in AttributeList:
        Key = Attr[0]
        Value = Attr[1]
        if Key not in ('', None) and Value not in ('', None):
            Root.setAttribute(Key, Value)

    return Root
+
+## Get a list of XML nodes using XPath style syntax.
+#
+# Return a list of XML DOM nodes from the root Dom specified by XPath String.
+# If the input Dom or String is not valid, then an empty list is returned.
+#
+# @param Dom The root XML DOM node.
+# @param String A XPath style path.
+#
def XmlList(Dom, String):
    # Return all DOM nodes reached from Dom by the XPath-like path String;
    # empty list for invalid input.
    if String in (None, "") or Dom in (None, ""):
        return []
    # Start from the document element when handed a whole document
    if Dom.nodeType == Dom.DOCUMENT_NODE:
        Dom = Dom.documentElement
    # A single leading '/' is optional
    if String.startswith("/"):
        String = String[1:]
    TagList = String.split('/')
    LastIndex = len(TagList) - 1
    CurrentNodes = [Dom]
    for Depth, Tag in enumerate(TagList):
        MatchedNodes = []
        for Candidate in CurrentNodes:
            if Candidate.nodeType == Candidate.ELEMENT_NODE and \
               Candidate.tagName == Tag:
                if Depth < LastIndex:
                    # Descend: next round matches among the children
                    MatchedNodes.extend(Candidate.childNodes)
                else:
                    # Final path segment: collect the node itself
                    MatchedNodes.append(Candidate)
        CurrentNodes = MatchedNodes

    return CurrentNodes
+
+
+## Get a single XML node using XPath style syntax.
+#
+# Return a single XML DOM node from the root Dom specified by XPath String.
+# If the input Dom or String is not valid, then an empty string is returned.
+#
+# @param Dom The root XML DOM node.
+# @param String A XPath style path.
+#
def XmlNode(Dom, String):
    # Return the first DOM node reached from Dom by the XPath-like path
    # String, or None for invalid input / no match.
    if String in (None, "") or Dom in (None, ""):
        return None
    if Dom.nodeType == Dom.DOCUMENT_NODE:
        Dom = Dom.documentElement
    if String.startswith("/"):
        String = String[1:]
    TagList = String.split('/')
    LastIndex = len(TagList) - 1
    Candidates = [Dom]
    Depth = 0
    while Depth <= LastIndex:
        # Take only the FIRST element matching the current path segment
        for Candidate in Candidates:
            if Candidate.nodeType == Candidate.ELEMENT_NODE and \
               Candidate.tagName == TagList[Depth]:
                if Depth == LastIndex:
                    return Candidate
                Candidates = Candidate.childNodes
                break
        # No match: advance to the next segment anyway (preserved quirk of
        # the original loop structure)
        Depth += 1
    return None
+
+
+## Get a single XML element using XPath style syntax.
+#
+# Return a single XML element from the root Dom specified by XPath String.
+# If the input Dom or String is not valid, then an empty string is returned.
+#
+# @param Dom The root XML DOM object.
+# @param String An XPath style path.
+#
def XmlElement(Dom, String):
    # Text content (stripped) of the node at path String; '' on any failure
    # (missing node, no text child, invalid input).
    try:
        Data = XmlNode(Dom, String).firstChild.data
    except BaseException:
        return ""
    return Data.strip()
+
+## Get a single XML element using XPath style syntax.
+#
+# Similar with XmlElement, but do not strip all the leading and tailing space
+# and newline, instead just remove the newline and spaces introduced by
+# toprettyxml()
+#
+# @param Dom The root XML DOM object.
+# @param String An XPath style path.
+#
def XmlElement2(Dom, String):
    # Like XmlElement, but instead of stripping all surrounding whitespace
    # it only removes the indentation/newlines that toprettyxml() inserts.
    try:
        Data = XmlNode(Dom, String).firstChild.data
        RemovePrettyRe = re.compile(r"""(?:(\n *) )(.*)\1""", re.DOTALL)
        return RemovePrettyRe.sub(r"\2", Data)
    except BaseException:
        return ""
+
+
+## Get a single XML element of the current node.
+#
+# Return a single XML element specified by the current root Dom.
+# If the input Dom is not valid, then an empty string is returned.
+#
+# @param Dom The root XML DOM object.
+#
def XmlElementData(Dom):
    # Stripped text content of Dom's first child; '' on any failure
    # (None input, no children, child without text data).
    try:
        Text = Dom.firstChild.data
    except BaseException:
        return ""
    return Text.strip()
+
+
+## Get a list of XML elements using XPath style syntax.
+#
+# Return a list of XML elements from the root Dom specified by XPath String.
+# If the input Dom or String is not valid, then an empty list is returned.
+#
+# @param Dom The root XML DOM object.
+# @param String A XPath style path.
+#
def XmlElementList(Dom, String):
    # Return the stripped text of every node matched by the XPath-like path
    # String; empty list for invalid input.
    #
    # Fix: use a list comprehension instead of map() so the function always
    # returns a real list — map() yields a lazy iterator on Python 3, which
    # would break callers that index or re-iterate the result. On Python 2
    # the behavior is identical.
    return [XmlElementData(Node) for Node in XmlList(Dom, String)]
+
+
+## Get the XML attribute of the current node.
+#
+# Return a single XML attribute named Attribute from the current root Dom.
+# If the input Dom or Attribute is not valid, then an empty string is returned.
+#
+# @param Dom The root XML DOM object.
+# @param Attribute The name of Attribute.
+#
def XmlAttribute(Dom, Attribute):
    # Value of the named attribute on Dom; '' when the attribute is absent
    # or Dom is not an element / is None.
    try:
        Value = Dom.getAttribute(Attribute)
    except BaseException:
        Value = ''
    return Value
+
+
+## Get the XML node name of the current node.
+#
+# Return a single XML node name from the current root Dom.
+# If the input Dom is not valid, then an empty string is returned.
+#
+# @param Dom The root XML DOM object.
+#
def XmlNodeName(Dom):
    # Stripped node name of Dom; '' when Dom is None or has no nodeName.
    try:
        Name = Dom.nodeName
    except BaseException:
        return ''
    return Name.strip()
+
+## Parse an XML file.
+#
+# Parse the input XML file named FileName and return a XML DOM it stands for.
+# If the input File is not a valid XML file, then an empty string is returned.
+#
+# @param FileName The XML file name.
+#
def XmlParseFile(FileName):
    # Parse the XML file FileName and return its DOM. On any failure the
    # error is reported through the UPT logger as a PARSER_ERROR (which
    # raises, per RaiseError=True).
    #
    # Fixes:
    #  * 'with' guarantees the file is closed on every path. The original
    #    called XmlFile.close() inside the except block, which raised a
    #    masking NameError whenever open() itself failed (XmlFile unbound)
    #    and double-closed the file otherwise.
    #  * 'except ... as ...' replaces the Python-2-only comma syntax.
    try:
        with open(FileName) as XmlFile:
            return xml.dom.minidom.parse(XmlFile)
    except BaseException as XExcept:
        Logger.Error('\nUPT', PARSER_ERROR, XExcept, File=FileName,
                     RaiseError=True)
diff --git a/BaseTools/Source/Python/UPT/Library/Xml/__init__.py b/BaseTools/Source/Python/UPT/Library/Xml/__init__.py
new file mode 100644
index 0000000000..5d268d990b
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/Xml/__init__.py
@@ -0,0 +1,20 @@
+## @file
+# Python 'Library' package initialization file.
+#
+# This file is required to make Python interpreter treat the directory
+# as containing package.
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+
+'''
+Xml
+''' \ No newline at end of file
diff --git a/BaseTools/Source/Python/UPT/Library/__init__.py b/BaseTools/Source/Python/UPT/Library/__init__.py
new file mode 100644
index 0000000000..b265bc873c
--- /dev/null
+++ b/BaseTools/Source/Python/UPT/Library/__init__.py
@@ -0,0 +1,20 @@
+## @file
+# Python 'Library' package initialization file.
+#
+# This file is required to make Python interpreter treat the directory
+# as containing package.
+#
+# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials are licensed and made available
+# under the terms and conditions of the BSD License which accompanies this
+# distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+
+'''
+Library
+''' \ No newline at end of file