summaryrefslogtreecommitdiff
path: root/BaseTools/Source/Python/UPT/Library
diff options
context:
space:
mode:
authorGuo Mang <mang.guo@intel.com>2018-04-25 17:24:58 +0800
committerGuo Mang <mang.guo@intel.com>2018-04-25 17:26:11 +0800
commit6e3789d7424660b14ef3d7123221c97db5d8aff5 (patch)
tree6a5a7f1e0bc5a5296f2de0c8f02091c85e3443b7 /BaseTools/Source/Python/UPT/Library
parentd33896d88d9d32d516129e92e25b80f8fddc6f7b (diff)
downloadedk2-platforms-6e3789d7424660b14ef3d7123221c97db5d8aff5.tar.xz
Remove unused files
Contributed-under: TianoCore Contribution Agreement 1.1 Signed-off-by: Guo Mang <mang.guo@intel.com>
Diffstat (limited to 'BaseTools/Source/Python/UPT/Library')
-rw-r--r--BaseTools/Source/Python/UPT/Library/CommentGenerating.py246
-rw-r--r--BaseTools/Source/Python/UPT/Library/CommentParsing.py601
-rw-r--r--BaseTools/Source/Python/UPT/Library/DataType.py958
-rw-r--r--BaseTools/Source/Python/UPT/Library/ExpressionValidate.py572
-rw-r--r--BaseTools/Source/Python/UPT/Library/GlobalData.py111
-rw-r--r--BaseTools/Source/Python/UPT/Library/Misc.py1125
-rw-r--r--BaseTools/Source/Python/UPT/Library/ParserValidate.py733
-rw-r--r--BaseTools/Source/Python/UPT/Library/Parsing.py1020
-rw-r--r--BaseTools/Source/Python/UPT/Library/String.py988
-rw-r--r--BaseTools/Source/Python/UPT/Library/UniClassObject.py1070
-rw-r--r--BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py229
-rw-r--r--BaseTools/Source/Python/UPT/Library/Xml/__init__.py20
-rw-r--r--BaseTools/Source/Python/UPT/Library/__init__.py20
13 files changed, 0 insertions, 7693 deletions
diff --git a/BaseTools/Source/Python/UPT/Library/CommentGenerating.py b/BaseTools/Source/Python/UPT/Library/CommentGenerating.py
deleted file mode 100644
index 9c6e3aad9f..0000000000
--- a/BaseTools/Source/Python/UPT/Library/CommentGenerating.py
+++ /dev/null
@@ -1,246 +0,0 @@
-## @file
-# This file is used to define comment generating interface
-#
-# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-'''
-CommentGenerating
-'''
-
-##
-# Import Modules
-#
-from Library.String import GetSplitValueList
-from Library.DataType import TAB_SPACE_SPLIT
-from Library.DataType import TAB_INF_GUIDTYPE_VAR
-from Library.DataType import USAGE_ITEM_NOTIFY
-from Library.DataType import ITEM_UNDEFINED
-from Library.DataType import TAB_HEADER_COMMENT
-from Library.DataType import TAB_BINARY_HEADER_COMMENT
-from Library.DataType import TAB_COMMENT_SPLIT
-from Library.DataType import TAB_SPECIAL_COMMENT
-from Library.DataType import END_OF_LINE
-from Library.DataType import TAB_COMMENT_EDK1_SPLIT
-from Library.DataType import TAB_COMMENT_EDK1_START
-from Library.DataType import TAB_COMMENT_EDK1_END
-from Library.DataType import TAB_STAR
-from Library.DataType import TAB_PCD_PROMPT
-from Library.UniClassObject import ConvertSpecialUnicodes
-from Library.Misc import GetLocalValue
-## GenTailCommentLines
-#
-# @param TailCommentLines: the tail comment lines that need to be generated
-# @param LeadingSpaceNum: the number of leading space needed for non-first
-# line tail comment
-#
-def GenTailCommentLines (TailCommentLines, LeadingSpaceNum = 0):
- TailCommentLines = TailCommentLines.rstrip(END_OF_LINE)
- CommentStr = TAB_SPACE_SPLIT*2 + TAB_SPECIAL_COMMENT + TAB_SPACE_SPLIT + \
- (END_OF_LINE + LeadingSpaceNum * TAB_SPACE_SPLIT + TAB_SPACE_SPLIT*2 + TAB_SPECIAL_COMMENT + \
- TAB_SPACE_SPLIT).join(GetSplitValueList(TailCommentLines, END_OF_LINE))
-
- return CommentStr
-
-## GenGenericComment
-#
-# @param CommentLines: Generic comment Text, maybe Multiple Lines
-#
-def GenGenericComment (CommentLines):
- if not CommentLines:
- return ''
- CommentLines = CommentLines.rstrip(END_OF_LINE)
- CommentStr = TAB_SPECIAL_COMMENT + TAB_SPACE_SPLIT + (END_OF_LINE + TAB_COMMENT_SPLIT + TAB_SPACE_SPLIT).join\
- (GetSplitValueList(CommentLines, END_OF_LINE)) + END_OF_LINE
- return CommentStr
-
-## GenGenericCommentF
-#
-# similar to GenGenericComment but will remove <EOL> at end of comment once,
-# and for line with only <EOL>, '#\n' will be generated instead of '# \n'
-#
-# @param CommentLines: Generic comment Text, maybe Multiple Lines
-# @return CommentStr: Generated comment line
-#
-def GenGenericCommentF (CommentLines, NumOfPound=1, IsPrompt=False, IsInfLibraryClass=False):
- if not CommentLines:
- return ''
- #
- # if comment end with '\n', then remove it to prevent one extra line
- # generate later on
- #
- if CommentLines.endswith(END_OF_LINE):
- CommentLines = CommentLines[:-1]
- CommentStr = ''
- if IsPrompt:
- CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + TAB_PCD_PROMPT + TAB_SPACE_SPLIT + \
- CommentLines.replace(END_OF_LINE, '') + END_OF_LINE
- else:
- CommentLineList = GetSplitValueList(CommentLines, END_OF_LINE)
- FindLibraryClass = False
- for Line in CommentLineList:
- # If this comment is for @libraryclass and it has multiple lines
- # make sure the second lines align to the first line after @libraryclass as below
- #
- # ## @libraryclass XYZ FIRST_LINE
- # ## ABC SECOND_LINE
- #
- if IsInfLibraryClass and Line.find(u'@libraryclass ') > -1:
- FindLibraryClass = True
- if Line == '':
- CommentStr += TAB_COMMENT_SPLIT * NumOfPound + END_OF_LINE
- else:
- if FindLibraryClass and Line.find(u'@libraryclass ') > -1:
- CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + Line + END_OF_LINE
- elif FindLibraryClass:
- CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT * 16 + Line + END_OF_LINE
- else:
- CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + Line + END_OF_LINE
-
- return CommentStr
-
-
-## GenHeaderCommentSection
-#
-# Generate Header comment sections
-#
-# @param Abstract One line of abstract
-# @param Description multiple lines of Description
-# @param Copyright possible multiple copyright lines
-# @param License possible multiple license lines
-#
-def GenHeaderCommentSection(Abstract, Description, Copyright, License, IsBinaryHeader=False, \
- CommChar=TAB_COMMENT_SPLIT):
- Content = ''
-
- #
- # Convert special character to (c), (r) and (tm).
- #
- if isinstance(Abstract, unicode):
- Abstract = ConvertSpecialUnicodes(Abstract)
- if isinstance(Description, unicode):
- Description = ConvertSpecialUnicodes(Description)
- if IsBinaryHeader:
- Content += CommChar * 2 + TAB_SPACE_SPLIT + TAB_BINARY_HEADER_COMMENT + '\r\n'
- elif CommChar == TAB_COMMENT_EDK1_SPLIT:
- Content += CommChar + TAB_SPACE_SPLIT + TAB_COMMENT_EDK1_START + TAB_STAR + TAB_SPACE_SPLIT +\
- TAB_HEADER_COMMENT + '\r\n'
- else:
- Content += CommChar * 2 + TAB_SPACE_SPLIT + TAB_HEADER_COMMENT + '\r\n'
- if Abstract:
- Abstract = Abstract.rstrip('\r\n')
- Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
- (Abstract, '\n'))
- Content += '\r\n' + CommChar + '\r\n'
- else:
- Content += CommChar + '\r\n'
-
- if Description:
- Description = Description.rstrip('\r\n')
- Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
- (Description, '\n'))
- Content += '\r\n' + CommChar + '\r\n'
-
- #
- # There is no '#\n' line to separate multiple copyright lines in code base
- #
- if Copyright:
- Copyright = Copyright.rstrip('\r\n')
- Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join\
- (GetSplitValueList(Copyright, '\n'))
- Content += '\r\n' + CommChar + '\r\n'
-
- if License:
- License = License.rstrip('\r\n')
- Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
- (License, '\n'))
- Content += '\r\n' + CommChar + '\r\n'
-
- if CommChar == TAB_COMMENT_EDK1_SPLIT:
- Content += CommChar + TAB_SPACE_SPLIT + TAB_STAR + TAB_COMMENT_EDK1_END + '\r\n'
- else:
- Content += CommChar * 2 + '\r\n'
-
- return Content
-
-
-## GenInfPcdTailComment
-# Generate Pcd tail comment for Inf, this would be one line comment
-#
-# @param Usage: Usage type
-# @param TailCommentText: Comment text for tail comment
-#
-def GenInfPcdTailComment (Usage, TailCommentText):
- if (Usage == ITEM_UNDEFINED) and (not TailCommentText):
- return ''
-
- CommentLine = TAB_SPACE_SPLIT.join([Usage, TailCommentText])
- return GenTailCommentLines(CommentLine)
-
-## GenInfProtocolPPITailComment
-# Generate Protocol/PPI tail comment for Inf
-#
-# @param Usage: Usage type
-# @param TailCommentText: Comment text for tail comment
-#
-def GenInfProtocolPPITailComment (Usage, Notify, TailCommentText):
- if (not Notify) and (Usage == ITEM_UNDEFINED) and (not TailCommentText):
- return ''
-
- if Notify:
- CommentLine = USAGE_ITEM_NOTIFY + " ## "
- else:
- CommentLine = ''
-
- CommentLine += TAB_SPACE_SPLIT.join([Usage, TailCommentText])
- return GenTailCommentLines(CommentLine)
-
-## GenInfGuidTailComment
-# Generate Guid tail comment for Inf
-#
-# @param Usage: Usage type
-# @param TailCommentText: Comment text for tail comment
-#
-def GenInfGuidTailComment (Usage, GuidTypeList, VariableName, TailCommentText):
- GuidType = GuidTypeList[0]
- if (Usage == ITEM_UNDEFINED) and (GuidType == ITEM_UNDEFINED) and \
- (not TailCommentText):
- return ''
-
- FirstLine = Usage + " ## " + GuidType
- if GuidType == TAB_INF_GUIDTYPE_VAR:
- FirstLine += ":" + VariableName
-
- CommentLine = TAB_SPACE_SPLIT.join([FirstLine, TailCommentText])
- return GenTailCommentLines(CommentLine)
-
-## GenDecGuidTailComment
-#
-# @param SupModuleList: Supported module type list
-#
-def GenDecTailComment (SupModuleList):
- CommentLine = TAB_SPACE_SPLIT.join(SupModuleList)
- return GenTailCommentLines(CommentLine)
-
-
-## _GetHelpStr
-# get HelpString from a list of HelpTextObject, the priority refer to
-# related HLD
-#
-# @param HelpTextObjList: List of HelpTextObject
-#
-# @return HelpStr: the help text string found, '' means no help text found
-#
-def _GetHelpStr(HelpTextObjList):
- ValueList = []
- for HelpObj in HelpTextObjList:
- ValueList.append((HelpObj.GetLang(), HelpObj.GetString()))
- return GetLocalValue(ValueList, True)
diff --git a/BaseTools/Source/Python/UPT/Library/CommentParsing.py b/BaseTools/Source/Python/UPT/Library/CommentParsing.py
deleted file mode 100644
index e6d45103f9..0000000000
--- a/BaseTools/Source/Python/UPT/Library/CommentParsing.py
+++ /dev/null
@@ -1,601 +0,0 @@
-## @file
-# This file is used to define comment parsing interface
-#
-# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-'''
-CommentParsing
-'''
-
-##
-# Import Modules
-#
-import re
-
-from Library.String import GetSplitValueList
-from Library.String import CleanString2
-from Library.DataType import HEADER_COMMENT_NOT_STARTED
-from Library.DataType import TAB_COMMENT_SPLIT
-from Library.DataType import HEADER_COMMENT_LICENSE
-from Library.DataType import HEADER_COMMENT_ABSTRACT
-from Library.DataType import HEADER_COMMENT_COPYRIGHT
-from Library.DataType import HEADER_COMMENT_DESCRIPTION
-from Library.DataType import TAB_SPACE_SPLIT
-from Library.DataType import TAB_COMMA_SPLIT
-from Library.DataType import SUP_MODULE_LIST
-from Library.DataType import TAB_VALUE_SPLIT
-from Library.DataType import TAB_PCD_VALIDRANGE
-from Library.DataType import TAB_PCD_VALIDLIST
-from Library.DataType import TAB_PCD_EXPRESSION
-from Library.DataType import TAB_PCD_PROMPT
-from Library.DataType import TAB_CAPHEX_START
-from Library.DataType import TAB_HEX_START
-from Library.DataType import PCD_ERR_CODE_MAX_SIZE
-from Library.ExpressionValidate import IsValidRangeExpr
-from Library.ExpressionValidate import IsValidListExpr
-from Library.ExpressionValidate import IsValidLogicalExpr
-from Object.POM.CommonObject import TextObject
-from Object.POM.CommonObject import PcdErrorObject
-import Logger.Log as Logger
-from Logger.ToolError import FORMAT_INVALID
-from Logger.ToolError import FORMAT_NOT_SUPPORTED
-from Logger import StringTable as ST
-
-## ParseHeaderCommentSection
-#
-# Parse Header comment section lines, extract Abstract, Description, Copyright
-# , License lines
-#
-# @param CommentList: List of (Comment, LineNumber)
-# @param FileName: FileName of the comment
-#
-def ParseHeaderCommentSection(CommentList, FileName = None, IsBinaryHeader = False):
- Abstract = ''
- Description = ''
- Copyright = ''
- License = ''
- EndOfLine = "\n"
- if IsBinaryHeader:
- STR_HEADER_COMMENT_START = "@BinaryHeader"
- else:
- STR_HEADER_COMMENT_START = "@file"
- HeaderCommentStage = HEADER_COMMENT_NOT_STARTED
-
- #
- # first find the last copyright line
- #
- Last = 0
- for Index in xrange(len(CommentList)-1, 0, -1):
- Line = CommentList[Index][0]
- if _IsCopyrightLine(Line):
- Last = Index
- break
-
- for Item in CommentList:
- Line = Item[0]
- LineNo = Item[1]
-
- if not Line.startswith(TAB_COMMENT_SPLIT) and Line:
- Logger.Error("\nUPT", FORMAT_INVALID, ST.ERR_INVALID_COMMENT_FORMAT, FileName, Item[1])
- Comment = CleanString2(Line)[1]
- Comment = Comment.strip()
- #
- # if there are blank lines between License or Description, keep them as they would be
- # indication of different block; or in the position that Abstract should be, also keep it
- # as it indicates that no abstract
- #
- if not Comment and HeaderCommentStage not in [HEADER_COMMENT_LICENSE, \
- HEADER_COMMENT_DESCRIPTION, HEADER_COMMENT_ABSTRACT]:
- continue
-
- if HeaderCommentStage == HEADER_COMMENT_NOT_STARTED:
- if Comment.startswith(STR_HEADER_COMMENT_START):
- HeaderCommentStage = HEADER_COMMENT_ABSTRACT
- else:
- License += Comment + EndOfLine
- else:
- if HeaderCommentStage == HEADER_COMMENT_ABSTRACT:
- #
- # in case there is no abstract and description
- #
- if not Comment:
- HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
- elif _IsCopyrightLine(Comment):
- Result, ErrMsg = _ValidateCopyright(Comment)
- ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
- Copyright += Comment + EndOfLine
- HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
- else:
- Abstract += Comment + EndOfLine
- HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
- elif HeaderCommentStage == HEADER_COMMENT_DESCRIPTION:
- #
- # in case there is no description
- #
- if _IsCopyrightLine(Comment):
- Result, ErrMsg = _ValidateCopyright(Comment)
- ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
- Copyright += Comment + EndOfLine
- HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
- else:
- Description += Comment + EndOfLine
- elif HeaderCommentStage == HEADER_COMMENT_COPYRIGHT:
- if _IsCopyrightLine(Comment):
- Result, ErrMsg = _ValidateCopyright(Comment)
- ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
- Copyright += Comment + EndOfLine
- else:
- #
- # Contents after copyright line are license, those non-copyright lines in between
- # copyright line will be discarded
- #
- if LineNo > Last:
- if License:
- License += EndOfLine
- License += Comment + EndOfLine
- HeaderCommentStage = HEADER_COMMENT_LICENSE
- else:
- if not Comment and not License:
- continue
- License += Comment + EndOfLine
-
- return Abstract.strip(), Description.strip(), Copyright.strip(), License.strip()
-
-## _IsCopyrightLine
-# check whether current line is copyright line, the criteria is whether there is case insensitive keyword "Copyright"
-# followed by zero or more white space characters followed by a "(" character
-#
-# @param LineContent: the line need to be checked
-# @return: True if current line is copyright line, False else
-#
-def _IsCopyrightLine (LineContent):
- LineContent = LineContent.upper()
- Result = False
-
- ReIsCopyrightRe = re.compile(r"""(^|\s)COPYRIGHT *\(""", re.DOTALL)
- if ReIsCopyrightRe.search(LineContent):
- Result = True
-
- return Result
-
-## ParseGenericComment
-#
-# @param GenericComment: Generic comment list, element of
-# (CommentLine, LineNum)
-# @param ContainerFile: Input value for filename of Dec file
-#
-def ParseGenericComment (GenericComment, ContainerFile=None, SkipTag=None):
- if ContainerFile:
- pass
- HelpTxt = None
- HelpStr = ''
-
- for Item in GenericComment:
- CommentLine = Item[0]
- Comment = CleanString2(CommentLine)[1]
- if SkipTag is not None and Comment.startswith(SkipTag):
- Comment = Comment.replace(SkipTag, '', 1)
- HelpStr += Comment + '\n'
-
- if HelpStr:
- HelpTxt = TextObject()
- if HelpStr.endswith('\n') and not HelpStr.endswith('\n\n') and HelpStr != '\n':
- HelpStr = HelpStr[:-1]
- HelpTxt.SetString(HelpStr)
-
- return HelpTxt
-
-## ParsePcdErrorCode
-#
-# @param Value: original ErrorCode value
-# @param ContainerFile: Input value for filename of Dec file
-# @param LineNum: Line Num
-#
-def ParsePcdErrorCode (Value = None, ContainerFile = None, LineNum = None):
- try:
- if Value.strip().startswith((TAB_HEX_START, TAB_CAPHEX_START)):
- Base = 16
- else:
- Base = 10
- ErrorCode = long(Value, Base)
- if ErrorCode > PCD_ERR_CODE_MAX_SIZE or ErrorCode < 0:
- Logger.Error('Parser',
- FORMAT_NOT_SUPPORTED,
- "The format %s of ErrorCode is not valid, should be UNIT32 type or long type" % Value,
- File = ContainerFile,
- Line = LineNum)
- #
- # To delete the tailing 'L'
- #
- return hex(ErrorCode)[:-1]
- except ValueError, XStr:
- if XStr:
- pass
- Logger.Error('Parser',
- FORMAT_NOT_SUPPORTED,
- "The format %s of ErrorCode is not valid, should be UNIT32 type or long type" % Value,
- File = ContainerFile,
- Line = LineNum)
-
-## ParseDecPcdGenericComment
-#
-# @param GenericComment: Generic comment list, element of (CommentLine,
-# LineNum)
-# @param ContainerFile: Input value for filename of Dec file
-#
-def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCName, CName, MacroReplaceDict):
- HelpStr = ''
- PromptStr = ''
- PcdErr = None
- PcdErrList = []
- ValidValueNum = 0
- ValidRangeNum = 0
- ExpressionNum = 0
-
- for (CommentLine, LineNum) in GenericComment:
- Comment = CleanString2(CommentLine)[1]
- #
- # To replace Macro
- #
- MACRO_PATTERN = '[\t\s]*\$\([A-Z][_A-Z0-9]*\)'
- MatchedStrs = re.findall(MACRO_PATTERN, Comment)
- for MatchedStr in MatchedStrs:
- if MatchedStr:
- Macro = MatchedStr.strip().lstrip('$(').rstrip(')').strip()
- if Macro in MacroReplaceDict:
- Comment = Comment.replace(MatchedStr, MacroReplaceDict[Macro])
- if Comment.startswith(TAB_PCD_VALIDRANGE):
- if ValidValueNum > 0 or ExpressionNum > 0:
- Logger.Error('Parser',
- FORMAT_NOT_SUPPORTED,
- ST.WRN_MULTI_PCD_RANGES,
- File = ContainerFile,
- Line = LineNum)
- else:
- PcdErr = PcdErrorObject()
- PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
- PcdErr.SetCName(CName)
- PcdErr.SetFileLine(Comment)
- PcdErr.SetLineNum(LineNum)
- ValidRangeNum += 1
- ValidRange = Comment.replace(TAB_PCD_VALIDRANGE, "", 1).strip()
- Valid, Cause = _CheckRangeExpression(ValidRange)
- if Valid:
- ValueList = ValidRange.split(TAB_VALUE_SPLIT)
- if len(ValueList) > 1:
- PcdErr.SetValidValueRange((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
- PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
- else:
- PcdErr.SetValidValueRange(ValidRange)
- PcdErrList.append(PcdErr)
- else:
- Logger.Error("Parser",
- FORMAT_NOT_SUPPORTED,
- Cause,
- ContainerFile,
- LineNum)
- elif Comment.startswith(TAB_PCD_VALIDLIST):
- if ValidRangeNum > 0 or ExpressionNum > 0:
- Logger.Error('Parser',
- FORMAT_NOT_SUPPORTED,
- ST.WRN_MULTI_PCD_RANGES,
- File = ContainerFile,
- Line = LineNum)
- elif ValidValueNum > 0:
- Logger.Error('Parser',
- FORMAT_NOT_SUPPORTED,
- ST.WRN_MULTI_PCD_VALIDVALUE,
- File = ContainerFile,
- Line = LineNum)
- else:
- PcdErr = PcdErrorObject()
- PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
- PcdErr.SetCName(CName)
- PcdErr.SetFileLine(Comment)
- PcdErr.SetLineNum(LineNum)
- ValidValueNum += 1
- ValidValueExpr = Comment.replace(TAB_PCD_VALIDLIST, "", 1).strip()
- Valid, Cause = _CheckListExpression(ValidValueExpr)
- if Valid:
- ValidValue = Comment.replace(TAB_PCD_VALIDLIST, "", 1).replace(TAB_COMMA_SPLIT, TAB_SPACE_SPLIT)
- ValueList = ValidValue.split(TAB_VALUE_SPLIT)
- if len(ValueList) > 1:
- PcdErr.SetValidValue((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
- PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
- else:
- PcdErr.SetValidValue(ValidValue)
- PcdErrList.append(PcdErr)
- else:
- Logger.Error("Parser",
- FORMAT_NOT_SUPPORTED,
- Cause,
- ContainerFile,
- LineNum)
- elif Comment.startswith(TAB_PCD_EXPRESSION):
- if ValidRangeNum > 0 or ValidValueNum > 0:
- Logger.Error('Parser',
- FORMAT_NOT_SUPPORTED,
- ST.WRN_MULTI_PCD_RANGES,
- File = ContainerFile,
- Line = LineNum)
- else:
- PcdErr = PcdErrorObject()
- PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
- PcdErr.SetCName(CName)
- PcdErr.SetFileLine(Comment)
- PcdErr.SetLineNum(LineNum)
- ExpressionNum += 1
- Expression = Comment.replace(TAB_PCD_EXPRESSION, "", 1).strip()
- Valid, Cause = _CheckExpression(Expression)
- if Valid:
- ValueList = Expression.split(TAB_VALUE_SPLIT)
- if len(ValueList) > 1:
- PcdErr.SetExpression((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
- PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
- else:
- PcdErr.SetExpression(Expression)
- PcdErrList.append(PcdErr)
- else:
- Logger.Error("Parser",
- FORMAT_NOT_SUPPORTED,
- Cause,
- ContainerFile,
- LineNum)
- elif Comment.startswith(TAB_PCD_PROMPT):
- if PromptStr:
- Logger.Error('Parser',
- FORMAT_NOT_SUPPORTED,
- ST.WRN_MULTI_PCD_PROMPT,
- File = ContainerFile,
- Line = LineNum)
- PromptStr = Comment.replace(TAB_PCD_PROMPT, "", 1).strip()
- else:
- if Comment:
- HelpStr += Comment + '\n'
-
- #
- # remove the last EOL if the comment is of format 'FOO\n'
- #
- if HelpStr.endswith('\n'):
- if HelpStr != '\n' and not HelpStr.endswith('\n\n'):
- HelpStr = HelpStr[:-1]
-
- return HelpStr, PcdErrList, PromptStr
-
-## ParseDecPcdTailComment
-#
-# @param TailCommentList: Tail comment list of Pcd, item of format (Comment, LineNum)
-# @param ContainerFile: Input value for filename of Dec file
-# @retVal SupModuleList: The supported module type list detected
-# @retVal HelpStr: The generic help text string detected
-#
-def ParseDecPcdTailComment (TailCommentList, ContainerFile):
- assert(len(TailCommentList) == 1)
- TailComment = TailCommentList[0][0]
- LineNum = TailCommentList[0][1]
-
- Comment = TailComment.lstrip(" #")
-
- ReFindFirstWordRe = re.compile(r"""^([^ #]*)""", re.DOTALL)
-
- #
- # get first word and compare with SUP_MODULE_LIST
- #
- MatchObject = ReFindFirstWordRe.match(Comment)
- if not (MatchObject and MatchObject.group(1) in SUP_MODULE_LIST):
- return None, Comment
-
- #
- # parse line, it must have supported module type specified
- #
- if Comment.find(TAB_COMMENT_SPLIT) == -1:
- Comment += TAB_COMMENT_SPLIT
- SupMode, HelpStr = GetSplitValueList(Comment, TAB_COMMENT_SPLIT, 1)
- SupModuleList = []
- for Mod in GetSplitValueList(SupMode, TAB_SPACE_SPLIT):
- if not Mod:
- continue
- elif Mod not in SUP_MODULE_LIST:
- Logger.Error("UPT",
- FORMAT_INVALID,
- ST.WRN_INVALID_MODULE_TYPE%Mod,
- ContainerFile,
- LineNum)
- else:
- SupModuleList.append(Mod)
-
- return SupModuleList, HelpStr
-
-## _CheckListExpression
-#
-# @param Expression: Pcd value list expression
-#
-def _CheckListExpression(Expression):
- ListExpr = ''
- if TAB_VALUE_SPLIT in Expression:
- ListExpr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
- else:
- ListExpr = Expression
-
- return IsValidListExpr(ListExpr)
-
-## _CheckExpreesion
-#
-# @param Expression: Pcd value expression
-#
-def _CheckExpression(Expression):
- Expr = ''
- if TAB_VALUE_SPLIT in Expression:
- Expr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
- else:
- Expr = Expression
- return IsValidLogicalExpr(Expr, True)
-
-## _CheckRangeExpression
-#
-# @param Expression: Pcd range expression
-#
-def _CheckRangeExpression(Expression):
- RangeExpr = ''
- if TAB_VALUE_SPLIT in Expression:
- RangeExpr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
- else:
- RangeExpr = Expression
-
- return IsValidRangeExpr(RangeExpr)
-
-## ValidateCopyright
-#
-#
-#
-def ValidateCopyright(Result, ErrType, FileName, LineNo, ErrMsg):
- if not Result:
- Logger.Warn("\nUPT", ErrType, FileName, LineNo, ErrMsg)
-
-## _ValidateCopyright
-#
-# @param Line: Line that contains copyright information, # stripped
-#
-# @retval Result: True if line is conformed to Spec format, False else
-# @retval ErrMsg: the detailed error description
-#
-def _ValidateCopyright(Line):
- if Line:
- pass
- Result = True
- ErrMsg = ''
-
- return Result, ErrMsg
-
-def GenerateTokenList (Comment):
- #
- # Tokenize Comment using '#' and ' ' as token seperators
- #
- RelplacedComment = None
- while Comment != RelplacedComment:
- RelplacedComment = Comment
- Comment = Comment.replace('##', '#').replace(' ', ' ').replace(' ', '#').strip('# ')
- return Comment.split('#')
-
-
-#
-# Comment - Comment to parse
-# TypeTokens - A dictionary of type token synonyms
-# RemoveTokens - A list of tokens to remove from help text
-# ParseVariable - True for parsing [Guids]. Otherwise False
-#
-def ParseComment (Comment, UsageTokens, TypeTokens, RemoveTokens, ParseVariable):
- #
- # Initialize return values
- #
- Usage = None
- Type = None
- String = None
-
- Comment = Comment[0]
-
- NumTokens = 2
- if ParseVariable:
- #
- # Remove white space around first instance of ':' from Comment if 'Variable'
- # is in front of ':' and Variable is the 1st or 2nd token in Comment.
- #
- List = Comment.split(':', 1)
- if len(List) > 1:
- SubList = GenerateTokenList (List[0].strip())
- if len(SubList) in [1, 2] and SubList[-1] == 'Variable':
- if List[1].strip().find('L"') == 0:
- Comment = List[0].strip() + ':' + List[1].strip()
-
- #
- # Remove first instance of L"<VariableName> from Comment and put into String
- # if and only if L"<VariableName>" is the 1st token, the 2nd token. Or
- # L"<VariableName>" is the third token immediately following 'Variable:'.
- #
- End = -1
- Start = Comment.find('Variable:L"')
- if Start >= 0:
- String = Comment[Start + 9:]
- End = String[2:].find('"')
- else:
- Start = Comment.find('L"')
- if Start >= 0:
- String = Comment[Start:]
- End = String[2:].find('"')
- if End >= 0:
- SubList = GenerateTokenList (Comment[:Start])
- if len(SubList) < 2:
- Comment = Comment[:Start] + String[End + 3:]
- String = String[:End + 3]
- Type = 'Variable'
- NumTokens = 1
-
- #
- # Initialze HelpText to Comment.
- # Content will be remove from HelpText as matching tokens are found
- #
- HelpText = Comment
-
- #
- # Tokenize Comment using '#' and ' ' as token seperators
- #
- List = GenerateTokenList (Comment)
-
- #
- # Search first two tokens for Usage and Type and remove any matching tokens
- # from HelpText
- #
- for Token in List[0:NumTokens]:
- if Usage == None and Token in UsageTokens:
- Usage = UsageTokens[Token]
- HelpText = HelpText.replace(Token, '')
- if Usage != None or not ParseVariable:
- for Token in List[0:NumTokens]:
- if Type == None and Token in TypeTokens:
- Type = TypeTokens[Token]
- HelpText = HelpText.replace(Token, '')
- if Usage != None:
- for Token in List[0:NumTokens]:
- if Token in RemoveTokens:
- HelpText = HelpText.replace(Token, '')
-
- #
- # If no Usage token is present and set Usage to UNDEFINED
- #
- if Usage == None:
- Usage = 'UNDEFINED'
-
- #
- # If no Type token is present and set Type to UNDEFINED
- #
- if Type == None:
- Type = 'UNDEFINED'
-
- #
- # If Type is not 'Variable:', then set String to None
- #
- if Type != 'Variable':
- String = None
-
- #
- # Strip ' ' and '#' from the beginning of HelpText
- # If HelpText is an empty string after all parsing is
- # complete then set HelpText to None
- #
- HelpText = HelpText.lstrip('# ')
- if HelpText == '':
- HelpText = None
-
- #
- # Return parsing results
- #
- return Usage, Type, String, HelpText
diff --git a/BaseTools/Source/Python/UPT/Library/DataType.py b/BaseTools/Source/Python/UPT/Library/DataType.py
deleted file mode 100644
index c151be3bc4..0000000000
--- a/BaseTools/Source/Python/UPT/Library/DataType.py
+++ /dev/null
@@ -1,958 +0,0 @@
-## @file
-# This file is used to define class for data type structure
-#
-# Copyright (c) 2011 - 2016, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-'''
-DataType
-'''
-
-##
-# Module List Items
-#
-MODULE_LIST = ["BASE",
- "SEC",
- "PEI_CORE",
- "PEIM",
- "DXE_CORE",
- "DXE_DRIVER",
- "SMM_CORE",
- "DXE_RUNTIME_DRIVER",
- "DXE_SAL_DRIVER",
- "DXE_SMM_DRIVER",
- "UEFI_DRIVER",
- "UEFI_APPLICATION",
- "USER_DEFINED"]
-
-VALID_DEPEX_MODULE_TYPE_LIST = ["PEIM",
- "DXE_DRIVER",
- "DXE_SMM_DRIVER",
- "DXE_RUNTIME_DRIVER",
- "DXE_SAL_DRIVER",
- "UEFI_DRIVER",
- ]
-##
-# Usage List Items
-#
-USAGE_LIST = ["CONSUMES",
- "SOMETIMES_CONSUMES",
- "PRODUCES",
- "SOMETIMES_PRODUCES"]
-
-TAB_LANGUAGE_EN_US = 'en-US'
-TAB_LANGUAGE_ENG = 'eng'
-TAB_LANGUAGE_EN = 'en'
-TAB_LANGUAGE_EN_X = 'en-x-tianocore'
-
-USAGE_ITEM_PRODUCES = 'PRODUCES'
-USAGE_ITEM_SOMETIMES_PRODUCES = 'SOMETIMES_PRODUCES'
-USAGE_ITEM_CONSUMES = 'CONSUMES'
-USAGE_ITEM_SOMETIMES_CONSUMES = 'SOMETIMES_CONSUMES'
-USAGE_ITEM_TO_START = 'TO_START'
-USAGE_ITEM_BY_START = 'BY_START'
-USAGE_ITEM_NOTIFY = 'NOTIFY'
-USAGE_ITEM_UNDEFINED = 'UNDEFINED'
-
-USAGE_CONSUMES_LIST = [USAGE_ITEM_CONSUMES,
- 'CONSUMED',
- 'ALWAYS_CONSUMED',
- 'ALWAYS_CONSUMES'
- ]
-
-USAGE_PRODUCES_LIST = [USAGE_ITEM_PRODUCES,
- 'PRODUCED',
- 'ALWAYS_PRODUCED',
- 'ALWAYS_PRODUCES'
- ]
-
-USAGE_SOMETIMES_PRODUCES_LIST = [USAGE_ITEM_SOMETIMES_PRODUCES,
- 'SOMETIMES_PRODUCED'
- ]
-
-USAGE_SOMETIMES_CONSUMES_LIST = [USAGE_ITEM_SOMETIMES_CONSUMES,
- 'SOMETIMES_CONSUMED'
- ]
-
-ITEM_UNDEFINED = 'UNDEFINED'
-
-TAB_PCD_VALIDRANGE = '@ValidRange'
-TAB_PCD_VALIDLIST = '@ValidList'
-TAB_PCD_EXPRESSION = '@Expression'
-TAB_PCD_PROMPT = '@Prompt'
-TAB_STR_TOKENCNAME = 'STR'
-TAB_STR_TOKENPROMPT = 'PROMPT'
-TAB_STR_TOKENHELP = 'HELP'
-TAB_STR_TOKENERR = 'ERR'
-
-#
-# Dictionary of usage tokens and their synonmys
-#
-ALL_USAGE_TOKENS = {
- "PRODUCES" : "PRODUCES",
- "PRODUCED" : "PRODUCES",
- "ALWAYS_PRODUCES" : "PRODUCES",
- "ALWAYS_PRODUCED" : "PRODUCES",
- "SOMETIMES_PRODUCES" : "SOMETIMES_PRODUCES",
- "SOMETIMES_PRODUCED" : "SOMETIMES_PRODUCES",
- "CONSUMES" : "CONSUMES",
- "CONSUMED" : "CONSUMES",
- "ALWAYS_CONSUMES" : "CONSUMES",
- "ALWAYS_CONSUMED" : "CONSUMES",
- "SOMETIMES_CONSUMES" : "SOMETIMES_CONSUMES",
- "SOMETIMES_CONSUMED" : "SOMETIMES_CONSUMES",
- "SOMETIME_CONSUMES" : "SOMETIMES_CONSUMES",
- "UNDEFINED" : "UNDEFINED"
- }
-
-PROTOCOL_USAGE_TOKENS = {
- "TO_START" : "TO_START",
- "BY_START" : "BY_START"
- }
-
-PROTOCOL_USAGE_TOKENS.update (ALL_USAGE_TOKENS)
-
-#
-# Dictionary of GUID type tokens
-#
-GUID_TYPE_TOKENS = {
- "Event" : "Event",
- "File" : "File",
- "FV" : "FV",
- "GUID" : "GUID",
- "Guid" : "GUID",
- "HII" : "HII",
- "HOB" : "HOB",
- "Hob" : "HOB",
- "Hob:" : "HOB",
- "SystemTable" : "SystemTable",
- "TokenSpaceGuid" : "TokenSpaceGuid",
- "UNDEFINED" : "UNDEFINED"
- }
-
-#
-# Dictionary of Protocol Notify tokens and their synonyms
-#
-PROTOCOL_NOTIFY_TOKENS = {
- "NOTIFY" : "NOTIFY",
- "PROTOCOL_NOTIFY" : "NOTIFY",
- "UNDEFINED" : "UNDEFINED"
- }
-
-#
-# Dictionary of PPI Notify tokens and their synonyms
-#
-PPI_NOTIFY_TOKENS = {
- "NOTIFY" : "NOTIFY",
- "PPI_NOTIFY" : "NOTIFY",
- "UNDEFINED" : "UNDEFINED"
- }
-
-EVENT_TOKENS = {
- "EVENT_TYPE_PERIODIC_TIMER" : "EVENT_TYPE_PERIODIC_TIMER",
- "EVENT_TYPE_RELATIVE_TIMER" : "EVENT_TYPE_RELATIVE_TIMER",
- "UNDEFINED" : "UNDEFINED"
- }
-
-BOOTMODE_TOKENS = {
- "FULL" : "FULL",
- "MINIMAL" : "MINIMAL",
- "NO_CHANGE" : "NO_CHANGE",
- "DIAGNOSTICS" : "DIAGNOSTICS",
- "DEFAULT" : "DEFAULT",
- "S2_RESUME" : "S2_RESUME",
- "S3_RESUME" : "S3_RESUME",
- "S4_RESUME" : "S4_RESUME",
- "S5_RESUME" : "S5_RESUME",
- "FLASH_UPDATE" : "FLASH_UPDATE",
- "RECOVERY_FULL" : "RECOVERY_FULL",
- "RECOVERY_MINIMAL" : "RECOVERY_MINIMAL",
- "RECOVERY_NO_CHANGE" : "RECOVERY_NO_CHANGE",
- "RECOVERY_DIAGNOSTICS" : "RECOVERY_DIAGNOSTICS",
- "RECOVERY_DEFAULT" : "RECOVERY_DEFAULT",
- "RECOVERY_S2_RESUME" : "RECOVERY_S2_RESUME",
- "RECOVERY_S3_RESUME" : "RECOVERY_S3_RESUME",
- "RECOVERY_S4_RESUME" : "RECOVERY_S4_RESUME",
- "RECOVERY_S5_RESUME" : "RECOVERY_S5_RESUME",
- "RECOVERY_FLASH_UPDATE" : "RECOVERY_FLASH_UPDATE",
- "UNDEFINED" : "UNDEFINED"
- }
-
-HOB_TOKENS = {
- "PHIT" : "PHIT",
- "MEMORY_ALLOCATION" : "MEMORY_ALLOCATION",
- "LOAD_PEIM" : "LOAD_PEIM",
- "RESOURCE_DESCRIPTOR" : "RESOURCE_DESCRIPTOR",
- "FIRMWARE_VOLUME" : "FIRMWARE_VOLUME",
- "UNDEFINED" : "UNDEFINED"
- }
-
-##
-# Usage List Items for Protocol
-#
-PROTOCOL_USAGE_LIST = USAGE_LIST + ["TO_START", "BY_START"]
-
-##
-# End of Line
-# Use this but not os.linesep for os.linesep has bug in it.
-#
-END_OF_LINE = '\n'
-
-##
-# Arch List Items
-#
-ARCH_LIST = ["IA32",
- "X64",
- "IPF",
- "EBC",
- "COMMON"]
-##
-# PCD driver type list items
-#
-PCD_DIRVER_TYPE_LIST = ["PEI_PCD_DRIVER", "DXE_PCD_DRIVER"]
-
-##
-# Boot Mode List Items
-#
-BOOT_MODE_LIST = ["FULL",
- "MINIMAL",
- "NO_CHANGE",
- "DIAGNOSTICS",
- "DEFAULT",
- "S2_RESUME",
- "S3_RESUME",
- "S4_RESUME",
- "S5_RESUME",
- "FLASH_UPDATE",
- "RECOVERY_FULL",
- "RECOVERY_MINIMAL",
- "RECOVERY_NO_CHANGE",
- "RECOVERY_DIAGNOSTICS",
- "RECOVERY_DEFAULT",
- "RECOVERY_S2_RESUME",
- "RECOVERY_S3_RESUME",
- "RECOVERY_S4_RESUME",
- "RECOVERY_S5_RESUME",
- "RECOVERY_FLASH_UPDATE"]
-
-##
-# Event Type List Items
-#
-EVENT_TYPE_LIST = ["EVENT_TYPE_PERIODIC_TIMER",
- "EVENT_TYPE_RELATIVE_TIMER"]
-
-##
-# Hob Type List Items
-#
-HOB_TYPE_LIST = ["PHIT",
- "MEMORY_ALLOCATION",
- "RESOURCE_DESCRIPTOR",
- "FIRMWARE_VOLUME",
- "LOAD_PEIM"]
-
-##
-# GUID_TYPE_LIST
-#
-GUID_TYPE_LIST = ["Event", "File", "FV", "GUID", "HII", "HOB",
- "SystemTable", "TokenSpaceGuid", "Variable"]
-##
-# PCD Usage Type List of Package
-#
-PCD_USAGE_TYPE_LIST_OF_PACKAGE = ["FeatureFlag", "PatchableInModule",
- "FixedAtBuild", "Dynamic", "DynamicEx"]
-
-##
-# PCD Usage Type List of Module
-#
-PCD_USAGE_TYPE_LIST_OF_MODULE = ["FEATUREPCD", "PATCHPCD", "FIXEDPCD", "PCD", "PCDEX"]
-##
-# PCD Usage Type List of UPT
-#
-PCD_USAGE_TYPE_LIST_OF_UPT = PCD_USAGE_TYPE_LIST_OF_MODULE
-
-##
-# Binary File Type List
-#
-BINARY_FILE_TYPE_LIST = ["PE32", "PIC", "TE", "DXE_DEPEX", "VER", "UI", "COMPAT16", "FV", "BIN", "RAW",
- "ACPI", "ASL",
- "PEI_DEPEX",
- "SMM_DEPEX",
- "SUBTYPE_GUID",
- "DISPOSABLE"
- ]
-BINARY_FILE_TYPE_LIST_IN_UDP = \
- ["GUID", "FREEFORM",
- "UEFI_IMAGE", "PE32", "PIC",
- "PEI_DEPEX",
- "DXE_DEPEX",
- "SMM_DEPEX",
- "FV", "TE",
- "BIN", "VER", "UI"
- ]
-
-SUBTYPE_GUID_BINARY_FILE_TYPE = "FREEFORM"
-##
-# Possible values for COMPONENT_TYPE, and their descriptions, are listed in
-# the table,
-# "Component (module) Types." For each component, the BASE_NAME and
-# COMPONENT_TYPE
-# are required. The COMPONENT_TYPE definition is case sensitive.
-#
-COMPONENT_TYPE_LIST = [
- "APPLICATION",
- "ACPITABLE",
- "APRIORI",
- "BINARY",
- "BS_DRIVER",
- "CONFIG",
- "FILE",
- "FVIMAGEFILE",
- "LIBRARY",
- "LOGO",
- "LEGACY16",
- "MICROCODE",
- "PE32_PEIM",
- "PEI_CORE",
- "RAWFILE",
- "RT_DRIVER",
- "SAL_RT_DRIVER",
- "SECURITY_CORE",
- "COMBINED_PEIM_DRIVER",
- "PIC_PEIM",
- "RELOCATABLE_PEIM"
- ]
-
-##
-# Common Definitions
-#
-TAB_SPLIT = '.'
-TAB_COMMENT_EDK1_START = '/*'
-TAB_COMMENT_EDK1_END = '*/'
-TAB_COMMENT_EDK1_SPLIT = '//'
-TAB_COMMENT_SPLIT = '#'
-TAB_EQUAL_SPLIT = '='
-TAB_DEQUAL_SPLIT = '=='
-TAB_VALUE_SPLIT = '|'
-TAB_COMMA_SPLIT = ','
-TAB_HORIZON_LINE_SPLIT = '-'
-TAB_SPACE_SPLIT = ' '
-TAB_UNDERLINE_SPLIT = '_'
-TAB_SEMI_COLON_SPLIT = ';'
-TAB_COLON_SPLIT = ':'
-TAB_SECTION_START = '['
-TAB_SECTION_END = ']'
-TAB_OPTION_START = '<'
-TAB_OPTION_END = '>'
-TAB_SLASH = '\\'
-TAB_BACK_SLASH = '/'
-TAB_SPECIAL_COMMENT = '##'
-TAB_HEADER_COMMENT = '@file'
-TAB_BINARY_HEADER_COMMENT = '@BinaryHeader'
-TAB_STAR = '*'
-TAB_ENCODING_UTF16LE = 'utf_16_le'
-TAB_CAPHEX_START = '0X'
-TAB_HEX_START = '0x'
-TAB_PCD_ERROR = 'Error'
-TAB_PCD_ERROR_SECTION_COMMENT = 'Error message section'
-TAB_UNI_FILE_SUFFIXS = ['.uni', '.UNI', '.Uni']
-
-TAB_EDK_SOURCE = '$(EDK_SOURCE)'
-TAB_EFI_SOURCE = '$(EFI_SOURCE)'
-TAB_WORKSPACE = '$(WORKSPACE)'
-
-TAB_ARCH_NULL = ''
-TAB_ARCH_COMMON = 'COMMON'
-TAB_ARCH_IA32 = 'IA32'
-TAB_ARCH_X64 = 'X64'
-TAB_ARCH_IPF = 'IPF'
-TAB_ARCH_ARM = 'ARM'
-TAB_ARCH_EBC = 'EBC'
-
-ARCH_LIST = \
-[TAB_ARCH_IA32, TAB_ARCH_X64, TAB_ARCH_IPF, TAB_ARCH_ARM, TAB_ARCH_EBC]
-
-SUP_MODULE_BASE = 'BASE'
-SUP_MODULE_SEC = 'SEC'
-SUP_MODULE_PEI_CORE = 'PEI_CORE'
-SUP_MODULE_PEIM = 'PEIM'
-SUP_MODULE_DXE_CORE = 'DXE_CORE'
-SUP_MODULE_DXE_DRIVER = 'DXE_DRIVER'
-SUP_MODULE_DXE_RUNTIME_DRIVER = 'DXE_RUNTIME_DRIVER'
-SUP_MODULE_DXE_SAL_DRIVER = 'DXE_SAL_DRIVER'
-SUP_MODULE_DXE_SMM_DRIVER = 'DXE_SMM_DRIVER'
-SUP_MODULE_UEFI_DRIVER = 'UEFI_DRIVER'
-SUP_MODULE_UEFI_APPLICATION = 'UEFI_APPLICATION'
-SUP_MODULE_USER_DEFINED = 'USER_DEFINED'
-SUP_MODULE_SMM_CORE = 'SMM_CORE'
-
-SUP_MODULE_LIST = \
-[SUP_MODULE_BASE, SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, \
-SUP_MODULE_DXE_CORE, SUP_MODULE_DXE_DRIVER, \
- SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, \
- SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_UEFI_DRIVER, \
- SUP_MODULE_UEFI_APPLICATION, SUP_MODULE_USER_DEFINED, \
- SUP_MODULE_SMM_CORE]
-SUP_MODULE_LIST_STRING = TAB_VALUE_SPLIT.join(l for l in SUP_MODULE_LIST)
-
-EDK_COMPONENT_TYPE_LIBRARY = 'LIBRARY'
-EDK_COMPONENT_TYPE_SECUARITY_CORE = 'SECUARITY_CORE'
-EDK_COMPONENT_TYPE_PEI_CORE = 'PEI_CORE'
-EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER = 'COMBINED_PEIM_DRIVER'
-EDK_COMPONENT_TYPE_PIC_PEIM = 'PIC_PEIM'
-EDK_COMPONENT_TYPE_RELOCATABLE_PEIM = 'RELOCATABLE_PEIM'
-EDK_COMPONENT_TYPE_BS_DRIVER = 'BS_DRIVER'
-EDK_COMPONENT_TYPE_RT_DRIVER = 'RT_DRIVER'
-EDK_COMPONENT_TYPE_SAL_RT_DRIVER = 'SAL_RT_DRIVER'
-EDK_COMPONENT_TYPE_APPLICATION = 'APPLICATION'
-EDK_NAME = 'EDK'
-EDKII_NAME = 'EDKII'
-
-BINARY_FILE_TYPE_FW = 'FW'
-BINARY_FILE_TYPE_GUID = 'GUID'
-BINARY_FILE_TYPE_PREEFORM = 'PREEFORM'
-BINARY_FILE_TYPE_UEFI_APP = 'UEFI_APP'
-BINARY_FILE_TYPE_UNI_UI = 'UNI_UI'
-BINARY_FILE_TYPE_SEC_UI = 'SEC_UI'
-BINARY_FILE_TYPE_UNI_VER = 'UNI_VER'
-BINARY_FILE_TYPE_SEC_VER = 'SEC_VER'
-BINARY_FILE_TYPE_LIB = 'LIB'
-BINARY_FILE_TYPE_PE32 = 'PE32'
-BINARY_FILE_TYPE_PIC = 'PIC'
-BINARY_FILE_TYPE_PEI_DEPEX = 'PEI_DEPEX'
-BINARY_FILE_TYPE_DXE_DEPEX = 'DXE_DEPEX'
-BINARY_FILE_TYPE_SMM_DEPEX = 'SMM_DEPEX'
-BINARY_FILE_TYPE_TE = 'TE'
-BINARY_FILE_TYPE_VER = 'VER'
-BINARY_FILE_TYPE_UI = 'UI'
-BINARY_FILE_TYPE_BIN = 'BIN'
-BINARY_FILE_TYPE_FV = 'FV'
-BINARY_FILE_TYPE_UI_LIST = [BINARY_FILE_TYPE_UNI_UI,
- BINARY_FILE_TYPE_SEC_UI,
- BINARY_FILE_TYPE_UI
- ]
-BINARY_FILE_TYPE_VER_LIST = [BINARY_FILE_TYPE_UNI_VER,
- BINARY_FILE_TYPE_SEC_VER,
- BINARY_FILE_TYPE_VER
- ]
-
-DEPEX_SECTION_LIST = ['<PEI_DEPEX>',
- '<DXE_DEPEX>',
- '<SMM_DEPEX>'
- ]
-
-PLATFORM_COMPONENT_TYPE_LIBRARY = 'LIBRARY'
-PLATFORM_COMPONENT_TYPE_LIBRARY_CLASS = 'LIBRARY_CLASS'
-PLATFORM_COMPONENT_TYPE_MODULE = 'MODULE'
-
-TAB_LIBRARIES = 'Libraries'
-
-TAB_SOURCE = 'Source'
-TAB_SOURCES = 'Sources'
-TAB_SOURCES_COMMON = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_SOURCES_IA32 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_SOURCES_X64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_X64
-TAB_SOURCES_IPF = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_SOURCES_ARM = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_SOURCES_EBC = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_BINARIES = 'Binaries'
-TAB_BINARIES_COMMON = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_BINARIES_IA32 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_BINARIES_X64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_X64
-TAB_BINARIES_IPF = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_BINARIES_ARM = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_BINARIES_EBC = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_INCLUDES = 'Includes'
-TAB_INCLUDES_COMMON = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_INCLUDES_IA32 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_INCLUDES_X64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_X64
-TAB_INCLUDES_IPF = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_INCLUDES_ARM = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_INCLUDES_EBC = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_GUIDS = 'Guids'
-TAB_GUIDS_COMMON = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_GUIDS_IA32 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IA32
-TAB_GUIDS_X64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_X64
-TAB_GUIDS_IPF = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IPF
-TAB_GUIDS_ARM = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_ARM
-TAB_GUIDS_EBC = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_PROTOCOLS = 'Protocols'
-TAB_PROTOCOLS_COMMON = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PROTOCOLS_IA32 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PROTOCOLS_X64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_X64
-TAB_PROTOCOLS_IPF = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PROTOCOLS_ARM = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PROTOCOLS_EBC = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_PPIS = 'Ppis'
-TAB_PPIS_COMMON = TAB_PPIS + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PPIS_IA32 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PPIS_X64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_X64
-TAB_PPIS_IPF = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PPIS_ARM = TAB_PPIS + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PPIS_EBC = TAB_PPIS + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_LIBRARY_CLASSES = 'LibraryClasses'
-TAB_LIBRARY_CLASSES_COMMON = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_LIBRARY_CLASSES_IA32 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_LIBRARY_CLASSES_X64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_X64
-TAB_LIBRARY_CLASSES_IPF = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_LIBRARY_CLASSES_ARM = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_LIBRARY_CLASSES_EBC = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_PACKAGES = 'Packages'
-TAB_PACKAGES_COMMON = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PACKAGES_IA32 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PACKAGES_X64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_X64
-TAB_PACKAGES_IPF = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PACKAGES_ARM = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PACKAGES_EBC = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_PCDS = 'Pcds'
-TAB_PCDS_FIXED_AT_BUILD = 'FixedAtBuild'
-TAB_PCDS_PATCHABLE_IN_MODULE = 'PatchableInModule'
-TAB_PCDS_FEATURE_FLAG = 'FeatureFlag'
-TAB_PCDS_DYNAMIC_EX = 'DynamicEx'
-TAB_PCDS_DYNAMIC_EX_DEFAULT = 'DynamicExDefault'
-TAB_PCDS_DYNAMIC_EX_VPD = 'DynamicExVpd'
-TAB_PCDS_DYNAMIC_EX_HII = 'DynamicExHii'
-TAB_PCDS_DYNAMIC = 'Dynamic'
-TAB_PCDS_DYNAMIC_DEFAULT = 'DynamicDefault'
-TAB_PCDS_DYNAMIC_VPD = 'DynamicVpd'
-TAB_PCDS_DYNAMIC_HII = 'DynamicHii'
-
-TAB_PTR_TYPE_PCD = 'VOID*'
-
-PCD_DYNAMIC_TYPE_LIST = [TAB_PCDS_DYNAMIC, TAB_PCDS_DYNAMIC_DEFAULT, \
- TAB_PCDS_DYNAMIC_VPD, TAB_PCDS_DYNAMIC_HII]
-PCD_DYNAMIC_EX_TYPE_LIST = [TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, \
- TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII]
-
-## Dynamic-ex PCD types
-#
-gDYNAMIC_EX_PCD = [TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, \
- TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII]
-
-TAB_PCDS_FIXED_AT_BUILD_NULL = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD
-TAB_PCDS_FIXED_AT_BUILD_COMMON = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
-TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PCDS_FIXED_AT_BUILD_IA32 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
-TAB_SPLIT + TAB_ARCH_IA32
-TAB_PCDS_FIXED_AT_BUILD_X64 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
-TAB_SPLIT + TAB_ARCH_X64
-TAB_PCDS_FIXED_AT_BUILD_IPF = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
-TAB_SPLIT + TAB_ARCH_IPF
-TAB_PCDS_FIXED_AT_BUILD_ARM = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
-TAB_SPLIT + TAB_ARCH_ARM
-TAB_PCDS_FIXED_AT_BUILD_EBC = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
-TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_PCDS_PATCHABLE_IN_MODULE_NULL = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE
-TAB_PCDS_PATCHABLE_IN_MODULE_COMMON = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE \
-+ TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PCDS_PATCHABLE_IN_MODULE_IA32 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
-TAB_SPLIT + TAB_ARCH_IA32
-TAB_PCDS_PATCHABLE_IN_MODULE_X64 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
-TAB_SPLIT + TAB_ARCH_X64
-TAB_PCDS_PATCHABLE_IN_MODULE_IPF = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
-TAB_SPLIT + TAB_ARCH_IPF
-TAB_PCDS_PATCHABLE_IN_MODULE_ARM = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
-TAB_SPLIT + TAB_ARCH_ARM
-TAB_PCDS_PATCHABLE_IN_MODULE_EBC = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
-TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_PCDS_FEATURE_FLAG_NULL = TAB_PCDS + TAB_PCDS_FEATURE_FLAG
-TAB_PCDS_FEATURE_FLAG_COMMON = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT \
-+ TAB_ARCH_COMMON
-TAB_PCDS_FEATURE_FLAG_IA32 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
-TAB_ARCH_IA32
-TAB_PCDS_FEATURE_FLAG_X64 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
-TAB_ARCH_X64
-TAB_PCDS_FEATURE_FLAG_IPF = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
-TAB_ARCH_IPF
-TAB_PCDS_FEATURE_FLAG_ARM = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
-TAB_ARCH_ARM
-TAB_PCDS_FEATURE_FLAG_EBC = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
-TAB_ARCH_EBC
-
-TAB_PCDS_DYNAMIC_EX_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX
-TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_DEFAULT
-TAB_PCDS_DYNAMIC_EX_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_HII
-TAB_PCDS_DYNAMIC_EX_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_VPD
-TAB_PCDS_DYNAMIC_EX_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
-TAB_ARCH_COMMON
-TAB_PCDS_DYNAMIC_EX_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
-TAB_ARCH_IA32
-TAB_PCDS_DYNAMIC_EX_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
-TAB_ARCH_X64
-TAB_PCDS_DYNAMIC_EX_IPF = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
-TAB_ARCH_IPF
-TAB_PCDS_DYNAMIC_EX_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
-TAB_ARCH_ARM
-TAB_PCDS_DYNAMIC_EX_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
-TAB_ARCH_EBC
-
-TAB_PCDS_DYNAMIC_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC
-TAB_PCDS_DYNAMIC_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_DEFAULT
-TAB_PCDS_DYNAMIC_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_HII
-TAB_PCDS_DYNAMIC_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_VPD
-TAB_PCDS_DYNAMIC_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + \
-TAB_ARCH_COMMON
-TAB_PCDS_DYNAMIC_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PCDS_DYNAMIC_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_X64
-TAB_PCDS_DYNAMIC_IPF = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PCDS_DYNAMIC_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PCDS_DYNAMIC_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_PCD_DYNAMIC_TYPE_LIST = [TAB_PCDS_DYNAMIC_DEFAULT_NULL, \
- TAB_PCDS_DYNAMIC_VPD_NULL, \
- TAB_PCDS_DYNAMIC_HII_NULL]
-TAB_PCD_DYNAMIC_EX_TYPE_LIST = [TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, \
- TAB_PCDS_DYNAMIC_EX_VPD_NULL, \
- TAB_PCDS_DYNAMIC_EX_HII_NULL]
-
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE = \
-'PcdLoadFixAddressPeiCodePageNumber'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE = 'UINT32'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE = \
-'PcdLoadFixAddressBootTimeCodePageNumber'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE = 'UINT32'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE = \
-'PcdLoadFixAddressRuntimeCodePageNumber'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE = 'UINT32'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE = \
-'PcdLoadFixAddressSmmCodePageNumber'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE = 'UINT32'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST = \
-[TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE, \
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE, \
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE, \
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE]
-PCD_SECTION_LIST = [TAB_PCDS_FIXED_AT_BUILD_NULL.upper(), \
- TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper(), \
- TAB_PCDS_FEATURE_FLAG_NULL.upper(), \
- TAB_PCDS_DYNAMIC_EX_NULL.upper(), \
- TAB_PCDS_DYNAMIC_NULL.upper()]
-INF_PCD_SECTION_LIST = ["FixedPcd".upper(), "FeaturePcd".upper(), \
- "PatchPcd".upper(), "Pcd".upper(), "PcdEx".upper()]
-
-TAB_DEPEX = 'Depex'
-TAB_DEPEX_COMMON = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_DEPEX_IA32 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IA32
-TAB_DEPEX_X64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_X64
-TAB_DEPEX_IPF = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IPF
-TAB_DEPEX_ARM = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_ARM
-TAB_DEPEX_EBC = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_SKUIDS = 'SkuIds'
-
-TAB_LIBRARIES = 'Libraries'
-TAB_LIBRARIES_COMMON = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_LIBRARIES_IA32 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_LIBRARIES_X64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_X64
-TAB_LIBRARIES_IPF = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_LIBRARIES_ARM = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_LIBRARIES_EBC = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_COMPONENTS = 'Components'
-TAB_COMPONENTS_COMMON = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_COMPONENTS_IA32 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IA32
-TAB_COMPONENTS_X64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_X64
-TAB_COMPONENTS_IPF = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IPF
-TAB_COMPONENTS_ARM = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_ARM
-TAB_COMPONENTS_EBC = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_EBC
-
-TAB_COMPONENTS_SOURCE_OVERRIDE_PATH = 'SOURCE_OVERRIDE_PATH'
-
-TAB_BUILD_OPTIONS = 'BuildOptions'
-
-TAB_DEFINE = 'DEFINE'
-TAB_NMAKE = 'Nmake'
-TAB_USER_EXTENSIONS = 'UserExtensions'
-TAB_INCLUDE = '!include'
-TAB_PRIVATE = 'Private'
-TAB_INTEL = 'Intel'
-
-#
-# Common Define
-#
-TAB_COMMON_DEFINES = 'Defines'
-
-#
-# Inf Definitions
-#
-TAB_INF_DEFINES = TAB_COMMON_DEFINES
-TAB_INF_DEFINES_INF_VERSION = 'INF_VERSION'
-TAB_INF_DEFINES_BASE_NAME = 'BASE_NAME'
-TAB_INF_DEFINES_FILE_GUID = 'FILE_GUID'
-TAB_INF_DEFINES_MODULE_TYPE = 'MODULE_TYPE'
-TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION = 'EFI_SPECIFICATION_VERSION'
-TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION = 'UEFI_SPECIFICATION_VERSION'
-TAB_INF_DEFINES_PI_SPECIFICATION_VERSION = 'PI_SPECIFICATION_VERSION'
-TAB_INF_DEFINES_EDK_RELEASE_VERSION = 'EDK_RELEASE_VERSION'
-TAB_INF_DEFINES_MODULE_UNI_FILE = 'MODULE_UNI_FILE'
-TAB_INF_DEFINES_BINARY_MODULE = 'BINARY_MODULE'
-TAB_INF_DEFINES_LIBRARY_CLASS = 'LIBRARY_CLASS'
-TAB_INF_DEFINES_COMPONENT_TYPE = 'COMPONENT_TYPE'
-TAB_INF_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
-TAB_INF_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
-TAB_INF_DEFINES_BUILD_TYPE = 'BUILD_TYPE'
-TAB_INF_DEFINES_FFS_EXT = 'FFS_EXT'
-TAB_INF_DEFINES_FV_EXT = 'FV_EXT'
-TAB_INF_DEFINES_SOURCE_FV = 'SOURCE_FV'
-TAB_INF_DEFINES_PACKAGE = 'PACKAGE'
-TAB_INF_DEFINES_VERSION_NUMBER = 'VERSION_NUMBER'
-TAB_INF_DEFINES_VERSION = 'VERSION'
-TAB_INF_DEFINES_VERSION_STRING = 'VERSION_STRING'
-TAB_INF_DEFINES_PCD_IS_DRIVER = 'PCD_IS_DRIVER'
-TAB_INF_DEFINES_TIANO_EDK1_FLASHMAP_H = 'TIANO_EDK1_FLASHMAP_H'
-TAB_INF_DEFINES_ENTRY_POINT = 'ENTRY_POINT'
-TAB_INF_DEFINES_UNLOAD_IMAGE = 'UNLOAD_IMAGE'
-TAB_INF_DEFINES_CONSTRUCTOR = 'CONSTRUCTOR'
-TAB_INF_DEFINES_DESTRUCTOR = 'DESTRUCTOR'
-TAB_INF_DEFINES_PCI_VENDOR_ID = 'PCI_VENDOR_ID'
-TAB_INF_DEFINES_PCI_DEVICE_ID = 'PCI_DEVICE_ID'
-TAB_INF_DEFINES_PCI_CLASS_CODE = 'PCI_CLASS_CODE'
-TAB_INF_DEFINES_PCI_REVISION = 'PCI_REVISION'
-TAB_INF_DEFINES_PCI_COMPRESS = 'PCI_COMPRESS'
-TAB_INF_DEFINES_DEFINE = 'DEFINE'
-TAB_INF_DEFINES_SPEC = 'SPEC'
-TAB_INF_DEFINES_UEFI_HII_RESOURCE_SECTION = 'UEFI_HII_RESOURCE_SECTION'
-TAB_INF_DEFINES_CUSTOM_MAKEFILE = 'CUSTOM_MAKEFILE'
-TAB_INF_DEFINES_MACRO = '__MACROS__'
-TAB_INF_DEFINES_SHADOW = 'SHADOW'
-TAB_INF_DEFINES_DPX_SOURCE = 'DPX_SOURCE'
-TAB_INF_FIXED_PCD = 'FixedPcd'
-TAB_INF_FEATURE_PCD = 'FeaturePcd'
-TAB_INF_PATCH_PCD = 'PatchPcd'
-TAB_INF_PCD = 'Pcd'
-TAB_INF_PCD_EX = 'PcdEx'
-TAB_INF_GUIDTYPE_VAR = 'Variable'
-TAB_INF_ABSTRACT = 'STR_MODULE_ABSTRACT'
-TAB_INF_DESCRIPTION = 'STR_MODULE_DESCRIPTION'
-TAB_INF_LICENSE = 'STR_MODULE_LICENSE'
-TAB_INF_BINARY_ABSTRACT = 'STR_MODULE_BINARY_ABSTRACT'
-TAB_INF_BINARY_DESCRIPTION = 'STR_MODULE_BINARY_DESCRIPTION'
-TAB_INF_BINARY_LICENSE = 'STR_MODULE_BINARY_LICENSE'
-#
-# Dec Definitions
-#
-TAB_DEC_DEFINES = TAB_COMMON_DEFINES
-TAB_DEC_DEFINES_DEC_SPECIFICATION = 'DEC_SPECIFICATION'
-TAB_DEC_DEFINES_PACKAGE_NAME = 'PACKAGE_NAME'
-TAB_DEC_DEFINES_PACKAGE_GUID = 'PACKAGE_GUID'
-TAB_DEC_DEFINES_PACKAGE_VERSION = 'PACKAGE_VERSION'
-TAB_DEC_DEFINES_PKG_UNI_FILE = 'PACKAGE_UNI_FILE'
-TAB_DEC_PACKAGE_ABSTRACT = 'STR_PACKAGE_ABSTRACT'
-TAB_DEC_PACKAGE_DESCRIPTION = 'STR_PACKAGE_DESCRIPTION'
-TAB_DEC_PACKAGE_LICENSE = 'STR_PACKAGE_LICENSE'
-TAB_DEC_BINARY_ABSTRACT = 'STR_PACKAGE_BINARY_ABSTRACT'
-TAB_DEC_BINARY_DESCRIPTION = 'STR_PACKAGE_BINARY_DESCRIPTION'
-TAB_DEC_BINARY_LICENSE = 'STR_PACKAGE_ASBUILT_LICENSE'
-#
-# Dsc Definitions
-#
-TAB_DSC_DEFINES = TAB_COMMON_DEFINES
-TAB_DSC_DEFINES_PLATFORM_NAME = 'PLATFORM_NAME'
-TAB_DSC_DEFINES_PLATFORM_GUID = 'PLATFORM_GUID'
-TAB_DSC_DEFINES_PLATFORM_VERSION = 'PLATFORM_VERSION'
-TAB_DSC_DEFINES_DSC_SPECIFICATION = 'DSC_SPECIFICATION'
-TAB_DSC_DEFINES_OUTPUT_DIRECTORY = 'OUTPUT_DIRECTORY'
-TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES = 'SUPPORTED_ARCHITECTURES'
-TAB_DSC_DEFINES_BUILD_TARGETS = 'BUILD_TARGETS'
-TAB_DSC_DEFINES_SKUID_IDENTIFIER = 'SKUID_IDENTIFIER'
-TAB_DSC_DEFINES_FLASH_DEFINITION = 'FLASH_DEFINITION'
-TAB_DSC_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
-TAB_DSC_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
-TAB_DSC_DEFINES_BS_BASE_ADDRESS = 'BsBaseAddress'
-TAB_DSC_DEFINES_RT_BASE_ADDRESS = 'RtBaseAddress'
-TAB_DSC_DEFINES_DEFINE = 'DEFINE'
-TAB_FIX_LOAD_TOP_MEMORY_ADDRESS = 'FIX_LOAD_TOP_MEMORY_ADDRESS'
-
-#
-# TargetTxt Definitions
-#
-TAB_TAT_DEFINES_ACTIVE_PLATFORM = 'ACTIVE_PLATFORM'
-TAB_TAT_DEFINES_ACTIVE_MODULE = 'ACTIVE_MODULE'
-TAB_TAT_DEFINES_TOOL_CHAIN_CONF = 'TOOL_CHAIN_CONF'
-TAB_TAT_DEFINES_MULTIPLE_THREAD = 'MULTIPLE_THREAD'
-TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER = 'MAX_CONCURRENT_THREAD_NUMBER'
-TAB_TAT_DEFINES_TARGET = 'TARGET'
-TAB_TAT_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
-TAB_TAT_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
-TAB_TAT_DEFINES_BUILD_RULE_CONF = "BUILD_RULE_CONF"
-
-#
-# ToolDef Definitions
-#
-TAB_TOD_DEFINES_TARGET = 'TARGET'
-TAB_TOD_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
-TAB_TOD_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
-TAB_TOD_DEFINES_COMMAND_TYPE = 'COMMAND_TYPE'
-TAB_TOD_DEFINES_FAMILY = 'FAMILY'
-TAB_TOD_DEFINES_BUILDRULEFAMILY = 'BUILDRULEFAMILY'
-
-#
-# Conditional Statements
-#
-TAB_IF = '!if'
-TAB_END_IF = '!endif'
-TAB_ELSE_IF = '!elseif'
-TAB_ELSE = '!else'
-TAB_IF_DEF = '!ifdef'
-TAB_IF_N_DEF = '!ifndef'
-TAB_IF_EXIST = '!if exist'
-
-#
-# Unknown section
-#
-TAB_UNKNOWN = 'UNKNOWN'
-
-#
-# Header section (virtual section for abstract, description, copyright,
-# license)
-#
-TAB_HEADER = 'Header'
-TAB_HEADER_ABSTRACT = 'Abstract'
-TAB_HEADER_DESCRIPTION = 'Description'
-TAB_HEADER_COPYRIGHT = 'Copyright'
-TAB_HEADER_LICENSE = 'License'
-TAB_BINARY_HEADER_IDENTIFIER = 'BinaryHeader'
-TAB_BINARY_HEADER_USERID = 'TianoCore'
-
-#
-# Build database path
-#
-DATABASE_PATH = ":memory:"
-#
-# used by ECC
-#
-MODIFIER_LIST = ['IN', 'OUT', 'OPTIONAL', 'UNALIGNED', 'EFI_RUNTIMESERVICE', \
- 'EFI_BOOTSERVICE', 'EFIAPI']
-#
-# Dependency Expression
-#
-DEPEX_SUPPORTED_OPCODE = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", \
- "END", "SOR", "TRUE", "FALSE", '(', ')']
-
-TAB_STATIC_LIBRARY = "STATIC-LIBRARY-FILE"
-TAB_DYNAMIC_LIBRARY = "DYNAMIC-LIBRARY-FILE"
-TAB_FRAMEWORK_IMAGE = "EFI-IMAGE-FILE"
-TAB_C_CODE_FILE = "C-CODE-FILE"
-TAB_C_HEADER_FILE = "C-HEADER-FILE"
-TAB_UNICODE_FILE = "UNICODE-TEXT-FILE"
-TAB_DEPENDENCY_EXPRESSION_FILE = "DEPENDENCY-EXPRESSION-FILE"
-TAB_UNKNOWN_FILE = "UNKNOWN-TYPE-FILE"
-TAB_DEFAULT_BINARY_FILE = "_BINARY_FILE_"
-#
-# used to indicate the state of processing header comment section of dec,
-# inf files
-#
-HEADER_COMMENT_NOT_STARTED = -1
-HEADER_COMMENT_STARTED = 0
-HEADER_COMMENT_FILE = 1
-HEADER_COMMENT_ABSTRACT = 2
-HEADER_COMMENT_DESCRIPTION = 3
-HEADER_COMMENT_COPYRIGHT = 4
-HEADER_COMMENT_LICENSE = 5
-HEADER_COMMENT_END = 6
-
-#
-# Static values for data models
-#
-MODEL_UNKNOWN = 0
-
-MODEL_FILE_C = 1001
-MODEL_FILE_H = 1002
-MODEL_FILE_ASM = 1003
-MODEL_FILE_INF = 1011
-MODEL_FILE_DEC = 1012
-MODEL_FILE_DSC = 1013
-MODEL_FILE_FDF = 1014
-MODEL_FILE_INC = 1015
-MODEL_FILE_CIF = 1016
-
-MODEL_IDENTIFIER_FILE_HEADER = 2001
-MODEL_IDENTIFIER_FUNCTION_HEADER = 2002
-MODEL_IDENTIFIER_COMMENT = 2003
-MODEL_IDENTIFIER_PARAMETER = 2004
-MODEL_IDENTIFIER_STRUCTURE = 2005
-MODEL_IDENTIFIER_VARIABLE = 2006
-MODEL_IDENTIFIER_INCLUDE = 2007
-MODEL_IDENTIFIER_PREDICATE_EXPRESSION = 2008
-MODEL_IDENTIFIER_ENUMERATE = 2009
-MODEL_IDENTIFIER_PCD = 2010
-MODEL_IDENTIFIER_UNION = 2011
-MODEL_IDENTIFIER_MACRO_IFDEF = 2012
-MODEL_IDENTIFIER_MACRO_IFNDEF = 2013
-MODEL_IDENTIFIER_MACRO_DEFINE = 2014
-MODEL_IDENTIFIER_MACRO_ENDIF = 2015
-MODEL_IDENTIFIER_MACRO_PROGMA = 2016
-MODEL_IDENTIFIER_FUNCTION_CALLING = 2018
-MODEL_IDENTIFIER_TYPEDEF = 2017
-MODEL_IDENTIFIER_FUNCTION_DECLARATION = 2019
-MODEL_IDENTIFIER_ASSIGNMENT_EXPRESSION = 2020
-
-MODEL_EFI_PROTOCOL = 3001
-MODEL_EFI_PPI = 3002
-MODEL_EFI_GUID = 3003
-MODEL_EFI_LIBRARY_CLASS = 3004
-MODEL_EFI_LIBRARY_INSTANCE = 3005
-MODEL_EFI_PCD = 3006
-MODEL_EFI_SOURCE_FILE = 3007
-MODEL_EFI_BINARY_FILE = 3008
-MODEL_EFI_SKU_ID = 3009
-MODEL_EFI_INCLUDE = 3010
-MODEL_EFI_DEPEX = 3011
-
-MODEL_PCD = 4000
-MODEL_PCD_FIXED_AT_BUILD = 4001
-MODEL_PCD_PATCHABLE_IN_MODULE = 4002
-MODEL_PCD_FEATURE_FLAG = 4003
-MODEL_PCD_DYNAMIC_EX = 4004
-MODEL_PCD_DYNAMIC_EX_DEFAULT = 4005
-MODEL_PCD_DYNAMIC_EX_VPD = 4006
-MODEL_PCD_DYNAMIC_EX_HII = 4007
-MODEL_PCD_DYNAMIC = 4008
-MODEL_PCD_DYNAMIC_DEFAULT = 4009
-MODEL_PCD_DYNAMIC_VPD = 4010
-MODEL_PCD_DYNAMIC_HII = 4011
-
-MODEL_META_DATA_FILE_HEADER = 5000
-MODEL_META_DATA_HEADER = 5001
-MODEL_META_DATA_INCLUDE = 5002
-MODEL_META_DATA_DEFINE = 5003
-MODEL_META_DATA_CONDITIONAL_STATEMENT_IF = 5004
-MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE = 5005
-MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF = 5006
-MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF = 5007
-MODEL_META_DATA_BUILD_OPTION = 5008
-MODEL_META_DATA_COMPONENT = 5009
-MODEL_META_DATA_USER_EXTENSION = 5010
-MODEL_META_DATA_PACKAGE = 5011
-MODEL_META_DATA_NMAKE = 5012
-MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSEIF = 50013
-MODEL_META_DATA_CONDITIONAL_STATEMENT_ENDIF = 5014
-MODEL_META_DATA_COMPONENT_SOURCE_OVERRIDE_PATH = 5015
-
-TOOL_FAMILY_LIST = ["MSFT",
- "INTEL",
- "GCC",
- "RVCT"
- ]
-
-TYPE_HOB_SECTION = 'HOB'
-TYPE_EVENT_SECTION = 'EVENT'
-TYPE_BOOTMODE_SECTION = 'BOOTMODE'
-
-PCD_ERR_CODE_MAX_SIZE = 4294967295
diff --git a/BaseTools/Source/Python/UPT/Library/ExpressionValidate.py b/BaseTools/Source/Python/UPT/Library/ExpressionValidate.py
deleted file mode 100644
index 090c7eb957..0000000000
--- a/BaseTools/Source/Python/UPT/Library/ExpressionValidate.py
+++ /dev/null
@@ -1,572 +0,0 @@
-## @file
-# This file is used to check PCD logical expression
-#
-# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-'''
-ExpressionValidate
-'''
-
-##
-# Import Modules
-#
-import re
-from Logger import StringTable as ST
-
-## IsValidBareCString
-#
-# Check if String is comprised by whitespace(0x20), !(0x21), 0x23 - 0x7E
-# or '\n', '\t', '\f', '\r', '\b', '\0', '\\'
-#
-# @param String: string to be checked
-#
-def IsValidBareCString(String):
- EscapeList = ['n', 't', 'f', 'r', 'b', '0', '\\', '"']
- PreChar = ''
- LastChar = ''
- for Char in String:
- LastChar = Char
- if PreChar == '\\':
- if Char not in EscapeList:
- return False
- if Char == '\\':
- PreChar = ''
- continue
- else:
- IntChar = ord(Char)
- if IntChar != 0x20 and IntChar != 0x09 and IntChar != 0x21 \
- and (IntChar < 0x23 or IntChar > 0x7e):
- return False
- PreChar = Char
-
- # Last char cannot be \ if PreChar is not \
- if LastChar == '\\' and PreChar == LastChar:
- return False
- return True
-
-def _ValidateToken(Token):
- Token = Token.strip()
- Index = Token.find("\"")
- if Index != -1:
- return IsValidBareCString(Token[Index+1:-1])
- return True
-
-## _ExprError
-#
-# @param Exception: Exception
-#
-class _ExprError(Exception):
- def __init__(self, Error = ''):
- Exception.__init__(self)
- self.Error = Error
-
-## _ExprBase
-#
-class _ExprBase:
- HEX_PATTERN = '[\t\s]*0[xX][a-fA-F0-9]+'
- INT_PATTERN = '[\t\s]*[0-9]+'
- MACRO_PATTERN = '[\t\s]*\$\(([A-Z][_A-Z0-9]*)\)'
- PCD_PATTERN = \
- '[\t\s]*[_a-zA-Z][a-zA-Z0-9_]*[\t\s]*\.[\t\s]*[_a-zA-Z][a-zA-Z0-9_]*'
- QUOTED_PATTERN = '[\t\s]*L?"[^"]*"'
- BOOL_PATTERN = '[\t\s]*(true|True|TRUE|false|False|FALSE)'
- def __init__(self, Token):
- self.Token = Token
- self.Index = 0
- self.Len = len(Token)
-
- ## SkipWhitespace
- #
- def SkipWhitespace(self):
- for Char in self.Token[self.Index:]:
- if Char not in ' \t':
- break
- self.Index += 1
-
- ## IsCurrentOp
- #
- # @param OpList: option list
- #
- def IsCurrentOp(self, OpList):
- self.SkipWhitespace()
- LetterOp = ["EQ", "NE", "GE", "LE", "GT", "LT", "NOT", "and", "AND",
- "or", "OR", "XOR"]
- OpMap = {
- '|' : '|',
- '&' : '&',
- '!' : '=',
- '>' : '=',
- '<' : '='
- }
-
- for Operator in OpList:
- if not self.Token[self.Index:].startswith(Operator):
- continue
-
- self.Index += len(Operator)
- Char = self.Token[self.Index : self.Index + 1]
-
- if (Operator in LetterOp and (Char == '_' or Char.isalnum())) \
- or (Operator in OpMap and OpMap[Operator] == Char):
- self.Index -= len(Operator)
- break
-
- return True
-
- return False
-
-## _LogicalExpressionParser
-#
-# @param _ExprBase: _ExprBase object
-#
-class _LogicalExpressionParser(_ExprBase):
- #
- # STRINGITEM can only be logical field according to spec
- #
- STRINGITEM = -1
-
- #
- # Evaluate to True or False
- #
- LOGICAL = 0
- REALLOGICAL = 2
-
- #
- # Just arithmetic expression
- #
- ARITH = 1
-
- def __init__(self, Token):
- _ExprBase.__init__(self, Token)
- self.Parens = 0
-
- def _CheckToken(self, MatchList):
- for Match in MatchList:
- if Match and Match.start() == 0:
- if not _ValidateToken(
- self.Token[self.Index:self.Index+Match.end()]
- ):
- return False
-
- self.Index += Match.end()
- if self.Token[self.Index - 1] == '"':
- return True
- if self.Token[self.Index:self.Index+1] == '_' or \
- self.Token[self.Index:self.Index+1].isalnum():
- self.Index -= Match.end()
- return False
-
- Token = self.Token[self.Index - Match.end():self.Index]
- if Token.strip() in ["EQ", "NE", "GE", "LE", "GT", "LT",
- "NOT", "and", "AND", "or", "OR", "XOR"]:
- self.Index -= Match.end()
- return False
-
- return True
-
- return False
-
- def IsAtomicNumVal(self):
- #
- # Hex number
- #
- Match1 = re.compile(self.HEX_PATTERN).match(self.Token[self.Index:])
-
- #
- # Number
- #
- Match2 = re.compile(self.INT_PATTERN).match(self.Token[self.Index:])
-
- #
- # Macro
- #
- Match3 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])
-
- #
- # PcdName
- #
- Match4 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])
-
- return self._CheckToken([Match1, Match2, Match3, Match4])
-
-
- def IsAtomicItem(self):
- #
- # Macro
- #
- Match1 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])
-
- #
- # PcdName
- #
- Match2 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])
-
- #
- # Quoted string
- #
- Match3 = re.compile(self.QUOTED_PATTERN).\
- match(self.Token[self.Index:].replace('\\\\', '//').\
- replace('\\\"', '\\\''))
-
- return self._CheckToken([Match1, Match2, Match3])
-
- ## A || B
- #
- def LogicalExpression(self):
- Ret = self.SpecNot()
- while self.IsCurrentOp(['||', 'OR', 'or', '&&', 'AND', 'and', 'XOR', 'xor', '^']):
- if self.Token[self.Index-1] == '|' and self.Parens <= 0:
- raise _ExprError(ST.ERR_EXPR_OR % self.Token)
- if Ret not in [self.ARITH, self.LOGICAL, self.REALLOGICAL, self.STRINGITEM]:
- raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
- Ret = self.SpecNot()
- if Ret not in [self.ARITH, self.LOGICAL, self.REALLOGICAL, self.STRINGITEM]:
- raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
- Ret = self.REALLOGICAL
- return Ret
-
- def SpecNot(self):
- if self.IsCurrentOp(["NOT", "!", "not"]):
- return self.SpecNot()
- return self.Rel()
-
- ## A < B, A > B, A <= B, A >= B
- #
- def Rel(self):
- Ret = self.Expr()
- if self.IsCurrentOp(["<=", ">=", ">", "<", "GT", "LT", "GE", "LE",
- "==", "EQ", "!=", "NE"]):
- if Ret == self.STRINGITEM:
- raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
- Ret = self.Expr()
- if Ret == self.REALLOGICAL:
- raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
- Ret = self.REALLOGICAL
- return Ret
-
- ## A + B, A - B
- #
- def Expr(self):
- Ret = self.Factor()
- while self.IsCurrentOp(["+", "-", "&", "|", "^", "XOR", "xor"]):
- if self.Token[self.Index-1] == '|' and self.Parens <= 0:
- raise _ExprError(ST.ERR_EXPR_OR)
- if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
- raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
- Ret = self.Factor()
- if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
- raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
- Ret = self.ARITH
- return Ret
-
- ## Factor
- #
- def Factor(self):
- if self.IsCurrentOp(["("]):
- self.Parens += 1
- Ret = self.LogicalExpression()
- if not self.IsCurrentOp([")"]):
- raise _ExprError(ST.ERR_EXPR_RIGHT_PAREN % \
- (self.Token, self.Token[self.Index:]))
- self.Parens -= 1
- return Ret
-
- if self.IsAtomicItem():
- if self.Token[self.Index - 1] == '"':
- return self.STRINGITEM
- return self.LOGICAL
- elif self.IsAtomicNumVal():
- return self.ARITH
- else:
- raise _ExprError(ST.ERR_EXPR_FACTOR % \
- (self.Token[self.Index:], self.Token))
-
- ## IsValidLogicalExpression
- #
- def IsValidLogicalExpression(self):
- if self.Len == 0:
- return False, ST.ERR_EXPRESS_EMPTY
- try:
- if self.LogicalExpression() not in [self.ARITH, self.LOGICAL, self.REALLOGICAL, self.STRINGITEM]:
- return False, ST.ERR_EXPR_LOGICAL % self.Token
- except _ExprError, XExcept:
- return False, XExcept.Error
- self.SkipWhitespace()
- if self.Index != self.Len:
- return False, (ST.ERR_EXPR_BOOLEAN % \
- (self.Token[self.Index:], self.Token))
- return True, ''
-
-## _ValidRangeExpressionParser
-#
-class _ValidRangeExpressionParser(_ExprBase):
- INT_RANGE_PATTERN = '[\t\s]*[0-9]+[\t\s]*-[\t\s]*[0-9]+'
- HEX_RANGE_PATTERN = \
- '[\t\s]*0[xX][a-fA-F0-9]+[\t\s]*-[\t\s]*0[xX][a-fA-F0-9]+'
- def __init__(self, Token):
- _ExprBase.__init__(self, Token)
- self.Parens = 0
- self.HEX = 1
- self.INT = 2
- self.IsParenHappen = False
- self.IsLogicalOpHappen = False
-
- ## IsValidRangeExpression
- #
- def IsValidRangeExpression(self):
- if self.Len == 0:
- return False, ST.ERR_EXPR_RANGE_EMPTY
- try:
- if self.RangeExpression() not in [self.HEX, self.INT]:
- return False, ST.ERR_EXPR_RANGE % self.Token
- except _ExprError, XExcept:
- return False, XExcept.Error
-
- self.SkipWhitespace()
- if self.Index != self.Len:
- return False, (ST.ERR_EXPR_RANGE % self.Token)
- return True, ''
-
- ## RangeExpression
- #
- def RangeExpression(self):
- Ret = self.Unary()
- while self.IsCurrentOp(['OR', 'AND', 'and', 'or']):
- self.IsLogicalOpHappen = True
- if not self.IsParenHappen:
- raise _ExprError(ST.ERR_PAREN_NOT_USED % self.Token)
- self.IsParenHappen = False
- Ret = self.Unary()
-
- if self.IsCurrentOp(['XOR']):
- Ret = self.Unary()
-
- return Ret
-
- ## Unary
- #
- def Unary(self):
- if self.IsCurrentOp(["NOT"]):
- return self.Unary()
-
- return self.ValidRange()
-
- ## ValidRange
- #
- def ValidRange(self):
- Ret = -1
- if self.IsCurrentOp(["("]):
- self.IsLogicalOpHappen = False
- self.IsParenHappen = True
- self.Parens += 1
- if self.Parens > 1:
- raise _ExprError(ST.ERR_EXPR_RANGE_DOUBLE_PAREN_NESTED % self.Token)
- Ret = self.RangeExpression()
- if not self.IsCurrentOp([")"]):
- raise _ExprError(ST.ERR_EXPR_RIGHT_PAREN % self.Token)
- self.Parens -= 1
- return Ret
-
- if self.IsLogicalOpHappen:
- raise _ExprError(ST.ERR_PAREN_NOT_USED % self.Token)
-
- if self.IsCurrentOp(["LT", "GT", "LE", "GE", "EQ", "XOR"]):
- IntMatch = \
- re.compile(self.INT_PATTERN).match(self.Token[self.Index:])
- HexMatch = \
- re.compile(self.HEX_PATTERN).match(self.Token[self.Index:])
- if HexMatch and HexMatch.start() == 0:
- self.Index += HexMatch.end()
- Ret = self.HEX
- elif IntMatch and IntMatch.start() == 0:
- self.Index += IntMatch.end()
- Ret = self.INT
- else:
- raise _ExprError(ST.ERR_EXPR_RANGE_FACTOR % (self.Token[self.Index:], self.Token))
- else:
- IntRangeMatch = re.compile(
- self.INT_RANGE_PATTERN).match(self.Token[self.Index:]
- )
- HexRangeMatch = re.compile(
- self.HEX_RANGE_PATTERN).match(self.Token[self.Index:]
- )
- if HexRangeMatch and HexRangeMatch.start() == 0:
- self.Index += HexRangeMatch.end()
- Ret = self.HEX
- elif IntRangeMatch and IntRangeMatch.start() == 0:
- self.Index += IntRangeMatch.end()
- Ret = self.INT
- else:
- raise _ExprError(ST.ERR_EXPR_RANGE % self.Token)
-
- return Ret
-
-## _ValidListExpressionParser
-#
-class _ValidListExpressionParser(_ExprBase):
- VALID_LIST_PATTERN = '(0[xX][0-9a-fA-F]+|[0-9]+)([\t\s]*,[\t\s]*(0[xX][0-9a-fA-F]+|[0-9]+))*'
- def __init__(self, Token):
- _ExprBase.__init__(self, Token)
- self.NUM = 1
-
- def IsValidListExpression(self):
- if self.Len == 0:
- return False, ST.ERR_EXPR_LIST_EMPTY
- try:
- if self.ListExpression() not in [self.NUM]:
- return False, ST.ERR_EXPR_LIST % self.Token
- except _ExprError, XExcept:
- return False, XExcept.Error
-
- self.SkipWhitespace()
- if self.Index != self.Len:
- return False, (ST.ERR_EXPR_LIST % self.Token)
-
- return True, ''
-
- def ListExpression(self):
- Ret = -1
- self.SkipWhitespace()
- ListMatch = re.compile(self.VALID_LIST_PATTERN).match(self.Token[self.Index:])
- if ListMatch and ListMatch.start() == 0:
- self.Index += ListMatch.end()
- Ret = self.NUM
- else:
- raise _ExprError(ST.ERR_EXPR_LIST % self.Token)
-
- return Ret
-
-## _StringTestParser
-#
-class _StringTestParser(_ExprBase):
- def __init__(self, Token):
- _ExprBase.__init__(self, Token)
-
- ## IsValidStringTest
- #
- def IsValidStringTest(self):
- if self.Len == 0:
- return False, ST.ERR_EXPR_EMPTY
- try:
- self.StringTest()
- except _ExprError, XExcept:
- return False, XExcept.Error
- return True, ''
-
- ## StringItem
- #
- def StringItem(self):
- Match1 = re.compile(self.QUOTED_PATTERN)\
- .match(self.Token[self.Index:].replace('\\\\', '//')\
- .replace('\\\"', '\\\''))
- Match2 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])
- Match3 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])
- MatchList = [Match1, Match2, Match3]
- for Match in MatchList:
- if Match and Match.start() == 0:
- if not _ValidateToken(
- self.Token[self.Index:self.Index+Match.end()]
- ):
- raise _ExprError(ST.ERR_EXPR_STRING_ITEM % \
- (self.Token, self.Token[self.Index:]))
- self.Index += Match.end()
- Token = self.Token[self.Index - Match.end():self.Index]
- if Token.strip() in ["EQ", "NE"]:
- raise _ExprError(ST.ERR_EXPR_STRING_ITEM % \
- (self.Token, self.Token[self.Index:]))
- return
- else:
- raise _ExprError(ST.ERR_EXPR_STRING_ITEM % \
- (self.Token, self.Token[self.Index:]))
-
- ## StringTest
- #
- def StringTest(self):
- self.StringItem()
- if not self.IsCurrentOp(["==", "EQ", "!=", "NE"]):
- raise _ExprError(ST.ERR_EXPR_EQUALITY % \
- (self.Token[self.Index:], self.Token))
- self.StringItem()
- if self.Index != self.Len:
- raise _ExprError(ST.ERR_EXPR_BOOLEAN % \
- (self.Token[self.Index:], self.Token))
-
-##
-# Check syntax of string test
-#
-# @param Token: string test token
-#
-def IsValidStringTest(Token, Flag=False):
- #
- # Not do the check right now, keep the implementation for future enhancement.
- #
- if not Flag:
- return True, ""
- return _StringTestParser(Token).IsValidStringTest()
-
-
-##
-# Check syntax of logical expression
-#
-# @param Token: expression token
-#
-def IsValidLogicalExpr(Token, Flag=False):
- #
- # Not do the check right now, keep the implementation for future enhancement.
- #
- if not Flag:
- return True, ""
- return _LogicalExpressionParser(Token).IsValidLogicalExpression()
-
-##
-# Check syntax of range expression
-#
-# @param Token: range expression token
-#
-def IsValidRangeExpr(Token):
- return _ValidRangeExpressionParser(Token).IsValidRangeExpression()
-
-##
-# Check syntax of value list expression token
-#
-# @param Token: value list expression token
-#
-def IsValidListExpr(Token):
- return _ValidListExpressionParser(Token).IsValidListExpression()
-
-##
-# Check whether the feature flag expression is valid or not
-#
-# @param Token: feature flag expression
-#
-def IsValidFeatureFlagExp(Token, Flag=False):
- #
- # Not do the check right now, keep the implementation for future enhancement.
- #
- if not Flag:
- return True, "", Token
- else:
- if Token in ['TRUE', 'FALSE', 'true', 'false', 'True', 'False',
- '0x1', '0x01', '0x0', '0x00']:
- return True, ""
- Valid, Cause = IsValidStringTest(Token, Flag)
- if not Valid:
- Valid, Cause = IsValidLogicalExpr(Token, Flag)
- if not Valid:
- return False, Cause
- return True, ""
-
-if __name__ == '__main__':
-# print IsValidRangeExpr('LT 9')
- print _LogicalExpressionParser('gCrownBayTokenSpaceGuid.PcdPciDevice1BridgeAddressLE0').IsValidLogicalExpression()
-
-
-
diff --git a/BaseTools/Source/Python/UPT/Library/GlobalData.py b/BaseTools/Source/Python/UPT/Library/GlobalData.py
deleted file mode 100644
index 8f446d4888..0000000000
--- a/BaseTools/Source/Python/UPT/Library/GlobalData.py
+++ /dev/null
@@ -1,111 +0,0 @@
-## @file
-# This file is used to define common static strings and global data used by UPT
-#
-# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-'''
-GlobalData
-'''
-
-#
-# The workspace directory
-#
-gWORKSPACE = '.'
-gPACKAGE_PATH = None
-
-#
-# INF module directory
-#
-gINF_MODULE_DIR = "."
-gINF_MODULE_NAME = ''
-
-#
-# the directory to holds upt related files
-#
-gUPT_DIR = r"Conf/upt/"
-
-#
-# Log file for invalid meta-data files during force removing
-#
-gINVALID_MODULE_FILE = gUPT_DIR + r"Invalid_Modules.log"
-
-#
-# File name for content zip file in the distribution
-#
-gCONTENT_FILE = "dist.content"
-
-#
-# File name for XML file in the distibution
-#
-gDESC_FILE = 'dist.pkg'
-
-#
-# Case Insensitive flag
-#
-gCASE_INSENSITIVE = ''
-
-#
-# All Files dictionary
-#
-gALL_FILES = {}
-
-#
-# Database instance
-#
-gDB = None
-
-#
-# list for files that are found in module level but not in INF files,
-# items are (File, ModulePath), all these should be relative to $(WORKSPACE)
-#
-gMISS_FILE_IN_MODLIST = []
-
-#
-# Global Current Line
-#
-gINF_CURRENT_LINE = None
-
-#
-# Global pkg list
-#
-gWSPKG_LIST = []
-
-#
-# Flag used to take WARN as ERROR.
-# By default, only ERROR message will break the tools execution.
-#
-gWARNING_AS_ERROR = False
-
-#
-# Used to specify the temp directory to hold the unpacked distribution files
-#
-gUNPACK_DIR = None
-
-#
-# Flag used to mark whether the INF file is Binary INF or not.
-#
-gIS_BINARY_INF = False
-
-#
-# Used by FileHook module.
-#
-gRECOVERMGR = None
-
-#
-# Used by PCD parser
-#
-gPackageDict = {}
-
-#
-# Used by Library instance parser
-# {FilePath: FileObj}
-#
-gLIBINSTANCEDICT = {}
diff --git a/BaseTools/Source/Python/UPT/Library/Misc.py b/BaseTools/Source/Python/UPT/Library/Misc.py
deleted file mode 100644
index 0d92cb3767..0000000000
--- a/BaseTools/Source/Python/UPT/Library/Misc.py
+++ /dev/null
@@ -1,1125 +0,0 @@
-## @file
-# Common routines used by all tools
-#
-# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-'''
-Misc
-'''
-
-##
-# Import Modules
-#
-import os.path
-from os import access
-from os import F_OK
-from os import makedirs
-from os import getcwd
-from os import chdir
-from os import listdir
-from os import remove
-from os import rmdir
-from os import linesep
-from os import walk
-from os import environ
-import re
-from UserDict import IterableUserDict
-
-import Logger.Log as Logger
-from Logger import StringTable as ST
-from Logger import ToolError
-from Library import GlobalData
-from Library.DataType import SUP_MODULE_LIST
-from Library.DataType import END_OF_LINE
-from Library.DataType import TAB_SPLIT
-from Library.DataType import TAB_LANGUAGE_EN_US
-from Library.DataType import TAB_LANGUAGE_EN
-from Library.DataType import TAB_LANGUAGE_EN_X
-from Library.DataType import TAB_UNI_FILE_SUFFIXS
-from Library.String import GetSplitValueList
-from Library.ParserValidate import IsValidHexVersion
-from Library.ParserValidate import IsValidPath
-from Object.POM.CommonObject import TextObject
-from Core.FileHook import __FileHookOpen__
-from Common.MultipleWorkspace import MultipleWorkspace as mws
-
-## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C
-# structure style
-#
-# @param Guid: The GUID string
-#
-def GuidStringToGuidStructureString(Guid):
- GuidList = Guid.split('-')
- Result = '{'
- for Index in range(0, 3, 1):
- Result = Result + '0x' + GuidList[Index] + ', '
- Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
- for Index in range(0, 12, 2):
- Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
- Result += '}}'
- return Result
-
-## Check whether GUID string is of format xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-#
-# @param GuidValue: The GUID value
-#
-def CheckGuidRegFormat(GuidValue):
- ## Regular expression used to find out register format of GUID
- #
- RegFormatGuidPattern = re.compile("^\s*([0-9a-fA-F]){8}-"
- "([0-9a-fA-F]){4}-"
- "([0-9a-fA-F]){4}-"
- "([0-9a-fA-F]){4}-"
- "([0-9a-fA-F]){12}\s*$")
-
- if RegFormatGuidPattern.match(GuidValue):
- return True
- else:
- return False
-
-
-## Convert GUID string in C structure style to
-# xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-#
-# @param GuidValue: The GUID value in C structure format
-#
-def GuidStructureStringToGuidString(GuidValue):
- GuidValueString = GuidValue.lower().replace("{", "").replace("}", "").\
- replace(" ", "").replace(";", "")
- GuidValueList = GuidValueString.split(",")
- if len(GuidValueList) != 11:
- return ''
- try:
- return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
- int(GuidValueList[0], 16),
- int(GuidValueList[1], 16),
- int(GuidValueList[2], 16),
- int(GuidValueList[3], 16),
- int(GuidValueList[4], 16),
- int(GuidValueList[5], 16),
- int(GuidValueList[6], 16),
- int(GuidValueList[7], 16),
- int(GuidValueList[8], 16),
- int(GuidValueList[9], 16),
- int(GuidValueList[10], 16)
- )
- except BaseException:
- return ''
-
-## Create directories
-#
-# @param Directory: The directory name
-#
-def CreateDirectory(Directory):
- if Directory == None or Directory.strip() == "":
- return True
- try:
- if not access(Directory, F_OK):
- makedirs(Directory)
- except BaseException:
- return False
- return True
-
-## Remove directories, including files and sub-directories in it
-#
-# @param Directory: The directory name
-#
-def RemoveDirectory(Directory, Recursively=False):
- if Directory == None or Directory.strip() == "" or not \
- os.path.exists(Directory):
- return
- if Recursively:
- CurrentDirectory = getcwd()
- chdir(Directory)
- for File in listdir("."):
- if os.path.isdir(File):
- RemoveDirectory(File, Recursively)
- else:
- remove(File)
- chdir(CurrentDirectory)
- rmdir(Directory)
-
-## Store content in file
-#
-# This method is used to save file only when its content is changed. This is
-# quite useful for "make" system to decide what will be re-built and what
-# won't.
-#
-# @param File: The path of file
-# @param Content: The new content of the file
-# @param IsBinaryFile: The flag indicating if the file is binary file
-# or not
-#
-def SaveFileOnChange(File, Content, IsBinaryFile=True):
- if not IsBinaryFile:
- Content = Content.replace("\n", linesep)
-
- if os.path.exists(File):
- try:
- if Content == __FileHookOpen__(File, "rb").read():
- return False
- except BaseException:
- Logger.Error(None, ToolError.FILE_OPEN_FAILURE, ExtraData=File)
-
- CreateDirectory(os.path.dirname(File))
- try:
- FileFd = __FileHookOpen__(File, "wb")
- FileFd.write(Content)
- FileFd.close()
- except BaseException:
- Logger.Error(None, ToolError.FILE_CREATE_FAILURE, ExtraData=File)
-
- return True
-
-## Get all files of a directory
-#
-# @param Root: Root dir
-# @param SkipList : The files need be skipped
-#
-def GetFiles(Root, SkipList=None, FullPath=True):
- OriPath = os.path.normpath(Root)
- FileList = []
- for Root, Dirs, Files in walk(Root):
- if SkipList:
- for Item in SkipList:
- if Item in Dirs:
- Dirs.remove(Item)
- if Item in Files:
- Files.remove(Item)
- for Dir in Dirs:
- if Dir.startswith('.'):
- Dirs.remove(Dir)
-
- for File in Files:
- if File.startswith('.'):
- continue
- File = os.path.normpath(os.path.join(Root, File))
- if not FullPath:
- File = File[len(OriPath) + 1:]
- FileList.append(File)
-
- return FileList
-
-## Get all non-metadata files of a directory
-#
-# @param Root: Root Dir
-# @param SkipList : List of path need be skipped
-# @param FullPath: True if the returned file should be full path
-# @param PrefixPath: the path that need to be added to the files found
-# @return: the list of files found
-#
-def GetNonMetaDataFiles(Root, SkipList, FullPath, PrefixPath):
- FileList = GetFiles(Root, SkipList, FullPath)
- NewFileList = []
- for File in FileList:
- ExtName = os.path.splitext(File)[1]
- #
- # skip '.dec', '.inf', '.dsc', '.fdf' files
- #
- if ExtName.lower() not in ['.dec', '.inf', '.dsc', '.fdf']:
- NewFileList.append(os.path.normpath(os.path.join(PrefixPath, File)))
-
- return NewFileList
-
-## Check if given file exists or not
-#
-# @param File: File name or path to be checked
-# @param Dir: The directory the file is relative to
-#
-def ValidFile(File, Ext=None):
- File = File.replace('\\', '/')
- if Ext != None:
- FileExt = os.path.splitext(File)[1]
- if FileExt.lower() != Ext.lower():
- return False
- if not os.path.exists(File):
- return False
- return True
-
-## RealPath
-#
-# @param File: File name or path to be checked
-# @param Dir: The directory the file is relative to
-# @param OverrideDir: The override directory
-#
-def RealPath(File, Dir='', OverrideDir=''):
- NewFile = os.path.normpath(os.path.join(Dir, File))
- NewFile = GlobalData.gALL_FILES[NewFile]
- if not NewFile and OverrideDir:
- NewFile = os.path.normpath(os.path.join(OverrideDir, File))
- NewFile = GlobalData.gALL_FILES[NewFile]
- return NewFile
-
-## RealPath2
-#
-# @param File: File name or path to be checked
-# @param Dir: The directory the file is relative to
-# @param OverrideDir: The override directory
-#
-def RealPath2(File, Dir='', OverrideDir=''):
- if OverrideDir:
- NewFile = GlobalData.gALL_FILES[os.path.normpath(os.path.join\
- (OverrideDir, File))]
- if NewFile:
- if OverrideDir[-1] == os.path.sep:
- return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
- else:
- return NewFile[len(OverrideDir) + 1:], \
- NewFile[0:len(OverrideDir)]
-
- NewFile = GlobalData.gALL_FILES[os.path.normpath(os.path.join(Dir, File))]
- if NewFile:
- if Dir:
- if Dir[-1] == os.path.sep:
- return NewFile[len(Dir):], NewFile[0:len(Dir)]
- else:
- return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
- else:
- return NewFile, ''
-
- return None, None
-
-## A dict which can access its keys and/or values orderly
-#
-# The class implements a new kind of dict which its keys or values can be
-# accessed in the order they are added into the dict. It guarantees the order
-# by making use of an internal list to keep a copy of keys.
-#
-class Sdict(IterableUserDict):
- ## Constructor
- #
- def __init__(self):
- IterableUserDict.__init__(self)
- self._key_list = []
-
- ## [] operator
- #
- def __setitem__(self, Key, Value):
- if Key not in self._key_list:
- self._key_list.append(Key)
- IterableUserDict.__setitem__(self, Key, Value)
-
- ## del operator
- #
- def __delitem__(self, Key):
- self._key_list.remove(Key)
- IterableUserDict.__delitem__(self, Key)
-
- ## used in "for k in dict" loop to ensure the correct order
- #
- def __iter__(self):
- return self.iterkeys()
-
- ## len() support
- #
- def __len__(self):
- return len(self._key_list)
-
- ## "in" test support
- #
- def __contains__(self, Key):
- return Key in self._key_list
-
- ## indexof support
- #
- def index(self, Key):
- return self._key_list.index(Key)
-
- ## insert support
- #
- def insert(self, Key, Newkey, Newvalue, Order):
- Index = self._key_list.index(Key)
- if Order == 'BEFORE':
- self._key_list.insert(Index, Newkey)
- IterableUserDict.__setitem__(self, Newkey, Newvalue)
- elif Order == 'AFTER':
- self._key_list.insert(Index + 1, Newkey)
- IterableUserDict.__setitem__(self, Newkey, Newvalue)
-
- ## append support
- #
- def append(self, Sdict2):
- for Key in Sdict2:
- if Key not in self._key_list:
- self._key_list.append(Key)
- IterableUserDict.__setitem__(self, Key, Sdict2[Key])
- ## hash key
- #
- def has_key(self, Key):
- return Key in self._key_list
-
- ## Empty the dict
- #
- def clear(self):
- self._key_list = []
- IterableUserDict.clear(self)
-
- ## Return a copy of keys
- #
- def keys(self):
- Keys = []
- for Key in self._key_list:
- Keys.append(Key)
- return Keys
-
- ## Return a copy of values
- #
- def values(self):
- Values = []
- for Key in self._key_list:
- Values.append(self[Key])
- return Values
-
- ## Return a copy of (key, value) list
- #
- def items(self):
- Items = []
- for Key in self._key_list:
- Items.append((Key, self[Key]))
- return Items
-
- ## Iteration support
- #
- def iteritems(self):
- return iter(self.items())
-
- ## Keys interation support
- #
- def iterkeys(self):
- return iter(self.keys())
-
- ## Values interation support
- #
- def itervalues(self):
- return iter(self.values())
-
- ## Return value related to a key, and remove the (key, value) from the dict
- #
- def pop(self, Key, *Dv):
- Value = None
- if Key in self._key_list:
- Value = self[Key]
- self.__delitem__(Key)
- elif len(Dv) != 0 :
- Value = Dv[0]
- return Value
-
- ## Return (key, value) pair, and remove the (key, value) from the dict
- #
- def popitem(self):
- Key = self._key_list[-1]
- Value = self[Key]
- self.__delitem__(Key)
- return Key, Value
- ## update method
- #
- def update(self, Dict=None, **Kwargs):
- if Dict != None:
- for Key1, Val1 in Dict.items():
- self[Key1] = Val1
- if len(Kwargs):
- for Key1, Val1 in Kwargs.items():
- self[Key1] = Val1
-
-## CommonPath
-#
-# @param PathList: PathList
-#
-def CommonPath(PathList):
- Path1 = min(PathList).split(os.path.sep)
- Path2 = max(PathList).split(os.path.sep)
- for Index in xrange(min(len(Path1), len(Path2))):
- if Path1[Index] != Path2[Index]:
- return os.path.sep.join(Path1[:Index])
- return os.path.sep.join(Path1)
-
-## PathClass
-#
-class PathClass(object):
- def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
- Arch='COMMON', ToolChainFamily='', Target='', TagName='', \
- ToolCode=''):
- self.Arch = Arch
- self.File = str(File)
- if os.path.isabs(self.File):
- self.Root = ''
- self.AlterRoot = ''
- else:
- self.Root = str(Root)
- self.AlterRoot = str(AlterRoot)
-
- #
- # Remove any '.' and '..' in path
- #
- if self.Root:
- self.Path = os.path.normpath(os.path.join(self.Root, self.File))
- self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
- #
- # eliminate the side-effect of 'C:'
- #
- if self.Root[-1] == ':':
- self.Root += os.path.sep
- #
- # file path should not start with path separator
- #
- if self.Root[-1] == os.path.sep:
- self.File = self.Path[len(self.Root):]
- else:
- self.File = self.Path[len(self.Root) + 1:]
- else:
- self.Path = os.path.normpath(self.File)
-
- self.SubDir, self.Name = os.path.split(self.File)
- self.BaseName, self.Ext = os.path.splitext(self.Name)
-
- if self.Root:
- if self.SubDir:
- self.Dir = os.path.join(self.Root, self.SubDir)
- else:
- self.Dir = self.Root
- else:
- self.Dir = self.SubDir
-
- if IsBinary:
- self.Type = Type
- else:
- self.Type = self.Ext.lower()
-
- self.IsBinary = IsBinary
- self.Target = Target
- self.TagName = TagName
- self.ToolCode = ToolCode
- self.ToolChainFamily = ToolChainFamily
-
- self._Key = None
-
- ## Convert the object of this class to a string
- #
- # Convert member Path of the class to a string
- #
- def __str__(self):
- return self.Path
-
- ## Override __eq__ function
- #
- # Check whether PathClass are the same
- #
- def __eq__(self, Other):
- if type(Other) == type(self):
- return self.Path == Other.Path
- else:
- return self.Path == str(Other)
-
- ## Override __hash__ function
- #
- # Use Path as key in hash table
- #
- def __hash__(self):
- return hash(self.Path)
-
- ## _GetFileKey
- #
- def _GetFileKey(self):
- if self._Key == None:
- self._Key = self.Path.upper()
- return self._Key
- ## Validate
- #
- def Validate(self, Type='', CaseSensitive=True):
- if GlobalData.gCASE_INSENSITIVE:
- CaseSensitive = False
- if Type and Type.lower() != self.Type:
- return ToolError.FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % \
- (self.File, Type, self.Type)
-
- RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
- if not RealRoot and not RealFile:
- RealFile = self.File
- if self.AlterRoot:
- RealFile = os.path.join(self.AlterRoot, self.File)
- elif self.Root:
- RealFile = os.path.join(self.Root, self.File)
- return ToolError.FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
-
- ErrorCode = 0
- ErrorInfo = ''
- if RealRoot != self.Root or RealFile != self.File:
- if CaseSensitive and (RealFile != self.File or \
- (RealRoot != self.Root and RealRoot != \
- self.AlterRoot)):
- ErrorCode = ToolError.FILE_CASE_MISMATCH
- ErrorInfo = self.File + '\n\t' + RealFile + \
- " [in file system]"
-
- self.SubDir, self.Name = os.path.split(RealFile)
- self.BaseName, self.Ext = os.path.splitext(self.Name)
- if self.SubDir:
- self.Dir = os.path.join(RealRoot, self.SubDir)
- else:
- self.Dir = RealRoot
- self.File = RealFile
- self.Root = RealRoot
- self.Path = os.path.join(RealRoot, RealFile)
- return ErrorCode, ErrorInfo
-
- Key = property(_GetFileKey)
-
-## Get current workspace
-#
-# get WORKSPACE from environment variable if present,if not use current working directory as WORKSPACE
-#
-def GetWorkspace():
- #
- # check WORKSPACE
- #
- if "WORKSPACE" in environ:
- WorkspaceDir = os.path.normpath(environ["WORKSPACE"])
- if not os.path.exists(WorkspaceDir):
- Logger.Error("UPT",
- ToolError.UPT_ENVIRON_MISSING_ERROR,
- ST.ERR_WORKSPACE_NOTEXIST,
- ExtraData="%s" % WorkspaceDir)
- else:
- WorkspaceDir = os.getcwd()
-
- if WorkspaceDir[-1] == ':':
- WorkspaceDir += os.sep
-
- PackagesPath = os.environ.get("PACKAGES_PATH")
- mws.setWs(WorkspaceDir, PackagesPath)
-
- return WorkspaceDir, mws.PACKAGES_PATH
-
-## Get relative path
-#
-# use full path and workspace to get relative path
-# the destination of this function is mainly to resolve the root path issue(like c: or c:\)
-#
-# @param Fullpath: a string of fullpath
-# @param Workspace: a string of workspace
-#
-def GetRelativePath(Fullpath, Workspace):
-
- RelativePath = ''
- if Workspace.endswith(os.sep):
- RelativePath = Fullpath[Fullpath.upper().find(Workspace.upper())+len(Workspace):]
- else:
- RelativePath = Fullpath[Fullpath.upper().find(Workspace.upper())+len(Workspace)+1:]
-
- return RelativePath
-
-## Check whether all module types are in list
-#
-# check whether all module types (SUP_MODULE_LIST) are in list
-#
-# @param ModuleList: a list of ModuleType
-#
-def IsAllModuleList(ModuleList):
- NewModuleList = [Module.upper() for Module in ModuleList]
- for Module in SUP_MODULE_LIST:
- if Module not in NewModuleList:
- return False
- else:
- return True
-
-## Dictionary that use comment(GenericComment, TailComment) as value,
-# if a new comment which key already in the dic is inserted, then the
-# comment will be merged.
-# Key is (Statement, SupArch), when TailComment is added, it will ident
-# according to Statement
-#
-class MergeCommentDict(dict):
- ## []= operator
- #
- def __setitem__(self, Key, CommentVal):
- GenericComment, TailComment = CommentVal
- if Key in self:
- OrigVal1, OrigVal2 = dict.__getitem__(self, Key)
- Statement = Key[0]
- dict.__setitem__(self, Key, (OrigVal1 + GenericComment, OrigVal2 \
- + len(Statement) * ' ' + TailComment))
- else:
- dict.__setitem__(self, Key, (GenericComment, TailComment))
-
- ## =[] operator
- #
- def __getitem__(self, Key):
- return dict.__getitem__(self, Key)
-
-
-## GenDummyHelpTextObj
-#
-# @retval HelpTxt: Generated dummy help text object
-#
-def GenDummyHelpTextObj():
- HelpTxt = TextObject()
- HelpTxt.SetLang(TAB_LANGUAGE_EN_US)
- HelpTxt.SetString(' ')
- return HelpTxt
-
-## ConvertVersionToDecimal, the minor version should be within 0 - 99
-# <HexVersion> ::= "0x" <Major> <Minor>
-# <Major> ::= (a-fA-F0-9){4}
-# <Minor> ::= (a-fA-F0-9){4}
-# <DecVersion> ::= (0-65535) ["." (0-99)]
-#
-# @param StringIn: The string contains version defined in INF file.
-# It can be Decimal or Hex
-#
-def ConvertVersionToDecimal(StringIn):
- if IsValidHexVersion(StringIn):
- Value = int(StringIn, 16)
- Major = Value >> 16
- Minor = Value & 0xFFFF
- MinorStr = str(Minor)
- if len(MinorStr) == 1:
- MinorStr = '0' + MinorStr
- return str(Major) + '.' + MinorStr
- else:
- if StringIn.find(TAB_SPLIT) != -1:
- return StringIn
- elif StringIn:
- return StringIn + '.0'
- else:
- #
- # when StringIn is '', return it directly
- #
- return StringIn
-
-## GetHelpStringByRemoveHashKey
-#
-# Remove hash key at the header of string and return the remain.
-#
-# @param String: The string need to be processed.
-#
-def GetHelpStringByRemoveHashKey(String):
- ReturnString = ''
- PattenRemoveHashKey = re.compile(r"^[#+\s]+", re.DOTALL)
- String = String.strip()
- if String == '':
- return String
-
- LineList = GetSplitValueList(String, END_OF_LINE)
- for Line in LineList:
- ValueList = PattenRemoveHashKey.split(Line)
- if len(ValueList) == 1:
- ReturnString += ValueList[0] + END_OF_LINE
- else:
- ReturnString += ValueList[1] + END_OF_LINE
-
- if ReturnString.endswith('\n') and not ReturnString.endswith('\n\n') and ReturnString != '\n':
- ReturnString = ReturnString[:-1]
-
- return ReturnString
-
-## ConvPathFromAbsToRel
-#
-# Get relative file path from absolute path.
-#
-# @param Path: The string contain file absolute path.
-# @param Root: The string contain the parent path of Path in.
-#
-#
-def ConvPathFromAbsToRel(Path, Root):
- Path = os.path.normpath(Path)
- Root = os.path.normpath(Root)
- FullPath = os.path.normpath(os.path.join(Root, Path))
-
- #
- # If Path is absolute path.
- # It should be in Root.
- #
- if os.path.isabs(Path):
- return FullPath[FullPath.find(Root) + len(Root) + 1:]
-
- else:
- return Path
-
-## ConvertPath
-#
-# Convert special characters to '_', '\' to '/'
-# return converted path: Test!1.inf -> Test_1.inf
-#
-# @param Path: Path to be converted
-#
-def ConvertPath(Path):
- RetPath = ''
- for Char in Path.strip():
- if Char.isalnum() or Char in '.-_/':
- RetPath = RetPath + Char
- elif Char == '\\':
- RetPath = RetPath + '/'
- else:
- RetPath = RetPath + '_'
- return RetPath
-
-## ConvertSpec
-#
-# during install, convert the Spec string extract from UPD into INF allowable definition,
-# the difference is period is allowed in the former (not the first letter) but not in the latter.
-# return converted Spec string
-#
-# @param SpecStr: SpecStr to be converted
-#
-def ConvertSpec(SpecStr):
- RetStr = ''
- for Char in SpecStr:
- if Char.isalnum() or Char == '_':
- RetStr = RetStr + Char
- else:
- RetStr = RetStr + '_'
-
- return RetStr
-
-
-## IsEqualList
-#
-# Judge two lists are identical(contain same item).
-# The rule is elements in List A are in List B and elements in List B are in List A.
-#
-# @param ListA, ListB Lists need to be judged.
-#
-# @return True ListA and ListB are identical
-# @return False ListA and ListB are different with each other
-#
-def IsEqualList(ListA, ListB):
- if ListA == ListB:
- return True
-
- for ItemA in ListA:
- if not ItemA in ListB:
- return False
-
- for ItemB in ListB:
- if not ItemB in ListA:
- return False
-
- return True
-
-## ConvertArchList
-#
-# Convert item in ArchList if the start character is lower case.
-# In UDP spec, Arch is only allowed as: [A-Z]([a-zA-Z0-9])*
-#
-# @param ArchList The ArchList need to be converted.
-#
-# @return NewList The ArchList been converted.
-#
-def ConvertArchList(ArchList):
- NewArchList = []
- if not ArchList:
- return NewArchList
-
- if type(ArchList) == list:
- for Arch in ArchList:
- Arch = Arch.upper()
- NewArchList.append(Arch)
- elif type(ArchList) == str:
- ArchList = ArchList.upper()
- NewArchList.append(ArchList)
-
- return NewArchList
-
-## ProcessLineExtender
-#
-# Process the LineExtender of Line in LineList.
-# If one line ends with a line extender, then it will be combined together with next line.
-#
-# @param LineList The LineList need to be processed.
-#
-# @return NewList The ArchList been processed.
-#
-def ProcessLineExtender(LineList):
- NewList = []
- Count = 0
- while Count < len(LineList):
- if LineList[Count].strip().endswith("\\") and Count + 1 < len(LineList):
- NewList.append(LineList[Count].strip()[:-2] + LineList[Count + 1])
- Count = Count + 1
- else:
- NewList.append(LineList[Count])
-
- Count = Count + 1
-
- return NewList
-
-## ProcessEdkComment
-#
-# Process EDK style comment in LineList: c style /* */ comment or cpp style // comment
-#
-#
-# @param LineList The LineList need to be processed.
-#
-# @return LineList The LineList been processed.
-# @return FirstPos Where Edk comment is first found, -1 if not found
-#
-def ProcessEdkComment(LineList):
- FindEdkBlockComment = False
- Count = 0
- StartPos = -1
- EndPos = -1
- FirstPos = -1
-
- while(Count < len(LineList)):
- Line = LineList[Count].strip()
- if Line.startswith("/*"):
- #
- # handling c style comment
- #
- StartPos = Count
- while Count < len(LineList):
- Line = LineList[Count].strip()
- if Line.endswith("*/"):
- if (Count == StartPos) and Line.strip() == '/*/':
- Count = Count + 1
- continue
- EndPos = Count
- FindEdkBlockComment = True
- break
- Count = Count + 1
-
- if FindEdkBlockComment:
- if FirstPos == -1:
- FirstPos = StartPos
- for Index in xrange(StartPos, EndPos+1):
- LineList[Index] = ''
- FindEdkBlockComment = False
- elif Line.find("//") != -1 and not Line.startswith("#"):
- #
- # handling cpp style comment
- #
- LineList[Count] = Line.replace("//", '#')
- if FirstPos == -1:
- FirstPos = Count
-
- Count = Count + 1
-
- return LineList, FirstPos
-
-## GetLibInstanceInfo
-#
-# Get the information from Library Instance INF file.
-#
-# @param string. A string start with # and followed by INF file path
-# @param WorkSpace. The WorkSpace directory used to combined with INF file path.
-#
-# @return GUID, Version
-def GetLibInstanceInfo(String, WorkSpace, LineNo):
-
- FileGuidString = ""
- VerString = ""
-
- OrignalString = String
- String = String.strip()
- if not String:
- return None, None
- #
- # Remove "#" characters at the beginning
- #
- String = GetHelpStringByRemoveHashKey(String)
- String = String.strip()
-
- #
- # Validate file name exist.
- #
- FullFileName = os.path.normpath(os.path.realpath(os.path.join(WorkSpace, String)))
- if not (ValidFile(FullFileName)):
- Logger.Error("InfParser",
- ToolError.FORMAT_INVALID,
- ST.ERR_FILELIST_EXIST % (String),
- File=GlobalData.gINF_MODULE_NAME,
- Line=LineNo,
- ExtraData=OrignalString)
-
- #
- # Validate file exist/format.
- #
- if IsValidPath(String, WorkSpace):
- IsValidFileFlag = True
- else:
- Logger.Error("InfParser",
- ToolError.FORMAT_INVALID,
- ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID % (String),
- File=GlobalData.gINF_MODULE_NAME,
- Line=LineNo,
- ExtraData=OrignalString)
- return False
- if IsValidFileFlag:
- FileLinesList = []
-
- try:
- FInputfile = open(FullFileName, "rb", 0)
- try:
- FileLinesList = FInputfile.readlines()
- except BaseException:
- Logger.Error("InfParser",
- ToolError.FILE_READ_FAILURE,
- ST.ERR_FILE_OPEN_FAILURE,
- File=FullFileName)
- finally:
- FInputfile.close()
- except BaseException:
- Logger.Error("InfParser",
- ToolError.FILE_READ_FAILURE,
- ST.ERR_FILE_OPEN_FAILURE,
- File=FullFileName)
-
- ReFileGuidPattern = re.compile("^\s*FILE_GUID\s*=.*$")
- ReVerStringPattern = re.compile("^\s*VERSION_STRING\s*=.*$")
-
- FileLinesList = ProcessLineExtender(FileLinesList)
-
- for Line in FileLinesList:
- if ReFileGuidPattern.match(Line):
- FileGuidString = Line
- if ReVerStringPattern.match(Line):
- VerString = Line
-
- if FileGuidString:
- FileGuidString = GetSplitValueList(FileGuidString, '=', 1)[1]
- if VerString:
- VerString = GetSplitValueList(VerString, '=', 1)[1]
-
- return FileGuidString, VerString
-
-## GetLocalValue
-#
-# Generate the local value for INF and DEC file. If Lang attribute not present, then use this value.
-# If present, and there is no element without the Lang attribute, and one of the elements has the rfc1766 code is
-# "en-x-tianocore", or "en-US" if "en-x-tianocore" was not found, or "en" if "en-US" was not found, or startswith 'en'
-# if 'en' was not found, then use this value.
-# If multiple entries of a tag exist which have the same language code, use the last entry.
-#
-# @param ValueList A list need to be processed.
-# @param UseFirstValue: True to use the first value, False to use the last value
-#
-# @return LocalValue
-def GetLocalValue(ValueList, UseFirstValue=False):
- Value1 = ''
- Value2 = ''
- Value3 = ''
- Value4 = ''
- Value5 = ''
- for (Key, Value) in ValueList:
- if Key == TAB_LANGUAGE_EN_X:
- if UseFirstValue:
- if not Value1:
- Value1 = Value
- else:
- Value1 = Value
- if Key == TAB_LANGUAGE_EN_US:
- if UseFirstValue:
- if not Value2:
- Value2 = Value
- else:
- Value2 = Value
- if Key == TAB_LANGUAGE_EN:
- if UseFirstValue:
- if not Value3:
- Value3 = Value
- else:
- Value3 = Value
- if Key.startswith(TAB_LANGUAGE_EN):
- if UseFirstValue:
- if not Value4:
- Value4 = Value
- else:
- Value4 = Value
- if Key == '':
- if UseFirstValue:
- if not Value5:
- Value5 = Value
- else:
- Value5 = Value
-
- if Value1:
- return Value1
- if Value2:
- return Value2
- if Value3:
- return Value3
- if Value4:
- return Value4
- if Value5:
- return Value5
-
- return ''
-
-
-## GetCharIndexOutStr
-#
-# Get comment character index outside a string
-#
-# @param Line: The string to be checked
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-# @retval Index
-#
-def GetCharIndexOutStr(CommentCharacter, Line):
- #
- # remove whitespace
- #
- Line = Line.strip()
-
- #
- # Check whether comment character is in a string
- #
- InString = False
- for Index in range(0, len(Line)):
- if Line[Index] == '"':
- InString = not InString
- elif Line[Index] == CommentCharacter and InString :
- pass
- elif Line[Index] == CommentCharacter and (Index +1) < len(Line) and Line[Index+1] == CommentCharacter \
- and not InString :
- return Index
- return -1
-
-## ValidateUNIFilePath
-#
-# Check the UNI file path
-#
-# @param FilePath: The UNI file path
-#
-def ValidateUNIFilePath(Path):
- Suffix = Path[Path.rfind(TAB_SPLIT):]
-
- #
- # Check if the suffix is one of the '.uni', '.UNI', '.Uni'
- #
- if Suffix not in TAB_UNI_FILE_SUFFIXS:
- Logger.Error("Unicode File Parser",
- ToolError.FORMAT_INVALID,
- Message=ST.ERR_UNI_FILE_SUFFIX_WRONG,
- ExtraData=Path)
-
- #
- # Check if '..' in the file name(without suffixe)
- #
- if (TAB_SPLIT + TAB_SPLIT) in Path:
- Logger.Error("Unicode File Parser",
- ToolError.FORMAT_INVALID,
- Message=ST.ERR_UNI_FILE_NAME_INVALID,
- ExtraData=Path)
-
- #
- # Check if the file name is valid according to the DEC and INF specification
- #
- Pattern = '[a-zA-Z0-9_][a-zA-Z0-9_\-\.]*'
- FileName = Path.replace(Suffix, '')
- InvalidCh = re.sub(Pattern, '', FileName)
- if InvalidCh:
- Logger.Error("Unicode File Parser",
- ToolError.FORMAT_INVALID,
- Message=ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID,
- ExtraData=Path)
-
diff --git a/BaseTools/Source/Python/UPT/Library/ParserValidate.py b/BaseTools/Source/Python/UPT/Library/ParserValidate.py
deleted file mode 100644
index 028cf9a54f..0000000000
--- a/BaseTools/Source/Python/UPT/Library/ParserValidate.py
+++ /dev/null
@@ -1,733 +0,0 @@
-## @file ParserValidate.py
-# Functions for parser validation
-#
-# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-'''
-PaserValidate
-'''
-
-import os.path
-import re
-import platform
-
-from Library.DataType import MODULE_LIST
-from Library.DataType import COMPONENT_TYPE_LIST
-from Library.DataType import PCD_USAGE_TYPE_LIST_OF_MODULE
-from Library.DataType import TAB_SPACE_SPLIT
-from Library.String import GetSplitValueList
-from Library.ExpressionValidate import IsValidBareCString
-from Library.ExpressionValidate import IsValidFeatureFlagExp
-from Common.MultipleWorkspace import MultipleWorkspace as mws
-
-## __HexDigit() method
-#
-# Whether char input is a Hex data bit
-#
-# @param TempChar: The char to test
-#
-def __HexDigit(TempChar):
- if (TempChar >= 'a' and TempChar <= 'f') or \
- (TempChar >= 'A' and TempChar <= 'F') \
- or (TempChar >= '0' and TempChar <= '9'):
- return True
- else:
- return False
-
-## IsValidHex() method
-#
-# Whether char input is a Hex data.
-#
-# @param TempChar: The char to test
-#
-def IsValidHex(HexStr):
- if not HexStr.upper().startswith("0X"):
- return False
- CharList = [c for c in HexStr[2:] if not __HexDigit(c)]
- if len(CharList) == 0:
- return True
- else:
- return False
-
-## Judge the input string is valid bool type or not.
-#
-# <TRUE> ::= {"TRUE"} {"true"} {"True"} {"0x1"} {"0x01"}
-# <FALSE> ::= {"FALSE"} {"false"} {"False"} {"0x0"} {"0x00"}
-# <BoolType> ::= {<TRUE>} {<FALSE>}
-#
-# @param BoolString: A string contained the value need to be judged.
-#
-def IsValidBoolType(BoolString):
- #
- # Valid Ture
- #
- if BoolString == 'TRUE' or \
- BoolString == 'True' or \
- BoolString == 'true' or \
- BoolString == '0x1' or \
- BoolString == '0x01':
- return True
- #
- # Valid False
- #
- elif BoolString == 'FALSE' or \
- BoolString == 'False' or \
- BoolString == 'false' or \
- BoolString == '0x0' or \
- BoolString == '0x00':
- return True
- #
- # Invalid bool type
- #
- else:
- return False
-
-## Is Valid Module Type List or not
-#
-# @param ModuleTypeList: A list contain ModuleType strings need to be
-# judged.
-#
-def IsValidInfMoudleTypeList(ModuleTypeList):
- for ModuleType in ModuleTypeList:
- return IsValidInfMoudleType(ModuleType)
-
-## Is Valid Module Type or not
-#
-# @param ModuleType: A string contain ModuleType need to be judged.
-#
-def IsValidInfMoudleType(ModuleType):
- if ModuleType in MODULE_LIST:
- return True
- else:
- return False
-
-## Is Valid Component Type or not
-#
-# @param ComponentType: A string contain ComponentType need to be judged.
-#
-def IsValidInfComponentType(ComponentType):
- if ComponentType.upper() in COMPONENT_TYPE_LIST:
- return True
- else:
- return False
-
-
-## Is valid Tool Family or not
-#
-# @param ToolFamily: A string contain Tool Family need to be judged.
-# Famlily := [A-Z]([a-zA-Z0-9])*
-#
-def IsValidToolFamily(ToolFamily):
- ReIsValieFamily = re.compile(r"^[A-Z]+[A-Za-z0-9]{0,}$", re.DOTALL)
- if ReIsValieFamily.match(ToolFamily) == None:
- return False
- return True
-
-## Is valid Tool TagName or not
-#
-# The TagName sample is MYTOOLS and VS2005.
-#
-# @param TagName: A string contain Tool TagName need to be judged.
-#
-def IsValidToolTagName(TagName):
- if TagName.strip() == '':
- return True
- if TagName.strip() == '*':
- return True
- if not IsValidWord(TagName):
- return False
- return True
-
-## Is valid arch or not
-#
-# @param Arch The arch string need to be validated
-# <OA> ::= (a-zA-Z)(A-Za-z0-9){0,}
-# <arch> ::= {"IA32"} {"X64"} {"IPF"} {"EBC"} {<OA>}
-# {"common"}
-# @param Arch: Input arch
-#
-def IsValidArch(Arch):
- if Arch == 'common':
- return True
- ReIsValieArch = re.compile(r"^[a-zA-Z]+[a-zA-Z0-9]{0,}$", re.DOTALL)
- if ReIsValieArch.match(Arch) == None:
- return False
- return True
-
-## Is valid family or not
-#
-# <Family> ::= {"MSFT"} {"GCC"} {"INTEL"} {<Usr>} {"*"}
-# <Usr> ::= [A-Z][A-Za-z0-9]{0,}
-#
-# @param family: The family string need to be validated
-#
-def IsValidFamily(Family):
- Family = Family.strip()
- if Family == '*':
- return True
-
- if Family == '':
- return True
-
- ReIsValidFamily = re.compile(r"^[A-Z]+[A-Za-z0-9]{0,}$", re.DOTALL)
- if ReIsValidFamily.match(Family) == None:
- return False
- return True
-
-## Is valid build option name or not
-#
-# @param BuildOptionName: The BuildOptionName string need to be validated
-#
-def IsValidBuildOptionName(BuildOptionName):
- if not BuildOptionName:
- return False
-
- ToolOptionList = GetSplitValueList(BuildOptionName, '_', 4)
-
- if len(ToolOptionList) != 5:
- return False
-
- ReIsValidBuildOption1 = re.compile(r"^\s*(\*)|([A-Z][a-zA-Z0-9]*)$")
- ReIsValidBuildOption2 = re.compile(r"^\s*(\*)|([a-zA-Z][a-zA-Z0-9]*)$")
-
- if ReIsValidBuildOption1.match(ToolOptionList[0]) == None:
- return False
-
- if ReIsValidBuildOption1.match(ToolOptionList[1]) == None:
- return False
-
- if ReIsValidBuildOption2.match(ToolOptionList[2]) == None:
- return False
-
- if ToolOptionList[3] == "*" and ToolOptionList[4] not in ['FAMILY', 'DLL', 'DPATH']:
- return False
-
- return True
-
-## IsValidToken
-#
-# Check if pattern string matches total token
-#
-# @param ReString: regular string
-# @param Token: Token to be matched
-#
-def IsValidToken(ReString, Token):
- Match = re.compile(ReString).match(Token)
- return Match and Match.start() == 0 and Match.end() == len(Token)
-
-## IsValidPath
-#
-# Check if path exist
-#
-# @param Path: Absolute path or relative path to be checked
-# @param Root: Root path
-#
-def IsValidPath(Path, Root):
- Path = Path.strip()
- OrigPath = Path.replace('\\', '/')
-
- Path = os.path.normpath(Path).replace('\\', '/')
- Root = os.path.normpath(Root).replace('\\', '/')
- FullPath = mws.join(Root, Path)
-
- if not os.path.exists(FullPath):
- return False
-
- #
- # If Path is absolute path.
- # It should be in Root.
- #
- if os.path.isabs(Path):
- if not Path.startswith(Root):
- return False
- return True
-
- #
- # Check illegal character
- #
- for Rel in ['/', './', '../']:
- if OrigPath.startswith(Rel):
- return False
- for Rel in ['//', '/./', '/../']:
- if Rel in OrigPath:
- return False
- for Rel in ['/.', '/..', '/']:
- if OrigPath.endswith(Rel):
- return False
-
- Path = Path.rstrip('/')
-
- #
- # Check relative path
- #
- for Word in Path.split('/'):
- if not IsValidWord(Word):
- return False
-
- return True
-
-## IsValidInstallPath
-#
-# Check if an install path valid or not.
-#
-# Absolute path or path starts with '.' or path contains '..' are invalid.
-#
-# @param Path: path to be checked
-#
-def IsValidInstallPath(Path):
- if platform.platform().find("Windows") >= 0:
- if os.path.isabs(Path):
- return False
- else:
- if Path[1:2] == ':':
- return False
- if os.path.isabs(Path):
- return False
- if Path.startswith('.'):
- return False
-
- if Path.find('..') != -1:
- return False
-
- return True
-
-
-## IsValidCFormatGuid
-#
-# Check if GUID format has the from of {8,4,4,{2,2,2,2,2,2,2,2}}
-#
-# @param Guid: Guid to be checked
-#
-def IsValidCFormatGuid(Guid):
- #
- # Valid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
- # 0xaf, 0x48, 0xce }}
- # Invalid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
- # 0xaf, 0x48, 0xce }} 0x123
- # Invalid: { 0xf0b1 1735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
- # 0xaf, 0x48, 0xce }}
- #
- List = ['{', 10, ',', 6, ',', 6, ',{', 4, ',', 4, ',', 4,
- ',', 4, ',', 4, ',', 4, ',', 4, ',', 4, '}}']
- Index = 0
- Value = ''
- SepValue = ''
- for Char in Guid:
- if Char not in '{},\t ':
- Value += Char
- continue
- if Value:
- try:
- #
- # Index may out of bound
- #
- if not SepValue or SepValue != List[Index]:
- return False
- Index += 1
- SepValue = ''
-
- if not Value.startswith('0x') and not Value.startswith('0X'):
- return False
-
- #
- # Index may out of bound
- #
- if type(List[Index]) != type(1) or \
- len(Value) > List[Index] or len(Value) < 3:
- return False
-
- #
- # Check if string can be converted to integer
- # Throw exception if not
- #
- int(Value, 16)
- except BaseException:
- #
- # Exception caught means invalid format
- #
- return False
- Value = ''
- Index += 1
- if Char in '{},':
- SepValue += Char
-
- return SepValue == '}}' and Value == ''
-
-## IsValidPcdType
-#
-# Check whether the PCD type is valid
-#
-# @param PcdTypeString: The PcdType string need to be checked.
-#
-def IsValidPcdType(PcdTypeString):
- if PcdTypeString.upper() in PCD_USAGE_TYPE_LIST_OF_MODULE:
- return True
- else:
- return False
-
-## IsValidWord
-#
-# Check whether the word is valid.
-# <Word> ::= (a-zA-Z0-9_)(a-zA-Z0-9_-){0,} Alphanumeric characters with
-# optional
-# dash "-" and/or underscore "_" characters. No whitespace
-# characters are permitted.
-#
-# @param Word: The word string need to be checked.
-#
-def IsValidWord(Word):
- if not Word:
- return False
- #
- # The first char should be alpha, _ or Digit.
- #
- if not Word[0].isalnum() and \
- not Word[0] == '_' and \
- not Word[0].isdigit():
- return False
-
- LastChar = ''
- for Char in Word[1:]:
- if (not Char.isalpha()) and \
- (not Char.isdigit()) and \
- Char != '-' and \
- Char != '_' and \
- Char != '.':
- return False
- if Char == '.' and LastChar == '.':
- return False
- LastChar = Char
-
- return True
-
-
-## IsValidSimpleWord
-#
-# Check whether the SimpleWord is valid.
-# <SimpleWord> ::= (a-zA-Z0-9)(a-zA-Z0-9_-){0,}
-# A word that cannot contain a period character.
-#
-# @param Word: The word string need to be checked.
-#
-def IsValidSimpleWord(Word):
- ReIsValidSimpleWord = \
- re.compile(r"^[0-9A-Za-z][0-9A-Za-z\-_]*$", re.DOTALL)
- Word = Word.strip()
- if not Word:
- return False
-
- if not ReIsValidSimpleWord.match(Word):
- return False
-
- return True
-
-## IsValidDecVersion
-#
-# Check whether the decimal version is valid.
-# <DecVersion> ::= (0-9){1,} ["." (0-9){1,}]
-#
-# @param Word: The word string need to be checked.
-#
-def IsValidDecVersion(Word):
- if Word.find('.') > -1:
- ReIsValidDecVersion = re.compile(r"[0-9]+\.?[0-9]+$")
- else:
- ReIsValidDecVersion = re.compile(r"[0-9]+$")
- if ReIsValidDecVersion.match(Word) == None:
- return False
- return True
-
-## IsValidHexVersion
-#
-# Check whether the hex version is valid.
-# <HexVersion> ::= "0x" <Major> <Minor>
-# <Major> ::= <HexDigit>{4}
-# <Minor> ::= <HexDigit>{4}
-#
-# @param Word: The word string need to be checked.
-#
-def IsValidHexVersion(Word):
- ReIsValidHexVersion = re.compile(r"[0][xX][0-9A-Fa-f]{8}$", re.DOTALL)
- if ReIsValidHexVersion.match(Word) == None:
- return False
-
- return True
-
-## IsValidBuildNumber
-#
-# Check whether the BUILD_NUMBER is valid.
-# ["BUILD_NUMBER" "=" <Integer>{1,4} <EOL>]
-#
-# @param Word: The BUILD_NUMBER string need to be checked.
-#
-def IsValidBuildNumber(Word):
- ReIsValieBuildNumber = re.compile(r"[0-9]{1,4}$", re.DOTALL)
- if ReIsValieBuildNumber.match(Word) == None:
- return False
-
- return True
-
-## IsValidDepex
-#
-# Check whether the Depex is valid.
-#
-# @param Word: The Depex string need to be checked.
-#
-def IsValidDepex(Word):
- Index = Word.upper().find("PUSH")
- if Index > -1:
- return IsValidCFormatGuid(Word[Index+4:].strip())
-
- ReIsValidCName = re.compile(r"^[A-Za-z_][0-9A-Za-z_\s\.]*$", re.DOTALL)
- if ReIsValidCName.match(Word) == None:
- return False
-
- return True
-
-## IsValidNormalizedString
-#
-# Check
-# <NormalizedString> ::= <DblQuote> [{<Word>} {<Space>}]{1,} <DblQuote>
-# <Space> ::= 0x20
-#
-# @param String: string to be checked
-#
-def IsValidNormalizedString(String):
- if String == '':
- return True
-
- for Char in String:
- if Char == '\t':
- return False
-
- StringList = GetSplitValueList(String, TAB_SPACE_SPLIT)
-
- for Item in StringList:
- if not Item:
- continue
- if not IsValidWord(Item):
- return False
-
- return True
-
-## IsValidIdString
-#
-# Check whether the IdString is valid.
-#
-# @param IdString: The IdString need to be checked.
-#
-def IsValidIdString(String):
- if IsValidSimpleWord(String.strip()):
- return True
-
- if String.strip().startswith('"') and \
- String.strip().endswith('"'):
- String = String[1:-1]
- if String.strip() == "":
- return True
- if IsValidNormalizedString(String):
- return True
-
- return False
-
-## IsValidVersionString
-#
-# Check whether the VersionString is valid.
-# <AsciiString> ::= [ [<WhiteSpace>]{0,} [<AsciiChars>]{0,} ] {0,}
-# <WhiteSpace> ::= {<Tab>} {<Space>}
-# <Tab> ::= 0x09
-# <Space> ::= 0x20
-# <AsciiChars> ::= (0x21 - 0x7E)
-#
-# @param VersionString: The VersionString need to be checked.
-#
-def IsValidVersionString(VersionString):
- VersionString = VersionString.strip()
- for Char in VersionString:
- if not (Char >= 0x21 and Char <= 0x7E):
- return False
-
- return True
-
-## IsValidPcdValue
-#
-# Check whether the PcdValue is valid.
-#
-# @param VersionString: The PcdValue need to be checked.
-#
-def IsValidPcdValue(PcdValue):
- for Char in PcdValue:
- if Char == '\n' or Char == '\t' or Char == '\f':
- return False
-
- #
- # <Boolean>
- #
- if IsValidFeatureFlagExp(PcdValue, True)[0]:
- return True
-
- #
- # <Number> ::= {<Integer>} {<HexNumber>}
- # <Integer> ::= {(0-9)} {(1-9)(0-9){1,}}
- # <HexNumber> ::= "0x" <HexDigit>{1,}
- # <HexDigit> ::= (a-fA-F0-9)
- #
- if IsValidHex(PcdValue):
- return True
-
- ReIsValidIntegerSingle = re.compile(r"^\s*[0-9]\s*$", re.DOTALL)
- if ReIsValidIntegerSingle.match(PcdValue) != None:
- return True
-
- ReIsValidIntegerMulti = re.compile(r"^\s*[1-9][0-9]+\s*$", re.DOTALL)
- if ReIsValidIntegerMulti.match(PcdValue) != None:
- return True
-
- #
- # <StringVal> ::= {<StringType>} {<Array>} {"$(" <MACRO> ")"}
- # <StringType> ::= {<UnicodeString>} {<CString>}
- #
- ReIsValidStringType = re.compile(r"^\s*[\"L].*[\"]\s*$")
- if ReIsValidStringType.match(PcdValue):
- IsTrue = False
- if PcdValue.strip().startswith('L\"'):
- StringValue = PcdValue.strip().lstrip('L\"').rstrip('\"')
- if IsValidBareCString(StringValue):
- IsTrue = True
- elif PcdValue.strip().startswith('\"'):
- StringValue = PcdValue.strip().lstrip('\"').rstrip('\"')
- if IsValidBareCString(StringValue):
- IsTrue = True
- if IsTrue:
- return IsTrue
-
- #
- # <Array> ::= {<CArray>} {<NList>} {<CFormatGUID>}
- # <CArray> ::= "{" [<NList>] <CArray>{0,} "}"
- # <NList> ::= <HexByte> ["," <HexByte>]{0,}
- # <HexDigit> ::= (a-fA-F0-9)
- # <HexByte> ::= "0x" <HexDigit>{1,2}
- #
- if IsValidCFormatGuid(PcdValue):
- return True
-
- ReIsValidByteHex = re.compile(r"^\s*0x[0-9a-fA-F]{1,2}\s*$", re.DOTALL)
- if PcdValue.strip().startswith('{') and PcdValue.strip().endswith('}') :
- StringValue = PcdValue.strip().lstrip('{').rstrip('}')
- ValueList = StringValue.split(',')
- AllValidFlag = True
- for ValueItem in ValueList:
- if not ReIsValidByteHex.match(ValueItem.strip()):
- AllValidFlag = False
-
- if AllValidFlag:
- return True
-
- #
- # NList
- #
- AllValidFlag = True
- ValueList = PcdValue.split(',')
- for ValueItem in ValueList:
- if not ReIsValidByteHex.match(ValueItem.strip()):
- AllValidFlag = False
-
- if AllValidFlag:
- return True
-
- return False
-
-## IsValidCVariableName
-#
-# Check whether the PcdValue is valid.
-#
-# @param VersionString: The PcdValue need to be checked.
-#
-def IsValidCVariableName(CName):
- ReIsValidCName = re.compile(r"^[A-Za-z_][0-9A-Za-z_]*$", re.DOTALL)
- if ReIsValidCName.match(CName) == None:
- return False
-
- return True
-
-## IsValidIdentifier
-#
-# <Identifier> ::= <NonDigit> <Chars>{0,}
-# <Chars> ::= (a-zA-Z0-9_)
-# <NonDigit> ::= (a-zA-Z_)
-#
-# @param Ident: identifier to be checked
-#
-def IsValidIdentifier(Ident):
- ReIdent = re.compile(r"^[A-Za-z_][0-9A-Za-z_]*$", re.DOTALL)
- if ReIdent.match(Ident) == None:
- return False
-
- return True
-
-## IsValidDecVersionVal
-#
-# {(0-9){1,} "." (0-99)}
-#
-# @param Ver: version to be checked
-#
-def IsValidDecVersionVal(Ver):
- ReVersion = re.compile(r"[0-9]+(\.[0-9]{1,2})$")
-
- if ReVersion.match(Ver) == None:
- return False
-
- return True
-
-
-## IsValidLibName
-#
-# (A-Z)(a-zA-Z0-9){0,} and could not be "NULL"
-#
-def IsValidLibName(LibName):
- if LibName == 'NULL':
- return False
- ReLibName = re.compile("^[A-Z]+[a-zA-Z0-9]*$")
- if not ReLibName.match(LibName):
- return False
-
- return True
-
-# IsValidUserId
-#
-# <UserId> ::= (a-zA-Z)(a-zA-Z0-9_.){0,}
-# Words that contain period "." must be encapsulated in double quotation marks.
-#
-def IsValidUserId(UserId):
- UserId = UserId.strip()
- Quoted = False
- if UserId.startswith('"') and UserId.endswith('"'):
- Quoted = True
- UserId = UserId[1:-1]
- if not UserId or not UserId[0].isalpha():
- return False
- for Char in UserId[1:]:
- if not Char.isalnum() and not Char in '_.':
- return False
- if Char == '.' and not Quoted:
- return False
- return True
-
-#
-# Check if a UTF16-LE file has a BOM header
-#
-def CheckUTF16FileHeader(File):
- FileIn = open(File, 'rb').read(2)
- if FileIn != '\xff\xfe':
- return False
-
- return True
diff --git a/BaseTools/Source/Python/UPT/Library/Parsing.py b/BaseTools/Source/Python/UPT/Library/Parsing.py
deleted file mode 100644
index c34e775144..0000000000
--- a/BaseTools/Source/Python/UPT/Library/Parsing.py
+++ /dev/null
@@ -1,1020 +0,0 @@
-## @file
-# This file is used to define common parsing related functions used in parsing
-# INF/DEC/DSC process
-#
-# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-'''
-Parsing
-'''
-
-##
-# Import Modules
-#
-import os.path
-import re
-
-from Library.String import RaiseParserError
-from Library.String import GetSplitValueList
-from Library.String import CheckFileType
-from Library.String import CheckFileExist
-from Library.String import CleanString
-from Library.String import NormPath
-
-from Logger.ToolError import FILE_NOT_FOUND
-from Logger.ToolError import FatalError
-from Logger.ToolError import FORMAT_INVALID
-
-from Library import DataType
-
-from Library.Misc import GuidStructureStringToGuidString
-from Library.Misc import CheckGuidRegFormat
-from Logger import StringTable as ST
-import Logger.Log as Logger
-
-from Parser.DecParser import Dec
-import GlobalData
-
-gPKG_INFO_DICT = {}
-
-## GetBuildOption
-#
-# Parse a string with format "[<Family>:]<ToolFlag>=Flag"
-# Return (Family, ToolFlag, Flag)
-#
-# @param String: String with BuildOption statement
-# @param File: The file which defines build option, used in error report
-#
-def GetBuildOption(String, File, LineNo= -1):
- (Family, ToolChain, Flag) = ('', '', '')
- if String.find(DataType.TAB_EQUAL_SPLIT) < 0:
- RaiseParserError(String, 'BuildOptions', File, \
- '[<Family>:]<ToolFlag>=Flag', LineNo)
- else:
- List = GetSplitValueList(String, DataType.TAB_EQUAL_SPLIT, MaxSplit=1)
- if List[0].find(':') > -1:
- Family = List[0][ : List[0].find(':')].strip()
- ToolChain = List[0][List[0].find(':') + 1 : ].strip()
- else:
- ToolChain = List[0].strip()
- Flag = List[1].strip()
- return (Family, ToolChain, Flag)
-
-## Get Library Class
-#
-# Get Library of Dsc as <LibraryClassKeyWord>|<LibraryInstance>
-#
-# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
-# @param ContainerFile: The file which describes the library class, used for
-# error report
-#
-def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo= -1):
- List = GetSplitValueList(Item[0])
- SupMod = DataType.SUP_MODULE_LIST_STRING
- if len(List) != 2:
- RaiseParserError(Item[0], 'LibraryClasses', ContainerFile, \
- '<LibraryClassKeyWord>|<LibraryInstance>')
- else:
- CheckFileType(List[1], '.Inf', ContainerFile, \
- 'library class instance', Item[0], LineNo)
- CheckFileExist(WorkspaceDir, List[1], ContainerFile, \
- 'LibraryClasses', Item[0], LineNo)
- if Item[1] != '':
- SupMod = Item[1]
-
- return (List[0], List[1], SupMod)
-
-## Get Library Class
-#
-# Get Library of Dsc as <LibraryClassKeyWord>[|<LibraryInstance>]
-# [|<TokenSpaceGuidCName>.<PcdCName>]
-#
-# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
-# @param ContainerFile: The file which describes the library class, used for
-# error report
-#
-def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo= -1):
- ItemList = GetSplitValueList((Item[0] + DataType.TAB_VALUE_SPLIT * 2))
- SupMod = DataType.SUP_MODULE_LIST_STRING
-
- if len(ItemList) > 5:
- RaiseParserError\
- (Item[0], 'LibraryClasses', ContainerFile, \
- '<LibraryClassKeyWord>[|<LibraryInstance>]\
- [|<TokenSpaceGuidCName>.<PcdCName>]')
- else:
- CheckFileType(ItemList[1], '.Inf', ContainerFile, 'LibraryClasses', \
- Item[0], LineNo)
- CheckFileExist(WorkspaceDir, ItemList[1], ContainerFile, \
- 'LibraryClasses', Item[0], LineNo)
- if ItemList[2] != '':
- CheckPcdTokenInfo(ItemList[2], 'LibraryClasses', \
- ContainerFile, LineNo)
- if Item[1] != '':
- SupMod = Item[1]
-
- return (ItemList[0], ItemList[1], ItemList[2], SupMod)
-
-## CheckPcdTokenInfo
-#
-# Check if PcdTokenInfo is following <TokenSpaceGuidCName>.<PcdCName>
-#
-# @param TokenInfoString: String to be checked
-# @param Section: Used for error report
-# @param File: Used for error report
-#
-def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo= -1):
- Format = '<TokenSpaceGuidCName>.<PcdCName>'
- if TokenInfoString != '' and TokenInfoString != None:
- TokenInfoList = GetSplitValueList(TokenInfoString, DataType.TAB_SPLIT)
- if len(TokenInfoList) == 2:
- return True
-
- RaiseParserError(TokenInfoString, Section, File, Format, LineNo)
-
-## Get Pcd
-#
-# Get Pcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>
-# [|<Type>|<MaximumDatumSize>]
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
-# <Value>[|<Type>|<MaximumDatumSize>]
-# @param ContainerFile: The file which describes the pcd, used for error
-# report
-
-#
-def GetPcd(Item, Type, ContainerFile, LineNo= -1):
- TokenGuid, TokenName, Value, MaximumDatumSize, Token = '', '', '', '', ''
- List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
-
- if len(List) < 4 or len(List) > 6:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
- '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
- [|<Type>|<MaximumDatumSize>]', LineNo)
- else:
- Value = List[1]
- MaximumDatumSize = List[2]
- Token = List[3]
-
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
-
- return (TokenName, TokenGuid, Value, MaximumDatumSize, Token, Type)
-
-## Get FeatureFlagPcd
-#
-# Get FeatureFlagPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>
-# .<TokenCName>|TRUE/FALSE
-# @param ContainerFile: The file which describes the pcd, used for error
-# report
-#
-def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo= -1):
- TokenGuid, TokenName, Value = '', '', ''
- List = GetSplitValueList(Item)
- if len(List) != 2:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
- '<PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE', \
- LineNo)
- else:
- Value = List[1]
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
-
- return (TokenName, TokenGuid, Value, Type)
-
-## Get DynamicDefaultPcd
-#
-# Get DynamicDefaultPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>
-# |<Value>[|<DatumTyp>[|<MaxDatumSize>]]
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
-# TRUE/FALSE
-# @param ContainerFile: The file which describes the pcd, used for error
-# report
-#
-def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo= -1):
- TokenGuid, TokenName, Value, DatumTyp, MaxDatumSize = '', '', '', '', ''
- List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
- if len(List) < 4 or len(List) > 8:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
- '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
- [|<DatumTyp>[|<MaxDatumSize>]]', LineNo)
- else:
- Value = List[1]
- DatumTyp = List[2]
- MaxDatumSize = List[3]
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
-
- return (TokenName, TokenGuid, Value, DatumTyp, MaxDatumSize, Type)
-
-## Get DynamicHiiPcd
-#
-# Get DynamicHiiPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<String>|
-# <VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
-# TRUE/FALSE
-# @param ContainerFile: The file which describes the pcd, used for error
-# report
-#
-def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo= -1):
- TokenGuid, TokenName, List1, List2, List3, List4, List5 = \
- '', '', '', '', '', '', ''
- List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
- if len(List) < 6 or len(List) > 8:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
- '<PcdTokenSpaceGuidCName>.<TokenCName>|<String>|\
- <VariableGuidCName>|<VariableOffset>[|<DefaultValue>\
- [|<MaximumDatumSize>]]', LineNo)
- else:
- List1, List2, List3, List4, List5 = \
- List[1], List[2], List[3], List[4], List[5]
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
-
- return (TokenName, TokenGuid, List1, List2, List3, List4, List5, Type)
-
-## Get DynamicVpdPcd
-#
-# Get DynamicVpdPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|
-# <VpdOffset>[|<MaximumDatumSize>]
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>
-# |TRUE/FALSE
-# @param ContainerFile: The file which describes the pcd, used for error
-# report
-#
-def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo= -1):
- TokenGuid, TokenName, List1, List2 = '', '', '', ''
- List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
- if len(List) < 3 or len(List) > 4:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
- '<PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>\
- [|<MaximumDatumSize>]', LineNo)
- else:
- List1, List2 = List[1], List[2]
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
-
- return (TokenName, TokenGuid, List1, List2, Type)
-
-## GetComponent
-#
-# Parse block of the components defined in dsc file
-# Set KeyValues as [ ['component name', [lib1, lib2, lib3],
-# [bo1, bo2, bo3], [pcd1, pcd2, pcd3]], ...]
-#
-# @param Lines: The content to be parsed
-# @param KeyValues: To store data after parsing
-#
-def GetComponent(Lines, KeyValues):
- (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
- FindPcdsDynamicEx) = (False, False, False, False, False, False, False, \
- False)
- ListItem = None
- LibraryClassItem = []
- BuildOption = []
- Pcd = []
-
- for Line in Lines:
- Line = Line[0]
- #
- # Ignore !include statement
- #
- if Line.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1 or \
- Line.upper().find(DataType.TAB_DEFINE + ' ') > -1:
- continue
-
- if FindBlock == False:
- ListItem = Line
- #
- # find '{' at line tail
- #
- if Line.endswith('{'):
- FindBlock = True
- ListItem = CleanString(Line.rsplit('{', 1)[0], \
- DataType.TAB_COMMENT_SPLIT)
-
- #
- # Parse a block content
- #
- if FindBlock:
- if Line.find('<LibraryClasses>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (True, False, False, False, False, False, False)
- continue
- if Line.find('<BuildOptions>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, True, False, False, False, False, False)
- continue
- if Line.find('<PcdsFeatureFlag>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, True, False, False, False, False)
- continue
- if Line.find('<PcdsPatchableInModule>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, True, False, False, False)
- continue
- if Line.find('<PcdsFixedAtBuild>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, False, True, False, False)
- continue
- if Line.find('<PcdsDynamic>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, False, False, True, False)
- continue
- if Line.find('<PcdsDynamicEx>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, False, False, False, True)
- continue
- if Line.endswith('}'):
- #
- # find '}' at line tail
- #
- KeyValues.append([ListItem, LibraryClassItem, \
- BuildOption, Pcd])
- (FindBlock, FindLibraryClass, FindBuildOption, \
- FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
- FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, False, False, False, False, False)
- LibraryClassItem, BuildOption, Pcd = [], [], []
- continue
-
- if FindBlock:
- if FindLibraryClass:
- LibraryClassItem.append(Line)
- elif FindBuildOption:
- BuildOption.append(Line)
- elif FindPcdsFeatureFlag:
- Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG_NULL, Line))
- elif FindPcdsPatchableInModule:
- Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE_NULL, Line))
- elif FindPcdsFixedAtBuild:
- Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD_NULL, Line))
- elif FindPcdsDynamic:
- Pcd.append((DataType.TAB_PCDS_DYNAMIC_DEFAULT_NULL, Line))
- elif FindPcdsDynamicEx:
- Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, Line))
- else:
- KeyValues.append([ListItem, [], [], []])
-
- return True
-
-## GetExec
-#
-# Parse a string with format "InfFilename [EXEC = ExecFilename]"
-# Return (InfFilename, ExecFilename)
-#
-# @param String: String with EXEC statement
-#
-def GetExec(String):
- InfFilename = ''
- ExecFilename = ''
- if String.find('EXEC') > -1:
- InfFilename = String[ : String.find('EXEC')].strip()
- ExecFilename = String[String.find('EXEC') + len('EXEC') : ].strip()
- else:
- InfFilename = String.strip()
-
- return (InfFilename, ExecFilename)
-
-## GetComponents
-#
-# Parse block of the components defined in dsc file
-# Set KeyValues as [ ['component name', [lib1, lib2, lib3], [bo1, bo2, bo3],
-# [pcd1, pcd2, pcd3]], ...]
-#
-# @param Lines: The content to be parsed
-# @param Key: Reserved
-# @param KeyValues: To store data after parsing
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-# @retval True Get component successfully
-#
-def GetComponents(Lines, KeyValues, CommentCharacter):
- if Lines.find(DataType.TAB_SECTION_END) > -1:
- Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
- (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
- FindPcdsDynamicEx) = \
- (False, False, False, False, False, False, False, False)
- ListItem = None
- LibraryClassItem = []
- BuildOption = []
- Pcd = []
-
- LineList = Lines.split('\n')
- for Line in LineList:
- Line = CleanString(Line, CommentCharacter)
- if Line == None or Line == '':
- continue
-
- if FindBlock == False:
- ListItem = Line
- #
- # find '{' at line tail
- #
- if Line.endswith('{'):
- FindBlock = True
- ListItem = CleanString(Line.rsplit('{', 1)[0], CommentCharacter)
-
- #
- # Parse a block content
- #
- if FindBlock:
- if Line.find('<LibraryClasses>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (True, False, False, False, False, False, False)
- continue
- if Line.find('<BuildOptions>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, True, False, False, False, False, False)
- continue
- if Line.find('<PcdsFeatureFlag>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, True, False, False, False, False)
- continue
- if Line.find('<PcdsPatchableInModule>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, True, False, False, False)
- continue
- if Line.find('<PcdsFixedAtBuild>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, False, True, False, False)
- continue
- if Line.find('<PcdsDynamic>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, False, False, True, False)
- continue
- if Line.find('<PcdsDynamicEx>') != -1:
- (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
- FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
- FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, False, False, False, True)
- continue
- if Line.endswith('}'):
- #
- # find '}' at line tail
- #
- KeyValues.append([ListItem, LibraryClassItem, BuildOption, \
- Pcd])
- (FindBlock, FindLibraryClass, FindBuildOption, \
- FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
- FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
- (False, False, False, False, False, False, False, False)
- LibraryClassItem, BuildOption, Pcd = [], [], []
- continue
-
- if FindBlock:
- if FindLibraryClass:
- LibraryClassItem.append(Line)
- elif FindBuildOption:
- BuildOption.append(Line)
- elif FindPcdsFeatureFlag:
- Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG, Line))
- elif FindPcdsPatchableInModule:
- Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE, Line))
- elif FindPcdsFixedAtBuild:
- Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD, Line))
- elif FindPcdsDynamic:
- Pcd.append((DataType.TAB_PCDS_DYNAMIC, Line))
- elif FindPcdsDynamicEx:
- Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX, Line))
- else:
- KeyValues.append([ListItem, [], [], []])
-
- return True
-
-## Get Source
-#
-# Get Source of Inf as <Filename>[|<Family>[|<TagName>[|<ToolCode>
-# [|<PcdFeatureFlag>]]]]
-#
-# @param Item: String as <Filename>[|<Family>[|<TagName>[|<ToolCode>
-# [|<PcdFeatureFlag>]]]]
-# @param ContainerFile: The file which describes the library class, used
-# for error report
-#
-def GetSource(Item, ContainerFile, FileRelativePath, LineNo= -1):
- ItemNew = Item + DataType.TAB_VALUE_SPLIT * 4
- List = GetSplitValueList(ItemNew)
- if len(List) < 5 or len(List) > 9:
- RaiseParserError(Item, 'Sources', ContainerFile, \
- '<Filename>[|<Family>[|<TagName>[|<ToolCode>\
- [|<PcdFeatureFlag>]]]]', LineNo)
- List[0] = NormPath(List[0])
- CheckFileExist(FileRelativePath, List[0], ContainerFile, 'Sources', \
- Item, LineNo)
- if List[4] != '':
- CheckPcdTokenInfo(List[4], 'Sources', ContainerFile, LineNo)
-
- return (List[0], List[1], List[2], List[3], List[4])
-
-## Get Binary
-#
-# Get Binary of Inf as <Filename>[|<Family>[|<TagName>[|<ToolCode>
-# [|<PcdFeatureFlag>]]]]
-#
-# @param Item: String as <Filename>[|<Family>[|<TagName>
-# [|<ToolCode>[|<PcdFeatureFlag>]]]]
-# @param ContainerFile: The file which describes the library class,
-# used for error report
-#
-def GetBinary(Item, ContainerFile, LineNo= -1):
- ItemNew = Item + DataType.TAB_VALUE_SPLIT
- List = GetSplitValueList(ItemNew)
- if len(List) < 3 or len(List) > 5:
- RaiseParserError(Item, 'Binaries', ContainerFile, \
- "<FileType>|<Filename>[|<Target>\
- [|<TokenSpaceGuidCName>.<PcdCName>]]", LineNo)
-
- if len(List) >= 4:
- if List[3] != '':
- CheckPcdTokenInfo(List[3], 'Binaries', ContainerFile, LineNo)
- return (List[0], List[1], List[2], List[3])
- elif len(List) == 3:
- return (List[0], List[1], List[2], '')
-
-## Get Guids/Protocols/Ppis
-#
-# Get Guids/Protocols/Ppis of Inf as <GuidCName>[|<PcdFeatureFlag>]
-#
-# @param Item: String as <GuidCName>[|<PcdFeatureFlag>]
-# @param Type: Type of parsing string
-# @param ContainerFile: The file which describes the library class,
-# used for error report
-#
-def GetGuidsProtocolsPpisOfInf(Item):
- ItemNew = Item + DataType.TAB_VALUE_SPLIT
- List = GetSplitValueList(ItemNew)
- return (List[0], List[1])
-
-## Get Guids/Protocols/Ppis
-#
-# Get Guids/Protocols/Ppis of Dec as <GuidCName>=<GuidValue>
-#
-# @param Item: String as <GuidCName>=<GuidValue>
-# @param Type: Type of parsing string
-# @param ContainerFile: The file which describes the library class,
-# used for error report
-#
-def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo= -1):
- List = GetSplitValueList(Item, DataType.TAB_EQUAL_SPLIT)
- if len(List) != 2:
- RaiseParserError(Item, Type, ContainerFile, '<CName>=<GuidValue>', \
- LineNo)
- #
- #convert C-Format Guid to Register Format
- #
- if List[1][0] == '{' and List[1][-1] == '}':
- RegisterFormatGuid = GuidStructureStringToGuidString(List[1])
- if RegisterFormatGuid == '':
- RaiseParserError(Item, Type, ContainerFile, \
- 'CFormat or RegisterFormat', LineNo)
- else:
- if CheckGuidRegFormat(List[1]):
- RegisterFormatGuid = List[1]
- else:
- RaiseParserError(Item, Type, ContainerFile, \
- 'CFormat or RegisterFormat', LineNo)
-
- return (List[0], RegisterFormatGuid)
-
-## GetPackage
-#
-# Get Package of Inf as <PackagePath>[|<PcdFeatureFlag>]
-#
-# @param Item: String as <PackagePath>[|<PcdFeatureFlag>]
-# @param Type: Type of parsing string
-# @param ContainerFile: The file which describes the library class,
-# used for error report
-#
-def GetPackage(Item, ContainerFile, FileRelativePath, LineNo= -1):
- ItemNew = Item + DataType.TAB_VALUE_SPLIT
- List = GetSplitValueList(ItemNew)
- CheckFileType(List[0], '.Dec', ContainerFile, 'package', List[0], LineNo)
- CheckFileExist(FileRelativePath, List[0], ContainerFile, 'Packages', \
- List[0], LineNo)
- if List[1] != '':
- CheckPcdTokenInfo(List[1], 'Packages', ContainerFile, LineNo)
-
- return (List[0], List[1])
-
-## Get Pcd Values of Inf
-#
-# Get Pcd of Inf as <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
-#
-# @param Item: The string describes pcd
-# @param Type: The type of Pcd
-# @param File: The file which describes the pcd, used for error report
-#
-def GetPcdOfInf(Item, Type, File, LineNo):
- Format = '<TokenSpaceGuidCName>.<PcdCName>[|<Value>]'
- TokenGuid, TokenName, Value, InfType = '', '', '', ''
-
- if Type == DataType.TAB_PCDS_FIXED_AT_BUILD:
- InfType = DataType.TAB_INF_FIXED_PCD
- elif Type == DataType.TAB_PCDS_PATCHABLE_IN_MODULE:
- InfType = DataType.TAB_INF_PATCH_PCD
- elif Type == DataType.TAB_PCDS_FEATURE_FLAG:
- InfType = DataType.TAB_INF_FEATURE_PCD
- elif Type == DataType.TAB_PCDS_DYNAMIC_EX:
- InfType = DataType.TAB_INF_PCD_EX
- elif Type == DataType.TAB_PCDS_DYNAMIC:
- InfType = DataType.TAB_INF_PCD
- List = GetSplitValueList(Item, DataType.TAB_VALUE_SPLIT, 1)
- TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
- if len(TokenInfo) != 2:
- RaiseParserError(Item, InfType, File, Format, LineNo)
- else:
- TokenGuid = TokenInfo[0]
- TokenName = TokenInfo[1]
-
- if len(List) > 1:
- Value = List[1]
- else:
- Value = None
- return (TokenGuid, TokenName, Value, InfType)
-
-
-## Get Pcd Values of Dec
-#
-# Get Pcd of Dec as <TokenSpcCName>.<TokenCName>|<Value>|<DatumType>|<Token>
-# @param Item: Pcd item
-# @param Type: Pcd type
-# @param File: Dec file
-# @param LineNo: Line number
-#
-def GetPcdOfDec(Item, Type, File, LineNo= -1):
- Format = '<TokenSpaceGuidCName>.<PcdCName>|<Value>|<DatumType>|<Token>'
- TokenGuid, TokenName, Value, DatumType, Token = '', '', '', '', ''
- List = GetSplitValueList(Item)
- if len(List) != 4:
- RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
- else:
- Value = List[1]
- DatumType = List[2]
- Token = List[3]
- TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
- if len(TokenInfo) != 2:
- RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
- else:
- TokenGuid = TokenInfo[0]
- TokenName = TokenInfo[1]
-
- return (TokenGuid, TokenName, Value, DatumType, Token, Type)
-
-## Parse DEFINE statement
-#
-# Get DEFINE macros
-#
-# @param LineValue: A DEFINE line value
-# @param StartLine: A DEFINE start line
-# @param Table: A table
-# @param FileID: File ID
-# @param Filename: File name
-# @param SectionName: DEFINE section name
-# @param SectionModel: DEFINE section model
-# @param Arch: DEFINE arch
-#
-def ParseDefine(LineValue, StartLine, Table, FileID, SectionName, \
- SectionModel, Arch):
- Logger.Debug(Logger.DEBUG_2, ST.MSG_DEFINE_STATEMENT_FOUND % (LineValue, \
- SectionName))
- Define = \
- GetSplitValueList(CleanString\
- (LineValue[LineValue.upper().\
- find(DataType.TAB_DEFINE.upper() + ' ') + \
- len(DataType.TAB_DEFINE + ' ') : ]), \
- DataType.TAB_EQUAL_SPLIT, 1)
- Table.Insert(DataType.MODEL_META_DATA_DEFINE, Define[0], Define[1], '', \
- '', '', Arch, SectionModel, FileID, StartLine, -1, \
- StartLine, -1, 0)
-
-## InsertSectionItems
-#
-# Insert item data of a section to a dict
-#
-# @param Model: A model
-# @param CurrentSection: Current section
-# @param SectionItemList: Section item list
-# @param ArchList: Arch list
-# @param ThirdList: Third list
-# @param RecordSet: Record set
-#
-def InsertSectionItems(Model, SectionItemList, ArchList, \
- ThirdList, RecordSet):
- #
- # Insert each item data of a section
- #
- for Index in range(0, len(ArchList)):
- Arch = ArchList[Index]
- Third = ThirdList[Index]
- if Arch == '':
- Arch = DataType.TAB_ARCH_COMMON
-
- Records = RecordSet[Model]
- for SectionItem in SectionItemList:
- LineValue, StartLine, Comment = SectionItem[0], \
- SectionItem[1], SectionItem[2]
-
- Logger.Debug(4, ST.MSG_PARSING % LineValue)
- #
- # And then parse DEFINE statement
- #
- if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1:
- continue
- #
- # At last parse other sections
- #
- IdNum = -1
- Records.append([LineValue, Arch, StartLine, IdNum, Third, Comment])
-
- if RecordSet != {}:
- RecordSet[Model] = Records
-
-## GenMetaDatSectionItem
-#
-# @param Key: A key
-# @param Value: A value
-# @param List: A list
-#
-def GenMetaDatSectionItem(Key, Value, List):
- if Key not in List:
- List[Key] = [Value]
- else:
- List[Key].append(Value)
-
-## GetPkgInfoFromDec
-#
-# get package name, guid, version info from dec files
-#
-# @param Path: File path
-#
-def GetPkgInfoFromDec(Path):
- PkgName = None
- PkgGuid = None
- PkgVersion = None
-
- Path = Path.replace('\\', '/')
-
- if not os.path.exists(Path):
- Logger.Error("\nUPT", FILE_NOT_FOUND, File=Path)
-
- if Path in gPKG_INFO_DICT:
- return gPKG_INFO_DICT[Path]
-
- try:
- DecParser = None
- if Path not in GlobalData.gPackageDict:
- DecParser = Dec(Path)
- GlobalData.gPackageDict[Path] = DecParser
- else:
- DecParser = GlobalData.gPackageDict[Path]
-
- PkgName = DecParser.GetPackageName()
- PkgGuid = DecParser.GetPackageGuid()
- PkgVersion = DecParser.GetPackageVersion()
- gPKG_INFO_DICT[Path] = (PkgName, PkgGuid, PkgVersion)
- return PkgName, PkgGuid, PkgVersion
- except FatalError:
- return None, None, None
-
-
-## GetWorkspacePackage
-#
-# Get a list of workspace package information.
-#
-def GetWorkspacePackage():
- DecFileList = []
- WorkspaceDir = GlobalData.gWORKSPACE
- PackageDir = GlobalData.gPACKAGE_PATH
- for PkgRoot in [WorkspaceDir] + PackageDir:
- for Root, Dirs, Files in os.walk(PkgRoot):
- if 'CVS' in Dirs:
- Dirs.remove('CVS')
- if '.svn' in Dirs:
- Dirs.remove('.svn')
- for Dir in Dirs:
- if Dir.startswith('.'):
- Dirs.remove(Dir)
- for FileSp in Files:
- if FileSp.startswith('.'):
- continue
- Ext = os.path.splitext(FileSp)[1]
- if Ext.lower() in ['.dec']:
- DecFileList.append\
- (os.path.normpath(os.path.join(Root, FileSp)))
- #
- # abstract package guid, version info from DecFile List
- #
- PkgList = []
- for DecFile in DecFileList:
- (PkgName, PkgGuid, PkgVersion) = GetPkgInfoFromDec(DecFile)
- if PkgName and PkgGuid and PkgVersion:
- PkgList.append((PkgName, PkgGuid, PkgVersion, DecFile))
-
- return PkgList
-
-## GetWorkspaceModule
-#
-# Get a list of workspace modules.
-#
-def GetWorkspaceModule():
- InfFileList = []
- WorkspaceDir = GlobalData.gWORKSPACE
- for Root, Dirs, Files in os.walk(WorkspaceDir):
- if 'CVS' in Dirs:
- Dirs.remove('CVS')
- if '.svn' in Dirs:
- Dirs.remove('.svn')
- if 'Build' in Dirs:
- Dirs.remove('Build')
- for Dir in Dirs:
- if Dir.startswith('.'):
- Dirs.remove(Dir)
- for FileSp in Files:
- if FileSp.startswith('.'):
- continue
- Ext = os.path.splitext(FileSp)[1]
- if Ext.lower() in ['.inf']:
- InfFileList.append\
- (os.path.normpath(os.path.join(Root, FileSp)))
-
- return InfFileList
-
-## MacroParser used to parse macro definition
-#
-# @param Line: The content contain linestring and line number
-# @param FileName: The meta-file file name
-# @param SectionType: Section for the Line belong to
-# @param FileLocalMacros: A list contain Macro defined in [Defines] section.
-#
-def MacroParser(Line, FileName, SectionType, FileLocalMacros):
- MacroDefPattern = re.compile("^(DEFINE)[ \t]+")
- LineContent = Line[0]
- LineNo = Line[1]
- Match = MacroDefPattern.match(LineContent)
- if not Match:
- #
- # Not 'DEFINE/EDK_GLOBAL' statement, call decorated method
- #
- return None, None
-
- TokenList = GetSplitValueList(LineContent[Match.end(1):], \
- DataType.TAB_EQUAL_SPLIT, 1)
- #
- # Syntax check
- #
- if not TokenList[0]:
- Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACRONAME_NOGIVEN,
- ExtraData=LineContent, File=FileName, Line=LineNo)
- if len(TokenList) < 2:
- Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACROVALUE_NOGIVEN,
- ExtraData=LineContent, File=FileName, Line=LineNo)
-
- Name, Value = TokenList
-
- #
- # DEFINE defined macros
- #
- if SectionType == DataType.MODEL_META_DATA_HEADER:
- FileLocalMacros[Name] = Value
-
- ReIsValidMacroName = re.compile(r"^[A-Z][A-Z0-9_]*$", re.DOTALL)
- if ReIsValidMacroName.match(Name) == None:
- Logger.Error('Parser',
- FORMAT_INVALID,
- ST.ERR_MACRONAME_INVALID % (Name),
- ExtraData=LineContent,
- File=FileName,
- Line=LineNo)
-
- # Validate MACRO Value
- #
- # <MacroDefinition> ::= [<Comments>]{0,}
- # "DEFINE" <MACRO> "=" [{<PATH>} {<VALUE>}] <EOL>
- # <Value> ::= {<NumVal>} {<Boolean>} {<AsciiString>} {<GUID>}
- # {<CString>} {<UnicodeString>} {<CArray>}
- #
- # The definition of <NumVal>, <PATH>, <Boolean>, <GUID>, <CString>,
- # <UnicodeString>, <CArray> are subset of <AsciiString>.
- #
- ReIsValidMacroValue = re.compile(r"^[\x20-\x7e]*$", re.DOTALL)
- if ReIsValidMacroValue.match(Value) == None:
- Logger.Error('Parser',
- FORMAT_INVALID,
- ST.ERR_MACROVALUE_INVALID % (Value),
- ExtraData=LineContent,
- File=FileName,
- Line=LineNo)
-
- return Name, Value
-
-## GenSection
-#
-# generate section contents
-#
-# @param SectionName: indicate the name of the section, details refer to
-# INF, DEC specs
-# @param SectionDict: section statement dict, key is SectionAttrs(arch,
-# moduletype or platform may exist as needed) list
-# seperated by space,
-# value is statement
-#
-def GenSection(SectionName, SectionDict, SplitArch=True, NeedBlankLine=False):
- Content = ''
- for SectionAttrs in SectionDict:
- StatementList = SectionDict[SectionAttrs]
- if SectionAttrs and SectionName != 'Defines' and SectionAttrs.strip().upper() != DataType.TAB_ARCH_COMMON:
- if SplitArch:
- ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_SPACE_SPLIT)
- else:
- if SectionName != 'UserExtensions':
- ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_COMMENT_SPLIT)
- else:
- ArchList = [SectionAttrs]
- for Index in xrange(0, len(ArchList)):
- ArchList[Index] = ConvertArchForInstall(ArchList[Index])
- Section = '[' + SectionName + '.' + (', ' + SectionName + '.').join(ArchList) + ']'
- else:
- Section = '[' + SectionName + ']'
- Content += '\n' + Section + '\n'
- if StatementList != None:
- for Statement in StatementList:
- LineList = Statement.split('\n')
- NewStatement = ""
- for Line in LineList:
- # ignore blank comment
- if not Line.replace("#", '').strip() and SectionName not in ('Defines', 'Hob', 'Event', 'BootMode'):
- continue
- # add two space before non-comments line except the comments in Defines section
- if Line.strip().startswith('#') and SectionName == 'Defines':
- NewStatement += "%s\n" % Line
- continue
- NewStatement += " %s\n" % Line
- if NeedBlankLine:
- Content += NewStatement + '\n'
- else:
- Content += NewStatement
-
- if NeedBlankLine:
- Content = Content[:-1]
- if not Content.replace('\\n', '').strip():
- return ''
- return Content
-
-## ConvertArchForInstall
-# if Arch.upper() is in "IA32", "X64", "IPF", and "EBC", it must be upper case. "common" must be lower case.
-# Anything else, the case must be preserved
-#
-# @param Arch: the arch string that need to be converted, it should be stripped before pass in
-# @return: the arch string that get converted
-#
-def ConvertArchForInstall(Arch):
- if Arch.upper() in [DataType.TAB_ARCH_IA32, DataType.TAB_ARCH_X64,
- DataType.TAB_ARCH_IPF, DataType.TAB_ARCH_EBC]:
- Arch = Arch.upper()
- elif Arch.upper() == DataType.TAB_ARCH_COMMON:
- Arch = Arch.lower()
-
- return Arch
diff --git a/BaseTools/Source/Python/UPT/Library/String.py b/BaseTools/Source/Python/UPT/Library/String.py
deleted file mode 100644
index 89371dbdbf..0000000000
--- a/BaseTools/Source/Python/UPT/Library/String.py
+++ /dev/null
@@ -1,988 +0,0 @@
-## @file
-# This file is used to define common string related functions used in parsing
-# process
-#
-# Copyright (c) 2011 - 2016, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-'''
-String
-'''
-##
-# Import Modules
-#
-import re
-import os.path
-from string import strip
-import Logger.Log as Logger
-import Library.DataType as DataType
-from Logger.ToolError import FORMAT_INVALID
-from Logger.ToolError import PARSER_ERROR
-from Logger import StringTable as ST
-
-#
-# Regular expression for matching macro used in DSC/DEC/INF file inclusion
-#
-gMACRO_PATTERN = re.compile("\$\(([_A-Z][_A-Z0-9]*)\)", re.UNICODE)
-
-## GetSplitValueList
-#
-# Get a value list from a string with multiple values splited with SplitTag
-# The default SplitTag is DataType.TAB_VALUE_SPLIT
-# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
-#
-# @param String: The input string to be splitted
-# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
-# @param MaxSplit: The max number of split values, default is -1
-#
-#
-def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
- return map(lambda l: l.strip(), String.split(SplitTag, MaxSplit))
-
-## MergeArches
-#
-# Find a key's all arches in dict, add the new arch to the list
-# If not exist any arch, set the arch directly
-#
-# @param Dict: The input value for Dict
-# @param Key: The input value for Key
-# @param Arch: The Arch to be added or merged
-#
-def MergeArches(Dict, Key, Arch):
- if Key in Dict.keys():
- Dict[Key].append(Arch)
- else:
- Dict[Key] = Arch.split()
-
-## GenDefines
-#
-# Parse a string with format "DEFINE <VarName> = <PATH>"
-# Generate a map Defines[VarName] = PATH
-# Return False if invalid format
-#
-# @param String: String with DEFINE statement
-# @param Arch: Supportted Arch
-# @param Defines: DEFINE statement to be parsed
-#
-def GenDefines(String, Arch, Defines):
- if String.find(DataType.TAB_DEFINE + ' ') > -1:
- List = String.replace(DataType.TAB_DEFINE + ' ', '').\
- split(DataType.TAB_EQUAL_SPLIT)
- if len(List) == 2:
- Defines[(CleanString(List[0]), Arch)] = CleanString(List[1])
- return 0
- else:
- return -1
- return 1
-
-## GetLibraryClassesWithModuleType
-#
-# Get Library Class definition when no module type defined
-#
-# @param Lines: The content to be parsed
-# @param Key: Reserved
-# @param KeyValues: To store data after parsing
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
- NewKey = SplitModuleType(Key)
- Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
- LineList = Lines.splitlines()
- for Line in LineList:
- Line = CleanString(Line, CommentCharacter)
- if Line != '' and Line[0] != CommentCharacter:
- KeyValues.append([CleanString(Line, CommentCharacter), NewKey[1]])
-
- return True
-
-## GetDynamics
-#
-# Get Dynamic Pcds
-#
-# @param Lines: The content to be parsed
-# @param Key: Reserved
-# @param KeyValues: To store data after parsing
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
- #
- # Get SkuId Name List
- #
- SkuIdNameList = SplitModuleType(Key)
-
- Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
- LineList = Lines.splitlines()
- for Line in LineList:
- Line = CleanString(Line, CommentCharacter)
- if Line != '' and Line[0] != CommentCharacter:
- KeyValues.append([CleanString(Line, CommentCharacter), SkuIdNameList[1]])
-
- return True
-
-## SplitModuleType
-#
-# Split ModuleType out of section defien to get key
-# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [
-# 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
-#
-# @param Key: String to be parsed
-#
-def SplitModuleType(Key):
- KeyList = Key.split(DataType.TAB_SPLIT)
- #
- # Fill in for arch
- #
- KeyList.append('')
- #
- # Fill in for moduletype
- #
- KeyList.append('')
- ReturnValue = []
- KeyValue = KeyList[0]
- if KeyList[1] != '':
- KeyValue = KeyValue + DataType.TAB_SPLIT + KeyList[1]
- ReturnValue.append(KeyValue)
- ReturnValue.append(GetSplitValueList(KeyList[2]))
-
- return ReturnValue
-
-## Replace macro in string
-#
-# This method replace macros used in given string. The macros are given in a
-# dictionary.
-#
-# @param String String to be processed
-# @param MacroDefinitions The macro definitions in the form of dictionary
-# @param SelfReplacement To decide whether replace un-defined macro to ''
-# @param Line: The content contain line string and line number
-# @param FileName: The meta-file file name
-#
-def ReplaceMacro(String, MacroDefinitions=None, SelfReplacement=False, Line=None, FileName=None, Flag=False):
- LastString = String
- if MacroDefinitions == None:
- MacroDefinitions = {}
- while MacroDefinitions:
- QuotedStringList = []
- HaveQuotedMacroFlag = False
- if not Flag:
- MacroUsed = gMACRO_PATTERN.findall(String)
- else:
- ReQuotedString = re.compile('\"')
- QuotedStringList = ReQuotedString.split(String)
- if len(QuotedStringList) >= 3:
- HaveQuotedMacroFlag = True
- Count = 0
- MacroString = ""
- for QuotedStringItem in QuotedStringList:
- Count += 1
- if Count % 2 != 0:
- MacroString += QuotedStringItem
-
- if Count == len(QuotedStringList) and Count % 2 == 0:
- MacroString += QuotedStringItem
-
- MacroUsed = gMACRO_PATTERN.findall(MacroString)
- #
- # no macro found in String, stop replacing
- #
- if len(MacroUsed) == 0:
- break
- for Macro in MacroUsed:
- if Macro not in MacroDefinitions:
- if SelfReplacement:
- String = String.replace("$(%s)" % Macro, '')
- Logger.Debug(5, "Delete undefined MACROs in file %s line %d: %s!" % (FileName, Line[1], Line[0]))
- continue
- if not HaveQuotedMacroFlag:
- String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
- else:
- Count = 0
- for QuotedStringItem in QuotedStringList:
- Count += 1
- if Count % 2 != 0:
- QuotedStringList[Count - 1] = QuotedStringList[Count - 1].replace("$(%s)" % Macro,
- MacroDefinitions[Macro])
- elif Count == len(QuotedStringList) and Count % 2 == 0:
- QuotedStringList[Count - 1] = QuotedStringList[Count - 1].replace("$(%s)" % Macro,
- MacroDefinitions[Macro])
-
- RetString = ''
- if HaveQuotedMacroFlag:
- Count = 0
- for QuotedStringItem in QuotedStringList:
- Count += 1
- if Count != len(QuotedStringList):
- RetString += QuotedStringList[Count - 1] + "\""
- else:
- RetString += QuotedStringList[Count - 1]
-
- String = RetString
-
- #
- # in case there's macro not defined
- #
- if String == LastString:
- break
- LastString = String
-
- return String
-
-## NormPath
-#
-# Create a normal path
-# And replace DFEINE in the path
-#
-# @param Path: The input value for Path to be converted
-# @param Defines: A set for DEFINE statement
-#
-def NormPath(Path, Defines=None):
- IsRelativePath = False
- if Defines == None:
- Defines = {}
- if Path:
- if Path[0] == '.':
- IsRelativePath = True
- #
- # Replace with Define
- #
- if Defines:
- Path = ReplaceMacro(Path, Defines)
- #
- # To local path format
- #
- Path = os.path.normpath(Path)
-
- if IsRelativePath and Path[0] != '.':
- Path = os.path.join('.', Path)
- return Path
-
-## CleanString
-#
-# Remove comments in a string
-# Remove spaces
-#
-# @param Line: The string to be cleaned
-# @param CommentCharacter: Comment char, used to ignore comment content,
-# default is DataType.TAB_COMMENT_SPLIT
-#
-def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
- #
- # remove whitespace
- #
- Line = Line.strip()
- #
- # Replace EDK1's comment character
- #
- if AllowCppStyleComment:
- Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
- #
- # remove comments, but we should escape comment character in string
- #
- InString = False
- for Index in range(0, len(Line)):
- if Line[Index] == '"':
- InString = not InString
- elif Line[Index] == CommentCharacter and not InString:
- Line = Line[0: Index]
- break
- #
- # remove whitespace again
- #
- Line = Line.strip()
-
- return Line
-
-## CleanString2
-#
-# Split comments in a string
-# Remove spaces
-#
-# @param Line: The string to be cleaned
-# @param CommentCharacter: Comment char, used to ignore comment content,
-# default is DataType.TAB_COMMENT_SPLIT
-#
-def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
- #
- # remove whitespace
- #
- Line = Line.strip()
- #
- # Replace EDK1's comment character
- #
- if AllowCppStyleComment:
- Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
- #
- # separate comments and statements
- #
- LineParts = Line.split(CommentCharacter, 1)
- #
- # remove whitespace again
- #
- Line = LineParts[0].strip()
- if len(LineParts) > 1:
- Comment = LineParts[1].strip()
- #
- # Remove prefixed and trailing comment characters
- #
- Start = 0
- End = len(Comment)
- while Start < End and Comment.startswith(CommentCharacter, Start, End):
- Start += 1
- while End >= 0 and Comment.endswith(CommentCharacter, Start, End):
- End -= 1
- Comment = Comment[Start:End]
- Comment = Comment.strip()
- else:
- Comment = ''
-
- return Line, Comment
-
-## GetMultipleValuesOfKeyFromLines
-#
-# Parse multiple strings to clean comment and spaces
-# The result is saved to KeyValues
-#
-# @param Lines: The content to be parsed
-# @param Key: Reserved
-# @param KeyValues: To store data after parsing
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
- if Key:
- pass
- if KeyValues:
- pass
- Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
- LineList = Lines.split('\n')
- for Line in LineList:
- Line = CleanString(Line, CommentCharacter)
- if Line != '' and Line[0] != CommentCharacter:
- KeyValues += [Line]
- return True
-
-## GetDefineValue
-#
-# Parse a DEFINE statement to get defined value
-# DEFINE Key Value
-#
-# @param String: The content to be parsed
-# @param Key: The key of DEFINE statement
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-def GetDefineValue(String, Key, CommentCharacter):
- if CommentCharacter:
- pass
- String = CleanString(String)
- return String[String.find(Key + ' ') + len(Key + ' ') : ]
-
-## GetSingleValueOfKeyFromLines
-#
-# Parse multiple strings as below to get value of each definition line
-# Key1 = Value1
-# Key2 = Value2
-# The result is saved to Dictionary
-#
-# @param Lines: The content to be parsed
-# @param Dictionary: To store data after parsing
-# @param CommentCharacter: Comment char, be used to ignore comment content
-# @param KeySplitCharacter: Key split char, between key name and key value.
-# Key1 = Value1, '=' is the key split char
-# @param ValueSplitFlag: Value split flag, be used to decide if has
-# multiple values
-# @param ValueSplitCharacter: Value split char, be used to split multiple
-# values. Key1 = Value1|Value2, '|' is the value
-# split char
-#
-def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, \
- ValueSplitFlag, ValueSplitCharacter):
- Lines = Lines.split('\n')
- Keys = []
- Value = ''
- DefineValues = ['']
- SpecValues = ['']
-
- for Line in Lines:
- #
- # Handle DEFINE and SPEC
- #
- if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
- if '' in DefineValues:
- DefineValues.remove('')
- DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
- continue
- if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
- if '' in SpecValues:
- SpecValues.remove('')
- SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
- continue
-
- #
- # Handle Others
- #
- LineList = Line.split(KeySplitCharacter, 1)
- if len(LineList) >= 2:
- Key = LineList[0].split()
- if len(Key) == 1 and Key[0][0] != CommentCharacter:
- #
- # Remove comments and white spaces
- #
- LineList[1] = CleanString(LineList[1], CommentCharacter)
- if ValueSplitFlag:
- Value = map(strip, LineList[1].split(ValueSplitCharacter))
- else:
- Value = CleanString(LineList[1], CommentCharacter).splitlines()
-
- if Key[0] in Dictionary:
- if Key[0] not in Keys:
- Dictionary[Key[0]] = Value
- Keys.append(Key[0])
- else:
- Dictionary[Key[0]].extend(Value)
- else:
- Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]
-
- if DefineValues == []:
- DefineValues = ['']
- if SpecValues == []:
- SpecValues = ['']
- Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
- Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues
-
- return True
-
-## The content to be parsed
-#
-# Do pre-check for a file before it is parsed
-# Check $()
-# Check []
-#
-# @param FileName: Used for error report
-# @param FileContent: File content to be parsed
-# @param SupSectionTag: Used for error report
-#
-def PreCheck(FileName, FileContent, SupSectionTag):
- if SupSectionTag:
- pass
- LineNo = 0
- IsFailed = False
- NewFileContent = ''
- for Line in FileContent.splitlines():
- LineNo = LineNo + 1
- #
- # Clean current line
- #
- Line = CleanString(Line)
- #
- # Remove commented line
- #
- if Line.find(DataType.TAB_COMMA_SPLIT) == 0:
- Line = ''
- #
- # Check $()
- #
- if Line.find('$') > -1:
- if Line.find('$(') < 0 or Line.find(')') < 0:
- Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR)
- #
- # Check []
- #
- if Line.find('[') > -1 or Line.find(']') > -1:
- #
- # Only get one '[' or one ']'
- #
- if not (Line.find('[') > -1 and Line.find(']') > -1):
- Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR)
- #
- # Regenerate FileContent
- #
- NewFileContent = NewFileContent + Line + '\r\n'
-
- if IsFailed:
- Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR)
-
- return NewFileContent
-
-## CheckFileType
-#
-# Check if the Filename is including ExtName
-# Return True if it exists
-# Raise a error message if it not exists
-#
-# @param CheckFilename: Name of the file to be checked
-# @param ExtName: Ext name of the file to be checked
-# @param ContainerFilename: The container file which describes the file to be
-# checked, used for error report
-# @param SectionName: Used for error report
-# @param Line: The line in container file which defines the file
-# to be checked
-#
-def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo= -1):
- if CheckFilename != '' and CheckFilename != None:
- (Root, Ext) = os.path.splitext(CheckFilename)
- if Ext.upper() != ExtName.upper() and Root:
- ContainerFile = open(ContainerFilename, 'r').read()
- if LineNo == -1:
- LineNo = GetLineNo(ContainerFile, Line)
- ErrorMsg = ST.ERR_SECTIONNAME_INVALID % (SectionName, CheckFilename, ExtName)
- Logger.Error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo, \
- File=ContainerFilename, RaiseError=Logger.IS_RAISE_ERROR)
-
- return True
-
-## CheckFileExist
-#
-# Check if the file exists
-# Return True if it exists
-# Raise a error message if it not exists
-#
-# @param CheckFilename: Name of the file to be checked
-# @param WorkspaceDir: Current workspace dir
-# @param ContainerFilename: The container file which describes the file to
-# be checked, used for error report
-# @param SectionName: Used for error report
-# @param Line: The line in container file which defines the
-# file to be checked
-#
-def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo= -1):
- CheckFile = ''
- if CheckFilename != '' and CheckFilename != None:
- CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
- if not os.path.isfile(CheckFile):
- ContainerFile = open(ContainerFilename, 'r').read()
- if LineNo == -1:
- LineNo = GetLineNo(ContainerFile, Line)
- ErrorMsg = ST.ERR_CHECKFILE_NOTFOUND % (CheckFile, SectionName)
- Logger.Error("Parser", PARSER_ERROR, ErrorMsg,
- File=ContainerFilename, Line=LineNo, RaiseError=Logger.IS_RAISE_ERROR)
- return CheckFile
-
-## GetLineNo
-#
-# Find the index of a line in a file
-#
-# @param FileContent: Search scope
-# @param Line: Search key
-#
-def GetLineNo(FileContent, Line, IsIgnoreComment=True):
- LineList = FileContent.splitlines()
- for Index in range(len(LineList)):
- if LineList[Index].find(Line) > -1:
- #
- # Ignore statement in comment
- #
- if IsIgnoreComment:
- if LineList[Index].strip()[0] == DataType.TAB_COMMENT_SPLIT:
- continue
- return Index + 1
-
- return -1
-
-## RaiseParserError
-#
-# Raise a parser error
-#
-# @param Line: String which has error
-# @param Section: Used for error report
-# @param File: File which has the string
-# @param Format: Correct format
-#
-def RaiseParserError(Line, Section, File, Format='', LineNo= -1):
- if LineNo == -1:
- LineNo = GetLineNo(open(os.path.normpath(File), 'r').read(), Line)
- ErrorMsg = ST.ERR_INVALID_NOTFOUND % (Line, Section)
- if Format != '':
- Format = "Correct format is " + Format
- Logger.Error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, \
- ExtraData=Format, RaiseError=Logger.IS_RAISE_ERROR)
-
-## WorkspaceFile
-#
-# Return a full path with workspace dir
-#
-# @param WorkspaceDir: Workspace dir
-# @param Filename: Relative file name
-#
-def WorkspaceFile(WorkspaceDir, Filename):
- return os.path.join(NormPath(WorkspaceDir), NormPath(Filename))
-
-## Split string
-#
-# Revmove '"' which startswith and endswith string
-#
-# @param String: The string need to be splited
-#
-def SplitString(String):
- if String.startswith('\"'):
- String = String[1:]
- if String.endswith('\"'):
- String = String[:-1]
- return String
-
-## Convert To Sql String
-#
-# Replace "'" with "''" in each item of StringList
-#
-# @param StringList: A list for strings to be converted
-#
-def ConvertToSqlString(StringList):
- return map(lambda s: s.replace("'", "''") , StringList)
-
-## Convert To Sql String
-#
-# Replace "'" with "''" in the String
-#
-# @param String: A String to be converted
-#
-def ConvertToSqlString2(String):
- return String.replace("'", "''")
-
-## GetStringOfList
-#
-# Get String of a List
-#
-# @param Lines: string list
-# @param Split: split character
-#
-def GetStringOfList(List, Split=' '):
- if type(List) != type([]):
- return List
- Str = ''
- for Item in List:
- Str = Str + Item + Split
- return Str.strip()
-
-## Get HelpTextList
-#
-# Get HelpTextList from HelpTextClassList
-#
-# @param HelpTextClassList: Help Text Class List
-#
-def GetHelpTextList(HelpTextClassList):
- List = []
- if HelpTextClassList:
- for HelpText in HelpTextClassList:
- if HelpText.String.endswith('\n'):
- HelpText.String = HelpText.String[0: len(HelpText.String) - len('\n')]
- List.extend(HelpText.String.split('\n'))
- return List
-
-## Get String Array Length
-#
-# Get String Array Length
-#
-# @param String: the source string
-#
-def StringArrayLength(String):
- if isinstance(String, unicode):
- return (len(String) + 1) * 2 + 1
- elif String.startswith('L"'):
- return (len(String) - 3 + 1) * 2
- elif String.startswith('"'):
- return (len(String) - 2 + 1)
- else:
- return len(String.split()) + 1
-
-## RemoveDupOption
-#
-# Remove Dup Option
-#
-# @param OptionString: the option string
-# @param Which: Which flag
-# @param Against: Against flag
-#
-def RemoveDupOption(OptionString, Which="/I", Against=None):
- OptionList = OptionString.split()
- ValueList = []
- if Against:
- ValueList += Against
- for Index in range(len(OptionList)):
- Opt = OptionList[Index]
- if not Opt.startswith(Which):
- continue
- if len(Opt) > len(Which):
- Val = Opt[len(Which):]
- else:
- Val = ""
- if Val in ValueList:
- OptionList[Index] = ""
- else:
- ValueList.append(Val)
- return " ".join(OptionList)
-
-## Check if the string is HexDgit
-#
-# Return true if all characters in the string are digits and there is at
-# least one character
-# or valid Hexs (started with 0x, following by hexdigit letters)
-# , false otherwise.
-# @param string: input string
-#
-def IsHexDigit(Str):
- try:
- int(Str, 10)
- return True
- except ValueError:
- if len(Str) > 2 and Str.upper().startswith('0X'):
- try:
- int(Str, 16)
- return True
- except ValueError:
- return False
- return False
-
-## Check if the string is HexDgit and its interger value within limit of UINT32
-#
-# Return true if all characters in the string are digits and there is at
-# least one character
-# or valid Hexs (started with 0x, following by hexdigit letters)
-# , false otherwise.
-# @param string: input string
-#
-def IsHexDigitUINT32(Str):
- try:
- Value = int(Str, 10)
- if (Value <= 0xFFFFFFFF) and (Value >= 0):
- return True
- except ValueError:
- if len(Str) > 2 and Str.upper().startswith('0X'):
- try:
- Value = int(Str, 16)
- if (Value <= 0xFFFFFFFF) and (Value >= 0):
- return True
- except ValueError:
- return False
- return False
-
-## CleanSpecialChar
-#
-# The ASCII text files of type INF, DEC, INI are edited by developers,
-# and may contain characters that cannot be directly translated to strings that
-# are conformant with the UDP XML Schema. Any characters in this category
-# (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x80-0xFF)
-# must be converted to a space character[0x20] as part of the parsing process.
-#
-def ConvertSpecialChar(Lines):
- RetLines = []
- for line in Lines:
- ReMatchSpecialChar = re.compile(r"[\x00-\x08]|\x09|\x0b|\x0c|[\x0e-\x1f]|[\x7f-\xff]")
- RetLines.append(ReMatchSpecialChar.sub(' ', line))
-
- return RetLines
-
-## __GetTokenList
-#
-# Assume Str is a valid feature flag expression.
-# Return a list which contains tokens: alpha numeric token and other token
-# Whitespace are not stripped
-#
-def __GetTokenList(Str):
- InQuote = False
- Token = ''
- TokenOP = ''
- PreChar = ''
- List = []
- for Char in Str:
- if InQuote:
- Token += Char
- if Char == '"' and PreChar != '\\':
- InQuote = not InQuote
- List.append(Token)
- Token = ''
- continue
- if Char == '"':
- if Token and Token != 'L':
- List.append(Token)
- Token = ''
- if TokenOP:
- List.append(TokenOP)
- TokenOP = ''
- InQuote = not InQuote
- Token += Char
- continue
-
- if not (Char.isalnum() or Char in '_'):
- TokenOP += Char
- if Token:
- List.append(Token)
- Token = ''
- else:
- Token += Char
- if TokenOP:
- List.append(TokenOP)
- TokenOP = ''
-
- if PreChar == '\\' and Char == '\\':
- PreChar = ''
- else:
- PreChar = Char
- if Token:
- List.append(Token)
- if TokenOP:
- List.append(TokenOP)
- return List
-
-## ConvertNEToNOTEQ
-#
-# Convert NE operator to NOT EQ
-# For example: 1 NE 2 -> 1 NOT EQ 2
-#
-# @param Expr: Feature flag expression to be converted
-#
-def ConvertNEToNOTEQ(Expr):
- List = __GetTokenList(Expr)
- for Index in range(len(List)):
- if List[Index] == 'NE':
- List[Index] = 'NOT EQ'
- return ''.join(List)
-
-## ConvertNOTEQToNE
-#
-# Convert NOT EQ operator to NE
-# For example: 1 NOT NE 2 -> 1 NE 2
-#
-# @param Expr: Feature flag expression to be converted
-#
-def ConvertNOTEQToNE(Expr):
- List = __GetTokenList(Expr)
- HasNOT = False
- RetList = []
- for Token in List:
- if HasNOT and Token == 'EQ':
- # At least, 'NOT' is in the list
- while not RetList[-1].strip():
- RetList.pop()
- RetList[-1] = 'NE'
- HasNOT = False
- continue
- if Token == 'NOT':
- HasNOT = True
- elif Token.strip():
- HasNOT = False
- RetList.append(Token)
-
- return ''.join(RetList)
-
-## SplitPcdEntry
-#
-# Split an PCD entry string to Token.CName and PCD value and FFE.
-# NOTE: PCD Value and FFE can contain "|" in it's expression. And in INF specification, have below rule.
-# When using the characters "|" or "||" in an expression, the expression must be encapsulated in
-# open "(" and close ")" parenthesis.
-#
-# @param String An PCD entry string need to be split.
-#
-# @return List [PcdTokenCName, Value, FFE]
-#
-def SplitPcdEntry(String):
- if not String:
- return ['', '', ''], False
-
- PcdTokenCName = ''
- PcdValue = ''
- PcdFeatureFlagExp = ''
-
- ValueList = GetSplitValueList(String, "|", 1)
-
- #
- # Only contain TokenCName
- #
- if len(ValueList) == 1:
- return [ValueList[0]], True
-
- NewValueList = []
-
- if len(ValueList) == 2:
- PcdTokenCName = ValueList[0]
-
- InQuote = False
- InParenthesis = False
- StrItem = ''
- for StrCh in ValueList[1]:
- if StrCh == '"':
- InQuote = not InQuote
- elif StrCh == '(' or StrCh == ')':
- InParenthesis = not InParenthesis
-
- if StrCh == '|':
- if not InQuote or not InParenthesis:
- NewValueList.append(StrItem.strip())
- StrItem = ' '
- continue
-
- StrItem += StrCh
-
- NewValueList.append(StrItem.strip())
-
- if len(NewValueList) == 1:
- PcdValue = NewValueList[0]
- return [PcdTokenCName, PcdValue], True
- elif len(NewValueList) == 2:
- PcdValue = NewValueList[0]
- PcdFeatureFlagExp = NewValueList[1]
- return [PcdTokenCName, PcdValue, PcdFeatureFlagExp], True
- else:
- return ['', '', ''], False
-
- return ['', '', ''], False
-
-## Check if two arches matched?
-#
-# @param Arch1
-# @param Arch2
-#
-def IsMatchArch(Arch1, Arch2):
- if 'COMMON' in Arch1 or 'COMMON' in Arch2:
- return True
- if isinstance(Arch1, basestring) and isinstance(Arch2, basestring):
- if Arch1 == Arch2:
- return True
-
- if isinstance(Arch1, basestring) and isinstance(Arch2, list):
- return Arch1 in Arch2
-
- if isinstance(Arch2, basestring) and isinstance(Arch1, list):
- return Arch2 in Arch1
-
- if isinstance(Arch1, list) and isinstance(Arch2, list):
- for Item1 in Arch1:
- for Item2 in Arch2:
- if Item1 == Item2:
- return True
-
- return False
-
-# Search all files in FilePath to find the FileName with the largest index
-# Return the FileName with index +1 under the FilePath
-#
-def GetUniFileName(FilePath, FileName):
- Files = []
- try:
- Files = os.listdir(FilePath)
- except:
- pass
-
- LargestIndex = -1
- for File in Files:
- if File.upper().startswith(FileName.upper()) and File.upper().endswith('.UNI'):
- Index = File.upper().replace(FileName.upper(), '').replace('.UNI', '')
- if Index:
- try:
- Index = int(Index)
- except Exception:
- Index = -1
- else:
- Index = 0
- if Index > LargestIndex:
- LargestIndex = Index + 1
-
- if LargestIndex > -1:
- return os.path.normpath(os.path.join(FilePath, FileName + str(LargestIndex) + '.uni'))
- else:
- return os.path.normpath(os.path.join(FilePath, FileName + '.uni'))
diff --git a/BaseTools/Source/Python/UPT/Library/UniClassObject.py b/BaseTools/Source/Python/UPT/Library/UniClassObject.py
deleted file mode 100644
index 1fbbf2e498..0000000000
--- a/BaseTools/Source/Python/UPT/Library/UniClassObject.py
+++ /dev/null
@@ -1,1070 +0,0 @@
-## @file
-# Collect all defined strings in multiple uni files.
-#
-# Copyright (c) 2014 - 2017, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-"""
-Collect all defined strings in multiple uni files
-"""
-
-##
-# Import Modules
-#
-import os, codecs, re
-import distutils.util
-from Logger import ToolError
-from Logger import Log as EdkLogger
-from Logger import StringTable as ST
-from Library.String import GetLineNo
-from Library.Misc import PathClass
-from Library.Misc import GetCharIndexOutStr
-from Library import DataType as DT
-from Library.ParserValidate import CheckUTF16FileHeader
-
-##
-# Static definitions
-#
# Escape sequences as they appear literally in .uni source text.
UNICODE_WIDE_CHAR = u'\\wide'
UNICODE_NARROW_CHAR = u'\\narrow'
UNICODE_NON_BREAKING_CHAR = u'\\nbr'
UNICODE_UNICODE_CR = '\r'
UNICODE_UNICODE_LF = '\n'

# Private-use code points the escape sequences above are translated to
# during pre-processing (see UniFileClassObject.PreProcess).
NARROW_CHAR = u'\uFFF0'
WIDE_CHAR = u'\uFFF1'
NON_BREAKING_CHAR = u'\uFFF2'
CR = u'\u000D'
LF = u'\u000A'
NULL = u'\u0000'
TAB = u'\t'
BACK_SPLASH = u'\\'

# Matches a whole '!include <file>' line in a .uni file.
# NOTE(review): currently unused — the findall call in PreProcess is
# commented out, so include expansion via this pattern is disabled.
gINCLUDE_PATTERN = re.compile("^!include[\s]+([\S]+)[\s]*$", re.MULTILINE | re.UNICODE)
-
# Mapping from RFC 1766 three-letter language codes (ISO 639-2) to their
# RFC 4646 two-letter equivalents (ISO 639-1).  Used both directions:
# forward lookup to convert 3-letter codes, and a None result to verify
# that a 3-letter code has no 2-letter form (see GetLanguageCode).
gLANG_CONV_TABLE = {'eng':'en', 'fra':'fr', \
                 'aar':'aa', 'abk':'ab', 'ave':'ae', 'afr':'af', 'aka':'ak', 'amh':'am', \
                 'arg':'an', 'ara':'ar', 'asm':'as', 'ava':'av', 'aym':'ay', 'aze':'az', \
                 'bak':'ba', 'bel':'be', 'bul':'bg', 'bih':'bh', 'bis':'bi', 'bam':'bm', \
                 'ben':'bn', 'bod':'bo', 'bre':'br', 'bos':'bs', 'cat':'ca', 'che':'ce', \
                 'cha':'ch', 'cos':'co', 'cre':'cr', 'ces':'cs', 'chu':'cu', 'chv':'cv', \
                 'cym':'cy', 'dan':'da', 'deu':'de', 'div':'dv', 'dzo':'dz', 'ewe':'ee', \
                 'ell':'el', 'epo':'eo', 'spa':'es', 'est':'et', 'eus':'eu', 'fas':'fa', \
                 'ful':'ff', 'fin':'fi', 'fij':'fj', 'fao':'fo', 'fry':'fy', 'gle':'ga', \
                 'gla':'gd', 'glg':'gl', 'grn':'gn', 'guj':'gu', 'glv':'gv', 'hau':'ha', \
                 'heb':'he', 'hin':'hi', 'hmo':'ho', 'hrv':'hr', 'hat':'ht', 'hun':'hu', \
                 'hye':'hy', 'her':'hz', 'ina':'ia', 'ind':'id', 'ile':'ie', 'ibo':'ig', \
                 'iii':'ii', 'ipk':'ik', 'ido':'io', 'isl':'is', 'ita':'it', 'iku':'iu', \
                 'jpn':'ja', 'jav':'jv', 'kat':'ka', 'kon':'kg', 'kik':'ki', 'kua':'kj', \
                 'kaz':'kk', 'kal':'kl', 'khm':'km', 'kan':'kn', 'kor':'ko', 'kau':'kr', \
                 'kas':'ks', 'kur':'ku', 'kom':'kv', 'cor':'kw', 'kir':'ky', 'lat':'la', \
                 'ltz':'lb', 'lug':'lg', 'lim':'li', 'lin':'ln', 'lao':'lo', 'lit':'lt', \
                 'lub':'lu', 'lav':'lv', 'mlg':'mg', 'mah':'mh', 'mri':'mi', 'mkd':'mk', \
                 'mal':'ml', 'mon':'mn', 'mar':'mr', 'msa':'ms', 'mlt':'mt', 'mya':'my', \
                 'nau':'na', 'nob':'nb', 'nde':'nd', 'nep':'ne', 'ndo':'ng', 'nld':'nl', \
                 'nno':'nn', 'nor':'no', 'nbl':'nr', 'nav':'nv', 'nya':'ny', 'oci':'oc', \
                 'oji':'oj', 'orm':'om', 'ori':'or', 'oss':'os', 'pan':'pa', 'pli':'pi', \
                 'pol':'pl', 'pus':'ps', 'por':'pt', 'que':'qu', 'roh':'rm', 'run':'rn', \
                 'ron':'ro', 'rus':'ru', 'kin':'rw', 'san':'sa', 'srd':'sc', 'snd':'sd', \
                 'sme':'se', 'sag':'sg', 'sin':'si', 'slk':'sk', 'slv':'sl', 'smo':'sm', \
                 'sna':'sn', 'som':'so', 'sqi':'sq', 'srp':'sr', 'ssw':'ss', 'sot':'st', \
                 'sun':'su', 'swe':'sv', 'swa':'sw', 'tam':'ta', 'tel':'te', 'tgk':'tg', \
                 'tha':'th', 'tir':'ti', 'tuk':'tk', 'tgl':'tl', 'tsn':'tn', 'ton':'to', \
                 'tur':'tr', 'tso':'ts', 'tat':'tt', 'twi':'tw', 'tah':'ty', 'uig':'ug', \
                 'ukr':'uk', 'urd':'ur', 'uzb':'uz', 'ven':'ve', 'vie':'vi', 'vol':'vo', \
                 'wln':'wa', 'wol':'wo', 'xho':'xh', 'yid':'yi', 'yor':'yo', 'zha':'za', \
                 'zho':'zh', 'zul':'zu'}
-
-## Convert a python unicode string to a normal string
-#
-# Convert a python unicode string to a normal string
-# UniToStr(u'I am a string') is 'I am a string'
-#
-# @param Uni: The python unicode string
-#
-# @retval: The formatted normal string
-#
def UniToStr(Uni):
    """Convert a python unicode string to a normal (byte) string.

    Strips the leading "u'" and the trailing "'" from the Python 2 repr of
    the unicode string; non-ASCII characters come out backslash-escaped.
    NOTE(review): Python-2-specific — on Python 3 repr() has no 'u' prefix,
    so this slicing would drop the first character of the string instead.
    """
    return repr(Uni)[2:-1]
-
-## Convert a unicode string to a Hex list
-#
-# Convert a unicode string to a Hex list
-# UniToHexList('ABC') is ['0x41', '0x00', '0x42', '0x00', '0x43', '0x00']
-#
-# @param Uni: The python unicode string
-#
-# @retval List: The formatted hex list
-#
def UniToHexList(Uni):
    """Convert a unicode string to a list of byte hex strings.

    Each character becomes two '0xNN' entries in UTF-16LE order
    (low byte first): UniToHexList('A') -> ['0x41', '0x00'].
    """
    HexList = []
    for Char in Uni:
        Hex4 = '%04X' % ord(Char)
        # Low byte first, then high byte.
        HexList.extend(('0x' + Hex4[2:4], '0x' + Hex4[0:2]))
    return HexList
-
-## Convert special unicode characters
-#
-# Convert special characters to (c), (r) and (tm).
-#
-# @param Uni: The python unicode string
-#
-# @retval NewUni: The converted unicode string
-#
def ConvertSpecialUnicodes(Uni):
    """Replace the copyright, registered and trademark unicode symbols
    with their ASCII spellings (c), (r) and (tm).

    @param Uni: the unicode string to convert

    @retval the converted unicode string
    """
    Replacements = (
        (u'\u00A9', '(c)'),
        (u'\u00AE', '(r)'),
        (u'\u2122', '(tm)'),
    )
    Converted = Uni
    for Symbol, Ascii in Replacements:
        Converted = Converted.replace(Symbol, Ascii)
    return Converted
-
-## GetLanguageCode1766
-#
-# Check the language code read from .UNI file and convert RFC 4646 codes to RFC 1766 codes
-# RFC 1766 language codes supported in compatiblity mode
-# RFC 4646 language codes supported in native mode
-#
-# @param LangName: Language codes read from .UNI file
-#
-# @retval LangName: Valid lanugage code in RFC 1766 format or None
-#
def GetLanguageCode1766(LangName, File=None):
    """Return LangName unchanged (RFC 1766 validation is disabled).

    @param LangName: language code read from a .UNI file
    @param File: file used for error reporting (unused while disabled)

    @retval LangName, always (see NOTE below)
    """
    # NOTE(review): this unconditional return short-circuits the whole
    # function — every language name is accepted as-is and nothing below
    # this line is reachable.  The dead validation/conversion body is kept
    # verbatim; confirm intent before re-enabling it.
    return LangName

    length = len(LangName)
    if length == 2:
        if LangName.isalpha():
            for Key in gLANG_CONV_TABLE.keys():
                if gLANG_CONV_TABLE.get(Key) == LangName.lower():
                    return Key
    elif length == 3:
        if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()):
            return LangName
        else:
            EdkLogger.Error("Unicode File Parser",
                            ToolError.FORMAT_INVALID,
                            "Invalid RFC 1766 language code : %s" % LangName,
                            File)
    elif length == 5:
        if LangName[0:2].isalpha() and LangName[2] == '-':
            for Key in gLANG_CONV_TABLE.keys():
                if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower():
                    return Key
    elif length >= 6:
        if LangName[0:2].isalpha() and LangName[2] == '-':
            for Key in gLANG_CONV_TABLE.keys():
                if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower():
                    return Key
        if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) == None and LangName[3] == '-':
            for Key in gLANG_CONV_TABLE.keys():
                if Key == LangName[0:3].lower():
                    return Key

    EdkLogger.Error("Unicode File Parser",
                    ToolError.FORMAT_INVALID,
                    "Invalid RFC 4646 language code : %s" % LangName,
                    File)
-
-## GetLanguageCode
-#
-# Check the language code read from .UNI file and convert RFC 1766 codes to RFC 4646 codes if appropriate
-# RFC 1766 language codes supported in compatiblity mode
-# RFC 4646 language codes supported in native mode
-#
-# @param LangName: Language codes read from .UNI file
-#
-# @retval LangName: Valid lanugage code in RFC 4646 format or None
-#
def GetLanguageCode(LangName, IsCompatibleMode, File):
    """Validate a language code read from a .UNI file.

    In compatibility mode only 3-letter RFC 1766 codes are accepted and
    are mapped to their RFC 4646 form when a mapping exists.  In native
    mode the code must already be a structurally valid RFC 4646 tag.

    @param LangName: language code read from the .UNI file
    @param IsCompatibleMode: True for RFC 1766 compatibility mode
    @param File: file used for error reporting

    @retval the validated language code (RFC 4646 form where possible);
            otherwise EdkLogger.Error is invoked
    """
    Length = len(LangName)

    if IsCompatibleMode:
        if Length == 3 and LangName.isalpha():
            Converted = gLANG_CONV_TABLE.get(LangName.lower())
            # Fall back to the original code when no 4646 mapping exists.
            return Converted if Converted is not None else LangName
        EdkLogger.Error("Unicode File Parser",
                        ToolError.FORMAT_INVALID,
                        "Invalid RFC 1766 language code : %s" % LangName,
                        File)

    # Private-use tags ("x-..." / "X-...") pass through untouched.
    if LangName[0] in (u'x', u'X') and LangName[1] == '-':
        return LangName

    if Length == 2:
        if LangName.isalpha():
            return LangName
    elif Length == 3:
        # A 3-letter code is valid only when it has no 2-letter equivalent.
        if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None:
            return LangName
    elif Length == 5:
        if LangName[0:2].isalpha() and LangName[2] == '-':
            return LangName
    elif Length >= 6:
        if LangName[0:2].isalpha() and LangName[2] == '-':
            return LangName
        if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None \
           and LangName[3] == '-':
            return LangName

    EdkLogger.Error("Unicode File Parser",
                    ToolError.FORMAT_INVALID,
                    "Invalid RFC 4646 language code : %s" % LangName,
                    File)
-
-## FormatUniEntry
-#
-# Formated the entry in Uni file.
-#
-# @param StrTokenName StrTokenName.
-# @param TokenValueList A list need to be processed.
-# @param ContainerFile ContainerFile.
-#
-# @return formated entry
def FormatUniEntry(StrTokenName, TokenValueList, ContainerFile):
    """Format one string token entry for a .uni file.

    Builds the multi-language block for StrTokenName, aligning every
    '#language' line to a fixed column and wrapping each value line in
    quotes with a trailing \\n escape.

    @param StrTokenName: the #string token name
    @param TokenValueList: list of (Lang, Value) pairs
    @param ContainerFile: file used for language-code error reporting

    @return the formatted entry text ('' if no usable values)
    """
    SubContent = ''
    # Column where '#language' starts; widened if the token name is longer.
    PreFormatLength = 40
    if len(StrTokenName) > PreFormatLength:
        PreFormatLength = len(StrTokenName) + 1
    for (Lang, Value) in TokenValueList:
        # Skip empty values and the internal 'en-x' pseudo language.
        if not Value or Lang == DT.TAB_LANGUAGE_EN_X:
            continue
        if Lang == '':
            Lang = DT.TAB_LANGUAGE_EN_US
        if Lang == 'eng':
            Lang = DT.TAB_LANGUAGE_EN_US
        elif len(Lang.split('-')[0]) == 3:
            # 3-letter primary subtag: convert via RFC 1766 compat mode.
            Lang = GetLanguageCode(Lang.split('-')[0], True, ContainerFile)
        else:
            Lang = GetLanguageCode(Lang, False, ContainerFile)
        ValueList = Value.split('\n')
        SubValueContent = ''
        for SubValue in ValueList:
            if SubValue.strip():
                # Each value line padded to the text column, quoted, with a
                # literal \n escape appended.
                SubValueContent += \
                ' ' * (PreFormatLength + len('#language en-US ')) + '\"%s\\n\"' % SubValue.strip() + '\r\n'
        # Drop the padding of the first value line (it follows '#language'
        # on the same line) and strip the final \n escape, re-closing the
        # quote.
        SubValueContent = SubValueContent[(PreFormatLength + len('#language en-US ')):SubValueContent.rfind('\\n')] \
                          + '\"' + '\r\n'
        SubContent += ' '*PreFormatLength + '#language %-5s ' % Lang + SubValueContent
    if SubContent:
        # Replace the first line's padding with the token name itself.
        SubContent = StrTokenName + ' '*(PreFormatLength - len(StrTokenName)) + SubContent[PreFormatLength:]
    return SubContent
-
-
-## StringDefClassObject
-#
-# A structure for language definition
-#
class StringDefClassObject(object):
    """One string definition: name, value, token number and reference state.

    The byte lists mirror the name/value in UTF-16LE hex form (via
    UniToHexList); Length is the byte count of the value.
    """

    def __init__(self, Name = None, Value = None, Referenced = False, Token = None, UseOtherLangDef = ''):
        self.StringName = ''
        self.StringNameByteList = []
        self.StringValue = ''
        self.StringValueByteList = ''
        self.Token = 0
        self.Referenced = Referenced
        self.UseOtherLangDef = UseOtherLangDef
        self.Length = 0

        if Name is not None:
            self.StringName = Name
            self.StringNameByteList = UniToHexList(Name)
        if Value is not None:
            self.StringValue = Value
            self.StringValueByteList = UniToHexList(self.StringValue)
            self.Length = len(self.StringValueByteList)
        if Token is not None:
            self.Token = Token

    def __str__(self):
        # Space-separated reprs, in the historical field order.
        Fields = (self.StringName, self.Token, self.Referenced,
                  self.StringValue, self.UseOtherLangDef)
        return ' '.join(repr(Field) for Field in Fields)

    def UpdateValue(self, Value = None):
        """Append Value to the current string value (CRLF-joined) and
        refresh the derived byte list and length."""
        if Value is None:
            return
        if self.StringValue:
            self.StringValue = self.StringValue + '\r\n' + Value
        else:
            self.StringValue = Value
        self.StringValueByteList = UniToHexList(self.StringValue)
        self.Length = len(self.StringValueByteList)
-
-## UniFileClassObject
-#
-# A structure for .uni file definition
-#
-class UniFileClassObject(object):
- def __init__(self, FileList = None, IsCompatibleMode = False, IncludePathList = None):
- self.FileList = FileList
- self.File = None
- self.IncFileList = FileList
- self.UniFileHeader = ''
- self.Token = 2
- self.LanguageDef = [] #[ [u'LanguageIdentifier', u'PrintableName'], ... ]
- self.OrderedStringList = {} #{ u'LanguageIdentifier' : [StringDefClassObject] }
- self.OrderedStringDict = {} #{ u'LanguageIdentifier' : {StringName:(IndexInList)} }
- self.OrderedStringListByToken = {} #{ u'LanguageIdentifier' : {Token: StringDefClassObject} }
- self.IsCompatibleMode = IsCompatibleMode
- if not IncludePathList:
- self.IncludePathList = []
- else:
- self.IncludePathList = IncludePathList
- if len(self.FileList) > 0:
- self.LoadUniFiles(FileList)
-
- #
- # Get Language definition
- #
    def GetLangDef(self, File, Line):
        """Parse a '#langdef <code> "<printable name>"' line.

        Registers the language in self.LanguageDef, seeds the two mandatory
        $LANGUAGE_NAME / $PRINTABLE_LANGUAGE_NAME strings, and mirrors all
        already-known string tokens into the new language's list.

        @param File: the .uni file being parsed (for error reporting)
        @param Line: the langdef line (plus the following line appended)

        @retval True, always
        """
        # split_quoted yields e.g. [u'#langdef', u'en-US', u'English'].
        Lang = distutils.util.split_quoted((Line.split(u"//")[0]))
        if len(Lang) != 3:
            # Malformed langdef: re-read the file only to locate the line
            # number for the error message.
            # NOTE(review): the second 'except UnicodeError' clause is
            # unreachable — the first clause already matches, and an error
            # raised inside a handler is not caught by later clauses of the
            # same try.  Also Python-2-only 'except E, v' syntax.
            try:
                FileIn = codecs.open(File.Path, mode='rb', encoding='utf_8').readlines()
            except UnicodeError, Xstr:
                FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16').readlines()
            except UnicodeError, Xstr:
                FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
            except:
                EdkLogger.Error("Unicode File Parser",
                                ToolError.FILE_OPEN_FAILURE,
                                "File read failure: %s" % str(Xstr),
                                ExtraData=File)
            LineNo = GetLineNo(FileIn, Line, False)
            EdkLogger.Error("Unicode File Parser",
                            ToolError.PARSER_ERROR,
                            "Wrong language definition",
                            ExtraData="""%s\n\t*Correct format is like '#langdef en-US "English"'""" % Line,
                            File = File, Line = LineNo)
        else:
            LangName = GetLanguageCode(Lang[1], self.IsCompatibleMode, self.File)
            LangPrintName = Lang[2]

        IsLangInDef = False
        for Item in self.LanguageDef:
            if Item[0] == LangName:
                IsLangInDef = True
                break

        if not IsLangInDef:
            self.LanguageDef.append([LangName, LangPrintName])

        #
        # Add language string
        #
        self.AddStringToList(u'$LANGUAGE_NAME', LangName, LangName, 0, True, Index=0)
        self.AddStringToList(u'$PRINTABLE_LANGUAGE_NAME', LangName, LangPrintName, 1, True, Index=1)

        if not IsLangInDef:
            #
            # The found STRING tokens will be added into new language string list
            # so that the unique STRING identifier is reserved for all languages in the package list.
            #
            FirstLangName = self.LanguageDef[0][0]
            if LangName != FirstLangName:
                # Start at 2: indexes 0/1 are the two mandatory strings added above.
                for Index in range (2, len (self.OrderedStringList[FirstLangName])):
                    Item = self.OrderedStringList[FirstLangName][Index]
                    if Item.UseOtherLangDef != '':
                        OtherLang = Item.UseOtherLangDef
                    else:
                        OtherLang = FirstLangName
                    self.OrderedStringList[LangName].append (StringDefClassObject(Item.StringName,
                                                                                  '',
                                                                                  Item.Referenced,
                                                                                  Item.Token,
                                                                                  OtherLang))
                    self.OrderedStringDict[LangName][Item.StringName] = len(self.OrderedStringList[LangName]) - 1
        return True
-
- #
- # Get String name and value
- #
    def GetStringObject(self, Item):
        """Parse one '#string NAME #language CODE "value"...' entry.

        Validates that the token name is upper case, then registers the
        value for every '#language' segment found in Item.

        @param Item: the merged #string entry text
        """
        Language = ''
        Value = ''

        Name = Item.split()[1]
        # Check the string name is the upper character
        if Name != '':
            MatchString = re.match('[A-Z0-9_]+', Name, re.UNICODE)
            # The match must consume the entire name (no lowercase suffix).
            if MatchString == None or MatchString.end(0) != len(Name):
                EdkLogger.Error("Unicode File Parser",
                                ToolError.FORMAT_INVALID,
                                'The string token name %s in UNI file %s must be upper case character.' %(Name, self.File))
        LanguageList = Item.split(u'#language ')
        for IndexI in range(len(LanguageList)):
            if IndexI == 0:
                # Segment 0 is the '#string NAME' prefix, not a language entry.
                continue
            else:
                Language = LanguageList[IndexI].split()[0]
                #.replace(u'\r\n', u'')
                # Value is everything between the first and the last quote.
                Value = \
                LanguageList[IndexI][LanguageList[IndexI].find(u'\"') + len(u'\"') : LanguageList[IndexI].rfind(u'\"')]
                Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
                self.AddStringToList(Name, Language, Value)
-
- #
- # Get include file list and load them
- #
- def GetIncludeFile(self, Item, Dir = None):
- if Dir:
- pass
- FileName = Item[Item.find(u'!include ') + len(u'!include ') :Item.find(u' ', len(u'!include '))][1:-1]
- self.LoadUniFile(FileName)
-
- #
- # Pre-process before parse .uni file
- #
    def PreProcess(self, File, IsIncludeFile=False):
        """Normalize a .uni file into a list of logical lines.

        Reads the file (UTF-8 first, then UTF-16 fallbacks), captures the
        leading '##'-style file header into self.UniFileHeader, strips
        comments, translates escape sequences, validates quoting and
        line-feed rules, splits combined '#string/#language/"..."' lines
        into separate logical lines, checks token-name formats and the
        Abstract/Description entry order, and returns the resulting lines.

        @param File: a PathClass-like object with .Path/.Dir
        @param IsIncludeFile: True when called recursively for an include

        @return list of normalized logical lines
        """
        if not os.path.exists(File.Path) or not os.path.isfile(File.Path):
            EdkLogger.Error("Unicode File Parser",
                            ToolError.FILE_NOT_FOUND,
                            ExtraData=File.Path)

        #
        # Check file header of the Uni file
        #
#        if not CheckUTF16FileHeader(File.Path):
#            EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
#                ExtraData='The file %s is either invalid UTF-16LE or it is missing the BOM.' % File.Path)

        # NOTE(review): Python-2-only 'except E, v' syntax; the second
        # UnicodeError clause is unreachable (the first already matches,
        # and errors raised inside a handler skip later clauses).
        try:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf_8').readlines()
        except UnicodeError, Xstr:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16').readlines()
        except UnicodeError:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
        except:
            EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=File.Path)


        #
        # get the file header
        #
        Lines = []
        HeaderStart = False
        HeaderEnd = False
        if not self.UniFileHeader:
            FirstGenHeader = True
        else:
            FirstGenHeader = False
        for Line in FileIn:
            Line = Line.strip()
            if Line == u'':
                continue
            if Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and (Line.find(DT.TAB_HEADER_COMMENT) > -1) \
                and not HeaderEnd and not HeaderStart:
                HeaderStart = True
            if not Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and HeaderStart and not HeaderEnd:
                HeaderEnd = True
            # Only the first file's header is captured (FirstGenHeader).
            if Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and HeaderStart and not HeaderEnd and FirstGenHeader:
                self.UniFileHeader += Line + '\r\n'
                continue

        #
        # Use unique identifier
        #
        FindFlag = -1
        LineCount = 0
        MultiLineFeedExits = False
        #
        # 0: initial value
        # 1: signle String entry exist
        # 2: line feed exist under the some signle String entry
        #
        StringEntryExistsFlag = 0
        # NOTE(review): the loop variable of this 'for' is immediately
        # overwritten from FileIn[LineCount]; iteration is really driven by
        # LineCount so the body can splice FileIn in place below.
        for Line in FileIn:
            Line = FileIn[LineCount]
            LineCount += 1
            Line = Line.strip()
            #
            # Ignore comment line and empty line
            #
            if Line == u'' or Line.startswith(u'//'):
                #
                # Change the single line String entry flag status
                #
                if StringEntryExistsFlag == 1:
                    StringEntryExistsFlag = 2
                #
                # If the '#string' line and the '#language' line are not in the same line,
                # there should be only one line feed character betwwen them
                #
                if MultiLineFeedExits:
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
                continue

            MultiLineFeedExits = False
            #
            # Process comment embeded in string define lines
            #
            FindFlag = Line.find(u'//')
            if FindFlag != -1 and Line.find(u'//') < Line.find(u'"'):
                Line = Line.replace(Line[FindFlag:], u' ')
                # If the next line is the '#language' part, merge it into this
                # line and shift the remainder of FileIn up by one entry.
                if FileIn[LineCount].strip().startswith('#language'):
                    Line = Line + FileIn[LineCount]
                    FileIn[LineCount-1] = Line
                    FileIn[LineCount] = '\r\n'
                    LineCount -= 1
                    for Index in xrange (LineCount + 1, len (FileIn) - 1):
                        if (Index == len(FileIn) -1):
                            FileIn[Index] = '\r\n'
                        else:
                            FileIn[Index] = FileIn[Index + 1]
                    continue
            # '//' appearing outside a quoted string is a trailing comment;
            # a lone '/' is rejected.
            CommIndex = GetCharIndexOutStr(u'/', Line)
            if CommIndex > -1:
                if (len(Line) - 1) > CommIndex:
                    if Line[CommIndex+1] == u'/':
                        Line = Line[:CommIndex].strip()
                    else:
                        EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
                else:
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)

            # Translate \wide / \narrow / \nbr markers to their private-use
            # code points.
            Line = Line.replace(UNICODE_WIDE_CHAR, WIDE_CHAR)
            Line = Line.replace(UNICODE_NARROW_CHAR, NARROW_CHAR)
            Line = Line.replace(UNICODE_NON_BREAKING_CHAR, NON_BREAKING_CHAR)

            # Escape handling: \\ is parked on U+0006 so that \r/\n/\t
            # substitutions cannot eat the backslash, then restored last.
            Line = Line.replace(u'\\\\', u'\u0006')
            Line = Line.replace(u'\\r\\n', CR + LF)
            Line = Line.replace(u'\\n', CR + LF)
            Line = Line.replace(u'\\r', CR)
            Line = Line.replace(u'\\t', u'\t')
            Line = Line.replace(u'''\"''', u'''"''')
            Line = Line.replace(u'\t', u' ')
            Line = Line.replace(u'\u0006', u'\\')

            # NOTE(review): include expansion is disabled — the findall call
            # is commented out, so IncList is always empty and the branch
            # below is dead code.
            # IncList = gINCLUDE_PATTERN.findall(Line)
            IncList = []
            if len(IncList) == 1:
                for Dir in [File.Dir] + self.IncludePathList:
                    IncFile = PathClass(str(IncList[0]), Dir)
                    self.IncFileList.append(IncFile)
                    if os.path.isfile(IncFile.Path):
                        Lines.extend(self.PreProcess(IncFile, True))
                        break
                else:
                    EdkLogger.Error("Unicode File Parser",
                                    ToolError.FILE_NOT_FOUND,
                                    Message="Cannot find include file",
                                    ExtraData=str(IncList[0]))
                continue

            #
            # Check if single line has correct '"'
            #
            if Line.startswith(u'#string') and Line.find(u'#language') > -1 and Line.find('"') > Line.find(u'#language'):
                if not Line.endswith('"'):
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
                                    ExtraData='''The line %s misses '"' at the end of it in file %s'''
                                    % (LineCount, File.Path))

            #
            # Between Name entry and Language entry can not contain line feed
            #
            if Line.startswith(u'#string') and Line.find(u'#language') == -1:
                MultiLineFeedExits = True

            if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.find(u'"') < 0:
                MultiLineFeedExits = True

            #
            # Between Language entry and String entry can not contain line feed
            #
            if Line.startswith(u'#language') and len(Line.split()) == 2:
                MultiLineFeedExits = True

            #
            # Between two String entry, can not contain line feed
            #
            if Line.startswith(u'"'):
                if StringEntryExistsFlag == 2:
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
                                    Message=ST.ERR_UNIPARSE_LINEFEED_UP_EXIST % Line, ExtraData=File.Path)

                StringEntryExistsFlag = 1
                if not Line.endswith('"'):
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
                                    ExtraData='''The line %s misses '"' at the end of it in file %s'''
                                    % (LineCount, File.Path))
            elif Line.startswith(u'#language'):
                if StringEntryExistsFlag == 2:
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
                                    Message=ST.ERR_UNI_MISS_STRING_ENTRY % Line, ExtraData=File.Path)
                StringEntryExistsFlag = 0
            else:
                StringEntryExistsFlag = 0

            Lines.append(Line)

        #
        # Convert string def format as below
        #
        #     #string MY_STRING_1
        #     #language eng
        #     "My first English string line 1"
        #     "My first English string line 2"
        #     #string MY_STRING_1
        #     #language spa
        #     "Mi segunda secuencia 1"
        #     "Mi segunda secuencia 2"
        #

        if not IsIncludeFile and not Lines:
            EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                            Message=ST.ERR_UNIPARSE_NO_SECTION_EXIST, \
                            ExtraData=File.Path)

        NewLines = []
        StrName = u''
        ExistStrNameList = []
        for Line in Lines:
            # Token name format check is applied to the PREVIOUS #string
            # entry (StrName still holds it at this point).
            if StrName and not StrName.split()[1].startswith(DT.TAB_STR_TOKENCNAME + DT.TAB_UNDERLINE_SPLIT):
                EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                                Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
                                ExtraData=File.Path)

            # STR_<x>_<y>_<PROMPT|HELP|ERR...> suffixes must be upper case.
            if StrName and len(StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)) == 4:
                StringTokenList = StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)
                if (StringTokenList[3].upper() in [DT.TAB_STR_TOKENPROMPT, DT.TAB_STR_TOKENHELP] and \
                    StringTokenList[3] not in [DT.TAB_STR_TOKENPROMPT, DT.TAB_STR_TOKENHELP]) or \
                    (StringTokenList[2].upper() == DT.TAB_STR_TOKENERR and StringTokenList[2] != DT.TAB_STR_TOKENERR):
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                                    Message=ST.ERR_UNIPARSE_STRTOKEN_FORMAT_ERROR % StrName.split()[1], \
                                    ExtraData=File.Path)

            if Line.count(u'#language') > 1:
                EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                                Message=ST.ERR_UNIPARSE_SEP_LANGENTRY_LINE % Line, \
                                ExtraData=File.Path)

            if Line.startswith(u'//'):
                continue
            elif Line.startswith(u'#langdef'):
                if len(Line.split()) == 2:
                    NewLines.append(Line)
                    continue
                elif len(Line.split()) > 2 and Line.find(u'"') > 0:
                    # Split '#langdef code "Name"' into two logical lines.
                    NewLines.append(Line[:Line.find(u'"')].strip())
                    NewLines.append(Line[Line.find(u'"'):])
                else:
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
            elif Line.startswith(u'#string'):
                if len(Line.split()) == 2:
                    StrName = Line
                    if StrName:
                        if StrName.split()[1] not in ExistStrNameList:
                            ExistStrNameList.append(StrName.split()[1].strip())
                        elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
                                                    DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
                                                    DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
                                                    DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
                            EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                                            Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
                                            ExtraData=File.Path)
                    continue
                elif len(Line.split()) == 4 and Line.find(u'#language') > 0:
                    # '#string NAME #language code' on one line (no value).
                    if Line[Line.find(u'#language')-1] != ' ' or \
                        Line[Line.find(u'#language')+len(u'#language')] != u' ':
                        EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)

                    if Line.find(u'"') > 0:
                        EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)

                    StrName = Line.split()[0] + u' ' + Line.split()[1]
                    if StrName:
                        if StrName.split()[1] not in ExistStrNameList:
                            ExistStrNameList.append(StrName.split()[1].strip())
                        elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
                                                    DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
                                                    DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
                                                    DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
                            EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                                            Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
                                            ExtraData=File.Path)
                    if IsIncludeFile:
                        if StrName not in NewLines:
                            NewLines.append((Line[:Line.find(u'#language')]).strip())
                    else:
                        NewLines.append((Line[:Line.find(u'#language')]).strip())
                    NewLines.append((Line[Line.find(u'#language'):]).strip())
                elif len(Line.split()) > 4 and Line.find(u'#language') > 0 and Line.find(u'"') > 0:
                    # Full '#string NAME #language code "value"' on one line.
                    if Line[Line.find(u'#language')-1] != u' ' or \
                        Line[Line.find(u'#language')+len(u'#language')] != u' ':
                        EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)

                    if Line[Line.find(u'"')-1] != u' ':
                        EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)

                    StrName = Line.split()[0] + u' ' + Line.split()[1]
                    if StrName:
                        if StrName.split()[1] not in ExistStrNameList:
                            ExistStrNameList.append(StrName.split()[1].strip())
                        elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
                                                    DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
                                                    DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
                                                    DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
                            EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                                            Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
                                            ExtraData=File.Path)
                    if IsIncludeFile:
                        if StrName not in NewLines:
                            NewLines.append((Line[:Line.find(u'#language')]).strip())
                    else:
                        NewLines.append((Line[:Line.find(u'#language')]).strip())
                    NewLines.append((Line[Line.find(u'#language'):Line.find(u'"')]).strip())
                    NewLines.append((Line[Line.find(u'"'):]).strip())
                else:
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
            elif Line.startswith(u'#language'):
                if len(Line.split()) == 2:
                    if IsIncludeFile:
                        if StrName not in NewLines:
                            NewLines.append(StrName)
                    else:
                        NewLines.append(StrName)
                    NewLines.append(Line)
                elif len(Line.split()) > 2 and Line.find(u'"') > 0:
                    if IsIncludeFile:
                        if StrName not in NewLines:
                            NewLines.append(StrName)
                    else:
                        NewLines.append(StrName)
                    NewLines.append((Line[:Line.find(u'"')]).strip())
                    NewLines.append((Line[Line.find(u'"'):]).strip())
                else:
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
            elif Line.startswith(u'"'):
                if u'#string' in Line or u'#language' in Line:
                    EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
                NewLines.append(Line)
            else:
                # Python-2 print statement (debug aid retained from original).
                print Line
                EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)

        if StrName and not StrName.split()[1].startswith(u'STR_'):
            EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                            Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
                            ExtraData=File.Path)

        if StrName and not NewLines:
            EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                            Message=ST.ERR_UNI_MISS_LANGENTRY % StrName, \
                            ExtraData=File.Path)

        #
        # Check Abstract, Description, BinaryAbstract and BinaryDescription order,
        # should be Abstract, Description, BinaryAbstract, BinaryDesctiption
        AbstractPosition = -1
        DescriptionPosition = -1
        BinaryAbstractPosition = -1
        BinaryDescriptionPosition = -1
        for StrName in ExistStrNameList:
            if DT.TAB_HEADER_ABSTRACT.upper() in StrName:
                if 'BINARY' in StrName:
                    BinaryAbstractPosition = ExistStrNameList.index(StrName)
                else:
                    AbstractPosition = ExistStrNameList.index(StrName)
            if DT.TAB_HEADER_DESCRIPTION.upper() in StrName:
                if 'BINARY' in StrName:
                    BinaryDescriptionPosition = ExistStrNameList.index(StrName)
                else:
                    DescriptionPosition = ExistStrNameList.index(StrName)

        OrderList = sorted([AbstractPosition, DescriptionPosition])
        BinaryOrderList = sorted([BinaryAbstractPosition, BinaryDescriptionPosition])
        Min = OrderList[0]
        Max = OrderList[1]
        BinaryMin = BinaryOrderList[0]
        BinaryMax = BinaryOrderList[1]
        if BinaryDescriptionPosition > -1:
            if not(BinaryDescriptionPosition == BinaryMax and BinaryAbstractPosition == BinaryMin and \
                   BinaryMax > Max):
                EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                                Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
                                ExtraData=File.Path)
        elif BinaryAbstractPosition > -1:
            if not(BinaryAbstractPosition > Max):
                EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                                Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
                                ExtraData=File.Path)

        if DescriptionPosition > -1:
            if not(DescriptionPosition == Max and AbstractPosition == Min and \
                   DescriptionPosition > AbstractPosition):
                EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
                                Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
                                ExtraData=File.Path)

        if not self.UniFileHeader:
            EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
                            Message = ST.ERR_NO_SOURCE_HEADER,
                            ExtraData=File.Path)

        return NewLines
-
- #
- # Load a .uni file
- #
- def LoadUniFile(self, File = None):
- if File == None:
- EdkLogger.Error("Unicode File Parser",
- ToolError.PARSER_ERROR,
- Message='No unicode file is given',
- ExtraData=File.Path)
-
- self.File = File
-
- #
- # Process special char in file
- #
- Lines = self.PreProcess(File)
-
- #
- # Get Unicode Information
- #
- for IndexI in range(len(Lines)):
- Line = Lines[IndexI]
- if (IndexI + 1) < len(Lines):
- SecondLine = Lines[IndexI + 1]
- if (IndexI + 2) < len(Lines):
- ThirdLine = Lines[IndexI + 2]
-
- #
- # Get Language def information
- #
- if Line.find(u'#langdef ') >= 0:
- self.GetLangDef(File, Line + u' ' + SecondLine)
- continue
-
- Name = ''
- Language = ''
- Value = ''
- CombineToken = False
- #
- # Get string def information format as below
- #
- # #string MY_STRING_1
- # #language eng
- # "My first English string line 1"
- # "My first English string line 2"
- # #string MY_STRING_1
- # #language spa
- # "Mi segunda secuencia 1"
- # "Mi segunda secuencia 2"
- #
- if Line.find(u'#string ') >= 0 and Line.find(u'#language ') < 0 and \
- SecondLine.find(u'#string ') < 0 and SecondLine.find(u'#language ') >= 0 and \
- ThirdLine.find(u'#string ') < 0 and ThirdLine.find(u'#language ') < 0:
- if Line.find('"') > 0 or SecondLine.find('"') > 0:
- EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
- Message=ST.ERR_UNIPARSE_DBLQUOTE_UNMATCHED,
- ExtraData=File.Path)
-
- Name = Line[Line.find(u'#string ') + len(u'#string ') : ].strip(' ')
- Language = SecondLine[SecondLine.find(u'#language ') + len(u'#language ') : ].strip(' ')
- for IndexJ in range(IndexI + 2, len(Lines)):
- if Lines[IndexJ].find(u'#string ') < 0 and Lines[IndexJ].find(u'#language ') < 0 and \
- Lines[IndexJ].strip().startswith(u'"') and Lines[IndexJ].strip().endswith(u'"'):
- if Lines[IndexJ][-2] == ' ':
- CombineToken = True
- if CombineToken:
- if Lines[IndexJ].strip()[1:-1].strip():
- Value = Value + Lines[IndexJ].strip()[1:-1].rstrip() + ' '
- else:
- Value = Value + Lines[IndexJ].strip()[1:-1]
- CombineToken = False
- else:
- Value = Value + Lines[IndexJ].strip()[1:-1] + '\r\n'
- else:
- IndexI = IndexJ
- break
- if Value.endswith('\r\n'):
- Value = Value[: Value.rfind('\r\n')]
- Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
- self.AddStringToList(Name, Language, Value)
- continue
-
- #
- # Load multiple .uni files
- #
- def LoadUniFiles(self, FileList):
- if len(FileList) > 0:
- for File in FileList:
- FilePath = File.Path.strip()
- if FilePath.endswith('.uni') or FilePath.endswith('.UNI') or FilePath.endswith('.Uni'):
- self.LoadUniFile(File)
-
- #
- # Add a string to list
- #
    def AddStringToList(self, Name, Language, Value, Token = 0, Referenced = False, UseOtherLangDef = '', Index = -1):
        """Add (or update) string Name for Language.

        A new name is appended (or inserted at Index) and mirrored into
        every other declared language with an empty value, so the token
        numbering stays identical across languages.  An existing name has
        its value appended via UpdateValue instead.

        @param Name: string token name
        @param Language: language identifier the value belongs to
        @param Value: string value (None leaves an existing entry untouched)
        @param Token: requested token (overwritten on add — see below)
        @param Referenced: initial referenced flag
        @param UseOtherLangDef: language to fall back to for the mirror copies
        @param Index: insert position, or -1 to append
        """
        # NOTE(review): this loop only advances to the matching language
        # entry; neither LangNameItem nor the loop result is used afterwards.
        for LangNameItem in self.LanguageDef:
            if Language == LangNameItem[0]:
                break

        if Language not in self.OrderedStringList:
            self.OrderedStringList[Language] = []
            self.OrderedStringDict[Language] = {}

        IsAdded = True
        if Name in self.OrderedStringDict[Language]:
            # Existing name: append the new value to it instead of adding.
            IsAdded = False
            if Value != None:
                ItemIndexInList = self.OrderedStringDict[Language][Name]
                Item = self.OrderedStringList[Language][ItemIndexInList]
                Item.UpdateValue(Value)
                Item.UseOtherLangDef = ''

        if IsAdded:
            # The caller-supplied Token is replaced by the next free slot.
            Token = len(self.OrderedStringList[Language])
            if Index == -1:
                self.OrderedStringList[Language].append(StringDefClassObject(Name,
                                                                             Value,
                                                                             Referenced,
                                                                             Token,
                                                                             UseOtherLangDef))
                self.OrderedStringDict[Language][Name] = Token
                for LangName in self.LanguageDef:
                    #
                    # New STRING token will be added into all language string lists.
                    # so that the unique STRING identifier is reserved for all languages in the package list.
                    #
                    if LangName[0] != Language:
                        if UseOtherLangDef != '':
                            OtherLangDef = UseOtherLangDef
                        else:
                            OtherLangDef = Language
                        self.OrderedStringList[LangName[0]].append(StringDefClassObject(Name,
                                                                                        '',
                                                                                        Referenced,
                                                                                        Token,
                                                                                        OtherLangDef))
                        self.OrderedStringDict[LangName[0]][Name] = len(self.OrderedStringList[LangName[0]]) - 1
            else:
                self.OrderedStringList[Language].insert(Index, StringDefClassObject(Name,
                                                                                    Value,
                                                                                    Referenced,
                                                                                    Token,
                                                                                    UseOtherLangDef))
                self.OrderedStringDict[Language][Name] = Index
-
- #
- # Set the string as referenced
- #
- def SetStringReferenced(self, Name):
- #
- # String stoken are added in the same order in all language string lists.
- # So, only update the status of string stoken in first language string list.
- #
- Lang = self.LanguageDef[0][0]
- if Name in self.OrderedStringDict[Lang]:
- ItemIndexInList = self.OrderedStringDict[Lang][Name]
- Item = self.OrderedStringList[Lang][ItemIndexInList]
- Item.Referenced = True
-
- #
- # Search the string in language definition by Name
- #
- def FindStringValue(self, Name, Lang):
- if Name in self.OrderedStringDict[Lang]:
- ItemIndexInList = self.OrderedStringDict[Lang][Name]
- return self.OrderedStringList[Lang][ItemIndexInList]
-
- return None
-
- #
- # Search the string in language definition by Token
- #
- def FindByToken(self, Token, Lang):
- for Item in self.OrderedStringList[Lang]:
- if Item.Token == Token:
- return Item
-
- return None
-
- #
- # Re-order strings and re-generate tokens
- #
- def ReToken(self):
- if len(self.LanguageDef) == 0:
- return None
- #
- # Retoken all language strings according to the status of string stoken in the first language string.
- #
- FirstLangName = self.LanguageDef[0][0]
-
- # Convert the OrderedStringList to be OrderedStringListByToken in order to faciliate future search by token
- for LangNameItem in self.LanguageDef:
- self.OrderedStringListByToken[LangNameItem[0]] = {}
-
- #
- # Use small token for all referred string stoken.
- #
- RefToken = 0
- for Index in range (0, len (self.OrderedStringList[FirstLangName])):
- FirstLangItem = self.OrderedStringList[FirstLangName][Index]
- if FirstLangItem.Referenced == True:
- for LangNameItem in self.LanguageDef:
- LangName = LangNameItem[0]
- OtherLangItem = self.OrderedStringList[LangName][Index]
- OtherLangItem.Referenced = True
- OtherLangItem.Token = RefToken
- self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
- RefToken = RefToken + 1
-
- #
- # Use big token for all unreferred string stoken.
- #
- UnRefToken = 0
- for Index in range (0, len (self.OrderedStringList[FirstLangName])):
- FirstLangItem = self.OrderedStringList[FirstLangName][Index]
- if FirstLangItem.Referenced == False:
- for LangNameItem in self.LanguageDef:
- LangName = LangNameItem[0]
- OtherLangItem = self.OrderedStringList[LangName][Index]
- OtherLangItem.Token = RefToken + UnRefToken
- self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
- UnRefToken = UnRefToken + 1
-
- #
- # Show the instance itself
- #
- def ShowMe(self):
- print self.LanguageDef
- #print self.OrderedStringList
- for Item in self.OrderedStringList:
- print Item
- for Member in self.OrderedStringList[Item]:
- print str(Member)
-
- #
- # Read content from '!include' UNI file
- #
- def ReadIncludeUNIfile(self, FilaPath):
- if self.File:
- pass
-
- if not os.path.exists(FilaPath) or not os.path.isfile(FilaPath):
- EdkLogger.Error("Unicode File Parser",
- ToolError.FILE_NOT_FOUND,
- ExtraData=FilaPath)
- try:
- FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_8').readlines()
- except UnicodeError, Xstr:
- FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_16').readlines()
- except UnicodeError:
- FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_16_le').readlines()
- except:
- EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=FilaPath)
- return FileIn
-
diff --git a/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py b/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py
deleted file mode 100644
index d7614b8849..0000000000
--- a/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py
+++ /dev/null
@@ -1,229 +0,0 @@
-## @file
-# This is an XML API that uses a syntax similar to XPath, but it is written in
-# standard python so that no extra python packages are required to use it.
-#
-# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-'''
-XmlRoutines
-'''
-
-##
-# Import Modules
-#
-import xml.dom.minidom
-import re
-import codecs
-from Logger.ToolError import PARSER_ERROR
-import Logger.Log as Logger
-
-## Create a element of XML
-#
-# @param Name
-# @param String
-# @param NodeList
-# @param AttributeList
-#
-def CreateXmlElement(Name, String, NodeList, AttributeList):
- Doc = xml.dom.minidom.Document()
- Element = Doc.createElement(Name)
- if String != '' and String != None:
- Element.appendChild(Doc.createTextNode(String))
-
- for Item in NodeList:
- if type(Item) == type([]):
- Key = Item[0]
- Value = Item[1]
- if Key != '' and Key != None and Value != '' and Value != None:
- Node = Doc.createElement(Key)
- Node.appendChild(Doc.createTextNode(Value))
- Element.appendChild(Node)
- else:
- Element.appendChild(Item)
- for Item in AttributeList:
- Key = Item[0]
- Value = Item[1]
- if Key != '' and Key != None and Value != '' and Value != None:
- Element.setAttribute(Key, Value)
-
- return Element
-
-## Get a list of XML nodes using XPath style syntax.
-#
-# Return a list of XML DOM nodes from the root Dom specified by XPath String.
-# If the input Dom or String is not valid, then an empty list is returned.
-#
-# @param Dom The root XML DOM node.
-# @param String A XPath style path.
-#
-def XmlList(Dom, String):
- if String == None or String == "" or Dom == None or Dom == "":
- return []
- if Dom.nodeType == Dom.DOCUMENT_NODE:
- Dom = Dom.documentElement
- if String[0] == "/":
- String = String[1:]
- TagList = String.split('/')
- Nodes = [Dom]
- Index = 0
- End = len(TagList) - 1
- while Index <= End:
- ChildNodes = []
- for Node in Nodes:
- if Node.nodeType == Node.ELEMENT_NODE and Node.tagName == \
- TagList[Index]:
- if Index < End:
- ChildNodes.extend(Node.childNodes)
- else:
- ChildNodes.append(Node)
- Nodes = ChildNodes
- ChildNodes = []
- Index += 1
-
- return Nodes
-
-
-## Get a single XML node using XPath style syntax.
-#
-# Return a single XML DOM node from the root Dom specified by XPath String.
-# If the input Dom or String is not valid, then an empty string is returned.
-#
-# @param Dom The root XML DOM node.
-# @param String A XPath style path.
-#
-def XmlNode(Dom, String):
- if String == None or String == "" or Dom == None or Dom == "":
- return None
- if Dom.nodeType == Dom.DOCUMENT_NODE:
- Dom = Dom.documentElement
- if String[0] == "/":
- String = String[1:]
- TagList = String.split('/')
- Index = 0
- End = len(TagList) - 1
- ChildNodes = [Dom]
- while Index <= End:
- for Node in ChildNodes:
- if Node.nodeType == Node.ELEMENT_NODE and \
- Node.tagName == TagList[Index]:
- if Index < End:
- ChildNodes = Node.childNodes
- else:
- return Node
- break
- Index += 1
- return None
-
-
-## Get a single XML element using XPath style syntax.
-#
-# Return a single XML element from the root Dom specified by XPath String.
-# If the input Dom or String is not valid, then an empty string is returned.
-#
-# @param Dom The root XML DOM object.
-# @param Strin A XPath style path.
-#
-def XmlElement(Dom, String):
- try:
- return XmlNode(Dom, String).firstChild.data.strip()
- except BaseException:
- return ""
-
-## Get a single XML element using XPath style syntax.
-#
-# Similar with XmlElement, but do not strip all the leading and tailing space
-# and newline, instead just remove the newline and spaces introduced by
-# toprettyxml()
-#
-# @param Dom The root XML DOM object.
-# @param Strin A XPath style path.
-#
-def XmlElement2(Dom, String):
- try:
- HelpStr = XmlNode(Dom, String).firstChild.data
- gRemovePrettyRe = re.compile(r"""(?:(\n *) )(.*)\1""", re.DOTALL)
- HelpStr = re.sub(gRemovePrettyRe, r"\2", HelpStr)
- return HelpStr
- except BaseException:
- return ""
-
-
-## Get a single XML element of the current node.
-#
-# Return a single XML element specified by the current root Dom.
-# If the input Dom is not valid, then an empty string is returned.
-#
-# @param Dom The root XML DOM object.
-#
-def XmlElementData(Dom):
- try:
- return Dom.firstChild.data.strip()
- except BaseException:
- return ""
-
-
-## Get a list of XML elements using XPath style syntax.
-#
-# Return a list of XML elements from the root Dom specified by XPath String.
-# If the input Dom or String is not valid, then an empty list is returned.
-#
-# @param Dom The root XML DOM object.
-# @param String A XPath style path.
-#
-def XmlElementList(Dom, String):
- return map(XmlElementData, XmlList(Dom, String))
-
-
-## Get the XML attribute of the current node.
-#
-# Return a single XML attribute named Attribute from the current root Dom.
-# If the input Dom or Attribute is not valid, then an empty string is returned.
-#
-# @param Dom The root XML DOM object.
-# @param Attribute The name of Attribute.
-#
-def XmlAttribute(Dom, Attribute):
- try:
- return Dom.getAttribute(Attribute)
- except BaseException:
- return ''
-
-
-## Get the XML node name of the current node.
-#
-# Return a single XML node name from the current root Dom.
-# If the input Dom is not valid, then an empty string is returned.
-#
-# @param Dom The root XML DOM object.
-#
-def XmlNodeName(Dom):
- try:
- return Dom.nodeName.strip()
- except BaseException:
- return ''
-
-## Parse an XML file.
-#
-# Parse the input XML file named FileName and return a XML DOM it stands for.
-# If the input File is not a valid XML file, then an empty string is returned.
-#
-# @param FileName The XML file name.
-#
-def XmlParseFile(FileName):
- try:
- XmlFile = codecs.open(FileName, 'rb')
- Dom = xml.dom.minidom.parse(XmlFile)
- XmlFile.close()
- return Dom
- except BaseException, XExcept:
- XmlFile.close()
- Logger.Error('\nUPT', PARSER_ERROR, XExcept, File=FileName, RaiseError=True)
diff --git a/BaseTools/Source/Python/UPT/Library/Xml/__init__.py b/BaseTools/Source/Python/UPT/Library/Xml/__init__.py
deleted file mode 100644
index f09eece5fb..0000000000
--- a/BaseTools/Source/Python/UPT/Library/Xml/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-## @file
-# Python 'Library' package initialization file.
-#
-# This file is required to make Python interpreter treat the directory
-# as containing package.
-#
-# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-'''
-Xml
-''' \ No newline at end of file
diff --git a/BaseTools/Source/Python/UPT/Library/__init__.py b/BaseTools/Source/Python/UPT/Library/__init__.py
deleted file mode 100644
index 6a98cd80a3..0000000000
--- a/BaseTools/Source/Python/UPT/Library/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-## @file
-# Python 'Library' package initialization file.
-#
-# This file is required to make Python interpreter treat the directory
-# as containing package.
-#
-# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-'''
-Library
-''' \ No newline at end of file