Diffstat (limited to 'BaseTools/Source/Python')
-rw-r--r--   BaseTools/Source/Python/AutoGen/AutoGen.py                 177
-rw-r--r--   BaseTools/Source/Python/AutoGen/GenMake.py                   9
-rw-r--r--   BaseTools/Source/Python/AutoGen/StrGather.py                32
-rw-r--r--   BaseTools/Source/Python/AutoGen/UniClassObject.py           44
-rw-r--r--   BaseTools/Source/Python/Common/BuildVersion.py               2
-rw-r--r--   BaseTools/Source/Python/Common/Misc.py                      21
-rw-r--r--   BaseTools/Source/Python/Ecc/c.py                           122
-rw-r--r--   BaseTools/Source/Python/GenFds/FdfParser.py                 53
-rw-r--r--   BaseTools/Source/Python/GenFds/FfsFileStatement.py           6
-rw-r--r--   BaseTools/Source/Python/GenFds/FfsInfStatement.py            6
-rw-r--r--   BaseTools/Source/Python/GenFds/Fv.py                         7
-rw-r--r--   BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py       8
-rw-r--r--   BaseTools/Source/Python/GenFds/OptRomInfStatement.py         4
-rw-r--r--   BaseTools/Source/Python/Workspace/MetaFileParser.py         35
-rw-r--r--   BaseTools/Source/Python/Workspace/WorkspaceDatabase.py      17
15 files changed, 401 insertions, 142 deletions
diff --git a/BaseTools/Source/Python/AutoGen/AutoGen.py b/BaseTools/Source/Python/AutoGen/AutoGen.py
index 55a6a81ccc..8150ea0b69 100644
--- a/BaseTools/Source/Python/AutoGen/AutoGen.py
+++ b/BaseTools/Source/Python/AutoGen/AutoGen.py
@@ -155,7 +155,7 @@ class AutoGen(object):
class WorkspaceAutoGen(AutoGen):
## Real constructor of WorkspaceAutoGen
#
- # This method behaves the same as __init__ except that it needs explict invoke
+ # This method behaves the same as __init__ except that it needs explicit invoke
# (in super class's __new__ method)
#
# @param WorkspaceDir Root directory of workspace
@@ -246,6 +246,9 @@ class WorkspaceAutoGen(AutoGen):
#
self._CheckPcdDefineAndType()
+ if self.FdfFile:
+ self._CheckDuplicateInFV(Fdf)
+
self._BuildDir = None
self._FvDir = None
self._MakeFileDir = None
@@ -253,6 +256,130 @@ class WorkspaceAutoGen(AutoGen):
return True
+ ## _CheckDuplicateInFV() method
+ #
+ # Check whether duplicate modules/files exist in an FV section.
+ # The check is based on the file GUID.
+ #
+ def _CheckDuplicateInFV(self, Fdf):
+ for Fv in Fdf.Profile.FvDict:
+ _GuidDict = {}
+ for FfsFile in Fdf.Profile.FvDict[Fv].FfsList:
+ if FfsFile.InfFileName and FfsFile.NameGuid == None:
+ #
+ # Get INF file GUID
+ #
+ InfFoundFlag = False
+ for Pa in self.AutoGenObjectList:
+ for Module in Pa.ModuleAutoGenList:
+ if path.normpath(Module.MetaFile.File) == path.normpath(FfsFile.InfFileName):
+ InfFoundFlag = True
+ if not Module.Guid.upper() in _GuidDict.keys():
+ _GuidDict[Module.Guid.upper()] = FfsFile
+ else:
+ EdkLogger.error("build",
+ FORMAT_INVALID,
+ "Duplicate GUID found for these lines: Line %d: %s and Line %d: %s. GUID: %s"%(FfsFile.CurrentLineNum,
+ FfsFile.CurrentLineContent,
+ _GuidDict[Module.Guid.upper()].CurrentLineNum,
+ _GuidDict[Module.Guid.upper()].CurrentLineContent,
+ Module.Guid.upper()),
+ ExtraData=self.FdfFile)
+ #
+ # Some INF files do not have an entry in the DSC file.
+ #
+ if not InfFoundFlag:
+ if FfsFile.InfFileName.find('$') == -1:
+ InfPath = NormPath(FfsFile.InfFileName)
+ if not os.path.exists(InfPath):
+ EdkLogger.error('build', GENFDS_ERROR, "Non-existant Module %s !" % (FfsFile.InfFileName))
+
+ PathClassObj = PathClass(FfsFile.InfFileName, self.WorkspaceDir)
+ #
+ # Here we just need to get the FILE_GUID from the INF file, so use 'COMMON' as the ARCH attribute;
+ # using the BuildObject from any one of AutoGenObjectList is enough.
+ #
+ InfObj = self.AutoGenObjectList[0].BuildDatabase.WorkspaceDb.BuildObject[PathClassObj, 'COMMON', self.BuildTarget, self.ToolChain]
+ if not InfObj.Guid.upper() in _GuidDict.keys():
+ _GuidDict[InfObj.Guid.upper()] = FfsFile
+ else:
+ EdkLogger.error("build",
+ FORMAT_INVALID,
+ "Duplicate GUID found for these lines: Line %d: %s and Line %d: %s. GUID: %s"%(FfsFile.CurrentLineNum,
+ FfsFile.CurrentLineContent,
+ _GuidDict[InfObj.Guid.upper()].CurrentLineNum,
+ _GuidDict[InfObj.Guid.upper()].CurrentLineContent,
+ InfObj.Guid.upper()),
+ ExtraData=self.FdfFile)
+ InfFoundFlag = False
+
+ if FfsFile.NameGuid != None:
+ _CheckPCDAsGuidPattern = re.compile("^PCD\(.+\..+\)$")
+
+ #
+ # If the NameGuid references a PCD name,
+ # the style must match: PCD(xxxx.yyy)
+ #
+ if _CheckPCDAsGuidPattern.match(FfsFile.NameGuid):
+ #
+ # Replace the PCD value.
+ #
+ _PcdName = FfsFile.NameGuid.lstrip("PCD(").rstrip(")")
+ PcdFoundFlag = False
+ for Pa in self.AutoGenObjectList:
+ if not PcdFoundFlag:
+ for PcdItem in Pa.AllPcdList:
+ if (PcdItem.TokenSpaceGuidCName + "." + PcdItem.TokenCName) == _PcdName:
+ #
+ # First convert from CFormatGuid to GUID string
+ #
+ _PcdGuidString = GuidStructureStringToGuidString(PcdItem.DefaultValue)
+
+ if not _PcdGuidString:
+ #
+ # Then try Byte array.
+ #
+ _PcdGuidString = GuidStructureByteArrayToGuidString(PcdItem.DefaultValue)
+
+ if not _PcdGuidString:
+ #
+ # Not Byte array or CFormat GUID, raise error.
+ #
+ EdkLogger.error("build",
+ FORMAT_INVALID,
+ "The format of PCD value is incorrect. PCD: %s , Value: %s\n"%(_PcdName, PcdItem.DefaultValue),
+ ExtraData=self.FdfFile)
+
+ if not _PcdGuidString.upper() in _GuidDict.keys():
+ _GuidDict[_PcdGuidString.upper()] = FfsFile
+ PcdFoundFlag = True
+ break
+ else:
+ EdkLogger.error("build",
+ FORMAT_INVALID,
+ "Duplicate GUID found for these lines: Line %d: %s and Line %d: %s. GUID: %s"%(FfsFile.CurrentLineNum,
+ FfsFile.CurrentLineContent,
+ _GuidDict[_PcdGuidString.upper()].CurrentLineNum,
+ _GuidDict[_PcdGuidString.upper()].CurrentLineContent,
+ FfsFile.NameGuid.upper()),
+ ExtraData=self.FdfFile)
+
+ if not FfsFile.NameGuid.upper() in _GuidDict.keys():
+ _GuidDict[FfsFile.NameGuid.upper()] = FfsFile
+ else:
+ #
+ # Two raw file GUID conflict.
+ #
+ EdkLogger.error("build",
+ FORMAT_INVALID,
+ "Duplicate GUID found for these lines: Line %d: %s and Line %d: %s. GUID: %s"%(FfsFile.CurrentLineNum,
+ FfsFile.CurrentLineContent,
+ _GuidDict[FfsFile.NameGuid.upper()].CurrentLineNum,
+ _GuidDict[FfsFile.NameGuid.upper()].CurrentLineContent,
+ FfsFile.NameGuid.upper()),
+ ExtraData=self.FdfFile)
+
+
def _CheckPcdDefineAndType(self):
PcdTypeList = [
"FixedAtBuild", "PatchableInModule", "FeatureFlag",
@@ -1749,6 +1876,7 @@ class ModuleAutoGen(AutoGen):
self._DepexList = None
self._DepexExpressionList = None
self._BuildOption = None
+ self._BuildOptionIncPathList = None
self._BuildTargets = None
self._IntroBuildTargetList = None
self._FinalBuildTargetList = None
@@ -2004,6 +2132,50 @@ class ModuleAutoGen(AutoGen):
self._BuildOption = self.PlatformInfo.ApplyBuildOption(self.Module)
return self._BuildOption
+ ## Get include path list from tool option for the module build
+ #
+ # @retval list The include path list
+ #
+ def _GetBuildOptionIncPathList(self):
+ if self._BuildOptionIncPathList == None:
+ #
+ # Regular expression for finding include directories; the difference between MSFT and INTEL/GCC
+ # is that the former uses /I while the latter uses -I to specify include directories
+ #
+ if self.PlatformInfo.ToolChainFamily in ('MSFT'):
+ gBuildOptIncludePattern = re.compile(r"(?:.*?)/I[ \t]*([^ ]*)", re.MULTILINE|re.DOTALL)
+ elif self.PlatformInfo.ToolChainFamily in ('INTEL', 'GCC'):
+ gBuildOptIncludePattern = re.compile(r"(?:.*?)-I[ \t]*([^ ]*)", re.MULTILINE|re.DOTALL)
+
+ BuildOptionIncPathList = []
+ for Tool in ('CC', 'PP', 'VFRPP', 'ASLPP', 'ASLCC', 'APP', 'ASM'):
+ Attr = 'FLAGS'
+ try:
+ FlagOption = self.BuildOption[Tool][Attr]
+ except KeyError:
+ FlagOption = ''
+
+ IncPathList = [NormPath(Path, self.Macros) for Path in gBuildOptIncludePattern.findall(FlagOption)]
+ #
+ # EDK II modules may only reference header files in the packages they depend on or
+ # within the module's own directory tree. Report an error on violation.
+ #
+ if self.AutoGenVersion >= 0x00010005 and len(IncPathList) > 0:
+ for Path in IncPathList:
+ if (Path not in self.IncludePathList) and (CommonPath([Path, self.MetaFile.Dir]) != self.MetaFile.Dir):
+ ErrMsg = "The include directory for the EDK II module in this line is invalid %s specified in %s FLAGS '%s'" % (Path, Tool, FlagOption)
+ EdkLogger.error("build",
+ PARAMETER_INVALID,
+ ExtraData = ErrMsg,
+ File = str(self.MetaFile))
+
+
+ BuildOptionIncPathList += IncPathList
+
+ self._BuildOptionIncPathList = BuildOptionIncPathList
+
+ return self._BuildOptionIncPathList
+
## Return a list of files which can be built from source
#
# What kind of files can be built is determined by build rules in
@@ -2256,7 +2428,7 @@ class ModuleAutoGen(AutoGen):
#
def _GetLibraryPcdList(self):
if self._LibraryPcdList == None:
- Pcds = {}
+ Pcds = sdict()
if not self.IsLibrary:
# get PCDs from dependent libraries
for Library in self.DependentLibraryList:
@@ -2584,6 +2756,7 @@ class ModuleAutoGen(AutoGen):
DxsFile = property(_GetDxsFile)
DepexExpressionList = property(_GetDepexExpressionTokenList)
BuildOption = property(_GetModuleBuildOption)
+ BuildOptionIncPathList = property(_GetBuildOptionIncPathList)
BuildCommand = property(_GetBuildCommand)
# This acts like the main() function for the script, unless it is 'import'ed into another script.
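The core of the new _CheckDuplicateInFV() check in AutoGen.py is a single dictionary keyed on the upper-cased file GUID: the first FFS statement using a GUID claims the slot, and any later statement with the same key is reported together with the line that claimed it. A minimal standalone sketch of that pattern (the CheckDuplicateInFv helper and its tuple layout are illustrative stand-ins, not the BaseTools classes):

    def CheckDuplicateInFv(FfsEntries):
        # FfsEntries: iterable of (Guid, LineNum, LineContent) tuples
        GuidDict = {}
        for Guid, LineNum, LineContent in FfsEntries:
            Key = Guid.upper()
            if Key not in GuidDict:
                GuidDict[Key] = (LineNum, LineContent)
            else:
                PrevNum, PrevContent = GuidDict[Key]
                raise ValueError("Duplicate GUID found for these lines: "
                                 "Line %d: %s and Line %d: %s. GUID: %s"
                                 % (LineNum, LineContent, PrevNum, PrevContent, Key))

    CheckDuplicateInFv([("12345678-ABCD-ABCD-ABCD-1234567890AB", 10, "INF MyPkg/ModuleA.inf"),
                        ("12345678-abcd-abcd-abcd-1234567890ab", 42, "INF MyPkg/ModuleB.inf")])
    # raises ValueError: the two entries differ only in GUID case
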
diff --git a/BaseTools/Source/Python/AutoGen/GenMake.py b/BaseTools/Source/Python/AutoGen/GenMake.py
index 5bf693c50f..3720c8bfed 100644
--- a/BaseTools/Source/Python/AutoGen/GenMake.py
+++ b/BaseTools/Source/Python/AutoGen/GenMake.py
@@ -614,7 +614,7 @@ cleanlib:
self.FileDependency = self.GetFileDependency(
SourceFileList,
ForceIncludedFile,
- self._AutoGenObject.IncludePathList
+ self._AutoGenObject.IncludePathList + self._AutoGenObject.BuildOptionIncPathList
)
DepSet = None
for File in self.FileDependency:
@@ -1253,7 +1253,7 @@ ${END}\t@cd $(BUILD_DIR)
#
fds: init
\t-@cd $(FV_DIR)
-${BEGIN}\tGenFds -f ${fdf_file} -o $(BUILD_DIR) -t $(TOOLCHAIN) -b $(TARGET) -p ${active_platform} -a ${build_architecture_list} ${extra_options}${END}${BEGIN} -r ${fd} ${END}${BEGIN} -i ${fv} ${END}${BEGIN} -C ${cap} ${END}${BEGIN} -D ${macro} ${END}
+${BEGIN}\tGenFds -f ${fdf_file} -o $(BUILD_DIR) -t $(TOOLCHAIN) -b $(TARGET) -p ${active_platform} -a ${build_architecture_list}${END}${BEGIN}${extra_options}${END}${BEGIN} -r ${fd}${END}${BEGIN} -i ${fv}${END}${BEGIN} -C ${cap}${END}${BEGIN} -D${macro}${END}
#
# run command for emulator platform only
@@ -1335,6 +1335,9 @@ ${END}\t@cd $(BUILD_DIR)\n
if GlobalData.gCaseInsensitive:
ExtraOption += " -c"
+ ExtraOptionList = []
+ if ExtraOption:
+ ExtraOptionList.append(ExtraOption)
MakefileName = self._FILE_NAME_[self._FileType]
SubBuildCommandList = []
@@ -1366,7 +1369,7 @@ ${END}\t@cd $(BUILD_DIR)\n
"fd" : PlatformInfo.FdTargetList,
"fv" : PlatformInfo.FvTargetList,
"cap" : PlatformInfo.CapTargetList,
- "extra_options" : ExtraOption,
+ "extra_options" : ExtraOptionList,
"macro" : MacroList,
}
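GenMake.py now feeds BuildOptionIncPathList into dependency scanning, and that list is produced by running a tool-chain-family specific regular expression over each tool's FLAGS string (/I for MSFT, -I for INTEL/GCC), as in the AutoGen.py hunk above. A small self-contained sketch of what findall() captures (the flag strings are made up for illustration):

    import re

    _MSFT_INC = re.compile(r"(?:.*?)/I[ \t]*([^ ]*)", re.MULTILINE | re.DOTALL)
    _GCC_INC  = re.compile(r"(?:.*?)-I[ \t]*([^ ]*)", re.MULTILINE | re.DOTALL)

    def ExtractIncPaths(ToolChainFamily, Flags):
        Pattern = _MSFT_INC if ToolChainFamily == 'MSFT' else _GCC_INC
        return [Path for Path in Pattern.findall(Flags) if Path]

    print(ExtractIncPaths('MSFT', r'/nologo /I C:\MyPkg\Include /W4'))
    # ['C:\\MyPkg\\Include']
    print(ExtractIncPaths('GCC', '-Os -I /work/MyPkg/Include -Wall'))
    # ['/work/MyPkg/Include']
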
diff --git a/BaseTools/Source/Python/AutoGen/StrGather.py b/BaseTools/Source/Python/AutoGen/StrGather.py
index 3df493834c..7187f0a440 100644
--- a/BaseTools/Source/Python/AutoGen/StrGather.py
+++ b/BaseTools/Source/Python/AutoGen/StrGather.py
@@ -168,8 +168,9 @@ def CreateHFileContent(BaseName, UniObjectClass, IsCompatibleMode, UniGenCFlag):
Str = WriteLine(Str, Line)
Line = COMMENT_DEFINE_STR + ' ' + PRINTABLE_LANGUAGE_NAME_STRING_NAME + ' ' * (ValueStartPtr - len(DEFINE_STR + PRINTABLE_LANGUAGE_NAME_STRING_NAME)) + DecToHexStr(1, 4) + COMMENT_NOT_REFERENCED
Str = WriteLine(Str, Line)
+ UnusedStr = ''
- #Group the referred STRING token together.
+ #Group the referred/unused STRING tokens together.
for Index in range(2, len(UniObjectClass.OrderedStringList[UniObjectClass.LanguageDef[0][0]])):
StringItem = UniObjectClass.OrderedStringList[UniObjectClass.LanguageDef[0][0]][Index]
Name = StringItem.StringName
@@ -183,21 +184,14 @@ def CreateHFileContent(BaseName, UniObjectClass, IsCompatibleMode, UniGenCFlag):
else:
Line = DEFINE_STR + ' ' + Name + ' ' * (ValueStartPtr - len(DEFINE_STR + Name)) + DecToHexStr(Token, 4)
Str = WriteLine(Str, Line)
-
- #Group the unused STRING token together.
- for Index in range(2, len(UniObjectClass.OrderedStringList[UniObjectClass.LanguageDef[0][0]])):
- StringItem = UniObjectClass.OrderedStringList[UniObjectClass.LanguageDef[0][0]][Index]
- Name = StringItem.StringName
- Token = StringItem.Token
- Referenced = StringItem.Referenced
- if Name != None:
- Line = ''
- if Referenced == False:
+ else:
if (ValueStartPtr - len(DEFINE_STR + Name)) <= 0:
Line = COMMENT_DEFINE_STR + ' ' + Name + ' ' + DecToHexStr(Token, 4) + COMMENT_NOT_REFERENCED
else:
Line = COMMENT_DEFINE_STR + ' ' + Name + ' ' * (ValueStartPtr - len(DEFINE_STR + Name)) + DecToHexStr(Token, 4) + COMMENT_NOT_REFERENCED
- Str = WriteLine(Str, Line)
+ UnusedStr = WriteLine(UnusedStr, Line)
+
+ Str = ''.join([Str,UnusedStr])
Str = WriteLine(Str, '')
if IsCompatibleMode or UniGenCFlag:
@@ -383,7 +377,6 @@ def CreateCFileContent(BaseName, UniObjectClass, IsCompatibleMode, UniBinBuffer,
#
for IndexI in range(len(UniObjectClass.LanguageDef)):
Language = UniObjectClass.LanguageDef[IndexI][0]
- LangPrintName = UniObjectClass.LanguageDef[IndexI][1]
if Language not in UniLanguageListFiltered:
continue
@@ -393,12 +386,12 @@ def CreateCFileContent(BaseName, UniObjectClass, IsCompatibleMode, UniBinBuffer,
NumberOfUseOtherLangDef = 0
Index = 0
for IndexJ in range(1, len(UniObjectClass.OrderedStringList[UniObjectClass.LanguageDef[IndexI][0]])):
- Item = UniObjectClass.FindByToken(IndexJ, Language)
+ Item = UniObjectClass.OrderedStringListByToken[Language][IndexJ]
+
Name = Item.StringName
Value = Item.StringValueByteList
Referenced = Item.Referenced
Token = Item.Token
- Length = Item.Length
UseOtherLangDef = Item.UseOtherLangDef
if UseOtherLangDef != '' and Referenced:
@@ -595,10 +588,7 @@ def SearchString(UniObjectClass, FileList, IsCompatibleMode):
# This function is used for UEFI2.1 spec
#
#
-def GetStringFiles(UniFilList, SourceFileList, IncludeList, IncludePathList, SkipList, BaseName, IsCompatibleMode = False, ShellMode = False, UniGenCFlag = True, UniGenBinBuffer = None, FilterInfo = [True, []]):
- Status = True
- ErrorMessage = ''
-
+def GetStringFiles(UniFilList, SourceFileList, IncludeList, IncludePathList, SkipList, BaseName, IsCompatibleMode = False, ShellMode = False, UniGenCFlag = True, UniGenBinBuffer = None, FilterInfo = [True, []]):
if len(UniFilList) > 0:
if ShellMode:
#
@@ -627,13 +617,13 @@ def GetStringFiles(UniFilList, SourceFileList, IncludeList, IncludePathList, Ski
# Write an item
#
def Write(Target, Item):
- return Target + Item
+ return ''.join([Target,Item])
#
# Write an item with a break line
#
def WriteLine(Target, Item):
- return Target + Item + '\n'
+ return ''.join([Target,Item,'\n'])
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
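The StrGather.py change above folds the two passes over OrderedStringList into one, collecting #define lines for referenced tokens and commented-out lines for unreferenced ones in separate accumulators, then joining them once instead of growing a string with repeated concatenation. A minimal sketch of that shape (StringItem here is a stand-in namedtuple, not the BaseTools class):

    from collections import namedtuple

    StringItem = namedtuple('StringItem', 'Name Token Referenced')

    def GroupTokenDefines(Items):
        Used, Unused = [], []
        for Item in Items:
            Line = '#define %s 0x%04X' % (Item.Name, Item.Token)
            if Item.Referenced:
                Used.append(Line)
            else:
                Unused.append('// ' + Line + ' // not referenced')
        # one join at the end instead of repeated string concatenation
        return '\n'.join(Used + Unused)

    print(GroupTokenDefines([StringItem('STR_HELLO', 2, True),
                             StringItem('STR_SPARE', 3, False)]))
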
diff --git a/BaseTools/Source/Python/AutoGen/UniClassObject.py b/BaseTools/Source/Python/AutoGen/UniClassObject.py
index 1825c81d91..54751bab4e 100644
--- a/BaseTools/Source/Python/AutoGen/UniClassObject.py
+++ b/BaseTools/Source/Python/AutoGen/UniClassObject.py
@@ -193,6 +193,8 @@ class UniFileClassObject(object):
self.Token = 2
self.LanguageDef = [] #[ [u'LanguageIdentifier', u'PrintableName'], ... ]
self.OrderedStringList = {} #{ u'LanguageIdentifier' : [StringDefClassObject] }
+ self.OrderedStringDict = {} #{ u'LanguageIdentifier' : {StringName:(IndexInList)} }
+ self.OrderedStringListByToken = {} #{ u'LanguageIdentifier' : {Token: StringDefClassObject} }
self.IsCompatibleMode = IsCompatibleMode
self.IncludePathList = IncludePathList
if len(self.FileList) > 0:
@@ -246,14 +248,13 @@ class UniFileClassObject(object):
else:
OtherLang = FirstLangName
self.OrderedStringList[LangName].append (StringDefClassObject(Item.StringName, '', Item.Referenced, Item.Token, OtherLang))
-
+ self.OrderedStringDict[LangName][Item.StringName] = len(self.OrderedStringList[LangName]) - 1
return True
#
# Get String name and value
#
def GetStringObject(self, Item):
- Name = ''
Language = ''
Value = ''
@@ -476,20 +477,22 @@ class UniFileClassObject(object):
if Language not in self.OrderedStringList:
self.OrderedStringList[Language] = []
+ self.OrderedStringDict[Language] = {}
IsAdded = True
- for Item in self.OrderedStringList[Language]:
- if Name == Item.StringName:
- IsAdded = False
- if Value != None:
- Item.UpdateValue(Value)
- Item.UseOtherLangDef = ''
- break
+ if Name in self.OrderedStringDict[Language]:
+ IsAdded = False
+ if Value != None:
+ ItemIndexInList = self.OrderedStringDict[Language][Name]
+ Item = self.OrderedStringList[Language][ItemIndexInList]
+ Item.UpdateValue(Value)
+ Item.UseOtherLangDef = ''
if IsAdded:
Token = len(self.OrderedStringList[Language])
if Index == -1:
self.OrderedStringList[Language].append(StringDefClassObject(Name, Value, Referenced, Token, UseOtherLangDef))
+ self.OrderedStringDict[Language][Name] = Token
for LangName in self.LanguageDef:
#
# New STRING token will be added into all language string lists.
@@ -501,8 +504,10 @@ class UniFileClassObject(object):
else:
OtherLangDef = Language
self.OrderedStringList[LangName[0]].append(StringDefClassObject(Name, '', Referenced, Token, OtherLangDef))
+ self.OrderedStringDict[LangName[0]][Name] = len(self.OrderedStringList[LangName[0]]) - 1
else:
self.OrderedStringList[Language].insert(Index, StringDefClassObject(Name, Value, Referenced, Token, UseOtherLangDef))
+ self.OrderedStringDict[Language][Name] = Index
#
# Set the string as referenced
@@ -513,17 +518,18 @@ class UniFileClassObject(object):
# So, only update the status of string stoken in first language string list.
#
Lang = self.LanguageDef[0][0]
- for Item in self.OrderedStringList[Lang]:
- if Name == Item.StringName:
- Item.Referenced = True
- break
+ if Name in self.OrderedStringDict[Lang]:
+ ItemIndexInList = self.OrderedStringDict[Lang][Name]
+ Item = self.OrderedStringList[Lang][ItemIndexInList]
+ Item.Referenced = True
+
#
# Search the string in language definition by Name
#
def FindStringValue(self, Name, Lang):
- for Item in self.OrderedStringList[Lang]:
- if Item.StringName == Name:
- return Item
+ if Name in self.OrderedStringDict[Lang]:
+ ItemIndexInList = self.OrderedStringDict[Lang][Name]
+ return self.OrderedStringList[Lang][ItemIndexInList]
return None
@@ -546,6 +552,10 @@ class UniFileClassObject(object):
#
FirstLangName = self.LanguageDef[0][0]
+ # Convert the OrderedStringList to OrderedStringListByToken in order to facilitate future searches by token
+ for LangNameItem in self.LanguageDef:
+ self.OrderedStringListByToken[LangNameItem[0]] = {}
+
#
# Use small token for all referred string stoken.
#
@@ -558,6 +568,7 @@ class UniFileClassObject(object):
OtherLangItem = self.OrderedStringList[LangName][Index]
OtherLangItem.Referenced = True
OtherLangItem.Token = RefToken
+ self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
RefToken = RefToken + 1
#
@@ -571,6 +582,7 @@ class UniFileClassObject(object):
LangName = LangNameItem[0]
OtherLangItem = self.OrderedStringList[LangName][Index]
OtherLangItem.Token = RefToken + UnRefToken
+ self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
UnRefToken = UnRefToken + 1
#
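The UniClassObject.py changes above keep two auxiliary dictionaries next to the ordered string list: OrderedStringDict maps a string name to its index in OrderedStringList, and OrderedStringListByToken maps a token number to the string object, so name and token lookups no longer scan the list. A minimal sketch of the same indexing pattern (plain module-level dicts standing in for the class attributes):

    OrderedStringList = []          # preserves insertion order
    OrderedStringDict = {}          # StringName -> index into OrderedStringList
    OrderedStringListByToken = {}   # Token -> string entry

    def AddString(Name, Value, Token):
        OrderedStringList.append({'Name': Name, 'Value': Value, 'Token': Token})
        OrderedStringDict[Name] = len(OrderedStringList) - 1
        OrderedStringListByToken[Token] = OrderedStringList[-1]

    def FindStringValue(Name):
        if Name in OrderedStringDict:
            return OrderedStringList[OrderedStringDict[Name]]
        return None

    AddString('STR_HELLO', u'Hello', 2)
    print(FindStringValue('STR_HELLO')['Value'])    # Hello
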
diff --git a/BaseTools/Source/Python/Common/BuildVersion.py b/BaseTools/Source/Python/Common/BuildVersion.py
index 48316716b1..fecc40e84a 100644
--- a/BaseTools/Source/Python/Common/BuildVersion.py
+++ b/BaseTools/Source/Python/Common/BuildVersion.py
@@ -1,3 +1,3 @@
#This file is for build version number auto generation
#
-gBUILD_VERSION = "Build 2322"
+gBUILD_VERSION = "Build 2361"
diff --git a/BaseTools/Source/Python/Common/Misc.py b/BaseTools/Source/Python/Common/Misc.py
index d6a796bd2b..731bbf1458 100644
--- a/BaseTools/Source/Python/Common/Misc.py
+++ b/BaseTools/Source/Python/Common/Misc.py
@@ -1396,6 +1396,27 @@ class PathClass(object):
else:
return self.Path == str(Other)
+ ## Override __cmp__ function
+ #
+ # Customize the comparison operation of two PathClass objects
+ #
+ # @retval 0 The two PathClass objects are equal
+ # @retval -1 The first PathClass is less than the second PathClass
+ # @retval 1 The first PathClass is greater than the second PathClass
+ def __cmp__(self, Other):
+ if type(Other) == type(self):
+ OtherKey = Other.Path
+ else:
+ OtherKey = str(Other)
+
+ SelfKey = self.Path
+ if SelfKey == OtherKey:
+ return 0
+ elif SelfKey > OtherKey:
+ return 1
+ else:
+ return -1
+
## Override __hash__ function
#
# Use Path as key in hash table
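The new PathClass.__cmp__ above follows the Python 2 comparison contract that BaseTools relied on at the time: return 0 when the Path keys are equal, 1 when the first is greater, -1 when it is less, so instances compare and sort consistently with their Path strings. A minimal stand-alone sketch (PathKey is a hypothetical stand-in for PathClass):

    class PathKey(object):
        def __init__(self, Path):
            self.Path = Path

        def __cmp__(self, Other):
            # Python 2 consults __cmp__ for ==, <, > and sorted()
            OtherKey = Other.Path if isinstance(Other, PathKey) else str(Other)
            if self.Path == OtherKey:
                return 0
            return 1 if self.Path > OtherKey else -1
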
diff --git a/BaseTools/Source/Python/Ecc/c.py b/BaseTools/Source/Python/Ecc/c.py
index 4c831e0ff7..532f4a0918 100644
--- a/BaseTools/Source/Python/Ecc/c.py
+++ b/BaseTools/Source/Python/Ecc/c.py
@@ -62,7 +62,7 @@ def PrintErrorMsg(ErrorType, Msg, TableName, ItemId):
for Part in MsgPartList:
Msg += Part
Msg += ' '
- GetDB().TblReport.Insert(ErrorType, OtherMsg = Msg, BelongsToTable = TableName, BelongsToItem = ItemId)
+ GetDB().TblReport.Insert(ErrorType, OtherMsg=Msg, BelongsToTable=TableName, BelongsToItem=ItemId)
def GetIdType(Str):
Type = DataClass.MODEL_UNKNOWN
@@ -93,16 +93,16 @@ def SuOccurInTypedef (Su, TdList):
def GetIdentifierList():
IdList = []
for comment in FileProfile.CommentList:
- IdComment = DataClass.IdentifierClass(-1, '', '', '', comment.Content, DataClass.MODEL_IDENTIFIER_COMMENT, -1, -1, comment.StartPos[0],comment.StartPos[1],comment.EndPos[0],comment.EndPos[1])
+ IdComment = DataClass.IdentifierClass(-1, '', '', '', comment.Content, DataClass.MODEL_IDENTIFIER_COMMENT, -1, -1, comment.StartPos[0], comment.StartPos[1], comment.EndPos[0], comment.EndPos[1])
IdList.append(IdComment)
for pp in FileProfile.PPDirectiveList:
Type = GetIdType(pp.Content)
- IdPP = DataClass.IdentifierClass(-1, '', '', '', pp.Content, Type, -1, -1, pp.StartPos[0],pp.StartPos[1],pp.EndPos[0],pp.EndPos[1])
+ IdPP = DataClass.IdentifierClass(-1, '', '', '', pp.Content, Type, -1, -1, pp.StartPos[0], pp.StartPos[1], pp.EndPos[0], pp.EndPos[1])
IdList.append(IdPP)
for pe in FileProfile.PredicateExpressionList:
- IdPE = DataClass.IdentifierClass(-1, '', '', '', pe.Content, DataClass.MODEL_IDENTIFIER_PREDICATE_EXPRESSION, -1, -1, pe.StartPos[0],pe.StartPos[1],pe.EndPos[0],pe.EndPos[1])
+ IdPE = DataClass.IdentifierClass(-1, '', '', '', pe.Content, DataClass.MODEL_IDENTIFIER_PREDICATE_EXPRESSION, -1, -1, pe.StartPos[0], pe.StartPos[1], pe.EndPos[0], pe.EndPos[1])
IdList.append(IdPE)
FuncDeclPattern = GetFuncDeclPattern()
@@ -191,7 +191,7 @@ def GetIdentifierList():
var.Modifier += ' ' + Name[LSBPos:]
Name = Name[0:LSBPos]
- IdVar = DataClass.IdentifierClass(-1, var.Modifier, '', Name, (len(DeclList) > 1 and [DeclList[1]]or [''])[0], DataClass.MODEL_IDENTIFIER_VARIABLE, -1, -1, var.StartPos[0],var.StartPos[1], VarNameStartLine, VarNameStartColumn)
+ IdVar = DataClass.IdentifierClass(-1, var.Modifier, '', Name, (len(DeclList) > 1 and [DeclList[1]]or [''])[0], DataClass.MODEL_IDENTIFIER_VARIABLE, -1, -1, var.StartPos[0], var.StartPos[1], VarNameStartLine, VarNameStartColumn)
IdList.append(IdVar)
else:
DeclList = var.Declarator.split('=')
@@ -200,15 +200,15 @@ def GetIdentifierList():
LSBPos = var.Declarator.find('[')
var.Modifier += ' ' + Name[LSBPos:]
Name = Name[0:LSBPos]
- IdVar = DataClass.IdentifierClass(-1, var.Modifier, '', Name, (len(DeclList) > 1 and [DeclList[1]]or [''])[0], DataClass.MODEL_IDENTIFIER_VARIABLE, -1, -1, var.StartPos[0],var.StartPos[1], VarNameStartLine, VarNameStartColumn)
+ IdVar = DataClass.IdentifierClass(-1, var.Modifier, '', Name, (len(DeclList) > 1 and [DeclList[1]]or [''])[0], DataClass.MODEL_IDENTIFIER_VARIABLE, -1, -1, var.StartPos[0], var.StartPos[1], VarNameStartLine, VarNameStartColumn)
IdList.append(IdVar)
for enum in FileProfile.EnumerationDefinitionList:
LBPos = enum.Content.find('{')
RBPos = enum.Content.find('}')
Name = enum.Content[4:LBPos].strip()
- Value = enum.Content[LBPos+1:RBPos]
- IdEnum = DataClass.IdentifierClass(-1, '', '', Name, Value, DataClass.MODEL_IDENTIFIER_ENUMERATE, -1, -1, enum.StartPos[0],enum.StartPos[1],enum.EndPos[0],enum.EndPos[1])
+ Value = enum.Content[LBPos + 1:RBPos]
+ IdEnum = DataClass.IdentifierClass(-1, '', '', Name, Value, DataClass.MODEL_IDENTIFIER_ENUMERATE, -1, -1, enum.StartPos[0], enum.StartPos[1], enum.EndPos[0], enum.EndPos[1])
IdList.append(IdEnum)
for su in FileProfile.StructUnionDefinitionList:
@@ -226,8 +226,8 @@ def GetIdentifierList():
Value = ''
else:
Name = su.Content[SkipLen:LBPos].strip()
- Value = su.Content[LBPos:RBPos+1]
- IdPE = DataClass.IdentifierClass(-1, '', '', Name, Value, Type, -1, -1, su.StartPos[0],su.StartPos[1],su.EndPos[0],su.EndPos[1])
+ Value = su.Content[LBPos:RBPos + 1]
+ IdPE = DataClass.IdentifierClass(-1, '', '', Name, Value, Type, -1, -1, su.StartPos[0], su.StartPos[1], su.EndPos[0], su.EndPos[1])
IdList.append(IdPE)
TdFuncPointerPattern = GetTypedefFuncPointerPattern()
@@ -238,7 +238,7 @@ def GetIdentifierList():
if TdFuncPointerPattern.match(td.ToType):
Modifier = td.FromType
LBPos = td.ToType.find('(')
- TmpStr = td.ToType[LBPos+1:].strip()
+ TmpStr = td.ToType[LBPos + 1:].strip()
StarPos = TmpStr.find('*')
if StarPos != -1:
Modifier += ' ' + TmpStr[0:StarPos]
@@ -260,11 +260,11 @@ def GetIdentifierList():
Value += Name[LBPos : RBPos + 1]
Name = Name[0 : LBPos]
- IdTd = DataClass.IdentifierClass(-1, Modifier, '', Name, Value, DataClass.MODEL_IDENTIFIER_TYPEDEF, -1, -1, td.StartPos[0],td.StartPos[1],td.EndPos[0],td.EndPos[1])
+ IdTd = DataClass.IdentifierClass(-1, Modifier, '', Name, Value, DataClass.MODEL_IDENTIFIER_TYPEDEF, -1, -1, td.StartPos[0], td.StartPos[1], td.EndPos[0], td.EndPos[1])
IdList.append(IdTd)
for funcCall in FileProfile.FunctionCallingList:
- IdFC = DataClass.IdentifierClass(-1, '', '', funcCall.FuncName, funcCall.ParamList, DataClass.MODEL_IDENTIFIER_FUNCTION_CALLING, -1, -1, funcCall.StartPos[0],funcCall.StartPos[1],funcCall.EndPos[0],funcCall.EndPos[1])
+ IdFC = DataClass.IdentifierClass(-1, '', '', funcCall.FuncName, funcCall.ParamList, DataClass.MODEL_IDENTIFIER_FUNCTION_CALLING, -1, -1, funcCall.StartPos[0], funcCall.StartPos[1], funcCall.EndPos[0], funcCall.EndPos[1])
IdList.append(IdFC)
return IdList
@@ -275,7 +275,7 @@ def StripNonAlnumChars(Str):
StrippedStr += Char
return StrippedStr
-def GetParamList(FuncDeclarator, FuncNameLine = 0, FuncNameOffset = 0):
+def GetParamList(FuncDeclarator, FuncNameLine=0, FuncNameOffset=0):
FuncDeclarator = StripComments(FuncDeclarator)
ParamIdList = []
#DeclSplitList = FuncDeclarator.split('(')
@@ -477,7 +477,7 @@ def GetFunctionList():
FuncNameStartColumn += 1
PreChar = FirstChar
- FuncObj = DataClass.FunctionClass(-1, FuncDef.Declarator, FuncDef.Modifier, FuncName.strip(), '', FuncDef.StartPos[0],FuncDef.StartPos[1],FuncDef.EndPos[0],FuncDef.EndPos[1], FuncDef.LeftBracePos[0], FuncDef.LeftBracePos[1], -1, ParamIdList, [], FuncNameStartLine, FuncNameStartColumn)
+ FuncObj = DataClass.FunctionClass(-1, FuncDef.Declarator, FuncDef.Modifier, FuncName.strip(), '', FuncDef.StartPos[0], FuncDef.StartPos[1], FuncDef.EndPos[0], FuncDef.EndPos[1], FuncDef.LeftBracePos[0], FuncDef.LeftBracePos[1], -1, ParamIdList, [], FuncNameStartLine, FuncNameStartColumn)
FuncObjList.append(FuncObj)
return FuncObjList
@@ -547,7 +547,7 @@ def CollectSourceCodeDataIntoDB(RootDir):
Db.UpdateIdentifierBelongsToFunction()
-def GetTableID(FullFileName, ErrorMsgList = None):
+def GetTableID(FullFileName, ErrorMsgList=None):
if ErrorMsgList == None:
ErrorMsgList = []
@@ -562,11 +562,11 @@ def GetTableID(FullFileName, ErrorMsgList = None):
for Result in ResultSet:
if FileID != -1:
ErrorMsgList.append('Duplicate file ID found in DB for file %s' % FullFileName)
- return -2
+ return - 2
FileID = Result[0]
if FileID == -1:
ErrorMsgList.append('NO file ID found in DB for file %s' % FullFileName)
- return -1
+ return - 1
return FileID
def GetIncludeFileList(FullFileName):
@@ -645,7 +645,7 @@ def GetPredicateListFromPredicateExpStr(PES):
while i < len(PES) - 1:
if (PES[i].isalnum() or PES[i] == '_' or PES[i] == '*') and LogicOpPos > PredicateBegin:
PredicateBegin = i
- if (PES[i] == '&' and PES[i+1] == '&') or (PES[i] == '|' and PES[i+1] == '|'):
+ if (PES[i] == '&' and PES[i + 1] == '&') or (PES[i] == '|' and PES[i + 1] == '|'):
LogicOpPos = i
Exp = PES[PredicateBegin:i].strip()
# Exp may contain '.' or '->'
@@ -670,7 +670,7 @@ def GetPredicateListFromPredicateExpStr(PES):
PredicateList.append(Exp.rstrip(';').rstrip(')').strip())
return PredicateList
-def GetCNameList(Lvalue, StarList = []):
+def GetCNameList(Lvalue, StarList=[]):
Lvalue += ' '
i = 0
SearchBegin = 0
@@ -686,7 +686,7 @@ def GetCNameList(Lvalue, StarList = []):
VarEnd = i
i += 1
elif VarEnd != -1:
- VarList.append(Lvalue[VarStart:VarEnd+1])
+ VarList.append(Lvalue[VarStart:VarEnd + 1])
i += 1
break
else:
@@ -714,7 +714,7 @@ def GetCNameList(Lvalue, StarList = []):
return VarList
-def SplitPredicateByOp(Str, Op, IsFuncCalling = False):
+def SplitPredicateByOp(Str, Op, IsFuncCalling=False):
Name = Str.strip()
Value = None
@@ -760,7 +760,7 @@ def SplitPredicateByOp(Str, Op, IsFuncCalling = False):
return [Name]
Name = Str[0:Index + IndexInRemainingStr].strip()
- Value = Str[Index+IndexInRemainingStr+len(Op):].strip().strip(')')
+ Value = Str[Index + IndexInRemainingStr + len(Op):].strip().strip(')')
return [Name, Value]
TmpStr = Str.rstrip(';').rstrip(')')
@@ -769,7 +769,7 @@ def SplitPredicateByOp(Str, Op, IsFuncCalling = False):
if Index == -1:
return [Name]
- if Str[Index - 1].isalnum() or Str[Index - 1].isspace() or Str[Index - 1] == ')':
+ if Str[Index - 1].isalnum() or Str[Index - 1].isspace() or Str[Index - 1] == ')' or Str[Index - 1] == ']':
Name = Str[0:Index].strip()
Value = Str[Index + len(Op):].strip()
return [Name, Value]
@@ -826,15 +826,13 @@ def PatternInModifier(Modifier, SubStr):
def GetDataTypeFromModifier(ModifierStr):
MList = ModifierStr.split()
+ ReturnType = ''
for M in MList:
if M in EccGlobalData.gConfig.ModifierList:
- MList.remove(M)
+ continue
# remove array sufix
- if M.startswith('['):
- MList.remove(M)
-
- ReturnType = ''
- for M in MList:
+ if M.startswith('[') or M.endswith(']'):
+ continue
ReturnType += M + ' '
ReturnType = ReturnType.strip()
@@ -950,7 +948,7 @@ def StripComments(Str):
DoubleSlashComment = False
Index += 1
# check for */ comment end
- elif InComment and not DoubleSlashComment and ListFromStr[Index] == '*' and ListFromStr[Index+1] == '/':
+ elif InComment and not DoubleSlashComment and ListFromStr[Index] == '*' and ListFromStr[Index + 1] == '/':
ListFromStr[Index] = ' '
Index += 1
ListFromStr[Index] = ' '
@@ -961,12 +959,12 @@ def StripComments(Str):
ListFromStr[Index] = ' '
Index += 1
# check for // comment
- elif ListFromStr[Index] == '/' and ListFromStr[Index+1] == '/' and ListFromStr[Index+2] != '\n':
+ elif ListFromStr[Index] == '/' and ListFromStr[Index + 1] == '/' and ListFromStr[Index + 2] != '\n':
InComment = True
DoubleSlashComment = True
# check for /* comment start
- elif ListFromStr[Index] == '/' and ListFromStr[Index+1] == '*':
+ elif ListFromStr[Index] == '/' and ListFromStr[Index + 1] == '*':
ListFromStr[Index] = ' '
Index += 1
ListFromStr[Index] = ' '
@@ -1024,7 +1022,7 @@ def GetFinalTypeValue(Type, FieldName, TypedefDict, SUDict):
return None
-def GetRealType(Type, TypedefDict, TargetType = None):
+def GetRealType(Type, TypedefDict, TargetType=None):
if TargetType != None and Type == TargetType:
return Type
while TypedefDict.get(Type):
@@ -1033,7 +1031,7 @@ def GetRealType(Type, TypedefDict, TargetType = None):
return Type
return Type
-def GetTypeInfo(RefList, Modifier, FullFileName, TargetType = None):
+def GetTypeInfo(RefList, Modifier, FullFileName, TargetType=None):
TypedefDict = GetTypedefDict(FullFileName)
SUDict = GetSUDict(FullFileName)
Type = GetDataTypeFromModifier(Modifier).replace('*', '').strip()
@@ -1051,7 +1049,7 @@ def GetTypeInfo(RefList, Modifier, FullFileName, TargetType = None):
# we only want to check if it is a pointer
else:
Type = FromType
- if Type.find('*') != -1 and Index == len(RefList)-1:
+ if Type.find('*') != -1 and Index == len(RefList) - 1:
return Type
Type = FromType.split()[0]
@@ -1061,7 +1059,7 @@ def GetTypeInfo(RefList, Modifier, FullFileName, TargetType = None):
return Type
-def GetVarInfo(PredVarList, FuncRecord, FullFileName, IsFuncCall = False, TargetType = None, StarList = None):
+def GetVarInfo(PredVarList, FuncRecord, FullFileName, IsFuncCall=False, TargetType=None, StarList=None):
PredVar = PredVarList[0]
FileID = GetTableID(FullFileName)
@@ -1536,7 +1534,7 @@ def CheckFuncLayoutLocalVariable(FullFileName):
FL.append(Result)
for F in FL:
- SqlStatement = """ select Name, Value, ID
+ SqlStatement = """ select Name, Value, ID, Modifier
from %s
where Model = %d and BelongsToFunction = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_VARIABLE, F[0])
@@ -1545,7 +1543,7 @@ def CheckFuncLayoutLocalVariable(FullFileName):
continue
for Result in ResultSet:
- if len(Result[1]) > 0:
+ if len(Result[1]) > 0 and 'CONST' not in Result[3]:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_NO_INIT_OF_VARIABLE, 'Variable Name: %s' % Result[0], FileTable, Result[2])
def CheckMemberVariableFormat(Name, Value, FileTable, TdId, ModelId):
@@ -1687,9 +1685,9 @@ def CheckDeclTypedefFormat(FullFileName, ModelId):
# Check member variable format.
ErrMsgList = CheckMemberVariableFormat(Name, Value, FileTable, Td[5], ModelId)
for ErrMsg in ErrMsgList:
- if EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Name+'.'+ErrMsg):
+ if EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Name + '.' + ErrMsg):
continue
- PrintErrorMsg(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, 'Member variable [%s] NOT follow naming convention.' % (Name+'.'+ErrMsg), FileTable, Td[5])
+ PrintErrorMsg(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, 'Member variable [%s] NOT follow naming convention.' % (Name + '.' + ErrMsg), FileTable, Td[5])
# First check in current file to see whether struct/union/enum is typedef-ed.
UntypedefedList = []
@@ -1710,9 +1708,9 @@ def CheckDeclTypedefFormat(FullFileName, ModelId):
continue
ErrMsgList = CheckMemberVariableFormat(Name, Value, FileTable, Result[3], ModelId)
for ErrMsg in ErrMsgList:
- if EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Result[0]+'.'+ErrMsg):
+ if EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Result[0] + '.' + ErrMsg):
continue
- PrintErrorMsg(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, 'Member variable [%s] NOT follow naming convention.' % (Result[0]+'.'+ErrMsg), FileTable, Result[3])
+ PrintErrorMsg(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, 'Member variable [%s] NOT follow naming convention.' % (Result[0] + '.' + ErrMsg), FileTable, Result[3])
# Check whether it is typedefed.
Found = False
for Td in TdList:
@@ -1949,7 +1947,7 @@ def CheckPointerNullComparison(FullFileName):
if SearchInCache:
Type = FuncReturnTypeDict.get(PredVarStr)
if Type != None:
- if Type.find('*') != -1:
+ if Type.find('*') != -1 and Type != 'BOOLEAN*':
PrintErrorMsg(ERROR_PREDICATE_EXPRESSION_CHECK_COMPARISON_NULL_TYPE, 'Predicate Expression: %s' % Exp, FileTable, Str[2])
continue
@@ -1962,7 +1960,7 @@ def CheckPointerNullComparison(FullFileName):
if Type == None:
continue
Type = GetTypeFromArray(Type, PredVarStr)
- if Type.find('*') != -1:
+ if Type.find('*') != -1 and Type != 'BOOLEAN*':
PrintErrorMsg(ERROR_PREDICATE_EXPRESSION_CHECK_COMPARISON_NULL_TYPE, 'Predicate Expression: %s' % Exp, FileTable, Str[2])
def CheckNonBooleanValueComparison(FullFileName):
@@ -2004,8 +2002,6 @@ def CheckNonBooleanValueComparison(FullFileName):
continue
for Exp in GetPredicateListFromPredicateExpStr(Str[0]):
-# if p.match(Exp):
-# continue
PredInfo = SplitPredicateStr(Exp)
if PredInfo[1] == None:
PredVarStr = PredInfo[0][0].strip()
@@ -2037,7 +2033,6 @@ def CheckNonBooleanValueComparison(FullFileName):
if PredVarStr in FuncReturnTypeDict:
continue
-
Type = GetVarInfo(PredVarList, FuncRecord, FullFileName, IsFuncCall, 'BOOLEAN', StarList)
if SearchInCache:
FuncReturnTypeDict[PredVarStr] = Type
@@ -2446,7 +2441,7 @@ def GetDoxygenStrFromComment(Str):
return DoxygenStrList
-def CheckGeneralDoxygenCommentLayout(Str, StartLine, ErrorMsgList, CommentId = -1, TableName = ''):
+def CheckGeneralDoxygenCommentLayout(Str, StartLine, ErrorMsgList, CommentId= -1, TableName=''):
#/** --*/ @retval after @param
if not Str.startswith('/**'):
ErrorMsgList.append('Line %d : Comment does NOT have prefix /** ' % StartLine)
@@ -2460,7 +2455,7 @@ def CheckGeneralDoxygenCommentLayout(Str, StartLine, ErrorMsgList, CommentId = -
ErrorMsgList.append('Line %d : @retval appear before @param ' % StartLine)
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, @retval appear before @param ', TableName, CommentId)
-def CheckFunctionHeaderConsistentWithDoxygenComment(FuncModifier, FuncHeader, FuncStartLine, CommentStr, CommentStartLine, ErrorMsgList, CommentId = -1, TableName = ''):
+def CheckFunctionHeaderConsistentWithDoxygenComment(FuncModifier, FuncHeader, FuncStartLine, CommentStr, CommentStartLine, ErrorMsgList, CommentId= -1, TableName=''):
ParamList = GetParamList(FuncHeader)
CheckGeneralDoxygenCommentLayout(CommentStr, CommentStartLine, ErrorMsgList, CommentId, TableName)
@@ -2480,18 +2475,18 @@ def CheckFunctionHeaderConsistentWithDoxygenComment(FuncModifier, FuncHeader, Fu
ParamName = ParamList[Index].Name.strip()
Tag = DoxygenStrList[Index].strip(' ')
if (not Tag[-1] == ('\n')) and (not Tag[-1] == ('\r')):
- ErrorMsgList.append('Line %d : in Comment, \"%s\" does NOT end with new line ' % (CommentStartLine, Tag.replace('\n', '').replace('\r', '')))
- PrintErrorMsg(ERROR_HEADER_CHECK_FUNCTION, 'in Comment, \"%s\" does NOT end with new line ' % (Tag.replace('\n', '').replace('\r', '')), TableName, CommentId)
+ ErrorMsgList.append('Line %d : in Comment, <%s> does NOT end with new line ' % (CommentStartLine, Tag.replace('\n', '').replace('\r', '')))
+ PrintErrorMsg(ERROR_HEADER_CHECK_FUNCTION, 'in Comment, <%s> does NOT end with new line ' % (Tag.replace('\n', '').replace('\r', '')), TableName, CommentId)
TagPartList = Tag.split()
if len(TagPartList) < 2:
- ErrorMsgList.append('Line %d : in Comment, \"%s\" does NOT contain doxygen contents ' % (CommentStartLine, Tag.replace('\n', '').replace('\r', '')))
- PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, \"%s\" does NOT contain doxygen contents ' % (Tag.replace('\n', '').replace('\r', '')), TableName, CommentId)
+ ErrorMsgList.append('Line %d : in Comment, <%s> does NOT contain doxygen contents ' % (CommentStartLine, Tag.replace('\n', '').replace('\r', '')))
+ PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, <%s> does NOT contain doxygen contents ' % (Tag.replace('\n', '').replace('\r', '')), TableName, CommentId)
Index += 1
continue
LBPos = Tag.find('[')
RBPos = Tag.find(']')
ParamToLBContent = Tag[len('@param'):LBPos].strip()
- if LBPos > 0 and len(ParamToLBContent)==0 and RBPos > LBPos:
+ if LBPos > 0 and len(ParamToLBContent) == 0 and RBPos > LBPos:
InOutStr = ''
ModifierPartList = ParamModifier.split()
for Part in ModifierPartList:
@@ -2504,12 +2499,19 @@ def CheckFunctionHeaderConsistentWithDoxygenComment(FuncModifier, FuncHeader, Fu
InOutStr = 'out'
if InOutStr != '':
- if Tag.find('['+InOutStr+']') == -1:
- ErrorMsgList.append('Line %d : in Comment, \"%s\" does NOT have %s ' % (CommentStartLine, (TagPartList[0] + ' ' +TagPartList[1]).replace('\n', '').replace('\r', ''), '['+InOutStr+']'))
- PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, \"%s\" does NOT have %s ' % ((TagPartList[0] + ' ' +TagPartList[1]).replace('\n', '').replace('\r', ''), '['+InOutStr+']'), TableName, CommentId)
+ if Tag.find('[' + InOutStr + ']') == -1:
+ if InOutStr != 'in, out':
+ ErrorMsgList.append('Line %d : in Comment, <%s> does NOT have %s ' % (CommentStartLine, (TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), '[' + InOutStr + ']'))
+ PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, <%s> does NOT have %s ' % ((TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), '[' + InOutStr + ']'), TableName, CommentId)
+ else:
+ if Tag.find('[in,out]') == -1:
+ ErrorMsgList.append('Line %d : in Comment, <%s> does NOT have %s ' % (CommentStartLine, (TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), '[' + InOutStr + ']'))
+ PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, <%s> does NOT have %s ' % ((TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), '[' + InOutStr + ']'), TableName, CommentId)
+
+
if Tag.find(ParamName) == -1 and ParamName != 'VOID' and ParamName != 'void':
- ErrorMsgList.append('Line %d : in Comment, \"%s\" does NOT consistent with parameter name %s ' % (CommentStartLine, (TagPartList[0] + ' ' +TagPartList[1]).replace('\n', '').replace('\r', ''), ParamName))
- PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, \"%s\" does NOT consistent with parameter name %s ' % ((TagPartList[0] + ' ' +TagPartList[1]).replace('\n', '').replace('\r', ''), ParamName), TableName, CommentId)
+ ErrorMsgList.append('Line %d : in Comment, <%s> does NOT consistent with parameter name %s ' % (CommentStartLine, (TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), ParamName))
+ PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, <%s> does NOT consistent with parameter name %s ' % ((TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), ParamName), TableName, CommentId)
Index += 1
if Index < ParamNumber:
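The GetDataTypeFromModifier rewrite above also sidesteps a classic pitfall of the old code: calling list.remove() while iterating over the same list skips the element that follows each removal. A small illustration of the difference (ModifierList is a made-up configuration value):

    ModifierList = ['CONST', 'STATIC']

    def OldStyle(Tokens):
        # buggy: removing during iteration skips the next element
        for T in Tokens:
            if T in ModifierList:
                Tokens.remove(T)
        return ' '.join(Tokens)

    def NewStyle(Tokens):
        # filter instead of mutating, as the rewritten loop does with 'continue'
        return ' '.join(T for T in Tokens
                        if T not in ModifierList and not T.startswith('['))

    print(OldStyle(['CONST', 'STATIC', 'UINT8', '*']))   # STATIC UINT8 *
    print(NewStyle(['CONST', 'STATIC', 'UINT8', '*']))   # UINT8 *
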
diff --git a/BaseTools/Source/Python/GenFds/FdfParser.py b/BaseTools/Source/Python/GenFds/FdfParser.py
index 6a9e5b7b40..4f555e32bb 100644
--- a/BaseTools/Source/Python/GenFds/FdfParser.py
+++ b/BaseTools/Source/Python/GenFds/FdfParser.py
@@ -1690,9 +1690,13 @@ class FdfParser:
self.__UndoToken()
self.__GetRegionFileType( RegionObj)
- else:
+ elif self.__Token == "DATA":
self.__UndoToken()
self.__GetRegionDataType( RegionObj)
+ else:
+ raise Warning("A valid region type was not found. "
+ "Valid types are [SET, FV, CAPSULE, FILE, DATA]. This error occurred",
+ self.FileName, self.CurrentLineNumber)
return True
@@ -1929,6 +1933,8 @@ class FdfParser:
self.__GetSetStatements(FvObj)
self.__GetFvBaseAddress(FvObj)
+
+ self.__GetFvForceRebase(FvObj)
self.__GetFvAlignment(FvObj)
@@ -2006,11 +2012,42 @@ class FdfParser:
IsValidBaseAddrValue = re.compile('^0[x|X][0-9a-fA-F]+')
if not IsValidBaseAddrValue.match(self.__Token.upper()):
- raise Warning("Unknown alignment value '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
+ raise Warning("Unknown FV base address value '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
Obj.FvBaseAddress = self.__Token
return True
+ ## __GetFvForceRebase() method
+ #
+ # Get FvForceRebase for FV
+ #
+ # @param self The object pointer
+ # @param Obj for whom FvForceRebase is got
+ # @retval True Successfully find a FvForceRebase statement
+ # @retval False Not able to find a FvForceRebase statement
+ #
+ def __GetFvForceRebase(self, Obj):
+
+ if not self.__IsKeyword("FvForceRebase"):
+ return False
+
+ if not self.__IsToken( "="):
+ raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
+
+ if not self.__GetNextToken():
+ raise Warning("expected FvForceRebase value", self.FileName, self.CurrentLineNumber)
+ if self.__Token.upper() not in ["TRUE", "FALSE", "0", "0X0", "0X00", "1", "0X1", "0X01"]:
+ raise Warning("Unknown FvForceRebase value '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
+
+ if self.__Token.upper() in ["TRUE", "1", "0X1", "0X01"]:
+ Obj.FvForceRebase = True
+ elif self.__Token.upper() in ["FALSE", "0", "0X0", "0X00"]:
+ Obj.FvForceRebase = False
+ else:
+ Obj.FvForceRebase = None
+
+ return True
+
## __GetFvAttributes() method
#
# Get attributes for FV
@@ -2215,7 +2252,10 @@ class FdfParser:
ffsInf.KeepReloc = True
else:
raise Warning("Unknown reloc strip flag '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
-
+
+ ffsInf.CurrentLineNum = self.CurrentLineNumber
+ ffsInf.CurrentLineContent = self.__CurrentLine()
+
if ForCapsule:
capsuleFfs = CapsuleData.CapsuleFfs()
capsuleFfs.Ffs = ffsInf
@@ -2325,7 +2365,10 @@ class FdfParser:
self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
FfsFileObj.NameGuid = self.__Token
-
+
+ FfsFileObj.CurrentLineNum = self.CurrentLineNumber
+ FfsFileObj.CurrentLineContent = self.__CurrentLine()
+
self.__GetFilePart( FfsFileObj, MacroDict.copy())
if ForCapsule:
@@ -3922,7 +3965,7 @@ class FdfParser:
Overrides.PciRevision = self.__Token
continue
- if self.__IsKeyword( "COMPRESS"):
+ if self.__IsKeyword( "PCI_COMPRESS"):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
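__GetFvForceRebase() above accepts the value either as a keyword or as a numeric literal and folds it into a boolean; anything outside the two accepted sets raises a parse warning. A minimal sketch of that normalization (ParseForceRebase is an illustrative helper, not part of FdfParser):

    _TRUE_TOKENS  = ["TRUE", "1", "0X1", "0X01"]
    _FALSE_TOKENS = ["FALSE", "0", "0X0", "0X00"]

    def ParseForceRebase(Token):
        Upper = Token.upper()
        if Upper in _TRUE_TOKENS:
            return True
        if Upper in _FALSE_TOKENS:
            return False
        raise ValueError("Unknown FvForceRebase value '%s'" % Token)

    print(ParseForceRebase('0x01'))    # True
    print(ParseForceRebase('FALSE'))   # False
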
diff --git a/BaseTools/Source/Python/GenFds/FfsFileStatement.py b/BaseTools/Source/Python/GenFds/FfsFileStatement.py
index 013dbb1f02..b858549361 100644
--- a/BaseTools/Source/Python/GenFds/FfsFileStatement.py
+++ b/BaseTools/Source/Python/GenFds/FfsFileStatement.py
@@ -39,6 +39,10 @@ class FileStatement (FileStatementClassObject) :
#
def __init__(self):
FileStatementClassObject.__init__(self)
+ self.CurrentLineNum = None
+ self.CurrentLineContent = None
+ self.FileName = None
+ self.InfFileName = None
## GenFfs() method
#
@@ -94,7 +98,7 @@ class FileStatement (FileStatementClassObject) :
SectionFiles = []
Index = 0
SectionAlignments = []
- for section in self.SectionList :
+ for section in self.SectionList:
Index = Index + 1
SecIndex = '%d' %Index
# process the inside FvImage from FvSection or GuidSection
diff --git a/BaseTools/Source/Python/GenFds/FfsInfStatement.py b/BaseTools/Source/Python/GenFds/FfsInfStatement.py
index b9e18f6bca..c6f29f6ddd 100644
--- a/BaseTools/Source/Python/GenFds/FfsInfStatement.py
+++ b/BaseTools/Source/Python/GenFds/FfsInfStatement.py
@@ -56,6 +56,10 @@ class FfsInfStatement(FfsInfStatementClassObject):
self.PiSpecVersion = '0x00000000'
self.InfModule = None
self.FinalTargetSuffixMap = {}
+ self.CurrentLineNum = None
+ self.CurrentLineContent = None
+ self.FileName = None
+ self.InfFileName = None
## GetFinalTargetSuffixMap() method
#
@@ -452,7 +456,7 @@ class FfsInfStatement(FfsInfStatementClassObject):
Arch = ''
OutputPath = ''
(ModulePath, FileName) = os.path.split(self.InfFileName)
- Index = FileName.find('.')
+ Index = FileName.rfind('.')
FileName = FileName[0:Index]
Arch = "NoneArch"
if self.CurrentArch != None:
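The find() to rfind() change above matters for module file names that themselves contain dots (the name below is made up): the extension has to be stripped at the last dot, not the first one.

    FileName = 'MyDriver.1.0.inf'
    print(FileName[0:FileName.find('.')])    # MyDriver       (old behaviour)
    print(FileName[0:FileName.rfind('.')])   # MyDriver.1.0   (new behaviour)
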
diff --git a/BaseTools/Source/Python/GenFds/Fv.py b/BaseTools/Source/Python/GenFds/Fv.py
index 773b0efbe8..f186ab0e73 100644
--- a/BaseTools/Source/Python/GenFds/Fv.py
+++ b/BaseTools/Source/Python/GenFds/Fv.py
@@ -47,6 +47,7 @@ class FV (FvClassObject):
self.FvAddressFileName = None
self.CapsuleName = None
self.FvBaseAddress = None
+ self.FvForceRebase = None
## AddToBuffer()
#
@@ -133,7 +134,8 @@ class FV (FvClassObject):
FvOutputFile,
[self.InfFileName],
AddressFile=FvInfoFileName,
- FfsList=FfsFileList
+ FfsList=FfsFileList,
+ ForceRebase=self.FvForceRebase
)
NewFvInfo = None
@@ -162,7 +164,8 @@ class FV (FvClassObject):
FvOutputFile,
[self.InfFileName],
AddressFile=FvInfoFileName,
- FfsList=FfsFileList
+ FfsList=FfsFileList,
+ ForceRebase=self.FvForceRebase
)
#
diff --git a/BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py b/BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py
index 3abaef2023..236283751e 100644
--- a/BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py
+++ b/BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py
@@ -422,7 +422,7 @@ class GenFdsGlobalVariable:
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate FFS")
@staticmethod
- def GenerateFirmwareVolume(Output, Input, BaseAddress=None, Capsule=False, Dump=False,
+ def GenerateFirmwareVolume(Output, Input, BaseAddress=None, ForceRebase=None, Capsule=False, Dump=False,
AddressFile=None, MapFile=None, FfsList=[]):
if not GenFdsGlobalVariable.NeedsUpdate(Output, Input+FfsList):
return
@@ -431,6 +431,12 @@ class GenFdsGlobalVariable:
Cmd = ["GenFv"]
if BaseAddress not in [None, '']:
Cmd += ["-r", BaseAddress]
+
+ if ForceRebase == False:
+ Cmd +=["-F", "FALSE"]
+ elif ForceRebase == True:
+ Cmd +=["-F", "TRUE"]
+
if Capsule:
Cmd += ["-c"]
if Dump:
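GenerateFirmwareVolume() above treats ForceRebase as a tri-state: None adds nothing to the GenFv command line, while True/False append "-F TRUE" or "-F FALSE". A minimal sketch of just that argument-building step (an illustrative helper, not the BaseTools method):

    def ForceRebaseArgs(ForceRebase=None):
        Args = []
        if ForceRebase is True:
            Args += ["-F", "TRUE"]
        elif ForceRebase is False:
            Args += ["-F", "FALSE"]
        return Args

    print(ForceRebaseArgs())        # []
    print(ForceRebaseArgs(False))   # ['-F', 'FALSE']
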
diff --git a/BaseTools/Source/Python/GenFds/OptRomInfStatement.py b/BaseTools/Source/Python/GenFds/OptRomInfStatement.py
index d64f836164..069414df5b 100644
--- a/BaseTools/Source/Python/GenFds/OptRomInfStatement.py
+++ b/BaseTools/Source/Python/GenFds/OptRomInfStatement.py
@@ -50,10 +50,10 @@ class OptRomInfStatement (FfsInfStatement):
self.OverrideAttribs = OptionRom.OverrideAttribs()
if self.OverrideAttribs.NeedCompress == None:
- self.OverrideAttribs.NeedCompress = self.OptRomDefs.get ('COMPRESS')
+ self.OverrideAttribs.NeedCompress = self.OptRomDefs.get ('PCI_COMPRESS')
if self.OverrideAttribs.NeedCompress is not None:
if self.OverrideAttribs.NeedCompress.upper() not in ('TRUE', 'FALSE'):
- GenFdsGlobalVariable.ErrorLogger( "Expected TRUE/FALSE for COMPRESS: %s" %self.InfFileName)
+ GenFdsGlobalVariable.ErrorLogger( "Expected TRUE/FALSE for PCI_COMPRESS: %s" %self.InfFileName)
self.OverrideAttribs.NeedCompress = \
self.OverrideAttribs.NeedCompress.upper() == 'TRUE'
diff --git a/BaseTools/Source/Python/Workspace/MetaFileParser.py b/BaseTools/Source/Python/Workspace/MetaFileParser.py
index fabc7ed986..4bad21298a 100644
--- a/BaseTools/Source/Python/Workspace/MetaFileParser.py
+++ b/BaseTools/Source/Python/Workspace/MetaFileParser.py
@@ -895,21 +895,28 @@ class DscParser(MetaFileParser):
# three operands
elif TokenNumber == 3:
TokenValue = TokenList[0]
- if TokenValue[0] in ["'", '"'] and TokenValue[-1] in ["'", '"']:
- TokenValue = TokenValue[1:-1]
- if TokenValue.startswith("$(") and TokenValue.endswith(")"):
- TokenValue = self._EvaluateToken(TokenValue, Expression)
- if TokenValue[0] in ["'", '"'] and TokenValue[-1] in ["'", '"']:
- TokenValue = TokenValue[1:-1]
- if TokenValue == None:
- return False
+ if TokenValue != "":
+ if TokenValue[0] in ["'", '"'] and TokenValue[-1] in ["'", '"']:
+ TokenValue = TokenValue[1:-1]
+ if TokenValue.startswith("$(") and TokenValue.endswith(")"):
+ TokenValue = self._EvaluateToken(TokenValue, Expression)
+ if TokenValue == None:
+ return False
+ if TokenValue != "":
+ if TokenValue[0] in ["'", '"'] and TokenValue[-1] in ["'", '"']:
+ TokenValue = TokenValue[1:-1]
+
Value = TokenList[2]
- if Value[0] in ["'", '"'] and Value[-1] in ["'", '"']:
- Value = Value[1:-1]
- if Value.startswith("$(") and Value.endswith(")"):
- Value = self._EvaluateToken(Value, Expression)
- if Value[0] in ["'", '"'] and Value[-1] in ["'", '"']:
- Value = Value[1:-1]
+ if Value != "":
+ if Value[0] in ["'", '"'] and Value[-1] in ["'", '"']:
+ Value = Value[1:-1]
+ if Value.startswith("$(") and Value.endswith(")"):
+ Value = self._EvaluateToken(Value, Expression)
+ if Value == None:
+ return False
+ if Value != "":
+ if Value[0] in ["'", '"'] and Value[-1] in ["'", '"']:
+ Value = Value[1:-1]
Op = TokenList[1]
if Op not in self._OP_:
EdkLogger.error('Parser', FORMAT_INVALID, "Unsupported operator [%s]" % Op, File=self.MetaFile,
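The MetaFileParser.py change above wraps the quote-stripping and macro-evaluation logic in a non-empty guard: indexing TokenValue[0] or Value[-1] on an empty operand would raise IndexError, and an operand can legitimately evaluate to an empty string. A minimal sketch of the guarded quote stripping:

    def StripQuotes(Token):
        if Token != "" and Token[0] in ["'", '"'] and Token[-1] in ["'", '"']:
            return Token[1:-1]
        return Token

    print(StripQuotes('"IA32"'))   # IA32
    print(StripQuotes(''))         # prints an empty line, no IndexError
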
diff --git a/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py b/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py
index 4bfa7d8ffd..ac2ca057cc 100644
--- a/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py
+++ b/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py
@@ -1896,7 +1896,7 @@ class InfBuildData(ModuleBuildClassObject):
## Retrieve PCDs used in this module
def _GetPcds(self):
if self._Pcds == None:
- self._Pcds = {}
+ self._Pcds = sdict()
self._Pcds.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
self._Pcds.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
self._Pcds.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
@@ -1996,7 +1996,7 @@ class InfBuildData(ModuleBuildClassObject):
## Retrieve PCD for given type
def _GetPcd(self, Type):
- Pcds = {}
+ Pcds = sdict()
PcdDict = tdict(True, 4)
PcdList = []
RecordList = self._RawData[Type, self._Arch, self._Platform]
@@ -2071,18 +2071,9 @@ class InfBuildData(ModuleBuildClassObject):
#
# Check hexadecimal token value length and format.
#
+ ReIsValidPcdTokenValue = re.compile(r"^[0][x|X][0]*[0-9a-fA-F]{1,8}$", re.DOTALL)
if Pcd.TokenValue.startswith("0x") or Pcd.TokenValue.startswith("0X"):
- if len(Pcd.TokenValue) < 3 or len(Pcd.TokenValue) > 10:
- EdkLogger.error(
- 'build',
- FORMAT_INVALID,
- "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid:" % (Pcd.TokenValue, TokenSpaceGuid, PcdCName, str(Package)),
- File =self.MetaFile, Line=LineNo,
- ExtraData=None
- )
- try:
- int (Pcd.TokenValue, 16)
- except:
+ if ReIsValidPcdTokenValue.match(Pcd.TokenValue) == None:
EdkLogger.error(
'build',
FORMAT_INVALID,
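The WorkspaceDatabase.py hunk above replaces the separate length check and int(..., 16) parse with one regular expression that accepts 0x/0X followed by optional leading zeros and one to eight hexadecimal digits. A quick illustration of what the pattern accepts and rejects:

    import re

    ReIsValidPcdTokenValue = re.compile(r"^[0][x|X][0]*[0-9a-fA-F]{1,8}$", re.DOTALL)

    for Value in ["0x0000000F", "0x1", "0xGG", "0x123456789"]:
        print(Value, bool(ReIsValidPcdTokenValue.match(Value)))
    # 0x0000000F True, 0x1 True, 0xGG False, 0x123456789 False (more than 8 significant hex digits)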