summaryrefslogtreecommitdiff
path: root/BaseTools/Source/Python/Common
diff options
context:
space:
mode:
authorGuo Mang <mang.guo@intel.com>2018-04-25 17:24:58 +0800
committerGuo Mang <mang.guo@intel.com>2018-04-25 17:26:11 +0800
commit6e3789d7424660b14ef3d7123221c97db5d8aff5 (patch)
tree6a5a7f1e0bc5a5296f2de0c8f02091c85e3443b7 /BaseTools/Source/Python/Common
parentd33896d88d9d32d516129e92e25b80f8fddc6f7b (diff)
downloadedk2-platforms-6e3789d7424660b14ef3d7123221c97db5d8aff5.tar.xz
Remove unused files
Contributed-under: TianoCore Contribution Agreement 1.1 Signed-off-by: Guo Mang <mang.guo@intel.com>
Diffstat (limited to 'BaseTools/Source/Python/Common')
-rw-r--r--BaseTools/Source/Python/Common/BuildToolError.py159
-rw-r--r--BaseTools/Source/Python/Common/BuildVersion.py16
-rw-r--r--BaseTools/Source/Python/Common/DataType.py507
-rw-r--r--BaseTools/Source/Python/Common/Database.py120
-rw-r--r--BaseTools/Source/Python/Common/DecClassObject.py564
-rw-r--r--BaseTools/Source/Python/Common/Dictionary.py76
-rw-r--r--BaseTools/Source/Python/Common/DscClassObject.py1445
-rw-r--r--BaseTools/Source/Python/Common/EdkIIWorkspace.py320
-rw-r--r--BaseTools/Source/Python/Common/EdkIIWorkspaceBuild.py1670
-rw-r--r--BaseTools/Source/Python/Common/EdkLogger.py276
-rw-r--r--BaseTools/Source/Python/Common/Expression.py652
-rw-r--r--BaseTools/Source/Python/Common/FdfClassObject.py116
-rw-r--r--BaseTools/Source/Python/Common/FdfParserLite.py3668
-rw-r--r--BaseTools/Source/Python/Common/GlobalData.py89
-rw-r--r--BaseTools/Source/Python/Common/Identification.py58
-rw-r--r--BaseTools/Source/Python/Common/InfClassObject.py1116
-rw-r--r--BaseTools/Source/Python/Common/LongFilePathOs.py73
-rw-r--r--BaseTools/Source/Python/Common/LongFilePathOsPath.py53
-rw-r--r--BaseTools/Source/Python/Common/LongFilePathSupport.py63
-rw-r--r--BaseTools/Source/Python/Common/MigrationUtilities.py568
-rw-r--r--BaseTools/Source/Python/Common/Misc.py2098
-rw-r--r--BaseTools/Source/Python/Common/MultipleWorkspace.py156
-rw-r--r--BaseTools/Source/Python/Common/Parsing.py914
-rw-r--r--BaseTools/Source/Python/Common/PyUtility.pydbin6144 -> 0 bytes
-rw-r--r--BaseTools/Source/Python/Common/RangeExpression.py737
-rw-r--r--BaseTools/Source/Python/Common/String.py868
-rw-r--r--BaseTools/Source/Python/Common/TargetTxtClassObject.py190
-rw-r--r--BaseTools/Source/Python/Common/ToolDefClassObject.py286
-rw-r--r--BaseTools/Source/Python/Common/VariableAttributes.py57
-rw-r--r--BaseTools/Source/Python/Common/VpdInfoFile.py258
-rw-r--r--BaseTools/Source/Python/Common/__init__.py15
31 files changed, 0 insertions, 17188 deletions
diff --git a/BaseTools/Source/Python/Common/BuildToolError.py b/BaseTools/Source/Python/Common/BuildToolError.py
deleted file mode 100644
index bee5850fc5..0000000000
--- a/BaseTools/Source/Python/Common/BuildToolError.py
+++ /dev/null
@@ -1,159 +0,0 @@
-## @file
-# Standardized Error Hanlding infrastructures.
-#
-# Copyright (c) 2007 - 2016, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-FILE_OPEN_FAILURE = 1
-FILE_WRITE_FAILURE = 2
-FILE_PARSE_FAILURE = 3
-FILE_READ_FAILURE = 4
-FILE_CREATE_FAILURE = 5
-FILE_CHECKSUM_FAILURE = 6
-FILE_COMPRESS_FAILURE = 7
-FILE_DECOMPRESS_FAILURE = 8
-FILE_MOVE_FAILURE = 9
-FILE_DELETE_FAILURE = 10
-FILE_COPY_FAILURE = 11
-FILE_POSITIONING_FAILURE = 12
-FILE_ALREADY_EXIST = 13
-FILE_NOT_FOUND = 14
-FILE_TYPE_MISMATCH = 15
-FILE_CASE_MISMATCH = 16
-FILE_DUPLICATED = 17
-FILE_UNKNOWN_ERROR = 0x0FFF
-
-OPTION_UNKNOWN = 0x1000
-OPTION_MISSING = 0x1001
-OPTION_CONFLICT = 0x1002
-OPTION_VALUE_INVALID = 0x1003
-OPTION_DEPRECATED = 0x1004
-OPTION_NOT_SUPPORTED = 0x1005
-OPTION_UNKNOWN_ERROR = 0x1FFF
-
-PARAMETER_INVALID = 0x2000
-PARAMETER_MISSING = 0x2001
-PARAMETER_UNKNOWN_ERROR =0x2FFF
-
-FORMAT_INVALID = 0x3000
-FORMAT_NOT_SUPPORTED = 0x3001
-FORMAT_UNKNOWN = 0x3002
-FORMAT_UNKNOWN_ERROR = 0x3FFF
-
-RESOURCE_NOT_AVAILABLE = 0x4000
-RESOURCE_ALLOCATE_FAILURE = 0x4001
-RESOURCE_FULL = 0x4002
-RESOURCE_OVERFLOW = 0x4003
-RESOURCE_UNDERRUN = 0x4004
-RESOURCE_UNKNOWN_ERROR = 0x4FFF
-
-ATTRIBUTE_NOT_AVAILABLE = 0x5000
-ATTRIBUTE_GET_FAILURE = 0x5001
-ATTRIBUTE_SET_FAILURE = 0x5002
-ATTRIBUTE_UPDATE_FAILURE = 0x5003
-ATTRIBUTE_ACCESS_DENIED = 0x5004
-ATTRIBUTE_UNKNOWN_ERROR = 0x5FFF
-
-IO_NOT_READY = 0x6000
-IO_BUSY = 0x6001
-IO_TIMEOUT = 0x6002
-IO_UNKNOWN_ERROR = 0x6FFF
-
-COMMAND_FAILURE = 0x7000
-
-PERMISSION_FAILURE = 0x8000
-
-CODE_ERROR = 0xC0DE
-
-AUTOGEN_ERROR = 0xF000
-PARSER_ERROR = 0xF001
-BUILD_ERROR = 0xF002
-GENFDS_ERROR = 0xF003
-ECC_ERROR = 0xF004
-EOT_ERROR = 0xF005
-PREBUILD_ERROR = 0xF007
-POSTBUILD_ERROR = 0xF008
-DDC_ERROR = 0xF009
-WARNING_AS_ERROR = 0xF006
-MIGRATION_ERROR = 0xF010
-PCD_VALIDATION_INFO_ERROR = 0xF011
-PCD_VARIABLE_ATTRIBUTES_ERROR = 0xF012
-PCD_VARIABLE_ATTRIBUTES_CONFLICT_ERROR = 0xF013
-ABORT_ERROR = 0xFFFE
-UNKNOWN_ERROR = 0xFFFF
-
-## Error message of each error code
-gErrorMessage = {
- FILE_NOT_FOUND : "File/directory not found in workspace",
- FILE_OPEN_FAILURE : "File open failure",
- FILE_WRITE_FAILURE : "File write failure",
- FILE_PARSE_FAILURE : "File parse failure",
- FILE_READ_FAILURE : "File read failure",
- FILE_CREATE_FAILURE : "File create failure",
- FILE_CHECKSUM_FAILURE : "Invalid checksum of file",
- FILE_COMPRESS_FAILURE : "File compress failure",
- FILE_DECOMPRESS_FAILURE : "File decompress failure",
- FILE_MOVE_FAILURE : "File move failure",
- FILE_DELETE_FAILURE : "File delete failure",
- FILE_COPY_FAILURE : "File copy failure",
- FILE_POSITIONING_FAILURE: "Failed to seeking position",
- FILE_ALREADY_EXIST : "File or directory already exists",
- FILE_TYPE_MISMATCH : "Incorrect file type",
- FILE_CASE_MISMATCH : "File name case mismatch",
- FILE_DUPLICATED : "Duplicated file found",
- FILE_UNKNOWN_ERROR : "Unknown error encountered on file",
-
- OPTION_UNKNOWN : "Unknown option",
- OPTION_MISSING : "Missing option",
- OPTION_CONFLICT : "Conflict options",
- OPTION_VALUE_INVALID : "Invalid value of option",
- OPTION_DEPRECATED : "Deprecated option",
- OPTION_NOT_SUPPORTED : "Unsupported option",
- OPTION_UNKNOWN_ERROR : "Unknown error when processing options",
-
- PARAMETER_INVALID : "Invalid parameter",
- PARAMETER_MISSING : "Missing parameter",
- PARAMETER_UNKNOWN_ERROR : "Unknown error in parameters",
-
- FORMAT_INVALID : "Invalid syntax/format",
- FORMAT_NOT_SUPPORTED : "Not supported syntax/format",
- FORMAT_UNKNOWN : "Unknown format",
- FORMAT_UNKNOWN_ERROR : "Unknown error in syntax/format ",
-
- RESOURCE_NOT_AVAILABLE : "Not available",
- RESOURCE_ALLOCATE_FAILURE : "Allocate failure",
- RESOURCE_FULL : "Full",
- RESOURCE_OVERFLOW : "Overflow",
- RESOURCE_UNDERRUN : "Underrun",
- RESOURCE_UNKNOWN_ERROR : "Unknown error",
-
- ATTRIBUTE_NOT_AVAILABLE : "Not available",
- ATTRIBUTE_GET_FAILURE : "Failed to retrieve",
- ATTRIBUTE_SET_FAILURE : "Failed to set",
- ATTRIBUTE_UPDATE_FAILURE: "Failed to update",
- ATTRIBUTE_ACCESS_DENIED : "Access denied",
- ATTRIBUTE_UNKNOWN_ERROR : "Unknown error when accessing",
-
- COMMAND_FAILURE : "Failed to execute command",
-
- IO_NOT_READY : "Not ready",
- IO_BUSY : "Busy",
- IO_TIMEOUT : "Timeout",
- IO_UNKNOWN_ERROR : "Unknown error in IO operation",
-
- UNKNOWN_ERROR : "Unknown error",
-}
-
-## Exception indicating a fatal error
-class FatalError(Exception):
- pass
-
-if __name__ == "__main__":
- pass
diff --git a/BaseTools/Source/Python/Common/BuildVersion.py b/BaseTools/Source/Python/Common/BuildVersion.py
deleted file mode 100644
index 7414d30f49..0000000000
--- a/BaseTools/Source/Python/Common/BuildVersion.py
+++ /dev/null
@@ -1,16 +0,0 @@
-## @file
-#
-# This file is for build version number auto generation
-#
-# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License which accompanies this
-# distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-gBUILD_VERSION = ""
diff --git a/BaseTools/Source/Python/Common/DataType.py b/BaseTools/Source/Python/Common/DataType.py
deleted file mode 100644
index 6edc0c0950..0000000000
--- a/BaseTools/Source/Python/Common/DataType.py
+++ /dev/null
@@ -1,507 +0,0 @@
-## @file
-# This file is used to define common static strings used by INF/DEC/DSC files
-#
-# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
-# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-##
-# Common Definitions
-#
-TAB_SPLIT = '.'
-TAB_COMMENT_EDK_START = '/*'
-TAB_COMMENT_EDK_END = '*/'
-TAB_COMMENT_EDK_SPLIT = '//'
-TAB_COMMENT_SPLIT = '#'
-TAB_SPECIAL_COMMENT = '##'
-TAB_EQUAL_SPLIT = '='
-TAB_VALUE_SPLIT = '|'
-TAB_COMMA_SPLIT = ','
-TAB_SPACE_SPLIT = ' '
-TAB_SEMI_COLON_SPLIT = ';'
-TAB_SECTION_START = '['
-TAB_SECTION_END = ']'
-TAB_OPTION_START = '<'
-TAB_OPTION_END = '>'
-TAB_SLASH = '\\'
-TAB_BACK_SLASH = '/'
-TAB_LINE_BREAK = '\n'
-TAB_PRINTCHAR_VT = '\x0b'
-TAB_PRINTCHAR_BS = '\b'
-TAB_PRINTCHAR_NUL = '\0'
-TAB_UINT8 = 'UINT8'
-TAB_UINT16 = 'UINT16'
-TAB_UINT32 = 'UINT32'
-TAB_UINT64 = 'UINT64'
-TAB_VOID = 'VOID*'
-
-TAB_EDK_SOURCE = '$(EDK_SOURCE)'
-TAB_EFI_SOURCE = '$(EFI_SOURCE)'
-TAB_WORKSPACE = '$(WORKSPACE)'
-
-TAB_ARCH_NULL = ''
-TAB_ARCH_COMMON = 'COMMON'
-TAB_ARCH_IA32 = 'IA32'
-TAB_ARCH_X64 = 'X64'
-TAB_ARCH_IPF = 'IPF'
-TAB_ARCH_ARM = 'ARM'
-TAB_ARCH_EBC = 'EBC'
-TAB_ARCH_AARCH64 = 'AARCH64'
-
-ARCH_LIST = [TAB_ARCH_IA32, TAB_ARCH_X64, TAB_ARCH_IPF, TAB_ARCH_ARM, TAB_ARCH_EBC, TAB_ARCH_AARCH64]
-ARCH_LIST_FULL = [TAB_ARCH_COMMON] + ARCH_LIST
-
-SUP_MODULE_BASE = 'BASE'
-SUP_MODULE_SEC = 'SEC'
-SUP_MODULE_PEI_CORE = 'PEI_CORE'
-SUP_MODULE_PEIM = 'PEIM'
-SUP_MODULE_DXE_CORE = 'DXE_CORE'
-SUP_MODULE_DXE_DRIVER = 'DXE_DRIVER'
-SUP_MODULE_DXE_RUNTIME_DRIVER = 'DXE_RUNTIME_DRIVER'
-SUP_MODULE_DXE_SAL_DRIVER = 'DXE_SAL_DRIVER'
-SUP_MODULE_DXE_SMM_DRIVER = 'DXE_SMM_DRIVER'
-SUP_MODULE_UEFI_DRIVER = 'UEFI_DRIVER'
-SUP_MODULE_UEFI_APPLICATION = 'UEFI_APPLICATION'
-SUP_MODULE_USER_DEFINED = 'USER_DEFINED'
-SUP_MODULE_SMM_CORE = 'SMM_CORE'
-
-SUP_MODULE_LIST = [SUP_MODULE_BASE, SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, SUP_MODULE_DXE_CORE, SUP_MODULE_DXE_DRIVER, \
- SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_UEFI_DRIVER, \
- SUP_MODULE_UEFI_APPLICATION, SUP_MODULE_USER_DEFINED, SUP_MODULE_SMM_CORE]
-SUP_MODULE_LIST_STRING = TAB_VALUE_SPLIT.join(l for l in SUP_MODULE_LIST)
-
-EDK_COMPONENT_TYPE_LIBRARY = 'LIBRARY'
-EDK_COMPONENT_TYPE_SECUARITY_CORE = 'SECUARITY_CORE'
-EDK_COMPONENT_TYPE_PEI_CORE = 'PEI_CORE'
-EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER = 'COMBINED_PEIM_DRIVER'
-EDK_COMPONENT_TYPE_PIC_PEIM = 'PIC_PEIM'
-EDK_COMPONENT_TYPE_RELOCATABLE_PEIM = 'RELOCATABLE_PEIM'
-EDK_COMPONENT_TYPE_BS_DRIVER = 'BS_DRIVER'
-EDK_COMPONENT_TYPE_RT_DRIVER = 'RT_DRIVER'
-EDK_COMPONENT_TYPE_SAL_RT_DRIVER = 'SAL_RT_DRIVER'
-EDK_COMPONENT_TYPE_APPLICATION = 'APPLICATION'
-EDK_NAME = 'EDK'
-EDKII_NAME = 'EDKII'
-
-BINARY_FILE_TYPE_FW = 'FW'
-BINARY_FILE_TYPE_GUID = 'GUID'
-BINARY_FILE_TYPE_PREEFORM = 'PREEFORM'
-BINARY_FILE_TYPE_UEFI_APP = 'UEFI_APP'
-BINARY_FILE_TYPE_UNI_UI = 'UNI_UI'
-BINARY_FILE_TYPE_UNI_VER = 'UNI_VER'
-BINARY_FILE_TYPE_LIB = 'LIB'
-BINARY_FILE_TYPE_PE32 = 'PE32'
-BINARY_FILE_TYPE_PIC = 'PIC'
-BINARY_FILE_TYPE_PEI_DEPEX = 'PEI_DEPEX'
-BINARY_FILE_TYPE_DXE_DEPEX = 'DXE_DEPEX'
-BINARY_FILE_TYPE_SMM_DEPEX = 'SMM_DEPEX'
-BINARY_FILE_TYPE_TE = 'TE'
-BINARY_FILE_TYPE_VER = 'VER'
-BINARY_FILE_TYPE_UI = 'UI'
-BINARY_FILE_TYPE_BIN = 'BIN'
-BINARY_FILE_TYPE_FV = 'FV'
-
-PLATFORM_COMPONENT_TYPE_LIBRARY = 'LIBRARY'
-PLATFORM_COMPONENT_TYPE_LIBRARY_CLASS = 'LIBRARY_CLASS'
-PLATFORM_COMPONENT_TYPE_MODULE = 'MODULE'
-
-TAB_LIBRARIES = 'Libraries'
-
-TAB_SOURCES = 'Sources'
-TAB_SOURCES_COMMON = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_SOURCES_IA32 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_SOURCES_X64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_X64
-TAB_SOURCES_IPF = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_SOURCES_ARM = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_SOURCES_EBC = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_EBC
-TAB_SOURCES_AARCH64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_BINARIES = 'Binaries'
-TAB_BINARIES_COMMON = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_BINARIES_IA32 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_BINARIES_X64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_X64
-TAB_BINARIES_IPF = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_BINARIES_ARM = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_BINARIES_EBC = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_EBC
-TAB_BINARIES_AARCH64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_INCLUDES = 'Includes'
-TAB_INCLUDES_COMMON = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_INCLUDES_IA32 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_INCLUDES_X64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_X64
-TAB_INCLUDES_IPF = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_INCLUDES_ARM = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_INCLUDES_EBC = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_EBC
-TAB_INCLUDES_AARCH64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_GUIDS = 'Guids'
-TAB_GUIDS_COMMON = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_GUIDS_IA32 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IA32
-TAB_GUIDS_X64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_X64
-TAB_GUIDS_IPF = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IPF
-TAB_GUIDS_ARM = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_ARM
-TAB_GUIDS_EBC = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_EBC
-TAB_GUIDS_AARCH64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_PROTOCOLS = 'Protocols'
-TAB_PROTOCOLS_COMMON = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PROTOCOLS_IA32 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PROTOCOLS_X64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_X64
-TAB_PROTOCOLS_IPF = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PROTOCOLS_ARM = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PROTOCOLS_EBC = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_EBC
-TAB_PROTOCOLS_AARCH64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_PPIS = 'Ppis'
-TAB_PPIS_COMMON = TAB_PPIS + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PPIS_IA32 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PPIS_X64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_X64
-TAB_PPIS_IPF = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PPIS_ARM = TAB_PPIS + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PPIS_EBC = TAB_PPIS + TAB_SPLIT + TAB_ARCH_EBC
-TAB_PPIS_AARCH64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_LIBRARY_CLASSES = 'LibraryClasses'
-TAB_LIBRARY_CLASSES_COMMON = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_LIBRARY_CLASSES_IA32 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_LIBRARY_CLASSES_X64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_X64
-TAB_LIBRARY_CLASSES_IPF = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_LIBRARY_CLASSES_ARM = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_LIBRARY_CLASSES_EBC = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_EBC
-TAB_LIBRARY_CLASSES_AARCH64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_PACKAGES = 'Packages'
-TAB_PACKAGES_COMMON = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PACKAGES_IA32 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PACKAGES_X64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_X64
-TAB_PACKAGES_IPF = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PACKAGES_ARM = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PACKAGES_EBC = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_EBC
-TAB_PACKAGES_AARCH64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_PCDS = 'Pcds'
-TAB_PCDS_FIXED_AT_BUILD = 'FixedAtBuild'
-TAB_PCDS_PATCHABLE_IN_MODULE = 'PatchableInModule'
-TAB_PCDS_FEATURE_FLAG = 'FeatureFlag'
-TAB_PCDS_DYNAMIC_EX = 'DynamicEx'
-TAB_PCDS_DYNAMIC_EX_DEFAULT = 'DynamicExDefault'
-TAB_PCDS_DYNAMIC_EX_VPD = 'DynamicExVpd'
-TAB_PCDS_DYNAMIC_EX_HII = 'DynamicExHii'
-TAB_PCDS_DYNAMIC = 'Dynamic'
-TAB_PCDS_DYNAMIC_DEFAULT = 'DynamicDefault'
-TAB_PCDS_DYNAMIC_VPD = 'DynamicVpd'
-TAB_PCDS_DYNAMIC_HII = 'DynamicHii'
-
-PCD_DYNAMIC_TYPE_LIST = [TAB_PCDS_DYNAMIC, TAB_PCDS_DYNAMIC_DEFAULT, TAB_PCDS_DYNAMIC_VPD, TAB_PCDS_DYNAMIC_HII]
-PCD_DYNAMIC_EX_TYPE_LIST = [TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII]
-
-## Dynamic-ex PCD types
-gDynamicExPcd = [TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII]
-
-TAB_PCDS_FIXED_AT_BUILD_NULL = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD
-TAB_PCDS_FIXED_AT_BUILD_COMMON = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PCDS_FIXED_AT_BUILD_IA32 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PCDS_FIXED_AT_BUILD_X64 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_X64
-TAB_PCDS_FIXED_AT_BUILD_IPF = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PCDS_FIXED_AT_BUILD_ARM = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PCDS_FIXED_AT_BUILD_EBC = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_EBC
-TAB_PCDS_FIXED_AT_BUILD_AARCH64 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_PCDS_PATCHABLE_IN_MODULE_NULL = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE
-TAB_PCDS_PATCHABLE_IN_MODULE_COMMON = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PCDS_PATCHABLE_IN_MODULE_IA32 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PCDS_PATCHABLE_IN_MODULE_X64 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_X64
-TAB_PCDS_PATCHABLE_IN_MODULE_IPF = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PCDS_PATCHABLE_IN_MODULE_ARM = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PCDS_PATCHABLE_IN_MODULE_EBC = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_EBC
-TAB_PCDS_PATCHABLE_IN_MODULE_AARCH64 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_PCDS_FEATURE_FLAG_NULL = TAB_PCDS + TAB_PCDS_FEATURE_FLAG
-TAB_PCDS_FEATURE_FLAG_COMMON = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PCDS_FEATURE_FLAG_IA32 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PCDS_FEATURE_FLAG_X64 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_X64
-TAB_PCDS_FEATURE_FLAG_IPF = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PCDS_FEATURE_FLAG_ARM = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PCDS_FEATURE_FLAG_EBC = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_EBC
-TAB_PCDS_FEATURE_FLAG_AARCH64 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_PCDS_DYNAMIC_EX_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX
-TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_DEFAULT
-TAB_PCDS_DYNAMIC_EX_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_HII
-TAB_PCDS_DYNAMIC_EX_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_VPD
-TAB_PCDS_DYNAMIC_EX_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PCDS_DYNAMIC_EX_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PCDS_DYNAMIC_EX_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_X64
-TAB_PCDS_DYNAMIC_EX_IPF = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PCDS_DYNAMIC_EX_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PCDS_DYNAMIC_EX_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_EBC
-TAB_PCDS_DYNAMIC_EX_AARCH64 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_PCDS_DYNAMIC_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC
-TAB_PCDS_DYNAMIC_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_DEFAULT
-TAB_PCDS_DYNAMIC_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_HII
-TAB_PCDS_DYNAMIC_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_VPD
-TAB_PCDS_DYNAMIC_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_PCDS_DYNAMIC_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IA32
-TAB_PCDS_DYNAMIC_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_X64
-TAB_PCDS_DYNAMIC_IPF = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IPF
-TAB_PCDS_DYNAMIC_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_ARM
-TAB_PCDS_DYNAMIC_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_EBC
-TAB_PCDS_DYNAMIC_AARCH64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_PCD_DYNAMIC_TYPE_LIST = [TAB_PCDS_DYNAMIC_DEFAULT_NULL, TAB_PCDS_DYNAMIC_VPD_NULL, TAB_PCDS_DYNAMIC_HII_NULL]
-TAB_PCD_DYNAMIC_EX_TYPE_LIST = [TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, TAB_PCDS_DYNAMIC_EX_VPD_NULL, TAB_PCDS_DYNAMIC_EX_HII_NULL]
-
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE = 'PcdLoadFixAddressPeiCodePageNumber'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE = 'UINT32'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE = 'PcdLoadFixAddressBootTimeCodePageNumber'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE = 'UINT32'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE = 'PcdLoadFixAddressRuntimeCodePageNumber'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE = 'UINT32'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE = 'PcdLoadFixAddressSmmCodePageNumber'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE = 'UINT32'
-TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST = [TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE, \
- TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE, \
- TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE, \
- TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE]
-
-TAB_DEPEX = 'Depex'
-TAB_DEPEX_COMMON = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_DEPEX_IA32 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IA32
-TAB_DEPEX_X64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_X64
-TAB_DEPEX_IPF = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IPF
-TAB_DEPEX_ARM = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_ARM
-TAB_DEPEX_EBC = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_EBC
-TAB_DEPEX_AARCH64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_SKUIDS = 'SkuIds'
-
-TAB_LIBRARIES = 'Libraries'
-TAB_LIBRARIES_COMMON = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_LIBRARIES_IA32 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IA32
-TAB_LIBRARIES_X64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_X64
-TAB_LIBRARIES_IPF = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IPF
-TAB_LIBRARIES_ARM = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_ARM
-TAB_LIBRARIES_EBC = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_EBC
-TAB_LIBRARIES_AARCH64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_COMPONENTS = 'Components'
-TAB_COMPONENTS_COMMON = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_COMMON
-TAB_COMPONENTS_IA32 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IA32
-TAB_COMPONENTS_X64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_X64
-TAB_COMPONENTS_IPF = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IPF
-TAB_COMPONENTS_ARM = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_ARM
-TAB_COMPONENTS_EBC = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_EBC
-TAB_COMPONENTS_AARCH64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_AARCH64
-
-TAB_COMPONENTS_SOURCE_OVERRIDE_PATH = 'SOURCE_OVERRIDE_PATH'
-
-TAB_BUILD_OPTIONS = 'BuildOptions'
-
-TAB_DEFINE = 'DEFINE'
-TAB_NMAKE = 'Nmake'
-TAB_USER_EXTENSIONS = 'UserExtensions'
-TAB_INCLUDE = '!include'
-
-#
-# Common Define
-#
-TAB_COMMON_DEFINES = 'Defines'
-
-#
-# Inf Definitions
-#
-TAB_INF_DEFINES = TAB_COMMON_DEFINES
-TAB_INF_DEFINES_INF_VERSION = 'INF_VERSION'
-TAB_INF_DEFINES_BASE_NAME = 'BASE_NAME'
-TAB_INF_DEFINES_FILE_GUID = 'FILE_GUID'
-TAB_INF_DEFINES_MODULE_TYPE = 'MODULE_TYPE'
-TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION = 'EFI_SPECIFICATION_VERSION'
-TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION = 'UEFI_SPECIFICATION_VERSION'
-TAB_INF_DEFINES_PI_SPECIFICATION_VERSION = 'PI_SPECIFICATION_VERSION'
-TAB_INF_DEFINES_EDK_RELEASE_VERSION = 'EDK_RELEASE_VERSION'
-TAB_INF_DEFINES_BINARY_MODULE = 'BINARY_MODULE'
-TAB_INF_DEFINES_LIBRARY_CLASS = 'LIBRARY_CLASS'
-TAB_INF_DEFINES_COMPONENT_TYPE = 'COMPONENT_TYPE'
-TAB_INF_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
-TAB_INF_DEFINES_DPX_SOURCE = 'DPX_SOURCE'
-TAB_INF_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
-TAB_INF_DEFINES_BUILD_TYPE = 'BUILD_TYPE'
-TAB_INF_DEFINES_FFS_EXT = 'FFS_EXT'
-TAB_INF_DEFINES_FV_EXT = 'FV_EXT'
-TAB_INF_DEFINES_SOURCE_FV = 'SOURCE_FV'
-TAB_INF_DEFINES_VERSION_NUMBER = 'VERSION_NUMBER'
-TAB_INF_DEFINES_VERSION = 'VERSION' # for Edk inf, the same as VERSION_NUMBER
-TAB_INF_DEFINES_VERSION_STRING = 'VERSION_STRING'
-TAB_INF_DEFINES_PCD_IS_DRIVER = 'PCD_IS_DRIVER'
-TAB_INF_DEFINES_TIANO_EDK_FLASHMAP_H = 'TIANO_EDK_FLASHMAP_H'
-TAB_INF_DEFINES_ENTRY_POINT = 'ENTRY_POINT'
-TAB_INF_DEFINES_UNLOAD_IMAGE = 'UNLOAD_IMAGE'
-TAB_INF_DEFINES_CONSTRUCTOR = 'CONSTRUCTOR'
-TAB_INF_DEFINES_DESTRUCTOR = 'DESTRUCTOR'
-TAB_INF_DEFINES_DEFINE = 'DEFINE'
-TAB_INF_DEFINES_SPEC = 'SPEC'
-TAB_INF_DEFINES_CUSTOM_MAKEFILE = 'CUSTOM_MAKEFILE'
-TAB_INF_DEFINES_MACRO = '__MACROS__'
-TAB_INF_DEFINES_SHADOW = 'SHADOW'
-TAB_INF_FIXED_PCD = 'FixedPcd'
-TAB_INF_FEATURE_PCD = 'FeaturePcd'
-TAB_INF_PATCH_PCD = 'PatchPcd'
-TAB_INF_PCD = 'Pcd'
-TAB_INF_PCD_EX = 'PcdEx'
-TAB_INF_USAGE_PRO = 'PRODUCES'
-TAB_INF_USAGE_SOME_PRO = 'SOMETIMES_PRODUCES'
-TAB_INF_USAGE_CON = 'CONSUMES'
-TAB_INF_USAGE_SOME_CON = 'SOMETIMES_CONSUMES'
-TAB_INF_USAGE_NOTIFY = 'NOTIFY'
-TAB_INF_USAGE_TO_START = 'TO_START'
-TAB_INF_USAGE_BY_START = 'BY_START'
-TAB_INF_GUIDTYPE_EVENT = 'Event'
-TAB_INF_GUIDTYPE_FILE = 'File'
-TAB_INF_GUIDTYPE_FV = 'FV'
-TAB_INF_GUIDTYPE_GUID = 'GUID'
-TAB_INF_GUIDTYPE_HII = 'HII'
-TAB_INF_GUIDTYPE_HOB = 'HOB'
-TAB_INF_GUIDTYPE_ST = 'SystemTable'
-TAB_INF_GUIDTYPE_TSG = 'TokenSpaceGuid'
-TAB_INF_GUIDTYPE_VAR = 'Variable'
-TAB_INF_GUIDTYPE_PROTOCOL = 'PROTOCOL'
-TAB_INF_GUIDTYPE_PPI = 'PPI'
-TAB_INF_USAGE_UNDEFINED = 'UNDEFINED'
-
-#
-# Dec Definitions
-#
-TAB_DEC_DEFINES = TAB_COMMON_DEFINES
-TAB_DEC_DEFINES_DEC_SPECIFICATION = 'DEC_SPECIFICATION'
-TAB_DEC_DEFINES_PACKAGE_NAME = 'PACKAGE_NAME'
-TAB_DEC_DEFINES_PACKAGE_GUID = 'PACKAGE_GUID'
-TAB_DEC_DEFINES_PACKAGE_VERSION = 'PACKAGE_VERSION'
-TAB_DEC_DEFINES_PKG_UNI_FILE = 'PKG_UNI_FILE'
-
-#
-# Dsc Definitions
-#
-TAB_DSC_DEFINES = TAB_COMMON_DEFINES
-TAB_DSC_DEFINES_PLATFORM_NAME = 'PLATFORM_NAME'
-TAB_DSC_DEFINES_PLATFORM_GUID = 'PLATFORM_GUID'
-TAB_DSC_DEFINES_PLATFORM_VERSION = 'PLATFORM_VERSION'
-TAB_DSC_DEFINES_DSC_SPECIFICATION = 'DSC_SPECIFICATION'
-TAB_DSC_DEFINES_OUTPUT_DIRECTORY = 'OUTPUT_DIRECTORY'
-TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES = 'SUPPORTED_ARCHITECTURES'
-TAB_DSC_DEFINES_BUILD_TARGETS = 'BUILD_TARGETS'
-TAB_DSC_DEFINES_SKUID_IDENTIFIER = 'SKUID_IDENTIFIER'
-TAB_DSC_DEFINES_PCD_INFO_GENERATION = 'PCD_INFO_GENERATION'
-TAB_DSC_DEFINES_PCD_VAR_CHECK_GENERATION = 'PCD_VAR_CHECK_GENERATION'
-TAB_DSC_DEFINES_FLASH_DEFINITION = 'FLASH_DEFINITION'
-TAB_DSC_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
-TAB_DSC_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
-TAB_DSC_DEFINES_BS_BASE_ADDRESS = 'BsBaseAddress'
-TAB_DSC_DEFINES_RT_BASE_ADDRESS = 'RtBaseAddress'
-TAB_DSC_DEFINES_RFC_LANGUAGES = 'RFC_LANGUAGES'
-TAB_DSC_DEFINES_ISO_LANGUAGES = 'ISO_LANGUAGES'
-TAB_DSC_DEFINES_DEFINE = 'DEFINE'
-TAB_DSC_DEFINES_VPD_TOOL_GUID = 'VPD_TOOL_GUID'
-TAB_FIX_LOAD_TOP_MEMORY_ADDRESS = 'FIX_LOAD_TOP_MEMORY_ADDRESS'
-TAB_DSC_DEFINES_EDKGLOBAL = 'EDK_GLOBAL'
-TAB_DSC_PREBUILD = 'PREBUILD'
-TAB_DSC_POSTBUILD = 'POSTBUILD'
-#
-# TargetTxt Definitions
-#
-TAB_TAT_DEFINES_ACTIVE_PLATFORM = 'ACTIVE_PLATFORM'
-TAB_TAT_DEFINES_ACTIVE_MODULE = 'ACTIVE_MODULE'
-TAB_TAT_DEFINES_TOOL_CHAIN_CONF = 'TOOL_CHAIN_CONF'
-TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER = 'MAX_CONCURRENT_THREAD_NUMBER'
-TAB_TAT_DEFINES_TARGET = 'TARGET'
-TAB_TAT_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
-TAB_TAT_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
-TAB_TAT_DEFINES_BUILD_RULE_CONF = "BUILD_RULE_CONF"
-
-#
-# ToolDef Definitions
-#
-TAB_TOD_DEFINES_TARGET = 'TARGET'
-TAB_TOD_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
-TAB_TOD_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
-TAB_TOD_DEFINES_COMMAND_TYPE = 'COMMAND_TYPE'
-TAB_TOD_DEFINES_FAMILY = 'FAMILY'
-TAB_TOD_DEFINES_BUILDRULEFAMILY = 'BUILDRULEFAMILY'
-TAB_TOD_DEFINES_BUILDRULEORDER = 'BUILDRULEORDER'
-
-#
-# Conditional Statements
-#
-TAB_IF = '!if'
-TAB_END_IF = '!endif'
-TAB_ELSE_IF = '!elseif'
-TAB_ELSE = '!else'
-TAB_IF_DEF = '!ifdef'
-TAB_IF_N_DEF = '!ifndef'
-TAB_IF_EXIST = '!if exist'
-TAB_ERROR = '!ERROR'
-
-#
-# Unknown section
-#
-TAB_UNKNOWN = 'UNKNOWN'
-
-#
-# Build database path
-#
-DATABASE_PATH = ":memory:" #"BuildDatabase.db"
-
-# used by ECC
-MODIFIER_LIST = ['IN', 'OUT', 'OPTIONAL', 'UNALIGNED', 'EFI_RUNTIMESERVICE', 'EFI_BOOTSERVICE', 'EFIAPI']
-
-# Dependency Expression
-DEPEX_SUPPORTED_OPCODE = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "END", "SOR", "TRUE", "FALSE", '(', ')']
-
-TAB_STATIC_LIBRARY = "STATIC-LIBRARY-FILE"
-TAB_DYNAMIC_LIBRARY = "DYNAMIC-LIBRARY-FILE"
-TAB_FRAMEWORK_IMAGE = "EFI-IMAGE-FILE"
-TAB_C_CODE_FILE = "C-CODE-FILE"
-TAB_C_HEADER_FILE = "C-HEADER-FILE"
-TAB_UNICODE_FILE = "UNICODE-TEXT-FILE"
-TAB_IMAGE_FILE = "IMAGE-DEFINITION-FILE"
-TAB_DEPENDENCY_EXPRESSION_FILE = "DEPENDENCY-EXPRESSION-FILE"
-TAB_UNKNOWN_FILE = "UNKNOWN-TYPE-FILE"
-TAB_DEFAULT_BINARY_FILE = "_BINARY_FILE_"
-TAB_OBJECT_FILE = "OBJECT-FILE"
-TAB_VFR_FILE = 'VISUAL-FORM-REPRESENTATION-FILE'
-
-# used by BRG
-TAB_BRG_PCD = 'PCD'
-TAB_BRG_LIBRARY = 'Library'
-
-#
-# Build Rule File Version Definition
-#
-TAB_BUILD_RULE_VERSION = "build_rule_version"
-
-# section name for PCDs
-PCDS_DYNAMIC_DEFAULT = "PcdsDynamicDefault"
-PCDS_DYNAMIC_VPD = "PcdsDynamicVpd"
-PCDS_DYNAMIC_HII = "PcdsDynamicHii"
-PCDS_DYNAMICEX_DEFAULT = "PcdsDynamicExDefault"
-PCDS_DYNAMICEX_VPD = "PcdsDynamicExVpd"
-PCDS_DYNAMICEX_HII = "PcdsDynamicExHii"
-
-SECTIONS_HAVE_ITEM_PCD = [PCDS_DYNAMIC_DEFAULT.upper(),PCDS_DYNAMIC_VPD.upper(),PCDS_DYNAMIC_HII.upper(), \
- PCDS_DYNAMICEX_DEFAULT.upper(),PCDS_DYNAMICEX_VPD.upper(),PCDS_DYNAMICEX_HII.upper()]
-# Section allowed to have items after arch
-SECTIONS_HAVE_ITEM_AFTER_ARCH = [TAB_LIBRARY_CLASSES.upper(), TAB_DEPEX.upper(), TAB_USER_EXTENSIONS.upper(),
- PCDS_DYNAMIC_DEFAULT.upper(),
- PCDS_DYNAMIC_VPD.upper(),
- PCDS_DYNAMIC_HII.upper(),
- PCDS_DYNAMICEX_DEFAULT.upper(),
- PCDS_DYNAMICEX_VPD.upper(),
- PCDS_DYNAMICEX_HII.upper(),
- TAB_BUILD_OPTIONS.upper(),
- TAB_INCLUDES.upper()]
diff --git a/BaseTools/Source/Python/Common/Database.py b/BaseTools/Source/Python/Common/Database.py
deleted file mode 100644
index a81a44731f..0000000000
--- a/BaseTools/Source/Python/Common/Database.py
+++ /dev/null
@@ -1,120 +0,0 @@
-## @file
-# This file is used to create a database used by ECC tool
-#
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import sqlite3
-import Common.LongFilePathOs as os
-
-import EdkLogger as EdkLogger
-from CommonDataClass.DataClass import *
-from String import *
-from DataType import *
-
-from Table.TableDataModel import TableDataModel
-from Table.TableFile import TableFile
-from Table.TableInf import TableInf
-from Table.TableDec import TableDec
-from Table.TableDsc import TableDsc
-
-## Database
-#
-# This class defined the build databse
-# During the phase of initialization, the database will create all tables and
-# insert all records of table DataModel
-#
-# @param object: Inherited from object class
-# @param DbPath: A string for the path of the ECC database
-#
-# @var Conn: Connection of the ECC database
-# @var Cur: Cursor of the connection
-# @var TblDataModel: Local instance for TableDataModel
-#
-class Database(object):
- def __init__(self, DbPath):
- if os.path.exists(DbPath):
- os.remove(DbPath)
- self.Conn = sqlite3.connect(DbPath, isolation_level = 'DEFERRED')
- self.Conn.execute("PRAGMA page_size=8192")
- self.Conn.execute("PRAGMA synchronous=OFF")
- self.Cur = self.Conn.cursor()
- self.TblDataModel = TableDataModel(self.Cur)
- self.TblFile = TableFile(self.Cur)
- self.TblInf = TableInf(self.Cur)
- self.TblDec = TableDec(self.Cur)
- self.TblDsc = TableDsc(self.Cur)
-
- ## Initialize build database
- #
- # 1. Delete all old existing tables
- # 2. Create new tables
- # 3. Initialize table DataModel
- #
- def InitDatabase(self):
- EdkLogger.verbose("\nInitialize ECC database started ...")
- #
- # Drop all old existing tables
- #
-# self.TblDataModel.Drop()
-# self.TblDsc.Drop()
-# self.TblFile.Drop()
-
- #
- # Create new tables
- #
- self.TblDataModel.Create()
- self.TblFile.Create()
- self.TblInf.Create()
- self.TblDec.Create()
- self.TblDsc.Create()
-
- #
- # Initialize table DataModel
- #
- self.TblDataModel.InitTable()
- EdkLogger.verbose("Initialize ECC database ... DONE!")
-
- ## Query a table
- #
- # @param Table: The instance of the table to be queried
- #
- def QueryTable(self, Table):
- Table.Query()
-
- ## Close entire database
- #
- # Commit all first
- # Close the connection and cursor
- #
- def Close(self):
- self.Conn.commit()
- self.Cur.close()
- self.Conn.close()
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
-if __name__ == '__main__':
- EdkLogger.Initialize()
- EdkLogger.SetLevel(EdkLogger.DEBUG_0)
-
- Db = Database(DATABASE_PATH)
- Db.InitDatabase()
- Db.QueryTable(Db.TblDataModel)
- Db.QueryTable(Db.TblFile)
- Db.QueryTable(Db.TblDsc)
- Db.Close()
- \ No newline at end of file
diff --git a/BaseTools/Source/Python/Common/DecClassObject.py b/BaseTools/Source/Python/Common/DecClassObject.py
deleted file mode 100644
index d7c70a7336..0000000000
--- a/BaseTools/Source/Python/Common/DecClassObject.py
+++ /dev/null
@@ -1,564 +0,0 @@
-## @file
-# This file is used to define each component of DEC file
-#
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import Common.LongFilePathOs as os
-from String import *
-from DataType import *
-from Identification import *
-from Dictionary import *
-from CommonDataClass.PackageClass import *
-from CommonDataClass.CommonClass import PcdClass
-from BuildToolError import *
-from Table.TableDec import TableDec
-import Database
-from Parsing import *
-import GlobalData
-from Common.LongFilePathSupport import OpenLongFilePath as open
-
-#
-# Global variable
-#
-Section = {TAB_UNKNOWN.upper() : MODEL_UNKNOWN,
- TAB_DEC_DEFINES.upper() : MODEL_META_DATA_HEADER,
- TAB_INCLUDES.upper() : MODEL_EFI_INCLUDE,
- TAB_LIBRARY_CLASSES.upper() : MODEL_EFI_LIBRARY_CLASS,
- TAB_COMPONENTS.upper() : MODEL_META_DATA_COMPONENT,
- TAB_GUIDS.upper() : MODEL_EFI_GUID,
- TAB_PROTOCOLS.upper() : MODEL_EFI_PROTOCOL,
- TAB_PPIS.upper() : MODEL_EFI_PPI,
- TAB_PCDS_FIXED_AT_BUILD_NULL.upper() : MODEL_PCD_FIXED_AT_BUILD,
- TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper() : MODEL_PCD_PATCHABLE_IN_MODULE,
- TAB_PCDS_FEATURE_FLAG_NULL.upper() : MODEL_PCD_FEATURE_FLAG,
- TAB_PCDS_DYNAMIC_EX_NULL.upper() : MODEL_PCD_DYNAMIC_EX,
- TAB_PCDS_DYNAMIC_NULL.upper() : MODEL_PCD_DYNAMIC,
- TAB_USER_EXTENSIONS.upper() : MODEL_META_DATA_USER_EXTENSION
- }
-
-
-## DecObject
-#
-# This class defined basic Dec object which is used by inheriting
-#
-# @param object: Inherited from object class
-#
-class DecObject(object):
- def __init__(self):
- object.__init__()
-
-## Dec
-#
-# This class defined the structure used in Dec object
-#
-# @param DecObject: Inherited from DecObject class
-# @param Filename: Input value for Filename of Dec file, default is None
-# @param IsMergeAllArches: Input value for IsMergeAllArches
-# True is to merge all arches
-# Fales is not to merge all arches
-# default is False
-# @param IsToPackage: Input value for IsToPackage
-# True is to transfer to PackageObject automatically
-# False is not to transfer to PackageObject automatically
-# default is False
-# @param WorkspaceDir: Input value for current workspace directory, default is None
-#
-# @var Identification: To store value for Identification, it is a structure as Identification
-# @var Defines: To store value for Defines, it is a structure as DecDefines
-# @var UserExtensions: To store value for UserExtensions
-# @var Package: To store value for Package, it is a structure as PackageClass
-# @var WorkspaceDir: To store value for WorkspaceDir
-# @var Contents: To store value for Contents, it is a structure as DecContents
-# @var KeyList: To store value for KeyList, a list for all Keys used in Dec
-#
-class Dec(DecObject):
- def __init__(self, Filename=None, IsToDatabase=False, IsToPackage=False, WorkspaceDir=None, Database=None, SupArchList=DataType.ARCH_LIST):
- self.Identification = Identification()
- self.Package = PackageClass()
- self.UserExtensions = ''
- self.WorkspaceDir = WorkspaceDir
- self.SupArchList = SupArchList
- self.IsToDatabase = IsToDatabase
-
- self.Cur = Database.Cur
- self.TblFile = Database.TblFile
- self.TblDec = Database.TblDec
- self.FileID = -1
-
- self.KeyList = [
- TAB_INCLUDES, TAB_GUIDS, TAB_PROTOCOLS, TAB_PPIS, TAB_LIBRARY_CLASSES, \
- TAB_PCDS_FIXED_AT_BUILD_NULL, TAB_PCDS_PATCHABLE_IN_MODULE_NULL, TAB_PCDS_FEATURE_FLAG_NULL, \
- TAB_PCDS_DYNAMIC_NULL, TAB_PCDS_DYNAMIC_EX_NULL, TAB_DEC_DEFINES
- ]
- #
- # Upper all KEYs to ignore case sensitive when parsing
- #
- self.KeyList = map(lambda c: c.upper(), self.KeyList)
-
- #
- # Init RecordSet
- #
- self.RecordSet = {}
- for Key in self.KeyList:
- self.RecordSet[Section[Key]] = []
-
- #
- # Load Dec file if filename is not None
- #
- if Filename != None:
- self.LoadDecFile(Filename)
-
- #
- # Transfer to Package Object if IsToPackage is True
- #
- if IsToPackage:
- self.DecToPackage()
-
- ## Load Dec file
- #
- # Load the file if it exists
- #
- # @param Filename: Input value for filename of Dec file
- #
- def LoadDecFile(self, Filename):
- #
- # Insert a record for file
- #
- Filename = NormPath(Filename)
- self.Identification.FileFullPath = Filename
- (self.Identification.FileRelativePath, self.Identification.FileName) = os.path.split(Filename)
- self.FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_DEC)
-
- #
- # Init DecTable
- #
- #self.TblDec.Table = "Dec%s" % self.FileID
- #self.TblDec.Create()
-
- #
- # Init common datas
- #
- IfDefList, SectionItemList, CurrentSection, ArchList, ThirdList, IncludeFiles = \
- [], [], TAB_UNKNOWN, [], [], []
- LineNo = 0
-
- #
- # Parse file content
- #
- IsFindBlockComment = False
- ReservedLine = ''
- for Line in open(Filename, 'r'):
- LineNo = LineNo + 1
- #
- # Remove comment block
- #
- if Line.find(TAB_COMMENT_EDK_START) > -1:
- ReservedLine = GetSplitList(Line, TAB_COMMENT_EDK_START, 1)[0]
- IsFindBlockComment = True
- if Line.find(TAB_COMMENT_EDK_END) > -1:
- Line = ReservedLine + GetSplitList(Line, TAB_COMMENT_EDK_END, 1)[1]
- ReservedLine = ''
- IsFindBlockComment = False
- if IsFindBlockComment:
- continue
-
- #
- # Remove comments at tail and remove spaces again
- #
- Line = CleanString(Line)
- if Line == '':
- continue
-
- #
- # Find a new section tab
- # First insert previous section items
- # And then parse the content of the new section
- #
- if Line.startswith(TAB_SECTION_START) and Line.endswith(TAB_SECTION_END):
- #
- # Insert items data of previous section
- #
- Model = Section[CurrentSection.upper()]
- InsertSectionItemsIntoDatabase(self.TblDec, self.FileID, Filename, Model, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList, self.RecordSet)
-
- #
- # Parse the new section
- #
- SectionItemList = []
- ArchList = []
- ThirdList = []
-
- CurrentSection = ''
- LineList = GetSplitValueList(Line[len(TAB_SECTION_START):len(Line) - len(TAB_SECTION_END)], TAB_COMMA_SPLIT)
- for Item in LineList:
- ItemList = GetSplitValueList(Item, TAB_SPLIT)
- if CurrentSection == '':
- CurrentSection = ItemList[0]
- else:
- if CurrentSection != ItemList[0]:
- EdkLogger.error("Parser", PARSER_ERROR, "Different section names '%s' and '%s' are found in one section definition, this is not allowed." % (CurrentSection, ItemList[0]), File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
- if CurrentSection.upper() not in self.KeyList:
- RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
- ItemList.append('')
- ItemList.append('')
- if len(ItemList) > 5:
- RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
- else:
- if ItemList[1] != '' and ItemList[1].upper() not in ARCH_LIST_FULL:
- EdkLogger.error("Parser", PARSER_ERROR, "Invalid Arch definition '%s' found" % ItemList[1], File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
- ArchList.append(ItemList[1].upper())
- ThirdList.append(ItemList[2])
-
- continue
-
- #
- # Not in any defined section
- #
- if CurrentSection == TAB_UNKNOWN:
- ErrorMsg = "%s is not in any defined section" % Line
- EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
-
- #
- # Add a section item
- #
- SectionItemList.append([Line, LineNo])
- # End of parse
- #End of For
-
- #
- # Insert items data of last section
- #
- Model = Section[CurrentSection.upper()]
- InsertSectionItemsIntoDatabase(self.TblDec, self.FileID, Filename, Model, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList, self.RecordSet)
-
- #
- # Replace all DEFINE macros with its actual values
- #
- ParseDefineMacro2(self.TblDec, self.RecordSet, GlobalData.gGlobalDefines)
-
- ## Transfer to Package Object
- #
- # Transfer all contents of a Dec file to a standard Package Object
- #
- def DecToPackage(self):
- #
- # Init global information for the file
- #
- ContainerFile = self.Identification.FileFullPath
-
- #
- # Generate Package Header
- #
- self.GenPackageHeader(ContainerFile)
-
- #
- # Generate Includes
- #
- self.GenIncludes(ContainerFile)
-
- #
- # Generate Guids
- #
- self.GenGuidProtocolPpis(DataType.TAB_GUIDS, ContainerFile)
-
- #
- # Generate Protocols
- #
- self.GenGuidProtocolPpis(DataType.TAB_PROTOCOLS, ContainerFile)
-
- #
- # Generate Ppis
- #
- self.GenGuidProtocolPpis(DataType.TAB_PPIS, ContainerFile)
-
- #
- # Generate LibraryClasses
- #
- self.GenLibraryClasses(ContainerFile)
-
- #
- # Generate Pcds
- #
- self.GenPcds(ContainerFile)
-
- ## Get Package Header
- #
- # Gen Package Header of Dec as <Key> = <Value>
- #
- # @param ContainerFile: The Dec file full path
- #
- def GenPackageHeader(self, ContainerFile):
- EdkLogger.debug(2, "Generate PackageHeader ...")
- #
- # Update all defines item in database
- #
- RecordSet = self.RecordSet[MODEL_META_DATA_HEADER]
- for Record in RecordSet:
- ValueList = GetSplitValueList(Record[0], TAB_EQUAL_SPLIT)
- if len(ValueList) != 2:
- RaiseParserError(Record[0], 'Defines', ContainerFile, '<Key> = <Value>', Record[2])
- ID, Value1, Value2, Arch, LineNo = Record[3], ValueList[0], ValueList[1], Record[1], Record[2]
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
- where ID = %s""" % (self.TblDec.Table, ConvertToSqlString2(Value1), ConvertToSqlString2(Value2), ID)
- self.TblDec.Exec(SqlCommand)
-
- #
- # Get detailed information
- #
- for Arch in self.SupArchList:
- PackageHeader = PackageHeaderClass()
-
- PackageHeader.Name = QueryDefinesItem(self.TblDec, TAB_DEC_DEFINES_PACKAGE_NAME, Arch, self.FileID)[0]
- PackageHeader.Guid = QueryDefinesItem(self.TblDec, TAB_DEC_DEFINES_PACKAGE_GUID, Arch, self.FileID)[0]
- PackageHeader.Version = QueryDefinesItem(self.TblDec, TAB_DEC_DEFINES_PACKAGE_VERSION, Arch, self.FileID)[0]
- PackageHeader.FileName = self.Identification.FileName
- PackageHeader.FullPath = self.Identification.FileFullPath
- PackageHeader.DecSpecification = QueryDefinesItem(self.TblDec, TAB_DEC_DEFINES_DEC_SPECIFICATION, Arch, self.FileID)[0]
-
- self.Package.Header[Arch] = PackageHeader
-
- ## GenIncludes
- #
- # Gen Includes of Dec
- #
- #
- # @param ContainerFile: The Dec file full path
- #
- def GenIncludes(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_INCLUDES)
- Includes = {}
- #
- # Get all Includes
- #
- RecordSet = self.RecordSet[MODEL_EFI_INCLUDE]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- MergeArches(Includes, Record[0], Arch)
-
- for Key in Includes.keys():
- Include = IncludeClass()
- Include.FilePath = NormPath(Key)
- Include.SupArchList = Includes[Key]
- self.Package.Includes.append(Include)
-
- ## GenPpis
- #
- # Gen Ppis of Dec
- # <CName>=<GuidValue>
- #
- # @param ContainerFile: The Dec file full path
- #
- def GenGuidProtocolPpis(self, Type, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % Type)
- Lists = {}
- #
- # Get all Items
- #
- RecordSet = self.RecordSet[Section[Type.upper()]]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (Name, Value) = GetGuidsProtocolsPpisOfDec(Record[0], Type, ContainerFile, Record[2])
- MergeArches(Lists, (Name, Value), Arch)
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
- where ID = %s""" % (self.TblDec.Table, ConvertToSqlString2(Name), ConvertToSqlString2(Value), Record[3])
- self.TblDec.Exec(SqlCommand)
-
- ListMember = None
- if Type == TAB_GUIDS:
- ListMember = self.Package.GuidDeclarations
- elif Type == TAB_PROTOCOLS:
- ListMember = self.Package.ProtocolDeclarations
- elif Type == TAB_PPIS:
- ListMember = self.Package.PpiDeclarations
-
- for Key in Lists.keys():
- ListClass = GuidProtocolPpiCommonClass()
- ListClass.CName = Key[0]
- ListClass.Guid = Key[1]
- ListClass.SupArchList = Lists[Key]
- ListMember.append(ListClass)
-
-
- ## GenLibraryClasses
- #
- # Gen LibraryClasses of Dec
- # <CName>=<GuidValue>
- #
- # @param ContainerFile: The Dec file full path
- #
- def GenLibraryClasses(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARY_CLASSES)
- LibraryClasses = {}
- #
- # Get all Guids
- #
- RecordSet = self.RecordSet[MODEL_EFI_LIBRARY_CLASS]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- List = GetSplitValueList(Record[0], DataType.TAB_VALUE_SPLIT)
- if len(List) != 2:
- RaiseParserError(Record[0], 'LibraryClasses', ContainerFile, '<LibraryClassName>|<LibraryClassInstanceFilename>', Record[2])
- else:
- CheckFileExist(self.Identification.FileRelativePath, List[1], ContainerFile, 'LibraryClasses', Record[0])
- MergeArches(LibraryClasses, (List[0], List[1]), Arch)
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
- where ID = %s""" % (self.TblDec.Table, ConvertToSqlString2(List[0]), ConvertToSqlString2(List[1]), SUP_MODULE_LIST_STRING, Record[3])
- self.TblDec.Exec(SqlCommand)
-
-
- for Key in LibraryClasses.keys():
- LibraryClass = LibraryClassClass()
- LibraryClass.LibraryClass = Key[0]
- LibraryClass.RecommendedInstance = NormPath(Key[1])
- LibraryClass.SupModuleList = SUP_MODULE_LIST
- LibraryClass.SupArchList = LibraryClasses[Key]
- self.Package.LibraryClassDeclarations.append(LibraryClass)
-
- ## GenPcds
- #
- # Gen Pcds of Dec
- # <TokenSpcCName>.<TokenCName>|<Value>|<DatumType>|<Token>
- #
- # @param ContainerFile: The Dec file full path
- #
- def GenPcds(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_PCDS)
- Pcds = {}
- PcdToken = {}
- #
- # Get all Guids
- #
- RecordSet1 = self.RecordSet[MODEL_PCD_FIXED_AT_BUILD]
- RecordSet2 = self.RecordSet[MODEL_PCD_PATCHABLE_IN_MODULE]
- RecordSet3 = self.RecordSet[MODEL_PCD_FEATURE_FLAG]
- RecordSet4 = self.RecordSet[MODEL_PCD_DYNAMIC_EX]
- RecordSet5 = self.RecordSet[MODEL_PCD_DYNAMIC]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet1:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (TokenGuidCName, TokenName, Value, DatumType, Token, Type) = GetPcdOfDec(Record[0], TAB_PCDS_FIXED_AT_BUILD, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, DatumType, Token, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- for Record in RecordSet2:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (TokenGuidCName, TokenName, Value, DatumType, Token, Type) = GetPcdOfDec(Record[0], TAB_PCDS_PATCHABLE_IN_MODULE, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, DatumType, Token, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- for Record in RecordSet3:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (TokenGuidCName, TokenName, Value, DatumType, Token, Type) = GetPcdOfDec(Record[0], TAB_PCDS_FEATURE_FLAG, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, DatumType, Token, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- for Record in RecordSet4:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (TokenGuidCName, TokenName, Value, DatumType, Token, Type) = GetPcdOfDec(Record[0], TAB_PCDS_DYNAMIC_EX, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, DatumType, Token, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- for Record in RecordSet5:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (TokenGuidCName, TokenName, Value, DatumType, Token, Type) = GetPcdOfDec(Record[0], TAB_PCDS_DYNAMIC, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, DatumType, Token, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- #
- # Update to database
- #
- if self.IsToDatabase:
- for Key in PcdToken.keys():
- SqlCommand = """update %s set Value2 = '%s' where ID = %s""" % (self.TblDec.Table, ".".join((PcdToken[Key][0], PcdToken[Key][1])), Key)
- self.TblDec.Exec(SqlCommand)
-
- for Key in Pcds.keys():
- Pcd = PcdClass()
- Pcd.CName = Key[1]
- Pcd.Token = Key[4]
- Pcd.TokenSpaceGuidCName = Key[0]
- Pcd.DatumType = Key[3]
- Pcd.DefaultValue = Key[2]
- Pcd.ItemType = Key[5]
- Pcd.SupArchList = Pcds[Key]
- self.Package.PcdDeclarations.append(Pcd)
-
- ## Show detailed information of Package
- #
- # Print all members and their values of Package class
- #
- def ShowPackage(self):
- M = self.Package
- for Arch in M.Header.keys():
- print '\nArch =', Arch
- print 'Filename =', M.Header[Arch].FileName
- print 'FullPath =', M.Header[Arch].FullPath
- print 'BaseName =', M.Header[Arch].Name
- print 'Guid =', M.Header[Arch].Guid
- print 'Version =', M.Header[Arch].Version
- print 'DecSpecification =', M.Header[Arch].DecSpecification
- print '\nIncludes =', M.Includes
- for Item in M.Includes:
- print Item.FilePath, Item.SupArchList
- print '\nGuids =', M.GuidDeclarations
- for Item in M.GuidDeclarations:
- print Item.CName, Item.Guid, Item.SupArchList
- print '\nProtocols =', M.ProtocolDeclarations
- for Item in M.ProtocolDeclarations:
- print Item.CName, Item.Guid, Item.SupArchList
- print '\nPpis =', M.PpiDeclarations
- for Item in M.PpiDeclarations:
- print Item.CName, Item.Guid, Item.SupArchList
- print '\nLibraryClasses =', M.LibraryClassDeclarations
- for Item in M.LibraryClassDeclarations:
- print Item.LibraryClass, Item.RecommendedInstance, Item.SupModuleList, Item.SupArchList
- print '\nPcds =', M.PcdDeclarations
- for Item in M.PcdDeclarations:
- print 'CName=', Item.CName, 'TokenSpaceGuidCName=', Item.TokenSpaceGuidCName, 'DefaultValue=', Item.DefaultValue, 'ItemType=', Item.ItemType, 'Token=', Item.Token, 'DatumType=', Item.DatumType, Item.SupArchList
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
-if __name__ == '__main__':
- EdkLogger.Initialize()
- EdkLogger.SetLevel(EdkLogger.DEBUG_0)
-
- W = os.getenv('WORKSPACE')
- F = os.path.join(W, 'Nt32Pkg/Nt32Pkg.dec')
-
- Db = Database.Database('Dec.db')
- Db.InitDatabase()
-
- P = Dec(os.path.normpath(F), True, True, W, Db)
- P.ShowPackage()
-
- Db.Close()
diff --git a/BaseTools/Source/Python/Common/Dictionary.py b/BaseTools/Source/Python/Common/Dictionary.py
deleted file mode 100644
index 1c33fefabf..0000000000
--- a/BaseTools/Source/Python/Common/Dictionary.py
+++ /dev/null
@@ -1,76 +0,0 @@
-## @file
-# Define a dictionary structure
-#
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import EdkLogger
-from DataType import *
-from Common.LongFilePathSupport import OpenLongFilePath as open
-
-## Convert a text file to a dictionary
-#
-# Convert a text file to a dictionary of (name:value) pairs.
-#
-# @retval 0 Convert successful
-# @retval 1 Open file failed
-#
-def ConvertTextFileToDictionary(FileName, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
- try:
- F = open(FileName, 'r')
- Keys = []
- for Line in F:
- if Line.startswith(CommentCharacter):
- continue
- LineList = Line.split(KeySplitCharacter, 1)
- if len(LineList) >= 2:
- Key = LineList[0].split()
- if len(Key) == 1 and Key[0][0] != CommentCharacter and Key[0] not in Keys:
- if ValueSplitFlag:
- Dictionary[Key[0]] = LineList[1].replace('\\', '/').split(ValueSplitCharacter)
- else:
- Dictionary[Key[0]] = LineList[1].strip().replace('\\', '/')
- Keys += [Key[0]]
- F.close()
- return 0
- except:
- EdkLogger.info('Open file failed')
- return 1
-
-## Print the dictionary
-#
-# Print all items of dictionary one by one
-#
-# @param Dict: The dictionary to be printed
-#
-def printDict(Dict):
- if Dict != None:
- KeyList = Dict.keys()
- for Key in KeyList:
- if Dict[Key] != '':
- print Key + ' = ' + str(Dict[Key])
-
-## Print the dictionary
-#
-# Print the items of dictionary which matched with input key
-#
-# @param list: The dictionary to be printed
-# @param key: The key of the item to be printed
-#
-def printList(Key, List):
- if type(List) == type([]):
- if len(List) > 0:
- if Key.find(TAB_SPLIT) != -1:
- print "\n" + Key
- for Item in List:
- print Item
diff --git a/BaseTools/Source/Python/Common/DscClassObject.py b/BaseTools/Source/Python/Common/DscClassObject.py
deleted file mode 100644
index c2fa1c275a..0000000000
--- a/BaseTools/Source/Python/Common/DscClassObject.py
+++ /dev/null
@@ -1,1445 +0,0 @@
-## @file
-# This file is used to define each component of DSC file
-#
-# Copyright (c) 2007 - 2016, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import Common.LongFilePathOs as os
-import EdkLogger as EdkLogger
-import Database
-from String import *
-from Parsing import *
-from DataType import *
-from Identification import *
-from Dictionary import *
-from CommonDataClass.PlatformClass import *
-from CommonDataClass.CommonClass import SkuInfoClass
-from BuildToolError import *
-from Misc import sdict
-import GlobalData
-from Table.TableDsc import TableDsc
-from Common.LongFilePathSupport import OpenLongFilePath as open
-
-#
-# Global variable
-#
-Section = {TAB_UNKNOWN.upper() : MODEL_UNKNOWN,
- TAB_DSC_DEFINES.upper() : MODEL_META_DATA_HEADER,
- TAB_BUILD_OPTIONS.upper() : MODEL_META_DATA_BUILD_OPTION,
- TAB_SKUIDS.upper() : MODEL_EFI_SKU_ID,
- TAB_LIBRARIES.upper() : MODEL_EFI_LIBRARY_INSTANCE,
- TAB_LIBRARY_CLASSES.upper() : MODEL_EFI_LIBRARY_CLASS,
- TAB_PCDS_FIXED_AT_BUILD_NULL.upper() : MODEL_PCD_FIXED_AT_BUILD,
- TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper() : MODEL_PCD_PATCHABLE_IN_MODULE,
- TAB_PCDS_FEATURE_FLAG_NULL.upper() : MODEL_PCD_FEATURE_FLAG,
- TAB_PCDS_DYNAMIC_EX_NULL.upper() : MODEL_PCD_DYNAMIC_EX,
- TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL.upper() : MODEL_PCD_DYNAMIC_EX_DEFAULT,
- TAB_PCDS_DYNAMIC_EX_VPD_NULL.upper() : MODEL_PCD_DYNAMIC_EX_VPD,
- TAB_PCDS_DYNAMIC_EX_HII_NULL.upper() : MODEL_PCD_DYNAMIC_EX_HII,
- TAB_PCDS_DYNAMIC_NULL.upper() : MODEL_PCD_DYNAMIC,
- TAB_PCDS_DYNAMIC_DEFAULT_NULL.upper() : MODEL_PCD_DYNAMIC_DEFAULT,
- TAB_PCDS_DYNAMIC_VPD_NULL.upper() : MODEL_PCD_DYNAMIC_VPD,
- TAB_PCDS_DYNAMIC_HII_NULL.upper() : MODEL_PCD_DYNAMIC_HII,
- TAB_COMPONENTS.upper() : MODEL_META_DATA_COMPONENT,
- TAB_USER_EXTENSIONS.upper() : MODEL_META_DATA_USER_EXTENSION
- }
-
-## DscObject
-#
-# This class defined basic Dsc object which is used by inheriting
-#
-# @param object: Inherited from object class
-#
-class DscObject(object):
- def __init__(self):
- object.__init__()
-
-## Dsc
-#
-# This class defined the structure used in Dsc object
-#
-# @param DscObject: Inherited from InfObject class
-# @param Ffilename: Input value for Ffilename of Inf file, default is None
-# @param IsMergeAllArches: Input value for IsMergeAllArches
-# True is to merge all arches
-# Fales is not to merge all arches
-# default is False
-# @param IsToPlatform: Input value for IsToPlatform
-# True is to transfer to ModuleObject automatically
-# False is not to transfer to ModuleObject automatically
-# default is False
-# @param WorkspaceDir: Input value for current workspace directory, default is None
-#
-# @var _NullClassIndex: To store value for _NullClassIndex, default is 0
-# @var Identification: To store value for Identification, it is a structure as Identification
-# @var Defines: To store value for Defines, it is a structure as DscDefines
-# @var Contents: To store value for Contents, it is a structure as DscContents
-# @var UserExtensions: To store value for UserExtensions
-# @var Platform: To store value for Platform, it is a structure as PlatformClass
-# @var WorkspaceDir: To store value for WorkspaceDir
-# @var KeyList: To store value for KeyList, a list for all Keys used in Dec
-#
-class Dsc(DscObject):
- _NullClassIndex = 0
-
- def __init__(self, Filename=None, IsToDatabase=False, IsToPlatform=False, WorkspaceDir=None, Database=None):
- self.Identification = Identification()
- self.Platform = PlatformClass()
- self.UserExtensions = ''
- self.WorkspaceDir = WorkspaceDir
- self.IsToDatabase = IsToDatabase
- if Database:
- self.Cur = Database.Cur
- self.TblFile = Database.TblFile
- self.TblDsc = Database.TblDsc
-
- self.KeyList = [
- TAB_SKUIDS, TAB_LIBRARIES, TAB_LIBRARY_CLASSES, TAB_BUILD_OPTIONS, TAB_PCDS_FIXED_AT_BUILD_NULL, \
- TAB_PCDS_PATCHABLE_IN_MODULE_NULL, TAB_PCDS_FEATURE_FLAG_NULL, \
- TAB_PCDS_DYNAMIC_DEFAULT_NULL, TAB_PCDS_DYNAMIC_HII_NULL, TAB_PCDS_DYNAMIC_VPD_NULL, \
- TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, TAB_PCDS_DYNAMIC_EX_HII_NULL, TAB_PCDS_DYNAMIC_EX_VPD_NULL, \
- TAB_COMPONENTS, TAB_DSC_DEFINES
- ]
-
- self.PcdToken = {}
-
- #
- # Upper all KEYs to ignore case sensitive when parsing
- #
- self.KeyList = map(lambda c: c.upper(), self.KeyList)
-
- #
- # Init RecordSet
- #
-# self.RecordSet = {}
-# for Key in self.KeyList:
-# self.RecordSet[Section[Key]] = []
-
- #
- # Load Dsc file if filename is not None
- #
- if Filename != None:
- self.LoadDscFile(Filename)
-
- #
- # Transfer to Platform Object if IsToPlatform is True
- #
- if IsToPlatform:
- self.DscToPlatform()
-
- ## Transfer to Platform Object
- #
- # Transfer all contents of an Inf file to a standard Module Object
- #
- def DscToPlatform(self):
- #
- # Init global information for the file
- #
- ContainerFile = self.Identification.FileFullPath
-
- #
- # Generate Platform Header
- #
- self.GenPlatformHeader(ContainerFile)
-
- #
- # Generate BuildOptions
- #
- self.GenBuildOptions(ContainerFile)
-
- #
- # Generate SkuInfos
- #
- self.GenSkuInfos(ContainerFile)
-
- #
- # Generate Libraries
- #
- self.GenLibraries(ContainerFile)
-
- #
- # Generate LibraryClasses
- #
- self.GenLibraryClasses(ContainerFile)
-
- #
- # Generate Pcds
- #
- self.GenPcds(DataType.TAB_PCDS_FIXED_AT_BUILD, ContainerFile)
- self.GenPcds(DataType.TAB_PCDS_PATCHABLE_IN_MODULE, ContainerFile)
- self.GenFeatureFlagPcds(DataType.TAB_PCDS_FEATURE_FLAG, ContainerFile)
- self.GenDynamicDefaultPcds(DataType.TAB_PCDS_DYNAMIC_DEFAULT, ContainerFile)
- self.GenDynamicDefaultPcds(DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT, ContainerFile)
- self.GenDynamicHiiPcds(DataType.TAB_PCDS_DYNAMIC_HII, ContainerFile)
- self.GenDynamicHiiPcds(DataType.TAB_PCDS_DYNAMIC_EX_HII, ContainerFile)
- self.GenDynamicVpdPcds(DataType.TAB_PCDS_DYNAMIC_VPD, ContainerFile)
- self.GenDynamicVpdPcds(DataType.TAB_PCDS_DYNAMIC_EX_VPD, ContainerFile)
-
- #
- # Generate Components
- #
- self.GenComponents(ContainerFile)
-
- #
- # Update to database
- #
- if self.IsToDatabase:
- for Key in self.PcdToken.keys():
- SqlCommand = """update %s set Value2 = '%s' where ID = %s""" % (self.TblDsc.Table, ".".join((self.PcdToken[Key][0], self.PcdToken[Key][1])), Key)
- self.TblDsc.Exec(SqlCommand)
- #End of DscToPlatform
-
- ## Get Platform Header
- #
- # Gen Platform Header of Dsc as <Key> = <Value>
- #
- # @param ContainerFile: The Dsc file full path
- #
- def GenPlatformHeader(self, ContainerFile):
- EdkLogger.debug(2, "Generate PlatformHeader ...")
- #
- # Update all defines item in database
- #
- SqlCommand = """select ID, Value1, Arch, StartLine from %s
- where Model = %s
- and BelongsToFile = %s
- and Enabled > -1""" % (self.TblDsc.Table, MODEL_META_DATA_HEADER, self.FileID)
- RecordSet = self.TblDsc.Exec(SqlCommand)
- for Record in RecordSet:
- ValueList = GetSplitValueList(Record[1], TAB_EQUAL_SPLIT)
- if len(ValueList) != 2:
- RaiseParserError(Record[1], 'Defines', ContainerFile, '<Key> = <Value>', Record[3])
- ID, Value1, Value2, Arch = Record[0], ValueList[0], ValueList[1], Record[2]
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
- where ID = %s""" % (self.TblDsc.Table, ConvertToSqlString2(Value1), ConvertToSqlString2(Value2), ID)
- self.TblDsc.Exec(SqlCommand)
-
- #
- # Get detailed information
- #
- for Arch in DataType.ARCH_LIST:
- PlatformHeader = PlatformHeaderClass()
-
- PlatformHeader.Name = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_PLATFORM_NAME, Arch, self.FileID)[0]
- PlatformHeader.Guid = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_PLATFORM_GUID, Arch, self.FileID)[0]
- PlatformHeader.Version = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_PLATFORM_VERSION, Arch, self.FileID)[0]
- PlatformHeader.FileName = self.Identification.FileName
- PlatformHeader.FullPath = self.Identification.FileFullPath
- PlatformHeader.DscSpecification = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_DSC_SPECIFICATION, Arch, self.FileID)[0]
-
- PlatformHeader.SkuIdName = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_SKUID_IDENTIFIER, Arch, self.FileID)
- PlatformHeader.SupArchList = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES, Arch, self.FileID)
- PlatformHeader.BuildTargets = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_BUILD_TARGETS, Arch, self.FileID)
- PlatformHeader.OutputDirectory = NormPath(QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_OUTPUT_DIRECTORY, Arch, self.FileID)[0])
- PlatformHeader.BuildNumber = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_BUILD_NUMBER, Arch, self.FileID)[0]
- PlatformHeader.MakefileName = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_MAKEFILE_NAME, Arch, self.FileID)[0]
-
- PlatformHeader.BsBaseAddress = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_BS_BASE_ADDRESS, Arch, self.FileID)[0]
- PlatformHeader.RtBaseAddress = QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_RT_BASE_ADDRESS, Arch, self.FileID)[0]
-
- self.Platform.Header[Arch] = PlatformHeader
- Fdf = PlatformFlashDefinitionFileClass()
- Fdf.FilePath = NormPath(QueryDefinesItem(self.TblDsc, TAB_DSC_DEFINES_FLASH_DEFINITION, Arch, self.FileID)[0])
- self.Platform.FlashDefinitionFile = Fdf
- Prebuild = BuildScriptClass()
- Prebuild.FilePath = NormPath(QueryDefinesItem(self.TblDsc, TAB_DSC_PREBUILD, Arch, self.FileID)[0])
- self.Platform.Prebuild = Prebuild
- Postbuild = BuildScriptClass()
- Postbuild.FilePath = NormPath(QueryDefinesItem(self.TblDsc, TAB_DSC_POSTBUILD, Arch, self.FileID)[0])
- self.Platform.Postbuild = Postbuild
-
- ## GenBuildOptions
- #
- # Gen BuildOptions of Dsc
- # [<Family>:]<ToolFlag>=Flag
- #
- # @param ContainerFile: The Dsc file full path
- #
- def GenBuildOptions(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_BUILD_OPTIONS)
- BuildOptions = {}
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_META_DATA_BUILD_OPTION, self.FileID)
-
- #
- # Get all BuildOptions
- #
- RecordSet = QueryDscItem(self.TblDsc, MODEL_META_DATA_BUILD_OPTION, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_BUILD_OPTIONS, '', IncludeFile[2])
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- (Family, ToolChain, Flag) = GetBuildOption(NewItem, Filename, -1)
- MergeArches(BuildOptions, (Family, ToolChain, Flag), Arch)
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- (Family, ToolChain, Flag) = GetBuildOption(Record[0], ContainerFile, Record[2])
- MergeArches(BuildOptions, (Family, ToolChain, Flag), Arch)
- #
- # Update to Database
- #
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
- where ID = %s""" % (self.TblDsc.Table, ConvertToSqlString2(Family), ConvertToSqlString2(ToolChain), ConvertToSqlString2(Flag), Record[3])
- self.TblDsc.Exec(SqlCommand)
-
- for Key in BuildOptions.keys():
- BuildOption = BuildOptionClass(Key[0], Key[1], Key[2])
- BuildOption.SupArchList = BuildOptions[Key]
- self.Platform.BuildOptions.BuildOptionList.append(BuildOption)
-
- ## GenSkuInfos
- #
- # Gen SkuInfos of Dsc
- # <Integer>|<UiName>
- #
- # @param ContainerFile: The Dsc file full path
- #
- def GenSkuInfos(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_SKUIDS)
- #
- # SkuIds
- # <Integer>|<UiName>
- #
- self.Platform.SkuInfos.SkuInfoList['DEFAULT'] = '0'
-
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_EFI_SKU_ID, self.FileID)
-
- #
- # Get all SkuInfos
- #
- RecordSet = QueryDscItem(self.TblDsc, MODEL_EFI_SKU_ID, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_SKUIDS, '', IncludeFile[2])
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- List = GetSplitValueList(NewItem)
- if len(List) != 2:
- RaiseParserError(NewItem, TAB_SKUIDS, Filename, '<Integer>|<UiName>')
- else:
- self.Platform.SkuInfos.SkuInfoList[List[1]] = List[0]
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- List = GetSplitValueList(Record[0])
- if len(List) != 2:
- RaiseParserError(Record[0], TAB_SKUIDS, ContainerFile, '<Integer>|<UiName>')
- else:
- self.Platform.SkuInfos.SkuInfoList[List[1]] = List[0]
- #
- # Update to Database
- #
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
- where ID = %s""" % (self.TblDsc.Table, ConvertToSqlString2(List[0]), ConvertToSqlString2(List[1]), Record[3])
- self.TblDsc.Exec(SqlCommand)
-
- ## GenLibraries
- #
- # Gen Libraries of Dsc
- # <PathAndFilename>
- #
- # @param ContainerFile: The Dsc file full path
- #
- def GenLibraries(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARIES)
- Libraries = {}
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_EFI_LIBRARY_INSTANCE, self.FileID)
-
- #
- # Get all Libraries
- #
- RecordSet = QueryDscItem(self.TblDsc, MODEL_EFI_LIBRARY_INSTANCE, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_LIBRARIES, '', IncludeFile[2])
- if os.path.exists(Filename):
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- MergeArches(Libraries, NewItem, Arch)
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- MergeArches(Libraries, Record[0], Arch)
-
- for Key in Libraries.keys():
- Library = PlatformLibraryClass()
- Library.FilePath = NormPath(Key)
- Library.SupArchList = Libraries[Key]
- self.Platform.Libraries.LibraryList.append(Library)
-
- ## GenLibraryClasses
- #
- # Get LibraryClasses of Dsc
- # <LibraryClassKeyWord>|<LibraryInstance>
- #
- # @param ContainerFile: The Dsc file full path
- #
- def GenLibraryClasses(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARY_CLASSES)
- LibraryClasses = {}
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_EFI_LIBRARY_CLASS, self.FileID)
-
- #
- # Get all LibraryClasses
- #
- RecordSet = QueryDscItem(self.TblDsc, MODEL_EFI_LIBRARY_CLASS, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_LIBRARY_CLASSES, '', IncludeFile[2])
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- MergeArches(LibraryClasses, GetLibraryClass([NewItem, IncludeFile[4]], Filename, self.WorkspaceDir, -1), Arch)
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- (LibClassName, LibClassIns, SupModelList) = GetLibraryClass([Record[0], Record[4]], ContainerFile, self.WorkspaceDir, Record[2])
- MergeArches(LibraryClasses, (LibClassName, LibClassIns, SupModelList), Arch)
- #
- # Update to Database
- #
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
- where ID = %s""" % (self.TblDsc.Table, ConvertToSqlString2(LibClassName), ConvertToSqlString2(LibClassIns), ConvertToSqlString2(SupModelList), Record[3])
- self.TblDsc.Exec(SqlCommand)
-
- for Key in LibraryClasses.keys():
- Library = PlatformLibraryClass()
- Library.Name = Key[0]
- Library.FilePath = NormPath(Key[1])
- Library.SupModuleList = GetSplitValueList(Key[2])
- Library.SupArchList = LibraryClasses[Key]
- self.Platform.LibraryClasses.LibraryList.append(Library)
-
- ## Gen Pcds
- #
- # Gen Pcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<Type>|<MaximumDatumSize>]
- #
- # @param Type: The type of Pcd
- # @param ContainerFile: The file which describes the pcd, used for error report
- #
- def GenPcds(self, Type='', ContainerFile=''):
- Pcds = {}
- if Type == DataType.TAB_PCDS_PATCHABLE_IN_MODULE:
- Model = MODEL_PCD_PATCHABLE_IN_MODULE
- elif Type == DataType.TAB_PCDS_FIXED_AT_BUILD:
- Model = MODEL_PCD_FIXED_AT_BUILD
- else:
- pass
- EdkLogger.debug(2, "Generate %s ..." % Type)
-
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
-
- #
- # Get all Pcds
- #
- RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- (TokenName, TokenGuidCName, Value, DatumType, MaxDatumSize, Type) = GetPcd(NewItem, Type, Filename, -1)
- MergeArches(Pcds, (TokenName, TokenGuidCName, Value, DatumType, MaxDatumSize, Type), Arch)
- self.PcdToken[Record[3]] = (TokenGuidCName, TokenName)
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- (TokenName, TokenGuidCName, Value, DatumType, MaxDatumSize, Type) = GetPcd(Record[0], Type, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenName, TokenGuidCName, Value, DatumType, MaxDatumSize, Type), Arch)
- self.PcdToken[Record[3]] = (TokenGuidCName, TokenName)
-
- for Key in Pcds:
- Pcd = PcdClass(Key[0], '', Key[1], Key[3], Key[4], Key[2], Key[5], [], {}, [])
- Pcd.SupArchList = Pcds[Key]
- self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
-
- ## Gen FeatureFlagPcds
- #
- # Gen FeatureFlagPcds of Dsc file as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
- #
- # @param Type: The type of Pcd
- # @param ContainerFile: The file which describes the pcd, used for error report
- #
- def GenFeatureFlagPcds(self, Type='', ContainerFile=''):
- Pcds = {}
- if Type == DataType.TAB_PCDS_FEATURE_FLAG:
- Model = MODEL_PCD_FEATURE_FLAG
- else:
- pass
- EdkLogger.debug(2, "Generate %s ..." % Type)
-
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
-
- #
- # Get all FeatureFlagPcds
- #
- RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- (TokenName, TokenGuidCName, Value, Type) = GetFeatureFlagPcd(NewItem, Type, Filename, -1)
- MergeArches(Pcds, (TokenName, TokenGuidCName, Value, Type), Arch)
- self.PcdToken[Record[3]] = (TokenGuidCName, TokenName)
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- (TokenName, TokenGuidCName, Value, Type) = GetFeatureFlagPcd(Record[0], Type, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenName, TokenGuidCName, Value, Type), Arch)
- self.PcdToken[Record[3]] = (TokenGuidCName, TokenName)
-
- for Key in Pcds:
- Pcd = PcdClass(Key[0], '', Key[1], '', '', Key[2], Key[3], [], {}, [])
- Pcd.SupArchList = Pcds[Key]
- self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
-
- ## Gen DynamicDefaultPcds
- #
- # Gen DynamicDefaultPcds of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<DatumTyp>[|<MaxDatumSize>]]
- #
- # @param Type: The type of Pcd
- # @param ContainerFile: The file which describes the pcd, used for error report
- #
- def GenDynamicDefaultPcds(self, Type='', ContainerFile=''):
- Pcds = {}
- SkuInfoList = {}
- if Type == DataType.TAB_PCDS_DYNAMIC_DEFAULT:
- Model = MODEL_PCD_DYNAMIC_DEFAULT
- elif Type == DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT:
- Model = MODEL_PCD_DYNAMIC_EX_DEFAULT
- else:
- pass
- EdkLogger.debug(2, "Generate %s ..." % Type)
-
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
-
- #
- # Get all DynamicDefaultPcds
- #
- RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- (K1, K2, K3, K4, K5, K6) = GetDynamicDefaultPcd(NewItem, Type, Filename, -1)
- MergeArches(Pcds, (K1, K2, K3, K4, K5, K6, IncludeFile[4]), Arch)
- self.PcdToken[Record[3]] = (K2, K1)
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- (K1, K2, K3, K4, K5, K6) = GetDynamicDefaultPcd(Record[0], Type, ContainerFile, Record[2])
- MergeArches(Pcds, (K1, K2, K3, K4, K5, K6, Record[4]), Arch)
- self.PcdToken[Record[3]] = (K2, K1)
-
- for Key in Pcds:
- (Status, SkuInfoList) = self.GenSkuInfoList(Key[6], self.Platform.SkuInfos.SkuInfoList, '', '', '', '', '', Key[2])
- if Status == False:
- ErrorMsg = "The SKUID '%s' used in section '%s' is not defined in section [SkuIds]" % (SkuInfoList, Type)
- EdkLogger.error("DSC File Parser", PARSER_ERROR, ErrorMsg, ContainerFile, RaiseError=EdkLogger.IsRaiseError)
- Pcd = PcdClass(Key[0], '', Key[1], Key[3], Key[4], Key[2], Key[5], [], SkuInfoList, [])
- Pcd.SupArchList = Pcds[Key]
- self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
-
- ## Gen DynamicHiiPcds
- #
- # Gen DynamicHiiPcds of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<String>|<VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]
- #
- # @param Type: The type of Pcd
- # @param ContainerFile: The file which describes the pcd, used for error report
- #
- def GenDynamicHiiPcds(self, Type='', ContainerFile=''):
- Pcds = {}
- SkuInfoList = {}
- if Type == DataType.TAB_PCDS_DYNAMIC_HII:
- Model = MODEL_PCD_DYNAMIC_HII
- elif Type == DataType.TAB_PCDS_DYNAMIC_EX_HII:
- Model = MODEL_PCD_DYNAMIC_EX_HII
- else:
- pass
- EdkLogger.debug(2, "Generate %s ..." % Type)
-
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
-
- #
- # Get all DynamicHiiPcds
- #
- RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- (K1, K2, K3, K4, K5, K6, K7, K8) = GetDynamicHiiPcd(NewItem, Type, Filename, -1)
- MergeArches(Pcds, (K1, K2, K3, K4, K5, K6, K7, K8, IncludeFile[4]), Arch)
- self.PcdToken[Record[3]] = (K2, K1)
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- (K1, K2, K3, K4, K5, K6, K7, K8) = GetDynamicHiiPcd(Record[0], Type, ContainerFile, Record[2])
- MergeArches(Pcds, (K1, K2, K3, K4, K5, K6, K7, K8, Record[4]), Arch)
- self.PcdToken[Record[3]] = (K2, K1)
-
- for Key in Pcds:
- (Status, SkuInfoList) = self.GenSkuInfoList(Key[8], self.Platform.SkuInfos.SkuInfoList, Key[2], Key[3], Key[4], Key[5], '', '')
- if Status == False:
- ErrorMsg = "The SKUID '%s' used in section '%s' is not defined in section [SkuIds]" % (SkuInfoList, Type)
- EdkLogger.error("DSC File Parser", PARSER_ERROR, ErrorMsg, ContainerFile, RaiseError=EdkLogger.IsRaiseError)
- Pcd = PcdClass(Key[0], '', Key[1], '', Key[6], Key[5], Key[7], [], SkuInfoList, [])
- Pcd.SupArchList = Pcds[Key]
- self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
-
- ## Gen DynamicVpdPcds
- #
- # Gen DynamicVpdPcds of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>[|<MaximumDatumSize>]
- #
- # @param Type: The type of Pcd
- # @param ContainerFile: The file which describes the pcd, used for error report
- #
- def GenDynamicVpdPcds(self, Type='', ContainerFile=''):
- Pcds = {}
- SkuInfoList = {}
- if Type == DataType.TAB_PCDS_DYNAMIC_VPD:
- Model = MODEL_PCD_DYNAMIC_VPD
- elif Type == DataType.TAB_PCDS_DYNAMIC_EX_VPD:
- Model = MODEL_PCD_DYNAMIC_EX_VPD
- else:
- pass
- EdkLogger.debug(2, "Generate %s ..." % Type)
-
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, Model, self.FileID)
-
- #
- # Get all DynamicVpdPcds
- #
- RecordSet = QueryDscItem(self.TblDsc, Model, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, Type, '', IncludeFile[2])
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- (K1, K2, K3, K4, K5) = GetDynamicVpdPcd(NewItem, Type, Filename, -1)
- MergeArches(Pcds, (K1, K2, K3, K4, K5, IncludeFile[4]), Arch)
- self.PcdToken[Record[3]] = (K2, K1)
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- (K1, K2, K3, K4, K5) = GetDynamicVpdPcd(Record[0], Type, ContainerFile, Record[2])
- MergeArches(Pcds, (K1, K2, K3, K4, K5, Record[4]), Arch)
- self.PcdToken[Record[3]] = (K2, K1)
-
- for Key in Pcds:
- (Status, SkuInfoList) = self.GenSkuInfoList(Key[5], self.Platform.SkuInfos.SkuInfoList, '', '', '', '', Key[2], '')
- if Status == False:
- ErrorMsg = "The SKUID '%s' used in section '%s' is not defined in section [SkuIds]" % (SkuInfoList, Type)
- EdkLogger.error("DSC File Parser", PARSER_ERROR, ErrorMsg, ContainerFile, RaiseError=EdkLogger.IsRaiseError)
- Pcd = PcdClass(Key[0], '', Key[1], '', Key[3], '', Key[4], [], SkuInfoList, [])
- Pcd.SupArchList = Pcds[Key]
- self.Platform.DynamicPcdBuildDefinitions.append(Pcd)
-
-
- ## Get Component
- #
- # Get Component section defined in Dsc file
- #
- # @param ContainerFile: The file which describes the Components, used for error report
- #
- # @retval PlatformModuleClass() A instance for PlatformModuleClass
- #
- def GenComponents(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_COMPONENTS)
- Components = sdict()
- #
- # Get all include files
- #
- IncludeFiles = QueryDscItem(self.TblDsc, MODEL_META_DATA_INCLUDE, MODEL_META_DATA_COMPONENT, self.FileID)
-
- #
- # Get all Components
- #
- RecordSet = QueryDscItem(self.TblDsc, MODEL_META_DATA_COMPONENT, -1, self.FileID)
-
- #
- # Go through each arch
- #
- for Arch in DataType.ARCH_LIST:
- for IncludeFile in IncludeFiles:
- if IncludeFile[1] == Arch or IncludeFile[1] == TAB_ARCH_COMMON.upper():
- Filename = CheckFileExist(self.WorkspaceDir, IncludeFile[0], ContainerFile, TAB_COMPONENTS, '', IncludeFile[2])
- for NewItem in open(Filename, 'r').readlines():
- if CleanString(NewItem) == '':
- continue
- NewItems = []
- GetComponents(open(Filename, 'r').read(), TAB_COMPONENTS, NewItems, TAB_COMMENT_SPLIT)
- for NewComponent in NewItems:
- MergeArches(Components, self.GenComponent(NewComponent, Filename), Arch)
-
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON.upper():
- Lib, Bo, Pcd = [], [], []
-
- SubLibSet = QueryDscItem(self.TblDsc, MODEL_EFI_LIBRARY_CLASS, Record[3], self.FileID)
- for SubLib in SubLibSet:
- Lib.append(TAB_VALUE_SPLIT.join([SubLib[0], SubLib[4]]))
-
- SubBoSet = QueryDscItem(self.TblDsc, MODEL_META_DATA_BUILD_OPTION, Record[3], self.FileID)
- for SubBo in SubBoSet:
- Bo.append(SubBo[0])
-
- SubPcdSet1 = QueryDscItem(self.TblDsc, MODEL_PCD_FIXED_AT_BUILD, Record[3], self.FileID)
- SubPcdSet2 = QueryDscItem(self.TblDsc, MODEL_PCD_PATCHABLE_IN_MODULE, Record[3], self.FileID)
- SubPcdSet3 = QueryDscItem(self.TblDsc, MODEL_PCD_FEATURE_FLAG, Record[3], self.FileID)
- SubPcdSet4 = QueryDscItem(self.TblDsc, MODEL_PCD_DYNAMIC_EX_DEFAULT, Record[3], self.FileID)
- SubPcdSet5 = QueryDscItem(self.TblDsc, MODEL_PCD_DYNAMIC_DEFAULT, Record[3], self.FileID)
- for SubPcd in SubPcdSet1:
- Pcd.append([DataType.TAB_PCDS_FIXED_AT_BUILD, SubPcd[0], SubPcd[3]])
- for SubPcd in SubPcdSet2:
- Pcd.append([DataType.TAB_PCDS_PATCHABLE_IN_MODULE, SubPcd[0], SubPcd[3]])
- for SubPcd in SubPcdSet3:
- Pcd.append([DataType.TAB_PCDS_FEATURE_FLAG, SubPcd[0], SubPcd[3]])
- for SubPcd in SubPcdSet4:
- Pcd.append([DataType.TAB_PCDS_DYNAMIC_EX, SubPcd[0], SubPcd[3]])
- for SubPcd in SubPcdSet5:
- Pcd.append([DataType.TAB_PCDS_DYNAMIC, SubPcd[0], SubPcd[3]])
- Item = [Record[0], Lib, Bo, Pcd]
- MergeArches(Components, self.GenComponent(Item, ContainerFile), Arch)
-
- for Key in Components.keys():
- Key.SupArchList = Components[Key]
- self.Platform.Modules.ModuleList.append(Key)
-
- ## Get Component
- #
- # Get Component section defined in Dsc file
- #
- # @param Item: Contents includes a component block
- # @param ContainerFile: The file which describes the library class, used for error report
- #
- # @retval PlatformModuleClass() A instance for PlatformModuleClass
- #
- def GenComponent(self, Item, ContainerFile, LineNo= -1):
- (InfFilename, ExecFilename) = GetExec(Item[0])
- LibraryClasses = Item[1]
- BuildOptions = Item[2]
- Pcds = Item[3]
- Component = PlatformModuleClass()
- Component.FilePath = NormPath(InfFilename)
- Component.ExecFilePath = NormPath(ExecFilename)
- CheckFileType(Component.FilePath, '.Inf', ContainerFile, 'component name', Item[0], LineNo)
- CheckFileExist(self.WorkspaceDir, Component.FilePath, ContainerFile, 'component', Item[0], LineNo)
- for Lib in LibraryClasses:
- List = GetSplitValueList(Lib)
- if len(List) != 2:
- RaiseParserError(Lib, 'LibraryClasses', ContainerFile, '<ClassName>|<InfFilename>')
- LibName = List[0]
- LibFile = NormPath(List[1])
- if LibName == "" or LibName == "NULL":
- LibName = "NULL%d" % self._NullClassIndex
- self._NullClassIndex += 1
- CheckFileType(List[1], '.Inf', ContainerFile, 'library instance of component ', Lib, LineNo)
- CheckFileExist(self.WorkspaceDir, LibFile, ContainerFile, 'library instance of component', Lib, LineNo)
- Component.LibraryClasses.LibraryList.append(PlatformLibraryClass(LibName, LibFile))
- for BuildOption in BuildOptions:
- Key = GetBuildOption(BuildOption, ContainerFile)
- Component.ModuleSaBuildOption.BuildOptionList.append(BuildOptionClass(Key[0], Key[1], Key[2]))
- for Pcd in Pcds:
- Type = Pcd[0]
- List = GetSplitValueList(Pcd[1])
- PcdId = Pcd[2]
-
- TokenInfo = None
- #
- # For FeatureFlag
- #
- if Type == DataType.TAB_PCDS_FEATURE_FLAG:
- if len(List) != 2:
- RaiseParserError(Pcd[1], 'Components', ContainerFile, '<PcdTokenSpaceGuidCName>.<PcdTokenName>|TRUE/FALSE')
-
- CheckPcdTokenInfo(List[0], 'Components', ContainerFile)
- TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
- Component.PcdBuildDefinitions.append(PcdClass(TokenInfo[1], '', TokenInfo[0], '', '', List[1], Type, [], {}, []))
- #
- # For FixedAtBuild or PatchableInModule
- #
- if Type == DataType.TAB_PCDS_FIXED_AT_BUILD or Type == DataType.TAB_PCDS_PATCHABLE_IN_MODULE:
- List.append('')
- if len(List) != 3 and len(List) != 4:
- RaiseParserError(Pcd[1], 'Components', ContainerFile, '<PcdTokenSpaceGuidCName>.<PcdTokenName>|<Value>[|<MaxDatumSize>]')
-
- CheckPcdTokenInfo(List[0], 'Components', ContainerFile)
- TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
- Component.PcdBuildDefinitions.append(PcdClass(TokenInfo[1], '', TokenInfo[0], '', List[2], List[1], Type, [], {}, []))
-
- #
- # For Dynamic or DynamicEx
- #
- if Type == DataType.TAB_PCDS_DYNAMIC or Type == DataType.TAB_PCDS_DYNAMIC_EX:
- if len(List) != 1:
- RaiseParserError(Pcd[1], 'Components', ContainerFile, '<PcdTokenSpaceGuidCName>.<PcdTokenName>')
-
- CheckPcdTokenInfo(List[0], 'Components', ContainerFile)
- TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
- Component.PcdBuildDefinitions.append(PcdClass(TokenInfo[1], '', TokenInfo[0], '', '', '', Type, [], {}, []))
-
- #
- # Add to PcdToken
- #
- self.PcdToken[PcdId] = (TokenInfo[0], TokenInfo[1])
-
- return Component
- #End of GenComponent
-
- ## Gen SkuInfoList
- #
- # Gen SkuInfoList section defined in Dsc file
- #
- # @param SkuNameList: Input value for SkuNameList
- # @param SkuInfo: Input value for SkuInfo
- # @param VariableName: Input value for VariableName
- # @param VariableGuid: Input value for VariableGuid
- # @param VariableOffset: Input value for VariableOffset
- # @param HiiDefaultValue: Input value for HiiDefaultValue
- # @param VpdOffset: Input value for VpdOffset
- # @param DefaultValue: Input value for DefaultValue
- #
- # @retval (False, SkuName) Not found in section SkuId Dsc file
- # @retval (True, SkuInfoList) Found in section SkuId of Dsc file
- #
- def GenSkuInfoList(self, SkuNameList, SkuInfo, VariableName='', VariableGuid='', VariableOffset='', HiiDefaultValue='', VpdOffset='', DefaultValue=''):
- SkuNameList = GetSplitValueList(SkuNameList)
- if SkuNameList == None or SkuNameList == [] or SkuNameList == ['']:
- SkuNameList = ['DEFAULT']
- SkuInfoList = {}
- for Item in SkuNameList:
- if Item not in SkuInfo:
- return False, Item
- Sku = SkuInfoClass(Item, SkuInfo[Item], VariableName, VariableGuid, VariableOffset, HiiDefaultValue, VpdOffset, DefaultValue)
- SkuInfoList[Item] = Sku
-
- return True, SkuInfoList
-
- ## Parse Include statement
- #
- # Get include file path
- #
- # 1. Insert a record into TblFile ???
- # 2. Insert a record into TblDsc
- # Value1: IncludeFilePath
- #
- # @param LineValue: The line of incude statement
- def ParseInclude(self, LineValue, StartLine, Table, FileID, Filename, SectionName, Model, Arch):
- EdkLogger.debug(EdkLogger.DEBUG_2, "!include statement '%s' found in section %s" % (LineValue, SectionName))
- SectionModel = Section[SectionName.upper()]
- IncludeFile = CleanString(LineValue[LineValue.upper().find(DataType.TAB_INCLUDE.upper() + ' ') + len(DataType.TAB_INCLUDE + ' ') : ])
- Table.Insert(Model, IncludeFile, '', '', Arch, SectionModel, FileID, StartLine, -1, StartLine, -1, 0)
-
- ## Parse DEFINE statement
- #
- # Get DEFINE macros
- #
- # 1. Insert a record into TblDsc
- # Value1: Macro Name
- # Value2: Macro Value
- #
- def ParseDefine(self, LineValue, StartLine, Table, FileID, Filename, SectionName, Model, Arch):
- EdkLogger.debug(EdkLogger.DEBUG_2, "DEFINE statement '%s' found in section %s" % (LineValue, SectionName))
- SectionModel = Section[SectionName.upper()]
- Define = GetSplitValueList(CleanString(LineValue[LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') + len(DataType.TAB_DEFINE + ' ') : ]), TAB_EQUAL_SPLIT, 1)
- Table.Insert(Model, Define[0], Define[1], '', Arch, SectionModel, FileID, StartLine, -1, StartLine, -1, 0)
-
- ## Parse Defines section
- #
- # Get one item in defines section
- #
- # Value1: Item Name
- # Value2: Item Value
- #
- def ParseDefinesSection(self, LineValue, StartLine, Table, FileID, Filename, SectionName, Model, Arch):
- EdkLogger.debug(EdkLogger.DEBUG_2, "Parse '%s' found in section %s" % (LineValue, SectionName))
- Defines = GetSplitValueList(LineValue, TAB_EQUAL_SPLIT, 1)
- if len(Defines) != 2:
- RaiseParserError(LineValue, SectionName, Filename, '', StartLine)
- self.TblDsc.Insert(Model, Defines[0], Defines[1], '', Arch, -1, FileID, StartLine, -1, StartLine, -1, 0)
-
- ## Insert conditional statements
- #
- # Pop an item from IfDefList
- # Insert conditional statements to database
- #
- # @param Filename: Path of parsing file
- # @param IfDefList: A list stored current conditional statements
- # @param EndLine: The end line no
- # @param ArchList: Support arch list
- #
- def InsertConditionalStatement(self, Filename, FileID, BelongsToItem, IfDefList, EndLine, ArchList):
- (Value1, Value2, Value3, Model, StartColumn, EndColumn, Enabled) = ('', '', '', -1, -1, -1, 0)
- if IfDefList == []:
- ErrorMsg = 'Not suited conditional statement in file %s' % Filename
- EdkLogger.error("DSC File Parser", PARSER_ERROR, ErrorMsg, Filename, RaiseError=EdkLogger.IsRaiseError)
- else:
- #
- # Get New Dsc item ID
- #
- DscID = self.TblDsc.GetCount() + 1
-
- #
- # Pop the conditional statements which is closed
- #
- PreviousIf = IfDefList.pop()
- EdkLogger.debug(EdkLogger.DEBUG_5, 'Previous IfDef: ' + str(PreviousIf))
-
- #
- # !ifdef and !ifndef
- #
- if PreviousIf[2] in (MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF, MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF):
- Value1 = PreviousIf[0]
- Model = PreviousIf[2]
- self.TblDsc.Insert(Model, Value1, Value2, Value3, ArchList, BelongsToItem, self.FileID, PreviousIf[1], StartColumn, EndLine, EndColumn, Enabled)
- #
- # !if and !elseif
- #
- elif PreviousIf[2] in (MODEL_META_DATA_CONDITIONAL_STATEMENT_IF, Model):
- List = PreviousIf[0].split(' ')
- Value1, Value2, Value3 = '', '==', '0'
- if len(List) == 3:
- Value1 = List[0]
- Value2 = List[1]
- Value3 = List[2]
- Value3 = SplitString(Value3)
- if len(List) == 1:
- Value1 = List[0]
- Model = PreviousIf[2]
- self.TblDsc.Insert(Model, Value1, Value2, Value3, ArchList, BelongsToItem, self.FileID, PreviousIf[1], StartColumn, EndLine, EndColumn, Enabled)
- #
- # !else
- #
- elif PreviousIf[2] in (MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE, Model):
- Value1 = PreviousIf[0].strip()
- Model = PreviousIf[2]
- self.TblDsc.Insert(Model, Value1, Value2, Value3, ArchList, BelongsToItem, self.FileID, PreviousIf[1], StartColumn, EndLine, EndColumn, Enabled)
-
    ## Load Dsc file
    #
    # Load the file if it exists
    #
    # Reads the DSC file line by line, strips EDK block comments and trailing
    # comments, groups the lines into sections ([Defines], [LibraryClasses],
    # ...), inserts each section's items into the database, then resolves
    # conditional statements and DEFINE macros over the stored records.
    #
    # @param Filename: Input value for filename of Dsc file
    #
    def LoadDscFile(self, Filename):
        #
        # Insert a record for file
        #
        Filename = NormPath(Filename)
        self.Identification.FileFullPath = Filename
        (self.Identification.FileRelativePath, self.Identification.FileName) = os.path.split(Filename)
        self.FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_DSC)

        #
        # Init DscTable
        #
        #self.TblDsc.Table = "Dsc%s" % FileID
        #self.TblDsc.Create()

        #
        # Init common datas
        #
        # NOTE(review): IncludeFiles is initialized here but never used inside
        # this method -- confirm whether it can be dropped.
        IfDefList, SectionItemList, CurrentSection, ArchList, ThirdList, IncludeFiles = \
            [], [], TAB_UNKNOWN, [], [], []
        LineNo = 0

        #
        # Parse file content
        #
        IsFindBlockComment = False    # True while inside an EDK block comment
        ReservedLine = ''             # text before a block-comment start, re-joined at its end
        for Line in open(Filename, 'r'):
            LineNo = LineNo + 1
            #
            # Remove comment block
            #
            # Only the text before the first start marker is preserved, so
            # nested block comments are not supported.
            if Line.find(TAB_COMMENT_EDK_START) > -1:
                ReservedLine = GetSplitList(Line, TAB_COMMENT_EDK_START, 1)[0]
                IsFindBlockComment = True
            if Line.find(TAB_COMMENT_EDK_END) > -1:
                Line = ReservedLine + GetSplitList(Line, TAB_COMMENT_EDK_END, 1)[1]
                ReservedLine = ''
                IsFindBlockComment = False
            if IsFindBlockComment:
                continue

            #
            # Remove comments at tail and remove spaces again
            #
            Line = CleanString(Line)
            if Line == '':
                continue

            #
            # Find a new section tab
            # First insert previous section items
            # And then parse the content of the new section
            #
            if Line.startswith(TAB_SECTION_START) and Line.endswith(TAB_SECTION_END):
                #
                # Insert items data of previous section
                #
                self.InsertSectionItemsIntoDatabase(self.FileID, Filename, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList)
                #
                # Parse the new section
                #
                SectionItemList = []
                ArchList = []
                ThirdList = []

                CurrentSection = ''
                # Strip the surrounding [ ] and split comma-separated headers,
                # e.g. [LibraryClasses.IA32, LibraryClasses.X64].
                LineList = GetSplitValueList(Line[len(TAB_SECTION_START):len(Line) - len(TAB_SECTION_END)], TAB_COMMA_SPLIT)
                for Item in LineList:
                    # ItemList is Name[.Arch[.Third]]; Third is ModuleType or SkuId.
                    ItemList = GetSplitValueList(Item, TAB_SPLIT)
                    if CurrentSection == '':
                        CurrentSection = ItemList[0]
                    else:
                        if CurrentSection != ItemList[0]:
                            EdkLogger.error("Parser", PARSER_ERROR, "Different section names '%s' and '%s' are found in one section definition, this is not allowed." % (CurrentSection, ItemList[0]), File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
                    if CurrentSection.upper() not in self.KeyList:
                        RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
                        CurrentSection = TAB_UNKNOWN
                        continue
                    # Pad so ItemList[1] (arch) and ItemList[2] (third) always exist.
                    ItemList.append('')
                    ItemList.append('')
                    if len(ItemList) > 5:
                        RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
                    else:
                        if ItemList[1] != '' and ItemList[1].upper() not in ARCH_LIST_FULL:
                            EdkLogger.error("Parser", PARSER_ERROR, "Invalid Arch definition '%s' found" % ItemList[1], File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
                        ArchList.append(ItemList[1].upper())
                        ThirdList.append(ItemList[2])

                continue

            #
            # Not in any defined section
            #
            if CurrentSection == TAB_UNKNOWN:
                ErrorMsg = "%s is not in any defined section" % Line
                EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)

            #
            # Add a section item
            #
            SectionItemList.append([Line, LineNo])
            # End of parse
        #End of For

        #
        # Insert items data of last section
        #
        self.InsertSectionItemsIntoDatabase(self.FileID, Filename, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList)

        #
        # Parse conditional statements
        #
        self.ParseConditionalStatement()

        #
        # Replace all DEFINE macros with its actual values
        #
        #ParseDefineMacro2(self.TblDsc, self.RecordSet, GlobalData.gGlobalDefines)
        ParseDefineMacro(self.TblDsc, GlobalData.gGlobalDefines)
-
- ## ParseConditionalStatement
- #
- # Search all conditional statement and disable no match records
- #
- def ParseConditionalStatement(self):
- #
- # Disabled all !if/!elif/!ifdef statements without DEFINE
- #
- SqlCommand = """select A.StartLine, A.EndLine from %s as A
- where A.Model in (%s, %s, %s)
- and A.Enabled = 0
- and A.BelongsToFile = %s
- and A.Value1 not in (select B.Value1 from %s as B
- where B.Model = %s
- and B.Enabled = 0
- and A.StartLine > B.StartLine
- and A.Arch = B.Arch
- and A.BelongsToItem = B.BelongsToItem
- and A.BelongsToFile = B.BelongsToFile) """ % \
- (self.TblDsc.Table, \
- MODEL_META_DATA_CONDITIONAL_STATEMENT_IF, MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE, MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF, \
- self.FileID, \
- self.TblDsc.Table, \
- MODEL_META_DATA_DEFINE)
- RecordSet = self.TblDsc.Exec(SqlCommand)
- for Record in RecordSet:
- SqlCommand = """Update %s set Enabled = -1 where StartLine >= %s and EndLine <= %s""" % (self.TblDsc.Table, Record[0], Record[1])
- self.TblDsc.Exec(SqlCommand)
-
- #
- # Disabled !ifndef with DEFINE
- #
- SqlCommand = """select A.StartLine, A.EndLine from %s as A
- where A.Model = %s
- and A.Enabled = 0
- and A.BelongsToFile = %s
- and A.Value1 in (select B.Value1 from %s as B
- where B.Model = %s
- and B.Enabled = 0
- and A.StartLine > B.StartLine
- and A.Arch = B.Arch
- and A.BelongsToItem = B.BelongsToItem
- and A.BelongsToFile = B.BelongsToFile)""" % \
- (self.TblDsc.Table, \
- MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF, \
- self.FileID, \
- self.TblDsc.Table, \
- MODEL_META_DATA_DEFINE)
- RecordSet = self.TblDsc.Exec(SqlCommand)
- for Record in RecordSet:
- SqlCommand = """Update %s set Enabled = -1 where StartLine >= %s and EndLine <= %s""" % (self.TblDsc.Table, Record[0], Record[1])
- EdkLogger.debug(4, "SqlCommand: %s" % SqlCommand)
- self.Cur.execute(SqlCommand)
-
- #
- # Disabled !if, !elif and !else with un-match value
- #
- SqlCommand = """select A.Model, A.Value1, A.Value2, A.Value3, A.StartLine, A.EndLine, B.Value2 from %s as A join %s as B
- where A.Model in (%s, %s)
- and A.Enabled = 0
- and A.BelongsToFile = %s
- and B.Enabled = 0
- and B.Model = %s
- and A.Value1 = B.Value1
- and A.StartLine > B.StartLine
- and A.BelongsToItem = B.BelongsToItem
- and A.BelongsToFile = B.BelongsToFile""" % \
- (self.TblDsc.Table, self.TblDsc.Table, \
- MODEL_META_DATA_CONDITIONAL_STATEMENT_IF, MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE, \
- self.FileID, MODEL_META_DATA_DEFINE)
- RecordSet = self.TblDsc.Exec(SqlCommand)
- DisabledList = []
- for Record in RecordSet:
- if Record[0] == MODEL_META_DATA_CONDITIONAL_STATEMENT_IF:
- if not self.Compare(Record[6], Record[2], Record[3]):
- SqlCommand = """Update %s set Enabled = -1 where StartLine >= %s and EndLine <= %s""" % (self.TblDsc.Table, Record[4], Record[5])
- self.TblDsc.Exec(SqlCommand)
- else:
- DisabledList.append(Record[1])
- continue
- if Record[0] == MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE and Record[1] in DisabledList:
- SqlCommand = """Update %s set Enabled = -1 where StartLine >= %s and EndLine <= %s""" % (self.TblDsc.Table, Record[4], Record[5])
- self.TblDsc.Exec(SqlCommand)
-
- ## Compare
- #
- # Compare two values
- # @param Value1:
- # @param CompareType:
- # @param Value2:
- #
- def Compare(self, Value1, CompareType, Value2):
- Command = """Value1 %s Value2""" % CompareType
- return eval(Command)
-
    ## First time to insert records to database
    #
    # Insert item data of a section to database
    # @param FileID: The ID of belonging file
    # @param Filename: The name of belonging file
    # @param CurrentSection: The name of currect section
    # @param SectionItemList: A list of items of the section
    # @param ArchList: A list of arches
    # @param ThirdList: A list of third parameters, ModuleType for LibraryClass and SkuId for Dynamic Pcds
    # @param IfDefList: A list of all conditional statements
    #
    # Each item is classified by substring search: conditional directives
    # (!ifdef/!ifndef/!endif/!if/!elseif/!else), then !include, then DEFINE,
    # then ordinary section content. The order of the tests matters because
    # find() matches substrings (e.g. '!if' is contained in '!ifdef').
    #
    def InsertSectionItemsIntoDatabase(self, FileID, Filename, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList):
        #
        # Insert each item data of a section
        #
        # ArchList and ThirdList are parallel: one entry per section header part.
        for Index in range(0, len(ArchList)):
            Arch = ArchList[Index]
            Third = ThirdList[Index]
            if Arch == '':
                Arch = TAB_ARCH_COMMON.upper()

            Model = Section[CurrentSection.upper()]
            #Records = self.RecordSet[Model]

            for SectionItem in SectionItemList:
                # EndLine is assigned -1 here and immediately overwritten with
                # StartLine on the next statement (historical redundancy).
                BelongsToItem, EndLine, EndColumn = -1, -1, -1
                LineValue, StartLine, EndLine = SectionItem[0], SectionItem[1], SectionItem[1]


                EdkLogger.debug(4, "Parsing %s ..." % LineValue)
                #
                # Parse '!ifdef'
                #
                # NOTE(review): the slice uses len(TAB_IF_N_DEF) ('!ifndef')
                # rather than len(TAB_IF_DEF) ('!ifdef'); it happens to work
                # because it also consumes the separating space -- confirm.
                if LineValue.upper().find(TAB_IF_DEF.upper()) > -1:
                    IfDefList.append((LineValue[len(TAB_IF_N_DEF):].strip(), StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF))
                    continue

                #
                # Parse '!ifndef'
                #
                if LineValue.upper().find(TAB_IF_N_DEF.upper()) > -1:
                    IfDefList.append((LineValue[len(TAB_IF_N_DEF):].strip(), StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF))
                    continue

                #
                # Parse '!endif'
                #
                if LineValue.upper().find(TAB_END_IF.upper()) > -1:
                    self.InsertConditionalStatement(Filename, FileID, Model, IfDefList, StartLine, Arch)
                    continue
                #
                # Parse '!if'
                #
                if LineValue.upper().find(TAB_IF.upper()) > -1:
                    IfDefList.append((LineValue[len(TAB_IF):].strip(), StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_IF))
                    continue

                #
                # Parse '!elseif'
                #
                # Close the previous branch one line above the !elseif, then
                # open a new IF entry for the !elseif expression itself.
                if LineValue.upper().find(TAB_ELSE_IF.upper()) > -1:
                    self.InsertConditionalStatement(Filename, FileID, Model, IfDefList, StartLine - 1, Arch)
                    IfDefList.append((LineValue[len(TAB_ELSE_IF):].strip(), StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_IF))
                    continue

                #
                # Parse '!else'
                #
                # The !else inherits the macro name of the innermost open
                # conditional (IfDefList[-1]).
                if LineValue.upper().find(TAB_ELSE.upper()) > -1:
                    Key = IfDefList[-1][0].split(' ' , 1)[0].strip()
                    self.InsertConditionalStatement(Filename, FileID, Model, IfDefList, StartLine, Arch)
                    IfDefList.append((Key, StartLine, MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE))
                    continue

                #
                # Parse !include statement first
                #
                if LineValue.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1:
                    self.ParseInclude(LineValue, StartLine, self.TblDsc, FileID, Filename, CurrentSection, MODEL_META_DATA_INCLUDE, Arch)
                    continue

                #
                # And then parse DEFINE statement
                #
                if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1:
                    self.ParseDefine(LineValue, StartLine, self.TblDsc, FileID, Filename, CurrentSection, MODEL_META_DATA_DEFINE, Arch)
                    continue

                #
                # At last parse other sections
                #
                if CurrentSection == TAB_LIBRARY_CLASSES or CurrentSection in TAB_PCD_DYNAMIC_TYPE_LIST or CurrentSection in TAB_PCD_DYNAMIC_EX_TYPE_LIST:
                    ID = self.TblDsc.Insert(Model, LineValue, Third, '', Arch, -1, FileID, StartLine, -1, StartLine, -1, 0)
                    #Records.append([LineValue, Arch, StartLine, ID, Third])
                    continue
                elif CurrentSection != TAB_COMPONENTS:
                    ID = self.TblDsc.Insert(Model, LineValue, '', '', Arch, -1, FileID, StartLine, -1, StartLine, -1, 0)
                    #Records.append([LineValue, Arch, StartLine, ID, Third])
                    continue

                #
                # Parse COMPONENT section
                #
                # NOTE(review): this branch is reached for every SectionItem of a
                # [Components] section, yet GetComponent consumes the entire
                # SectionItemList each time -- the component records appear to be
                # parsed/inserted once per line. Verify against callers.
                if CurrentSection == TAB_COMPONENTS:
                    Components = []
                    GetComponent(SectionItemList, Components)
                    for Component in Components:
                        EdkLogger.debug(4, "Parsing component %s ..." % Component)
                        DscItmeID = self.TblDsc.Insert(MODEL_META_DATA_COMPONENT, Component[0], '', '', Arch, -1, FileID, StartLine, -1, StartLine, -1, 0)
                        for Item in Component[1]:
                            # Component[1] entries are '<LibraryClass>|<Instance>' overrides.
                            List = GetSplitValueList(Item, MaxSplit=2)
                            LibName, LibIns = '', ''
                            if len(List) == 2:
                                LibName = List[0]
                                LibIns = List[1]
                            else:
                                LibName = List[0]
                            self.TblDsc.Insert(MODEL_EFI_LIBRARY_CLASS, LibName, LibIns, '', Arch, DscItmeID, FileID, StartLine, -1, StartLine, -1, 0)
                        for Item in Component[2]:
                            self.TblDsc.Insert(MODEL_META_DATA_BUILD_OPTION, Item, '', '', Arch, DscItmeID, FileID, StartLine, -1, StartLine, -1, 0)
                        for Item in Component[3]:
                            Model = Section[Item[0].upper()]
                            self.TblDsc.Insert(Model, Item[1], '', '', Arch, DscItmeID, FileID, StartLine, -1, StartLine, -1, 0)
-
- ## Show detailed information of Dsc
- #
- # Print all members and their values of Dsc class
- #
- def ShowDsc(self):
- print TAB_SECTION_START + TAB_INF_DEFINES + TAB_SECTION_END
- printDict(self.Defines.DefinesDictionary)
-
- for Key in self.KeyList:
- for Arch in DataType.ARCH_LIST_FULL:
- Command = "printList(TAB_SECTION_START + '" + \
- Key + DataType.TAB_SPLIT + Arch + \
- "' + TAB_SECTION_END, self.Contents[arch]." + Key + ')'
- eval(Command)
-
    ## Show detailed information of Platform
    #
    # Print all members and their values of Platform class
    #
    # Debug helper: dumps the header, build options, SKUs, libraries,
    # library classes, dynamic PCDs and components of self.Platform to
    # stdout, one indented line per sub-item.
    #
    def ShowPlatform(self):
        M = self.Platform
        # Header information is stored per architecture.
        for Arch in M.Header.keys():
            print '\nArch =', Arch
            print 'Filename =', M.Header[Arch].FileName
            print 'FullPath =', M.Header[Arch].FullPath
            print 'BaseName =', M.Header[Arch].Name
            print 'Guid =', M.Header[Arch].Guid
            print 'Version =', M.Header[Arch].Version
            print 'DscSpecification =', M.Header[Arch].DscSpecification
            print 'SkuId =', M.Header[Arch].SkuIdName
            print 'SupArchList =', M.Header[Arch].SupArchList
            print 'BuildTargets =', M.Header[Arch].BuildTargets
            print 'OutputDirectory =', M.Header[Arch].OutputDirectory
            print 'BuildNumber =', M.Header[Arch].BuildNumber
            print 'MakefileName =', M.Header[Arch].MakefileName
            print 'BsBaseAddress =', M.Header[Arch].BsBaseAddress
            print 'RtBaseAddress =', M.Header[Arch].RtBaseAddress
            print 'Define =', M.Header[Arch].Define
        print 'Fdf =', M.FlashDefinitionFile.FilePath
        print '\nBuildOptions =', M.BuildOptions, M.BuildOptions.IncludeFiles
        for Item in M.BuildOptions.BuildOptionList:
            print '\t', 'ToolChainFamily =', Item.ToolChainFamily, 'ToolChain =', Item.ToolChain, 'Option =', Item.Option, 'Arch =', Item.SupArchList
        print '\nSkuIds =', M.SkuInfos.SkuInfoList, M.SkuInfos.IncludeFiles
        print '\nLibraries =', M.Libraries, M.Libraries.IncludeFiles
        for Item in M.Libraries.LibraryList:
            print '\t', Item.FilePath, Item.SupArchList, Item.Define
        print '\nLibraryClasses =', M.LibraryClasses, M.LibraryClasses.IncludeFiles
        for Item in M.LibraryClasses.LibraryList:
            print '\t', Item.Name, Item.FilePath, Item.SupModuleList, Item.SupArchList, Item.Define
        print '\nPcds =', M.DynamicPcdBuildDefinitions
        for Item in M.DynamicPcdBuildDefinitions:
            print '\tCname=', Item.CName, 'TSG=', Item.TokenSpaceGuidCName, 'Value=', Item.DefaultValue, 'Token=', Item.Token, 'Type=', Item.ItemType, 'Datum=', Item.DatumType, 'Size=', Item.MaxDatumSize, 'Arch=', Item.SupArchList, Item.SkuInfoList
            for Sku in Item.SkuInfoList.values():
                print '\t\t', str(Sku)
        print '\nComponents =', M.Modules.ModuleList, M.Modules.IncludeFiles
        for Item in M.Modules.ModuleList:
            print '\t', Item.FilePath, Item.ExecFilePath, Item.SupArchList
            for Lib in Item.LibraryClasses.LibraryList:
                print '\t\tLib:', Lib.Name, Lib.FilePath
            for Bo in Item.ModuleSaBuildOption.BuildOptionList:
                print '\t\tBuildOption:', Bo.ToolChainFamily, Bo.ToolChain, Bo.Option
            for Pcd in Item.PcdBuildDefinitions:
                print '\t\tPcd:', Pcd.CName, Pcd.TokenSpaceGuidCName, Pcd.MaxDatumSize, Pcd.DefaultValue, Pcd.ItemType
-
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
# Self-test entry: parses Nt32Pkg/Nt32Pkg.dsc from the current workspace into
# a local Dsc.db database and dumps the resulting platform.
#
if __name__ == '__main__':
    EdkLogger.Initialize()
    EdkLogger.SetLevel(EdkLogger.DEBUG_0)

    # NOTE(review): os.getenv returns None when WORKSPACE is unset, making
    # os.path.join below fail -- assumes the environment variable is defined.
    W = os.getenv('WORKSPACE')
    F = os.path.join(W, 'Nt32Pkg/Nt32Pkg.dsc')

    Db = Database.Database('Dsc.db')
    Db.InitDatabase()

    P = Dsc(os.path.normpath(F), True, True, W, Db)
    P.ShowPlatform()

    Db.Close()
diff --git a/BaseTools/Source/Python/Common/EdkIIWorkspace.py b/BaseTools/Source/Python/Common/EdkIIWorkspace.py
deleted file mode 100644
index f22a545b77..0000000000
--- a/BaseTools/Source/Python/Common/EdkIIWorkspace.py
+++ /dev/null
@@ -1,320 +0,0 @@
-## @file
-# This is the base class for applications that operate on an EDK II Workspace
-#
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import Common.LongFilePathOs as os, sys, time
-from DataType import *
-from Common.LongFilePathSupport import OpenLongFilePath as open
-from Common.MultipleWorkspace import MultipleWorkspace as mws
-
## EdkIIWorkspace
#
# Collect WorkspaceDir from the environment, the Verbose command line flag, and detect an icon bitmap file.
#
# @var StartTime: Time of build system starting
# @var PrintRunTime: Printable time of build system running
# @var PrintRunStatus: Printable status of build system running
# @var RunStatus: Status of build system running
#
class EdkIIWorkspace:
    def __init__(self):
        self.StartTime = time.time()
        self.PrintRunTime = False
        self.PrintRunStatus = False
        self.RunStatus = ''

        #
        # Check environment valiable 'WORKSPACE'
        #
        # NOTE(review): returning False from __init__ makes CPython raise
        # TypeError ('__init__() should return None'), so this guard cannot
        # actually report failure the way it appears to -- confirm callers.
        if os.environ.get('WORKSPACE') == None:
            print 'ERROR: WORKSPACE not defined.  Please run EdkSetup from the EDK II install directory.'
            return False

        self.CurrentWorkingDir = os.getcwd()

        # Normalize WORKSPACE: if it has no drive letter, borrow the drive of
        # the current working directory (Windows); otherwise upper-case the
        # drive letter so later string comparisons are consistent.
        self.WorkspaceDir = os.path.realpath(os.environ.get('WORKSPACE'))
        (Drive, Path) = os.path.splitdrive(self.WorkspaceDir)
        if Drive == '':
            (Drive, CwdPath) = os.path.splitdrive(self.CurrentWorkingDir)
            if Drive != '':
                self.WorkspaceDir = Drive + Path
        else:
            self.WorkspaceDir = Drive.upper() + Path

        self.WorkspaceRelativeWorkingDir = self.WorkspaceRelativePath (self.CurrentWorkingDir)

        try:
            #
            # Load TianoCoreOrgLogo, used for GUI tool
            #
            # NOTE(review): 'wx' is not imported in this file; unless a GUI
            # caller provides it, this always takes the except branch.
            self.Icon = wx.Icon(self.WorkspaceFile('tools/Python/TianoCoreOrgLogo.gif'), wx.BITMAP_TYPE_GIF)
        except:
            self.Icon = None

        # -v anywhere on the command line enables verbose output.
        self.Verbose = False
        for Arg in sys.argv:
            if Arg.lower() == '-v':
                self.Verbose = True

    ## Close build system
    #
    # Close build system and print running time and status
    #
    # Note: integer division ('/') is intentional under Python 2.
    #
    def Close(self):
        if self.PrintRunTime:
            Seconds = int(time.time() - self.StartTime)
            if Seconds < 60:
                print 'Run Time: %d seconds' % (Seconds)
            else:
                Minutes = Seconds / 60
                Seconds = Seconds % 60
                if Minutes < 60:
                    print 'Run Time: %d minutes %d seconds' % (Minutes, Seconds)
                else:
                    Hours = Minutes / 60
                    Minutes = Minutes % 60
                    print 'Run Time: %d hours %d minutes %d seconds' % (Hours, Minutes, Seconds)
        if self.RunStatus != '':
            print self.RunStatus

    ## Convert to a workspace relative filename
    #
    # Convert a full path filename to a workspace relative filename.
    #
    # @param FileName: The filename to be Converted
    #
    # @retval None    Workspace dir is not found in the full path
    # @retval string  The relative filename
    #
    def WorkspaceRelativePath(self, FileName):
        FileName = os.path.realpath(FileName)
        if FileName.find(self.WorkspaceDir) != 0:
            return None
        return FileName.replace (self.WorkspaceDir, '').strip('\\').strip('/')

    ## Convert to a full path filename
    #
    # Convert a workspace relative filename to a full path filename.
    #
    # @param FileName: The filename to be Converted
    #
    # @retval string The full path filename
    #
    def WorkspaceFile(self, FileName):
        return os.path.realpath(mws.join(self.WorkspaceDir,FileName))

    ## Convert to a real path filename
    #
    # Convert ${WORKSPACE} to real path
    #
    # @param FileName: The filename to be Converted
    #
    # @retval string The full path filename
    #
    def WorkspacePathConvert(self, FileName):
        return os.path.realpath(FileName.replace(TAB_WORKSPACE, self.WorkspaceDir))

    ## Convert XML into a DOM
    #
    # Parse an XML file into a DOM and return the DOM.
    #
    # NOTE(review): XmlParseFile / XmlParseFileSection / XmlSaveFile are not
    # defined in this file -- presumably provided by an XML utility module
    # imported by the eventual caller; verify before use.
    #
    # @param FileName: The filename to be parsed
    #
    # @retval XmlParseFile (self.WorkspaceFile(FileName))
    #
    def XmlParseFile (self, FileName):
        if self.Verbose:
            print FileName
        return XmlParseFile (self.WorkspaceFile(FileName))

    ## Convert a XML section
    #
    # Parse a section of an XML file into a DOM(Document Object Model) and return the DOM.
    #
    # @param FileName: The filename to be parsed
    # @param SectionTag: The tag name of the section to be parsed
    #
    # @retval XmlParseFileSection (self.WorkspaceFile(FileName), SectionTag)
    #
    def XmlParseFileSection (self, FileName, SectionTag):
        if self.Verbose:
            print FileName
        return XmlParseFileSection (self.WorkspaceFile(FileName), SectionTag)

    ## Save a XML file
    #
    # Save a DOM(Document Object Model) into an XML file.
    #
    # @param Dom: The Dom to be saved
    # @param FileName: The filename
    #
    # @retval XmlSaveFile (Dom, self.WorkspaceFile(FileName))
    #
    def XmlSaveFile (self, Dom, FileName):
        if self.Verbose:
            print FileName
        return XmlSaveFile (Dom, self.WorkspaceFile(FileName))

    ## Convert Text File To Dictionary
    #
    # Convert a workspace relative text file to a dictionary of (name:value) pairs.
    # Thin wrapper over the module-level ConvertTextFileToDictionary that
    # resolves FileName against the workspace first.
    #
    # @param FileName: Text filename
    # @param Dictionary: Dictionary to store data
    # @param CommentCharacter: Comment char, be used to ignore comment content
    # @param KeySplitCharacter: Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
    # @param ValueSplitFlag: Value split flag, be used to decide if has multiple values
    # @param ValueSplitCharacter: Value split char, be used to split multiple values. Key1 = Value1|Value2, '|' is the value split char
    #
    # @retval ConvertTextFileToDictionary(self.WorkspaceFile(FileName), Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter)
    #
    def ConvertTextFileToDictionary(self, FileName, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
        if self.Verbose:
            print FileName
        return ConvertTextFileToDictionary(self.WorkspaceFile(FileName), Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter)

    ## Convert Dictionary To Text File
    #
    # Convert a dictionary of (name:value) pairs to a workspace relative text file.
    # Thin wrapper over the module-level ConvertDictionaryToTextFile.
    #
    # @param FileName: Text filename
    # @param Dictionary: Dictionary to store data
    # @param CommentCharacter: Comment char, be used to ignore comment content
    # @param KeySplitCharacter: Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
    # @param ValueSplitFlag: Value split flag, be used to decide if has multiple values
    # @param ValueSplitCharacter: Value split char, be used to split multiple values. Key1 = Value1|Value2, '|' is the value split char
    #
    # @retval ConvertDictionaryToTextFile(self.WorkspaceFile(FileName), Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter)
    #
    def ConvertDictionaryToTextFile(self, FileName, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
        if self.Verbose:
            print FileName
        return ConvertDictionaryToTextFile(self.WorkspaceFile(FileName), Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter)
-
## Convert Text File To Dictionary
#
# Convert a text file to a dictionary of (name:value) pairs.
#
# @param FileName: Text filename
# @param Dictionary: Dictionary to store data (updated in place)
# @param CommentCharacter: Comment char, be used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, be used to decide if has multiple values
# @param ValueSplitCharacter: Value split char, be used to split multiple values. Key1 = Value1|Value2, '|' is the value split char
#
# @retval True  Convert successfully
# @retval False Open file failed
#
def ConvertTextFileToDictionary(FileName, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
    try:
        F = open(FileName, 'r')
    except (IOError, OSError):
        # Preserve the original contract: unreadable file -> False.
        return False
    try:
        Keys = set()  # first occurrence of a key wins; set gives O(1) lookup
        for Line in F:
            LineList = Line.split(KeySplitCharacter, 1)
            if len(LineList) < 2:
                continue
            KeyPart = LineList[0].split()
            # Accept only a single-token key that is not a comment and has
            # not been seen before.
            if len(KeyPart) != 1 or KeyPart[0][0] == CommentCharacter or KeyPart[0] in Keys:
                continue
            Key = KeyPart[0]
            if ValueSplitFlag:
                # Historical behavior: no strip() here, so whitespace and the
                # trailing newline stay inside the split parts.
                Dictionary[Key] = LineList[1].replace('\\', '/').split(ValueSplitCharacter)
            else:
                Dictionary[Key] = LineList[1].strip().replace('\\', '/')
            Keys.add(Key)
    finally:
        # Close the handle even if parsing raises (the original leaked it).
        F.close()
    return True
-
## Convert Dictionary To Text File
#
# Convert a dictionary of (name:value) pairs to a text file, updating keys
# already present in the file in place and appending the rest.
#
# @param FileName: Text filename
# @param Dictionary: Dictionary to store data
# @param CommentCharacter: Comment char, be used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, be used to decide if has multiple values
# @param ValueSplitCharacter: Value split char, be used to split multiple values. Key1 = Value1|Value2, '|' is the value split char
#
# @retval True  Convert successfully
# @retval False Open file failed
#
def ConvertDictionaryToTextFile(FileName, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
    # Read existing content so keys already in the file are rewritten in place;
    # a missing/unreadable file simply means "start empty".
    try:
        with open(FileName, 'r') as F:
            Lines = F.readlines()
    except (IOError, OSError):
        Lines = []
    # list() so .remove() works on Python 3 (dict.keys() is a view there) and
    # the ordering is stable while we consume it.
    Keys = list(Dictionary.keys())
    MaxLength = max([0] + [len(Key) for Key in Keys])  # pad keys to align values
    Index = 0
    # NOTE(review): Lines is mutated (pop/insert) while being iterated; this
    # reproduces the historical behavior deliberately.
    for Line in Lines:
        LineList = Line.split(KeySplitCharacter, 1)
        if len(LineList) >= 2:
            Key = LineList[0].split()
            if len(Key) == 1 and Key[0][0] != CommentCharacter and Key[0] in Dictionary:
                if ValueSplitFlag:
                    Line = '%-*s %c %s\n' % (MaxLength, Key[0], KeySplitCharacter, ' '.join(Dictionary[Key[0]]))
                else:
                    Line = '%-*s %c %s\n' % (MaxLength, Key[0], KeySplitCharacter, Dictionary[Key[0]])
                Lines.pop(Index)
                if Key[0] in Keys:
                    Lines.insert(Index, Line)
                    Keys.remove(Key[0])
        Index += 1
    # Keys not found in the existing file are appended at the end.
    for RemainingKey in Keys:
        if ValueSplitFlag:
            Line = '%-*s %c %s\n' % (MaxLength, RemainingKey, KeySplitCharacter, ' '.join(Dictionary[RemainingKey]))
        else:
            Line = '%-*s %c %s\n' % (MaxLength, RemainingKey, KeySplitCharacter, Dictionary[RemainingKey])
        Lines.append(Line)
    try:
        F = open(FileName, 'w')
    except (IOError, OSError):
        return False
    try:
        F.writelines(Lines)
    finally:
        F.close()
    return True
-
## Create a new directory
#
# Create Directory (including any missing parents) if it does not already
# exist; silently succeed if it does.
#
# @param Directory: Directory to be created
#
def CreateDirectory(Directory):
    # try/except instead of the former os.access() pre-check: the pre-check
    # was a TOCTOU race, and os.access answers "can I reach this path", not
    # "does it exist as a directory".
    try:
        os.makedirs(Directory)
    except OSError:
        if not os.path.isdir(Directory):
            raise
-
## Create a new file
#
# Ensure Directory exists, then open FileName inside it and return the
# open file object.
#
# @param Directory: Directory to be created
# @param FileName: Filename to be created
# @param Mode: The mode of open file, defautl is 'w'
#
def CreateFile(Directory, FileName, Mode='w'):
    FilePath = os.path.join(Directory, FileName)
    CreateDirectory(Directory)
    return open(FilePath, Mode)
-
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
# Placeholder entry point: this module is a library and has no standalone
# behavior of its own.
#
if __name__ == '__main__':
    # Nothing to do here. Could do some unit tests
    pass
diff --git a/BaseTools/Source/Python/Common/EdkIIWorkspaceBuild.py b/BaseTools/Source/Python/Common/EdkIIWorkspaceBuild.py
deleted file mode 100644
index d6df01d4ce..0000000000
--- a/BaseTools/Source/Python/Common/EdkIIWorkspaceBuild.py
+++ /dev/null
@@ -1,1670 +0,0 @@
-## @file
-# This file is used to define each component of the build database
-#
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import Common.LongFilePathOs as os, string, copy, pdb, copy
-import EdkLogger
-import DataType
-from InfClassObject import *
-from DecClassObject import *
-from DscClassObject import *
-from String import *
-from BuildToolError import *
-from Misc import sdict
-import Database as Database
-import time as time
-
## PcdClassObject
#
# This Class is used for PcdObject
#
# @param object: Inherited from object class
# @param Name: Input value for Name of Pcd, default is None
# @param Guid: Input value for Guid of Pcd, default is None
# @param Type: Input value for Type of Pcd, default is None
# @param DatumType: Input value for DatumType of Pcd, default is None
# @param Value: Input value for Value of Pcd, default is None
# @param Token: Input value for Token of Pcd, default is None
# @param MaxDatumSize: Input value for MaxDatumSize of Pcd, default is None
# @param SkuInfoList: Input value for SkuInfoList of Pcd, default is None (stored as a fresh {})
# @param IsOverrided: Input value for IsOverrided of Pcd, default is False
#
# @var TokenCName: To store value for TokenCName
# @var TokenSpaceGuidCName: To store value for TokenSpaceGuidCName
# @var Type: To store value for Type
# @var DatumType: To store value for DatumType
# @var TokenValue: To store value for TokenValue
# @var MaxDatumSize: To store value for MaxDatumSize
# @var SkuInfoList: To store value for SkuInfoList
# @var IsOverrided: To store value for IsOverrided
# @var Phase: To store value for Phase, default is "DXE"
#
class PcdClassObject(object):
    def __init__(self, Name = None, Guid = None, Type = None, DatumType = None, Value = None, Token = None, MaxDatumSize = None, SkuInfoList = None, IsOverrided = False):
        self.TokenCName = Name
        self.TokenSpaceGuidCName = Guid
        self.Type = Type
        self.DatumType = DatumType
        self.DefaultValue = Value
        self.TokenValue = Token
        self.MaxDatumSize = MaxDatumSize
        # BUGFIX: the default argument used to be the mutable literal {},
        # which Python evaluates once -- every Pcd constructed without an
        # explicit SkuInfoList shared (and could corrupt) the same dict.
        self.SkuInfoList = SkuInfoList if SkuInfoList is not None else {}
        self.IsOverrided = IsOverrided
        self.Phase = "DXE"

    ## Convert the class to a string
    #
    # Convert each member of the class to string
    # Organize to a single line format string
    #
    # @retval Rtn Formatted String
    #
    def __str__(self):
        Rtn = '\tTokenCName=' + str(self.TokenCName) + ', ' + \
              'TokenSpaceGuidCName=' + str(self.TokenSpaceGuidCName) + ', ' + \
              'Type=' + str(self.Type) + ', ' + \
              'DatumType=' + str(self.DatumType) + ', ' + \
              'DefaultValue=' + str(self.DefaultValue) + ', ' + \
              'TokenValue=' + str(self.TokenValue) + ', ' + \
              'MaxDatumSize=' + str(self.MaxDatumSize) + ', '
        for Item in self.SkuInfoList.values():
            Rtn = Rtn + 'SkuId=' + Item.SkuId + ', ' + 'SkuIdName=' + Item.SkuIdName
        Rtn = Rtn + str(self.IsOverrided)

        return Rtn

    ## Override __eq__ function
    #
    # Check whether pcds are the same
    #
    # @retval False The two pcds are different
    # @retval True The two pcds are the same
    #
    def __eq__(self, Other):
        # identity test for None instead of '!= None' equality
        return Other is not None and self.TokenCName == Other.TokenCName and self.TokenSpaceGuidCName == Other.TokenSpaceGuidCName

    ## Override __hash__ function
    #
    # Use (TokenCName, TokenSpaceGuidCName) as key in hash table
    #
    # @retval truple() Key for hash table
    #
    def __hash__(self):
        return hash((self.TokenCName, self.TokenSpaceGuidCName))
-
## LibraryClassObject
#
# This Class defines LibraryClassObject used in BuildDatabase
#
# @param object: Inherited from object class
# @param Name: Input value for LibraryClassName, default is None
# @param SupModList: Input value for SupModList, default is None (stored as a fresh [])
# @param Type: Input value for Type, default is None; when given, it is split
#              into SupModList and overrides the SupModList argument
#
# @var LibraryClass: To store value for LibraryClass
# @var SupModList: To store value for SupModList
# @var Type: To store value for Type
#
class LibraryClassObject(object):
    def __init__(self, Name = None, SupModList = None, Type = None):
        self.LibraryClass = Name
        # BUGFIX: the default argument used to be the mutable literal [],
        # shared by every instance constructed without an explicit list.
        self.SupModList = SupModList if SupModList is not None else []
        if Type is not None:
            self.SupModList = CleanString(Type).split(DataType.TAB_SPACE_SPLIT)
-
## ModuleBuildClassObject
#
# This Class defines ModuleBuildClass
#
# @param object: Inherited from object class
#
# @var DescFilePath: To store value for DescFilePath
# @var BaseName: To store value for BaseName
# @var ModuleType: To store value for ModuleType
# @var Guid: To store value for Guid
# @var Version: To store value for Version
# @var PcdIsDriver: To store value for PcdIsDriver
# @var BinaryModule: To store value for BinaryModule
# @var CustomMakefile: To store value for CustomMakefile
# @var Specification: To store value for Specification
# @var Shadow To store value for Shadow
# @var LibraryClass: To store value for LibraryClass, it is a list structure as
# [ LibraryClassObject, ...]
# @var ModuleEntryPointList: To store value for ModuleEntryPointList
# @var ModuleUnloadImageList: To store value for ModuleUnloadImageList
# @var ConstructorList: To store value for ConstructorList
# @var DestructorList: To store value for DestructorList
# @var Binaries: To store value for Binaries, it is a list structure as
# [ ModuleBinaryClassObject, ...]
# @var Sources: To store value for Sources, it is a list structure as
# [ ModuleSourceFilesClassObject, ... ]
# @var LibraryClasses: To store value for LibraryClasses, it is a set structure as
# { [LibraryClassName, ModuleType] : LibraryClassInfFile }
# @var Protocols: To store value for Protocols, it is a list structure as
# [ ProtocolName, ... ]
# @var Ppis: To store value for Ppis, it is a list structure as
# [ PpiName, ... ]
# @var Guids: To store value for Guids, it is a list structure as
# [ GuidName, ... ]
# @var Includes: To store value for Includes, it is a list structure as
# [ IncludePath, ... ]
# @var Packages: To store value for Packages, it is a list structure as
# [ DecFileName, ... ]
# @var Pcds: To store value for Pcds, it is a set structure as
# { [(PcdCName, PcdGuidCName)] : PcdClassObject}
# @var BuildOptions: To store value for BuildOptions, it is a set structure as
# { [BuildOptionKey] : BuildOptionValue}
# @var Depex: To store value for Depex
#
class ModuleBuildClassObject(object):
    def __init__(self):
        """Initialize every field to its empty default; the INF parser
        (GenModuleDatabase) fills the fields in afterwards."""
        self.AutoGenVersion = 0
        self.DescFilePath = ''
        self.BaseName = ''
        self.ModuleType = ''
        self.Guid = ''
        self.Version = ''
        self.PcdIsDriver = ''
        self.BinaryModule = ''
        self.Shadow = ''
        self.CustomMakefile = {}
        self.Specification = {}
        self.LibraryClass = []
        self.ModuleEntryPointList = []
        self.ModuleUnloadImageList = []
        self.ConstructorList = []
        self.DestructorList = []

        self.Binaries = []
        self.Sources = []
        # sdict from Misc (presumably an order-preserving dict -- see import
        # at file top); keyed (LibraryClassName, ModuleType) per the header.
        self.LibraryClasses = sdict()
        self.Libraries = []
        self.Protocols = []
        self.Ppis = []
        self.Guids = []
        self.Includes = []
        self.Packages = []
        self.Pcds = {}
        self.BuildOptions = {}
        self.Depex = ''

    ## Convert the class to a string
    #
    # Convert member DescFilePath of the class to a string
    #
    # @retval string Formatted String
    #
    def __str__(self):
        return self.DescFilePath

    ## Override __eq__ function
    #
    # Check whether ModuleBuildClassObjects are the same
    # (comparison is on the INF path only, via str(Other))
    #
    # @retval False The two ModuleBuildClassObjects are different
    # @retval True The two ModuleBuildClassObjects are the same
    #
    def __eq__(self, Other):
        return self.DescFilePath == str(Other)

    ## Override __hash__ function
    #
    # Use DescFilePath as key in hash table
    #
    # @retval string Key for hash table
    #
    def __hash__(self):
        return hash(self.DescFilePath)
-
## PackageBuildClassObject
#
# Build-time description of one EDK II package (.dec file).  All containers
# start out empty and are populated by WorkspaceBuild.GenPackageDatabase:
#
# @var DescFilePath: path of the DEC file this object describes
# @var PackageName: package BaseName from the DEC header
# @var Guid: package GUID
# @var Version: package version
# @var Protocols: { ProtocolName : Protocol Guid, ... }
# @var Ppis: { PpiName : Ppi Guid, ... }
# @var Guids: { GuidName : Guid, ... }
# @var Includes: [ IncludePath, ... ]
# @var LibraryClasses: { LibraryClassName : LibraryClassInfFile }
# @var Pcds: { (PcdCName, PcdGuidCName, Type) : PcdClassObject }
#
class PackageBuildClassObject(object):
    def __init__(self):
        # Identification values parsed from the DEC [Defines] section.
        self.DescFilePath = ''
        self.PackageName = ''
        self.Guid = ''
        self.Version = ''

        # Declaration tables, each mapping a C name to its value.
        self.Protocols = {}
        self.Ppis = {}
        self.Guids = {}

        # Search paths, recommended library instances and PCD declarations.
        self.Includes = []
        self.LibraryClasses = {}
        self.Pcds = {}

    ## A package is identified by its DEC file path.
    def __str__(self):
        return self.DescFilePath

    ## Two package objects are equal when their DEC paths match;
    ## str(Other) lets comparison work against plain path strings too.
    def __eq__(self, Other):
        return str(self) == str(Other)

    ## Hash on the DEC file path so packages can be used as dict keys.
    def __hash__(self):
        return hash(str(self))
-
## PlatformBuildClassObject
#
# Build-time description of one platform (.dsc file), populated by
# WorkspaceBuild.GenPlatformDatabase:
#
# @var DescFilePath: path of the DSC file this object describes
# @var PlatformName / Guid / Version / DscSpecification: DSC header values
# @var OutputDirectory / FlashDefinition / BuildNumber / MakefileName: DSC header values
# @var SkuIds: { 'SkuName' : SkuId, '!include' : includefilename, ... }
# @var Modules: [ InfFileName, ... ]
# @var LibraryInstances: library INF paths used by this platform's modules
# @var LibraryClasses: { (LibraryClassName, ModuleType) : LibraryClassInfFile }
# @var Libraries: { LibraryName : InfFileName }
# @var Pcds: { (PcdCName, PcdGuidCName) : PcdClassObject }
# @var BuildOptions: { BuildOptionKey : BuildOptionValue }
#
class PlatformBuildClassObject(object):
    def __init__(self):
        # All DSC [Defines] header values start out as empty strings
        # (immutable, so chained assignment is safe).
        self.DescFilePath = self.PlatformName = self.Guid = self.Version = ''
        self.DscSpecification = self.OutputDirectory = self.FlashDefinition = ''
        self.BuildNumber = self.MakefileName = ''

        # Per-platform build content; each container is a distinct object.
        self.SkuIds = {}
        self.Modules = []
        self.LibraryInstances = []
        self.LibraryClasses = {}
        self.Libraries = {}
        self.Pcds = {}
        self.BuildOptions = {}

    ## A platform is identified by its DSC file path.
    def __str__(self):
        return self.DescFilePath

    ## Equality on the DSC path; str(other) also accepts plain path strings.
    def __eq__(self, other):
        return self.DescFilePath == str(other)

    ## Hash on the DSC file path so platforms can be used as dict keys.
    def __hash__(self):
        return hash(self.DescFilePath)
-
## ItemBuild
#
# Per-architecture container for the three build databases used by
# WorkspaceBuild:
#
# @param Arch: Build arch this container belongs to
# @param Platform: Build Platform (accepted for interface compatibility, not stored)
# @param Package: Build Package (accepted for interface compatibility, not stored)
# @param Module: Build Module (accepted for interface compatibility, not stored)
#
# @var Arch: the build arch
# @var PlatformDatabase: { DscFileName : PlatformBuildClassObject, ... }
# @var PackageDatabase: { DecFileName : PackageBuildClassObject, ... }
# @var ModuleDatabase: { InfFileName : ModuleBuildClassObject, ... }
#
class ItemBuild(object):
    def __init__(self, Arch, Platform = None, Package = None, Module = None):
        self.Arch = Arch
        # One fresh dict per database kind, each mapping a description-file
        # name to its build class object.
        self.PlatformDatabase, self.PackageDatabase, self.ModuleDatabase = {}, {}, {}
-
-## WorkspaceBuild
-#
-# This class is used to parse active platform to init all inf/dec/dsc files
-# Generate module/package/platform databases for build
-#
-# @param object: Inherited from object class
-# @param ActivePlatform: Input value for current active platform
-# @param WorkspaceDir: Input value for current WorkspaceDir
-#
-# @var WorkspaceDir: To store value for WorkspaceDir
-# @var SupArchList: To store value for SupArchList, selection scope is in below list
-# EBC | IA32 | X64 | IPF | ARM | PPC | AARCH64
-# @var BuildTarget: To store value for WorkspaceDir, selection scope is in below list
-# RELEASE | DEBUG
-# @var SkuId: To store value for SkuId
-# @var Fdf: To store value for Fdf
-# @var FdTargetList: To store value for FdTargetList
-# @var FvTargetList: To store value for FvTargetList
-# @var TargetTxt: To store value for TargetTxt, it is a set structure as
-# TargetTxtClassObject
-# @var ToolDef: To store value for ToolDef, it is a set structure as
-# ToolDefClassObject
-# @var InfDatabase: To store value for InfDatabase, it is a set structure as
-# { [InfFileName] : InfClassObject}
-# @var DecDatabase: To store value for DecDatabase, it is a set structure as
-# { [DecFileName] : DecClassObject}
-# @var DscDatabase: To store value for DscDatabase, it is a set structure as
-# { [DscFileName] : DscClassObject}
-# @var Build: To store value for DscDatabase, it is a set structure as
-# ItemBuild
-# @var DscFileName: To store value for Active Platform
-# @var UnFoundPcdInDsc: To store values for the pcds defined in INF/DEC but not found in DSC, it is a set structure as
-# { (PcdGuid, PcdCName, Arch) : DecFileName }
-#
-class WorkspaceBuild(object):
    def __init__(self, ActivePlatform, WorkspaceDir):
        """Parse the active platform DSC and recursively register every INF
        and DEC file it references into the Dsc/Inf/Dec databases.

        @param ActivePlatform: workspace-relative path of the platform DSC
        @param WorkspaceDir:   root directory of the workspace

        Raises a fatal EdkLogger.error when the DSC file does not exist.
        """
        self.WorkspaceDir = NormPath(WorkspaceDir)
        self.SupArchList = []          # filled from the DSC header below
        self.BuildTarget = []          # filled from the DSC header below
        self.SkuId = ''
        self.Fdf = ''
        self.FdTargetList = []
        self.FvTargetList = []
        self.TargetTxt = None
        self.ToolDef = None

        # File databases: { file name : parsed class object }
        self.InfDatabase = {}
        self.DecDatabase = {}
        self.DscDatabase = {}

        self.UnFoundPcdInDsc = {}

        #
        # Init build for all arches (one ItemBuild per supported arch)
        #
        self.Build = {}
        for Arch in DataType.ARCH_LIST:
            self.Build[Arch] = ItemBuild(Arch)

        #
        # Init build database -- must happen before the Dsc() parse below,
        # which receives self.Db as an argument
        #
        self.Db = Database.Database(DATABASE_PATH)
        self.Db.InitDatabase()

        #
        # Get active platform
        #
        self.DscFileName = NormPath(ActivePlatform)
        File = self.WorkspaceFile(self.DscFileName)
        if os.path.exists(File) and os.path.isfile(File):
            self.DscDatabase[self.DscFileName] = Dsc(File, False, True, self.WorkspaceDir, self.Db)
        else:
            # EdkLogger.error is fatal, so execution stops here if missing
            EdkLogger.error("AutoGen", FILE_NOT_FOUND, ExtraData = File)

        #
        # Parse platform to get module
        #
        for DscFile in self.DscDatabase.keys():
            Platform = self.DscDatabase[DscFile].Platform

            #
            # Get global information: union of per-arch SupArchList and
            # BuildTargets across all arches
            #
            Tmp = set()
            for Arch in DataType.ARCH_LIST:
                for Item in Platform.Header[Arch].SupArchList:
                    Tmp.add(Item)
            self.SupArchList = list(Tmp)
            Tmp = set()
            for Arch in DataType.ARCH_LIST:
                for Item in Platform.Header[Arch].BuildTargets:
                    Tmp.add(Item)
            self.BuildTarget = list(Tmp)
            # NOTE(review): this loop overwrites SkuId/Fdf on every
            # iteration, so the values of the LAST arch in SupArchList win
            # -- confirm this is intentional.
            for Arch in self.SupArchList:
                self.SkuId = Platform.Header[Arch].SkuIdName
                self.Fdf = Platform.FlashDefinitionFile.FilePath

            #
            # Get all inf files referenced by the platform
            #
            for Item in Platform.LibraryClasses.LibraryList:
                for Arch in Item.SupArchList:
                    self.AddToInfDatabase(Item.FilePath)

            for Item in Platform.Libraries.LibraryList:
                for Arch in Item.SupArchList:
                    self.AddToInfDatabase(Item.FilePath)

            for Item in Platform.Modules.ModuleList:
                for Arch in Item.SupArchList:
                    #
                    # Add modules
                    #
                    Module = Item.FilePath
                    self.AddToInfDatabase(Module)
                    #
                    # Add library used in modules and record the per-module
                    # library-class override
                    #
                    for Lib in Item.LibraryClasses.LibraryList:
                        self.AddToInfDatabase(Lib.FilePath)
                        self.UpdateLibraryClassOfModule(Module, Lib.Name, Arch, Lib.FilePath)

        #
        # Parse module to get package dependencies (DEC files)
        #
        for InfFile in self.InfDatabase.keys():
            Module = self.InfDatabase[InfFile].Module
            #
            # Get all dec
            #
            for Item in Module.PackageDependencies:
                for Arch in Item.SupArchList:
                    self.AddToDecDatabase(Item.FilePath)
        # End of self.Init()
-
- ## Generate PlatformDatabase
- #
- # Go through each arch to get all items in DscDatabase to PlatformDatabase
- #
- def GenPlatformDatabase(self, PcdsSet={}):
- for Dsc in self.DscDatabase.keys():
- Platform = self.DscDatabase[Dsc].Platform
- for Arch in self.SupArchList:
- Pb = PlatformBuildClassObject()
-
- #
- # Defines
- #
- Pb.DescFilePath = Dsc
- Pb.PlatformName = Platform.Header[Arch].Name
- if Pb.PlatformName == '':
- EdkLogger.error("AutoGen", PARSER_ERROR, "The BaseName of platform %s is not defined for arch %s" % (Dsc, Arch))
- Pb.Guid = Platform.Header[Arch].Guid
- Pb.Version = Platform.Header[Arch].Version
- Pb.DscSpecification = Platform.Header[Arch].DscSpecification
- Pb.OutputDirectory = Platform.Header[Arch].OutputDirectory
- Pb.FlashDefinition = Platform.FlashDefinitionFile.FilePath
- Pb.BuildNumber = Platform.Header[Arch].BuildNumber
-
- #
- # SkuId
- #
- for Key in Platform.SkuInfos.SkuInfoList.keys():
- Pb.SkuIds[Key] = Platform.SkuInfos.SkuInfoList[Key]
-
- #
- # Module
- #
- for Item in Platform.Modules.ModuleList:
- if Arch in Item.SupArchList:
- Pb.Modules.append(Item.FilePath)
-
- #
- # BuildOptions
- #
- for Item in Platform.BuildOptions.BuildOptionList:
- if Arch in Item.SupArchList:
- Pb.BuildOptions[(Item.ToolChainFamily, Item.ToolChain)] = Item.Option
-
- #
- # LibraryClass
- #
- for Item in Platform.LibraryClasses.LibraryList:
- SupModuleList = self.FindSupModuleListOfLibraryClass(Item, Platform.LibraryClasses.LibraryList, Arch)
- if Arch in Item.SupArchList:
- for ModuleType in SupModuleList:
- Pb.LibraryClasses[(Item.Name, ModuleType)] = Item.FilePath
-
- #
- # Libraries
- #
- for Item in Platform.Libraries.LibraryList:
- for ItemArch in Item.SupArchList:
- Library = self.InfDatabase[Item.FilePath]
- if ItemArch not in Library.Module.Header:
- continue
- Pb.Libraries[Library.Module.Header[ItemArch].Name] = Item.FilePath
-
- #
- # Pcds
- #
- for Item in Platform.DynamicPcdBuildDefinitions:
- if Arch in Item.SupArchList:
- Name = Item.CName
- Guid = Item.TokenSpaceGuidCName
- Type = Item.ItemType
- DatumType = Item.DatumType
- Value = Item.DefaultValue
- Token = Item.Token
- MaxDatumSize = Item.MaxDatumSize
- SkuInfoList = Item.SkuInfoList
- Pb.Pcds[(Name, Guid)] = PcdClassObject(Name, Guid, Type, DatumType, Value, Token, MaxDatumSize, SkuInfoList, False)
-
- for (Name, Guid) in PcdsSet:
- Value = PcdsSet[Name, Guid]
- for PcdType in ["FixedAtBuild", "PatchableInModule", "FeatureFlag", "Dynamic", "DynamicEx"]:
- for Dec in self.Build[Arch].PackageDatabase:
- Pcds = self.Build[Arch].PackageDatabase[Dec].Pcds
- if (Name, Guid, PcdType) in Pcds:
- Pcd = Pcds[(Name, Guid, PcdType)]
- Type = PcdType
- DatumType = Pcd.DatumType
- Token = Pcd.TokenValue
- MaxDatumSize = Pcd.MaxDatumSize
- SkuInfoList = Pcd.SkuInfoList
- Pb.Pcds[(Name, Guid)] = PcdClassObject(Name, Guid, Type, DatumType, Value, Token, MaxDatumSize, SkuInfoList, False)
- break
- else:
- # nothing found
- continue
- # found in one package, find next PCD
- break
- else:
- EdkLogger.error("AutoGen", PARSER_ERROR, "PCD is not found in any package", ExtraData="%s.%s" % (Guid, Name))
- #
- # Add to database
- #
- self.Build[Arch].PlatformDatabase[Dsc] = Pb
- Pb = None
-
- ## Generate PackageDatabase
- #
- # Go through each arch to get all items in DecDatabase to PackageDatabase
- #
- def GenPackageDatabase(self):
- for Dec in self.DecDatabase.keys():
- Package = self.DecDatabase[Dec].Package
-
- for Arch in self.SupArchList:
- Pb = PackageBuildClassObject()
-
- #
- # Defines
- #
- Pb.DescFilePath = Dec
- Pb.PackageName = Package.Header[Arch].Name
- if Pb.PackageName == '':
- EdkLogger.error("AutoGen", PARSER_ERROR, "The BaseName of package %s is not defined for arch %s" % (Dec, Arch))
-
- Pb.Guid = Package.Header[Arch].Guid
- Pb.Version = Package.Header[Arch].Version
-
- #
- # Protocols
- #
- for Item in Package.ProtocolDeclarations:
- if Arch in Item.SupArchList:
- Pb.Protocols[Item.CName] = Item.Guid
-
- #
- # Ppis
- #
- for Item in Package.PpiDeclarations:
- if Arch in Item.SupArchList:
- Pb.Ppis[Item.CName] = Item.Guid
-
- #
- # Guids
- #
- for Item in Package.GuidDeclarations:
- if Arch in Item.SupArchList:
- Pb.Guids[Item.CName] = Item.Guid
-
- #
- # Includes
- #
- for Item in Package.Includes:
- if Arch in Item.SupArchList:
- Pb.Includes.append(Item.FilePath)
-
- #
- # LibraryClasses
- #
- for Item in Package.LibraryClassDeclarations:
- if Arch in Item.SupArchList:
- Pb.LibraryClasses[Item.LibraryClass] = Item.RecommendedInstance
-
- #
- # Pcds
- #
- for Item in Package.PcdDeclarations:
- if Arch in Item.SupArchList:
- Name = Item.CName
- Guid = Item.TokenSpaceGuidCName
- Type = Item.ItemType
- DatumType = Item.DatumType
- Value = Item.DefaultValue
- Token = Item.Token
- MaxDatumSize = Item.MaxDatumSize
- SkuInfoList = Item.SkuInfoList
- Pb.Pcds[(Name, Guid, Type)] = PcdClassObject(Name, Guid, Type, DatumType, Value, Token, MaxDatumSize, SkuInfoList, False)
-
- #
- # Add to database
- #
- self.Build[Arch].PackageDatabase[Dec] = Pb
- Pb = None
-
- ## Generate ModuleDatabase
- #
- # Go through each arch to get all items in InfDatabase to ModuleDatabase
- #
- def GenModuleDatabase(self, InfList = []):
- for Inf in self.InfDatabase.keys():
- Module = self.InfDatabase[Inf].Module
-
- for Arch in self.SupArchList:
- if not self.IsModuleDefinedInPlatform(Inf, Arch, InfList) or Arch not in Module.Header:
- continue
-
- ModuleHeader = Module.Header[Arch]
- Pb = ModuleBuildClassObject()
-
- #
- # Defines
- #
- Pb.DescFilePath = Inf
- Pb.BaseName = ModuleHeader.Name
- if Pb.BaseName == '':
- EdkLogger.error("AutoGen", PARSER_ERROR, "The BaseName of module %s is not defined for arch %s" % (Inf, Arch))
- Pb.Guid = ModuleHeader.Guid
- Pb.Version = ModuleHeader.Version
- Pb.ModuleType = ModuleHeader.ModuleType
- Pb.PcdIsDriver = ModuleHeader.PcdIsDriver
- Pb.BinaryModule = ModuleHeader.BinaryModule
- Pb.CustomMakefile = ModuleHeader.CustomMakefile
- Pb.Shadow = ModuleHeader.Shadow
-
- #
- # Specs os Defines
- #
- Pb.Specification = ModuleHeader.Specification
- Pb.Specification[TAB_INF_DEFINES_EDK_RELEASE_VERSION] = ModuleHeader.EdkReleaseVersion
- Pb.Specification[TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION] = ModuleHeader.UefiSpecificationVersion
- Pb.Specification[TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION] = ModuleHeader.UefiSpecificationVersion
- Pb.AutoGenVersion = int(ModuleHeader.InfVersion, 0)
-
- #
- # LibraryClass of Defines
- #
- for Item in ModuleHeader.LibraryClass:
- Pb.LibraryClass.append(LibraryClassObject(Item.LibraryClass, Item.SupModuleList, None))
-
- #
- # Module image and library of Defines
- #
- for Item in Module.ExternImages:
- if Item.ModuleEntryPoint != '' and Item.ModuleEntryPoint not in Pb.ModuleEntryPointList:
- Pb.ModuleEntryPointList.append(Item.ModuleEntryPoint)
- if Item.ModuleUnloadImage != '' and Item.ModuleUnloadImage not in Pb.ModuleUnloadImageList:
- Pb.ModuleUnloadImageList.append(Item.ModuleUnloadImage)
- for Item in Module.ExternLibraries:
- if Item.Constructor != '' and Item.Constructor not in Pb.ConstructorList:
- Pb.ConstructorList.append(Item.Constructor)
- if Item.Destructor != '' and Item.Destructor not in Pb.DestructorList:
- Pb.DestructorList.append(Item.Destructor)
-
- #
- # Binaries
- #
- for Item in Module.Binaries:
- if Arch in Item.SupArchList:
- FileName = Item.BinaryFile
- FileType = Item.FileType
- Target = Item.Target
- FeatureFlag = Item.FeatureFlag
- Pb.Binaries.append(ModuleBinaryFileClass(FileName, FileType, Target, FeatureFlag, Arch.split()))
-
- #
- # Sources
- #
- for Item in Module.Sources:
- if Arch in Item.SupArchList:
- SourceFile = Item.SourceFile
- TagName = Item.TagName
- ToolCode = Item.ToolCode
- ToolChainFamily = Item.ToolChainFamily
- FeatureFlag = Item.FeatureFlag
- Pb.Sources.append(ModuleSourceFileClass(SourceFile, TagName, ToolCode, ToolChainFamily, FeatureFlag))
-
- #
- # Protocols
- #
- for Item in Module.Protocols:
- if Arch in Item.SupArchList:
- Pb.Protocols.append(Item.CName)
-
- #
- # Ppis
- #
- for Item in Module.Ppis:
- if Arch in Item.SupArchList:
- Pb.Ppis.append(Item.CName)
-
- #
- # Guids
- #
- for Item in Module.Guids:
- if Arch in Item.SupArchList:
- Pb.Ppis.append(Item.CName)
-
- #
- # Includes
- #
- for Item in Module.Includes:
- if Arch in Item.SupArchList:
- Pb.Includes.append(Item.FilePath)
-
- #
- # Packages
- #
- for Item in Module.PackageDependencies:
- if Arch in Item.SupArchList:
- Pb.Packages.append(Item.FilePath)
-
- #
- # BuildOptions
- #
- for Item in Module.BuildOptions:
- if Arch in Item.SupArchList:
- if (Item.ToolChainFamily, Item.ToolChain) not in Pb.BuildOptions:
- Pb.BuildOptions[(Item.ToolChainFamily, Item.ToolChain)] = Item.Option
- else:
- OptionString = Pb.BuildOptions[(Item.ToolChainFamily, Item.ToolChain)]
- Pb.BuildOptions[(Item.ToolChainFamily, Item.ToolChain)] = OptionString + " " + Item.Option
- self.FindBuildOptions(Arch, Inf, Pb.BuildOptions)
-
- #
- # Depex
- #
- for Item in Module.Depex:
- if Arch in Item.SupArchList:
- Pb.Depex = Pb.Depex + Item.Depex + ' '
- Pb.Depex = Pb.Depex.strip()
-
- #
- # LibraryClasses
- #
- for Item in Module.LibraryClasses:
- if Arch in Item.SupArchList:
- Lib = Item.LibraryClass
- RecommendedInstance = Item.RecommendedInstance
- if Pb.LibraryClass != []:
- #
- # For Library
- #
- for Libs in Pb.LibraryClass:
- for Type in Libs.SupModList:
- Instance = self.FindLibraryClassInstanceOfLibrary(Lib, Arch, Type)
- if Instance == None:
- Instance = RecommendedInstance
- Pb.LibraryClasses[(Lib, Type)] = Instance
- else:
- #
- # For Module
- #
- Instance = self.FindLibraryClassInstanceOfModule(Lib, Arch, Pb.ModuleType, Inf)
- if Instance == None:
- Instance = RecommendedInstance
- Pb.LibraryClasses[(Lib, Pb.ModuleType)] = Instance
-
- #
- # Libraries
- #
- for Item in Module.Libraries:
- if Arch in Item.SupArchList:
- Pb.Libraries.append(Item.Library)
-
- #
- # Pcds
- #
- for Item in Module.PcdCodes:
- if Arch in Item.SupArchList:
- Name = Item.CName
- Guid = Item.TokenSpaceGuidCName
- Type = Item.ItemType
- Pb.Pcds[(Name, Guid)] = self.FindPcd(Arch, Inf, Name, Guid, Type)
-
- #
- # Add to database
- #
- self.Build[Arch].ModuleDatabase[Inf] = Pb
- Pb = None
-
- ## Update Libraries Of Platform Database
- #
- # @param InfList: A list for all inf files
- #
- def UpdateLibrariesOfPlatform(self, InfList = []):
- for Arch in self.SupArchList:
- PlatformDatabase = self.Build[Arch].PlatformDatabase
- for Dsc in PlatformDatabase:
- Platform = PlatformDatabase[Dsc]
- for Inf in Platform.Modules:
- if not self.IsModuleDefinedInPlatform(Inf, Arch, InfList):
- continue
- Module = self.Build[Arch].ModuleDatabase[Inf]
- if Module.LibraryClass == None or Module.LibraryClass == []:
- self.UpdateLibrariesOfModule(Platform, Module, Arch)
- for Key in Module.LibraryClasses:
- Lib = Module.LibraryClasses[Key]
- if Lib not in Platform.LibraryInstances:
- Platform.LibraryInstances.append(Lib)
-
-
- ## Update Libraries Of Module Database
- #
- # @param Module: The module need to be updated libraries
-    # @param Arch:       The supported arch of the module
- #
- def UpdateLibrariesOfModule(self, Platform, Module, Arch):
- ModuleDatabase = self.Build[Arch].ModuleDatabase
- ModuleType = Module.ModuleType
-
- # check Edk module
- if Module.AutoGenVersion < 0x00010005:
- EdkLogger.verbose("")
- EdkLogger.verbose("Library instances of module [%s] [%s]:" % (str(Module), Arch))
- LibraryConsumerList = [Module]
-
- # "CompilerStub" is a must for Edk modules
- Module.Libraries.append("CompilerStub")
- while len(LibraryConsumerList) > 0:
- M = LibraryConsumerList.pop()
- for LibraryName in M.Libraries:
- if LibraryName not in Platform.Libraries:
- EdkLogger.warn("AutoGen", "Library [%s] is not found" % LibraryName,
- ExtraData="\t%s [%s]" % (str(Module), Arch))
- continue
-
- LibraryFile = Platform.Libraries[LibraryName]
- if (LibraryName, ModuleType) not in Module.LibraryClasses:
- Module.LibraryClasses[LibraryName, ModuleType] = LibraryFile
- LibraryConsumerList.append(ModuleDatabase[LibraryFile])
- EdkLogger.verbose("\t" + LibraryName + " : " + LibraryFile)
- return
-
- # EdkII module
- LibraryConsumerList = [Module]
- Constructor = []
- ConsumedByList = sdict()
- LibraryInstance = sdict()
-
- EdkLogger.verbose("")
- EdkLogger.verbose("Library instances of module [%s] [%s]:" % (str(Module), Arch))
- while len(LibraryConsumerList) > 0:
- M = LibraryConsumerList.pop()
- for Key, LibraryPath in M.LibraryClasses.iteritems():
- # The "Key" is in format of (library_class_name, supported_module_type)
- if ModuleType != "USER_DEFINED" and ModuleType not in Key:
- EdkLogger.debug(EdkLogger.DEBUG_3, "%s for module type %s is not supported (%s)" % (Key + (LibraryPath,)))
- continue
-
- LibraryClassName = Key[0]
- if LibraryClassName not in LibraryInstance or LibraryInstance[LibraryClassName] == None:
- if LibraryPath == None or LibraryPath == "":
- LibraryInstance[LibraryClassName] = None
- continue
- LibraryModule = ModuleDatabase[LibraryPath]
- LibraryInstance[LibraryClassName] = LibraryModule
- LibraryConsumerList.append(LibraryModule)
- EdkLogger.verbose("\t" + LibraryClassName + " : " + str(LibraryModule))
- elif LibraryPath == None or LibraryPath == "":
- continue
- else:
- LibraryModule = LibraryInstance[LibraryClassName]
-
- if LibraryModule.ConstructorList != [] and LibraryModule not in Constructor:
- Constructor.append(LibraryModule)
-
- if LibraryModule not in ConsumedByList:
- ConsumedByList[LibraryModule] = []
- if M != Module:
- if M in ConsumedByList[LibraryModule]:
- continue
- ConsumedByList[LibraryModule].append(M)
- #
- # Initialize the sorted output list to the empty set
- #
- SortedLibraryList = []
- #
- # Q <- Set of all nodes with no incoming edges
- #
- LibraryList = [] #LibraryInstance.values()
- Q = []
- for LibraryClassName in LibraryInstance:
- M = LibraryInstance[LibraryClassName]
- if M == None:
- EdkLogger.error("AutoGen", AUTOGEN_ERROR,
- "Library instance for library class [%s] is not found" % LibraryClassName,
- ExtraData="\t%s [%s]" % (str(Module), Arch))
- LibraryList.append(M)
- #
- # check if there're duplicate library classes
- #
- for Lc in M.LibraryClass:
- if Lc.SupModList != None and ModuleType not in Lc.SupModList:
- EdkLogger.error("AutoGen", AUTOGEN_ERROR,
- "Module type [%s] is not supported by library instance [%s]" % (ModuleType, str(M)),
- ExtraData="\t%s" % str(Module))
-
- if Lc.LibraryClass in LibraryInstance and str(M) != str(LibraryInstance[Lc.LibraryClass]):
- EdkLogger.error("AutoGen", AUTOGEN_ERROR,
- "More than one library instance found for library class [%s] in module [%s]" % (Lc.LibraryClass, Module),
- ExtraData="\t%s\n\t%s" % (LibraryInstance[Lc.LibraryClass], str(M))
- )
- if ConsumedByList[M] == []:
- Q.insert(0, M)
- #
- # while Q is not empty do
- #
- while Q != []:
- #
- # remove node from Q
- #
- Node = Q.pop()
- #
- # output Node
- #
- SortedLibraryList.append(Node)
- #
- # for each node Item with an edge e from Node to Item do
- #
- for Item in LibraryList:
- if Node not in ConsumedByList[Item]:
- continue
- #
- # remove edge e from the graph
- #
- ConsumedByList[Item].remove(Node)
- #
- # If Item has no other incoming edges then
- #
- if ConsumedByList[Item] == []:
- #
- # insert Item into Q
- #
- Q.insert(0, Item)
-
- EdgeRemoved = True
- while Q == [] and EdgeRemoved:
- EdgeRemoved = False
- #
- # for each node Item with a Constructor
- #
- for Item in LibraryList:
- if Item in Constructor:
- #
- # for each Node without a constructor with an edge e from Item to Node
- #
- for Node in ConsumedByList[Item]:
- if Node not in Constructor:
- #
- # remove edge e from the graph
- #
- ConsumedByList[Item].remove(Node)
- EdgeRemoved = True
- if ConsumedByList[Item] == []:
- #
- # insert Item into Q
- #
- Q.insert(0, Item)
- break
- if Q != []:
- break
-
- #
- # if any remaining node Item in the graph has a constructor and an incoming edge, then the graph has a cycle
- #
- for Item in LibraryList:
- if ConsumedByList[Item] != [] and Item in Constructor and len(Constructor) > 1:
- ErrorMessage = 'Library [%s] with constructors has a cycle' % str(Item)
- EdkLogger.error("AutoGen", AUTOGEN_ERROR, ErrorMessage,
- "\tconsumed by " + "\n\tconsumed by ".join([str(L) for L in ConsumedByList[Item]]))
- if Item not in SortedLibraryList:
- SortedLibraryList.append(Item)
-
- #
- # Build the list of constructor and destructir names
- # The DAG Topo sort produces the destructor order, so the list of constructors must generated in the reverse order
- #
- SortedLibraryList.reverse()
- Module.LibraryClasses = sdict()
- for L in SortedLibraryList:
- for Lc in L.LibraryClass:
- Module.LibraryClasses[Lc.LibraryClass, ModuleType] = str(L)
- #
- # Merge PCDs from library instance
- #
- for Key in L.Pcds:
- if Key not in Module.Pcds:
- LibPcd = L.Pcds[Key]
- Module.Pcds[Key] = self.FindPcd(Arch, str(Module), LibPcd.TokenCName, LibPcd.TokenSpaceGuidCName, LibPcd.Type)
- #
- # Merge GUIDs from library instance
- #
- for CName in L.Guids:
- if CName not in Module.Guids:
- Module.Guids.append(CName)
- #
- # Merge Protocols from library instance
- #
- for CName in L.Protocols:
- if CName not in Module.Protocols:
- Module.Protocols.append(CName)
- #
- # Merge Ppis from library instance
- #
- for CName in L.Ppis:
- if CName not in Module.Ppis:
- Module.Ppis.append(CName)
-
- ## GenBuildDatabase
- #
- # Generate build database for all arches
- #
- # @param PcdsSet: Pcd list for override from Fdf parse result
- # @param InfList: Inf list for override from Fdf parse result
- #
- def GenBuildDatabase(self, PcdsSet = {}, InfList = []):
- #
- # Add additional inf file defined in Fdf file
- #
- for InfFile in InfList:
- self.AddToInfDatabase(NormPath(InfFile))
-
- #
- # Generate PlatformDatabase, PackageDatabase and ModuleDatabase
- #
- self.GenPackageDatabase()
- self.GenPlatformDatabase(PcdsSet)
- self.GenModuleDatabase(InfList)
-
- self.Db.Close()
-
- #
- # Update Libraries Of Platform
- #
- self.UpdateLibrariesOfPlatform(InfList)
-
- #
- # Output used Pcds not found in DSC file
- #
- self.ShowUnFoundPcds()
-
- ## ShowUnFoundPcds()
- #
- # If there is any pcd used but not defined in DSC
- # Print warning message on screen and output a list of pcds
- #
- def ShowUnFoundPcds(self):
- if self.UnFoundPcdInDsc != {}:
- WrnMessage = '**** WARNING ****\n'
- WrnMessage += 'The following Pcds were not defined in the DSC file: %s\n' % self.DscFileName
- WrnMessage += 'The default values were obtained from the DEC file that declares the PCD and the PCD default value\n'
- for (Guid, Name, Type, Arch) in self.UnFoundPcdInDsc:
- Dec = self.UnFoundPcdInDsc[(Guid, Name, Type, Arch)]
- Pcds = self.Build[Arch].PackageDatabase[Dec].Pcds
- if (Name, Guid, Type) in Pcds:
- Pcd = Pcds[(Name, Guid, Type)]
- PcdItemTypeUsed = Pcd.Type
- DefaultValue = Pcd.DefaultValue
- WrnMessage += '%s.%s: Defined in file %s, PcdItemType is Pcds%s, DefaultValue is %s\n' % (Guid, Name, Dec, PcdItemTypeUsed, DefaultValue)
- EdkLogger.verbose(WrnMessage)
-
- ## Create a full path with workspace dir
- #
- # Convert Filename with workspace dir to create a full path
- #
- # @param Filename: The filename need to be added workspace dir
- #
- # @retval string Full path
- #
- def WorkspaceFile(self, Filename):
- return WorkspaceFile(self.WorkspaceDir, Filename)
-
- ## Update LibraryClass of Module
- #
- # If a module of a platform has its own override libraryclass but the libraryclass not defined in the module
- # Add this libraryclass to the module
- #
- # @param InfFileName: InfFileName specificed in platform
- # @param LibraryClass: LibraryClass specificed in platform
- # @param Arch: Supportted Arch
- # @param InstanceFilePath: InstanceFilePath specificed in platform
- #
- def UpdateLibraryClassOfModule(self, InfFileName, LibraryClass, Arch, InstanceFilePath):
- #
- # Update the library instance itself to add this libraryclass name
- #
- LibraryModule = self.InfDatabase[InstanceFilePath].Module
- LibList = LibraryModule.Header[Arch].LibraryClass
- NotFound = True
- for Lib in LibList:
- #
- # Find this LibraryClass
- #
- if Lib.LibraryClass == LibraryClass:
- NotFound = False;
- break;
- if NotFound:
- NewLib = LibraryClassClass()
- NewLib.LibraryClass = LibraryClass
- NewLib.SupModuleList = DataType.SUP_MODULE_LIST # LibraryModule.Header[Arch].ModuleType.split()
- LibraryModule.Header[Arch].LibraryClass.append(NewLib)
-
- #
- # Add it to LibraryClasses Section for the module which is using the library
- #
- Module = self.InfDatabase[InfFileName].Module
- LibList = Module.LibraryClasses
- NotFound = True
- for Lib in LibList:
- #
- # Find this LibraryClass
- #
- if Lib.LibraryClass == LibraryClass:
- if Arch in Lib.SupArchList:
- return
- else:
- Lib.SupArchList.append(Arch)
- return
- if NotFound:
- Lib = LibraryClassClass()
- Lib.LibraryClass = LibraryClass
- Lib.SupArchList = [Arch]
- Module.LibraryClasses.append(Lib)
-
- ## Add Inf file to InfDatabase
- #
- # Create a Inf instance for input inf file and add it to InfDatabase
- #
- # @param InfFileName: The InfFileName need to be added to database
- #
- def AddToInfDatabase(self, InfFileName):
- File = self.WorkspaceFile(InfFileName)
- if os.path.exists(File) and os.path.isfile(File):
- if InfFileName not in self.InfDatabase:
- self.InfDatabase[InfFileName] = Inf(File, False, True, self.WorkspaceDir, self.Db, self.SupArchList)
- else:
- EdkLogger.error("AutoGen", FILE_NOT_FOUND, ExtraData=File)
-
- ## Add Dec file to DecDatabase
- #
- # Create a Dec instance for input dec file and add it to DecDatabase
- #
- # @param DecFileName: The DecFileName need to be added to database
- #
- def AddToDecDatabase(self, DecFileName):
- File = self.WorkspaceFile(DecFileName)
- if os.path.exists(File) and os.path.isfile(File):
- if DecFileName not in self.DecDatabase:
- self.DecDatabase[DecFileName] = Dec(File, False, True, self.WorkspaceDir, self.Db, self.SupArchList)
- else:
- EdkLogger.error("AutoGen", FILE_NOT_FOUND, ExtraData=File)
-
- ## Search LibraryClass Instance for Module
- #
- # Search PlatformBuildDatabase to find LibraryClass Instance for Module
- # Return the instance if found
- #
- # @param Lib: Input value for Library Class Name
- # @param Arch: Supportted Arch
- # @param ModuleType: Supportted ModuleType
- # @param ModuleName: Input value for Module Name
- #
- # @retval string Found LibraryClass Instance file path
- #
- def FindLibraryClassInstanceOfModule(self, Lib, Arch, ModuleType, ModuleName):
- #
- # First find if exist in <LibraryClass> of <Components> from dsc file
- #
- for Dsc in self.DscDatabase.keys():
- Platform = self.DscDatabase[Dsc].Platform
- for Module in Platform.Modules.ModuleList:
- if Arch in Module.SupArchList:
- if Module.FilePath == ModuleName:
- for LibraryClass in Module.LibraryClasses.LibraryList:
- if LibraryClass.Name == Lib:
- return LibraryClass.FilePath
- #
- #Second find if exist in <LibraryClass> of <LibraryClasses> from dsc file
- #
- return self.FindLibraryClassInstanceOfLibrary(Lib, Arch, ModuleType)
-
- ## Search LibraryClass Instance for Library
- #
- # Search PlatformBuildDatabase to find LibraryClass Instance for Library
- # Return the instance if found
- #
- # @param Lib: Input value for Library Class Name
- # @param Arch: Supportted Arch
- # @param Type: Supportted Library Usage Type
- #
- # @retval string Found LibraryClass Instance file path
- # @retval None Not Found
- #
- def FindLibraryClassInstanceOfLibrary(self, Lib, Arch, Type):
- for Dsc in self.DscDatabase.keys():
- Platform = self.DscDatabase[Dsc].Platform
- if (Lib, Type) in self.Build[Arch].PlatformDatabase[Dsc].LibraryClasses:
- return self.Build[Arch].PlatformDatabase[Dsc].LibraryClasses[(Lib, Type)]
- elif (Lib, '') in self.Build[Arch].PlatformDatabase[Dsc].LibraryClasses:
- return self.Build[Arch].PlatformDatabase[Dsc].LibraryClasses[(Lib, '')]
- return None
-
- ## Find BuildOptions
- #
- # Search DscDatabase to find component definition of ModuleName
- # Override BuildOption if it is defined in component
- #
- # @param Arch: Supportted Arch
- # @param ModuleName: The module which has buildoption definition in component of platform
- # @param BuildOptions: The set of all buildopitons
- #
- def FindBuildOptions(self, Arch, ModuleName, BuildOptions):
- for Dsc in self.DscDatabase.keys():
- #
- # First find if exist in <BuildOptions> of <Components> from dsc file
- # if find, use that override the one defined in inf file
- #
- Platform = self.DscDatabase[Dsc].Platform
- for Module in Platform.Modules.ModuleList:
- if Arch in Module.SupArchList:
- if Module.FilePath == ModuleName:
- for BuildOption in Module.ModuleSaBuildOption.BuildOptionList:
- #
- # Add to BuildOptions
- #
- BuildOptions[(BuildOption.ToolChainFamily, BuildOption.ToolChain)] = BuildOption.Option
-
- ## Find Pcd
- #
- # Search platform database, package database, module database and PcdsSet from Fdf
- # Return found Pcd
- #
- # @param Arch: Supportted Arch
- # @param ModuleName: The module which has pcd definition in component of platform
- # @param Name: Name of Pcd
- # @param Guid: Guid of Pcd
- # @param Type: Type of Pcd
- #
- # @retval PcdClassObject An instance for PcdClassObject with all members filled
- #
- def FindPcd(self, Arch, ModuleName, Name, Guid, Type):
- NewType = ''
- DatumType = ''
- Value = ''
- Token = ''
- MaxDatumSize = ''
- SkuInfoList = {}
- IsOverrided = False
- IsFoundInDsc = False
- IsFoundInDec = False
- FoundInDecFile = ''
-
- #
- # Second get information from platform database
- #
- OwnerPlatform = ''
- for Dsc in self.Build[Arch].PlatformDatabase.keys():
- Pcds = self.Build[Arch].PlatformDatabase[Dsc].Pcds
- if (Name, Guid) in Pcds:
- OwnerPlatform = Dsc
- Pcd = Pcds[(Name, Guid)]
- if Pcd.Type != '' and Pcd.Type != None:
- NewType = Pcd.Type
- if NewType in DataType.PCD_DYNAMIC_TYPE_LIST:
- NewType = DataType.TAB_PCDS_DYNAMIC
- elif NewType in DataType.PCD_DYNAMIC_EX_TYPE_LIST:
- NewType = DataType.TAB_PCDS_DYNAMIC_EX
- else:
- NewType = Type
-
- if Type != '' and Type != NewType:
- ErrorMsg = "PCD %s.%s is declared as [%s] in module\n\t%s\n\n"\
- " But it's used as [%s] in platform\n\t%s"\
- % (Guid, Name, Type, ModuleName, NewType, OwnerPlatform)
- EdkLogger.error("AutoGen", PARSER_ERROR, ErrorMsg)
-
-
- if Pcd.DatumType != '' and Pcd.DatumType != None:
- DatumType = Pcd.DatumType
- if Pcd.TokenValue != '' and Pcd.TokenValue != None:
- Token = Pcd.TokenValue
- if Pcd.DefaultValue != '' and Pcd.DefaultValue != None:
- Value = Pcd.DefaultValue
- if Pcd.MaxDatumSize != '' and Pcd.MaxDatumSize != None:
- MaxDatumSize = Pcd.MaxDatumSize
- SkuInfoList = Pcd.SkuInfoList
-
- IsOverrided = True
- IsFoundInDsc = True
- break
-
- #
- # Third get information from <Pcd> of <Compontents> from module database
- #
- for Dsc in self.DscDatabase.keys():
- for Module in self.DscDatabase[Dsc].Platform.Modules.ModuleList:
- if Arch in Module.SupArchList:
- if Module.FilePath == ModuleName:
- for Pcd in Module.PcdBuildDefinitions:
- if (Name, Guid) == (Pcd.CName, Pcd.TokenSpaceGuidCName):
- if Pcd.DefaultValue != '':
- Value = Pcd.DefaultValue
- if Pcd.MaxDatumSize != '':
- MaxDatumSize = Pcd.MaxDatumSize
-
- IsFoundInDsc = True
- IsOverrided = True
- break
-
- #
- # First get information from package database
- #
- Pcd = None
- if NewType == '':
- if Type != '':
- PcdTypeList = [Type]
- else:
- PcdTypeList = ["FixedAtBuild", "PatchableInModule", "FeatureFlag", "Dynamic", "DynamicEx"]
-
- for Dec in self.Build[Arch].PackageDatabase.keys():
- Pcds = self.Build[Arch].PackageDatabase[Dec].Pcds
- for PcdType in PcdTypeList:
- if (Name, Guid, PcdType) in Pcds:
- Pcd = Pcds[(Name, Guid, PcdType)]
- NewType = PcdType
- IsOverrided = True
- IsFoundInDec = True
- FoundInDecFile = Dec
- break
- else:
- continue
- break
- else:
- for Dec in self.Build[Arch].PackageDatabase.keys():
- Pcds = self.Build[Arch].PackageDatabase[Dec].Pcds
- if (Name, Guid, NewType) in Pcds:
- Pcd = Pcds[(Name, Guid, NewType)]
- IsOverrided = True
- IsFoundInDec = True
- FoundInDecFile = Dec
- break
-
- if not IsFoundInDec:
- ErrorMsg = "Pcd '%s.%s [%s]' defined in module '%s' is not found in any package for Arch '%s'" % (Guid, Name, NewType, ModuleName, Arch)
- EdkLogger.error("AutoGen", PARSER_ERROR, ErrorMsg)
-
- #
- # Not found in any platform and fdf
- #
- if not IsFoundInDsc:
- Value = Pcd.DefaultValue
- if NewType.startswith("Dynamic") and SkuInfoList == {}:
- SkuIds = self.Build[Arch].PlatformDatabase.values()[0].SkuIds
- SkuInfoList['DEFAULT'] = SkuInfoClass(SkuIdName='DEFAULT', SkuId=SkuIds['DEFAULT'], DefaultValue=Value)
- self.UnFoundPcdInDsc[(Guid, Name, NewType, Arch)] = FoundInDecFile
- #elif Type != '' and NewType.startswith("Dynamic"):
- # NewType = Pcd.Type
- DatumType = Pcd.DatumType
- if Token in [None, '']:
- Token = Pcd.TokenValue
- if DatumType == "VOID*" and MaxDatumSize in ['', None]:
- EdkLogger.verbose("No MaxDatumSize specified for PCD %s.%s in module [%s]" % (Guid, Name, ModuleName))
- if Value[0] == 'L':
- MaxDatumSize = str(len(Value) * 2)
- elif Value[0] == '{':
- MaxDatumSize = str(len(Value.split(',')))
- else:
- MaxDatumSize = str(len(Value))
-
- return PcdClassObject(Name, Guid, NewType, DatumType, Value, Token, MaxDatumSize, SkuInfoList, IsOverrided)
-
- ## Find Supportted Module List Of LibraryClass
- #
- # Search in InfDatabase, find the supmodulelist of the libraryclass
- #
- # @param LibraryClass: LibraryClass name for search
- # @param OverridedLibraryClassList: A list of all LibraryClass
- # @param Arch: Supportted Arch
- #
- # @retval list SupModuleList
- #
- def FindSupModuleListOfLibraryClass(self, LibraryClass, OverridedLibraryClassList, Arch):
- Name = LibraryClass.Name
- FilePath = LibraryClass.FilePath
- SupModuleList = copy.copy(LibraryClass.SupModuleList)
-
- #
- # If the SupModuleList means all, remove overrided module types of platform
- #
- if SupModuleList == DataType.SUP_MODULE_LIST:
- EdkLogger.debug(EdkLogger.DEBUG_3, "\tLibraryClass %s supports all module types" % Name)
- for Item in OverridedLibraryClassList:
- #
- # Find a library class (Item) with the same name
- #
- if Item.Name == Name:
- #
- # Do nothing if it is itself
- #
- if Item.SupModuleList == DataType.SUP_MODULE_LIST:
- continue
- #
- # If not itself, check arch first
- #
- if Arch in LibraryClass.SupArchList:
- #
- # If arch is supportted, remove all related module type
- #
- if Arch in Item.SupArchList:
- for ModuleType in Item.SupModuleList:
- EdkLogger.debug(EdkLogger.DEBUG_3, "\tLibraryClass %s has specific defined module types" % Name)
- if ModuleType in SupModuleList:
- SupModuleList.remove(ModuleType)
-
- return SupModuleList
-
- ## Find Module inf Platform
- #
- # Check if the module is defined in <Compentent> of <Platform>
- #
- # @param Inf: Inf file (Module) need to be searched
- # @param Arch: Supportted Arch
- # @param InfList: A list for all Inf file
- #
- # @retval True Mudule Found
- # @retval Flase Module Not Found
- #
- def IsModuleDefinedInPlatform(self, Inf, Arch, InfList):
- for Dsc in self.DscDatabase.values():
- for LibraryClass in Dsc.Platform.LibraryClasses.LibraryList:
- if Inf == LibraryClass.FilePath and Arch in LibraryClass.SupArchList:
- return True
- for Module in Dsc.Platform.Modules.ModuleList:
- if Inf == Module.FilePath and Arch in Module.SupArchList:
- return True
- for Item in Module.LibraryClasses.LibraryList:
- if Inf == Item.FilePath:
- return True
- for Library in Dsc.Platform.Libraries.LibraryList:
- if Inf == Library.FilePath and Arch in Library.SupArchList:
- return True
-
- return False
-
- ## Show all content of the workspacebuild
- #
- # Print each item of the workspacebuild with (Key = Value) pair
- #
- def ShowWorkspaceBuild(self):
- print self.DscDatabase
- print self.InfDatabase
- print self.DecDatabase
- print 'SupArchList', self.SupArchList
- print 'BuildTarget', self.BuildTarget
- print 'SkuId', self.SkuId
-
- for Arch in self.SupArchList:
- print Arch
- print 'Platform'
- for Platform in self.Build[Arch].PlatformDatabase.keys():
- P = self.Build[Arch].PlatformDatabase[Platform]
- print 'DescFilePath = ', P.DescFilePath
- print 'PlatformName = ', P.PlatformName
- print 'Guid = ', P.Guid
- print 'Version = ', P.Version
- print 'OutputDirectory = ', P.OutputDirectory
- print 'FlashDefinition = ', P.FlashDefinition
- print 'SkuIds = ', P.SkuIds
- print 'Modules = ', P.Modules
- print 'LibraryClasses = ', P.LibraryClasses
- print 'Pcds = ', P.Pcds
- for item in P.Pcds.keys():
- print P.Pcds[item]
- print 'BuildOptions = ', P.BuildOptions
- print ''
- # End of Platform
-
- print 'package'
- for Package in self.Build[Arch].PackageDatabase.keys():
- P = self.Build[Arch].PackageDatabase[Package]
- print 'DescFilePath = ', P.DescFilePath
- print 'PackageName = ', P.PackageName
- print 'Guid = ', P.Guid
- print 'Version = ', P.Version
- print 'Protocols = ', P.Protocols
- print 'Ppis = ', P.Ppis
- print 'Guids = ', P.Guids
- print 'Includes = ', P.Includes
- print 'LibraryClasses = ', P.LibraryClasses
- print 'Pcds = ', P.Pcds
- for item in P.Pcds.keys():
- print P.Pcds[item]
- print ''
- # End of Package
-
- print 'module'
- for Module in self.Build[Arch].ModuleDatabase.keys():
- P = self.Build[Arch].ModuleDatabase[Module]
- print 'DescFilePath = ', P.DescFilePath
- print 'BaseName = ', P.BaseName
- print 'ModuleType = ', P.ModuleType
- print 'Guid = ', P.Guid
- print 'Version = ', P.Version
- print 'CustomMakefile = ', P.CustomMakefile
- print 'Specification = ', P.Specification
- print 'Shadow = ', P.Shadow
- print 'PcdIsDriver = ', P.PcdIsDriver
- for Lib in P.LibraryClass:
- print 'LibraryClassDefinition = ', Lib.LibraryClass, 'SupModList = ', Lib.SupModList
- print 'ModuleEntryPointList = ', P.ModuleEntryPointList
- print 'ModuleUnloadImageList = ', P.ModuleUnloadImageList
- print 'ConstructorList = ', P.ConstructorList
- print 'DestructorList = ', P.DestructorList
-
- print 'Binaries = '
- for item in P.Binaries:
- print item.BinaryFile, item.FeatureFlag, item.SupArchList
- print 'Sources = '
- for item in P.Sources:
- print item.SourceFile
- print 'LibraryClasses = ', P.LibraryClasses
- print 'Protocols = ', P.Protocols
- print 'Ppis = ', P.Ppis
- print 'Guids = ', P.Guids
- print 'Includes = ', P.Includes
- print 'Packages = ', P.Packages
- print 'Pcds = ', P.Pcds
- for item in P.Pcds.keys():
- print P.Pcds[item]
- print 'BuildOptions = ', P.BuildOptions
- print 'Depex = ', P.Depex
- print ''
- # End of Module
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
-if __name__ == '__main__':
- print 'Start!', time.strftime('%H:%M:%S', time.localtime())
- EdkLogger.Initialize()
- EdkLogger.SetLevel(EdkLogger.QUIET)
-
- W = os.getenv('WORKSPACE')
- Ewb = WorkspaceBuild('Nt32Pkg/Nt32Pkg.dsc', W)
- Ewb.GenBuildDatabase({('PcdDevicePathSupportDevicePathFromText', 'gEfiMdeModulePkgTokenSpaceGuid') : 'KKKKKKKKKKKKKKKKKKKKK'}, ['Test.Inf'])
- print 'Done!', time.strftime('%H:%M:%S', time.localtime())
- Ewb.ShowWorkspaceBuild()
diff --git a/BaseTools/Source/Python/Common/EdkLogger.py b/BaseTools/Source/Python/Common/EdkLogger.py
deleted file mode 100644
index ac1c8edc4f..0000000000
--- a/BaseTools/Source/Python/Common/EdkLogger.py
+++ /dev/null
@@ -1,276 +0,0 @@
-## @file
-# This file implements the log mechanism for Python tools.
-#
-# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-## Import modules
-import Common.LongFilePathOs as os, sys, logging
-import traceback
-from BuildToolError import *
-
-## Log level constants
-DEBUG_0 = 1
-DEBUG_1 = 2
-DEBUG_2 = 3
-DEBUG_3 = 4
-DEBUG_4 = 5
-DEBUG_5 = 6
-DEBUG_6 = 7
-DEBUG_7 = 8
-DEBUG_8 = 9
-DEBUG_9 = 10
-VERBOSE = 15
-INFO = 20
-WARN = 30
-QUIET = 40
-ERROR = 50
-SILENT = 99
-
-IsRaiseError = True
-
-# Tool name
-_ToolName = os.path.basename(sys.argv[0])
-
-# For validation purpose
-_LogLevels = [DEBUG_0, DEBUG_1, DEBUG_2, DEBUG_3, DEBUG_4, DEBUG_5,
- DEBUG_6, DEBUG_7, DEBUG_8, DEBUG_9, VERBOSE, WARN, INFO,
- ERROR, QUIET, SILENT]
-
-# For DEBUG level (All DEBUG_0~9 are applicable)
-_DebugLogger = logging.getLogger("tool_debug")
-_DebugFormatter = logging.Formatter("[%(asctime)s.%(msecs)d]: %(message)s", datefmt="%H:%M:%S")
-
-# For VERBOSE, INFO, WARN level
-_InfoLogger = logging.getLogger("tool_info")
-_InfoFormatter = logging.Formatter("%(message)s")
-
-# For ERROR level
-_ErrorLogger = logging.getLogger("tool_error")
-_ErrorFormatter = logging.Formatter("%(message)s")
-
-# String templates for ERROR/WARN/DEBUG log message
-_ErrorMessageTemplate = '\n\n%(tool)s...\n%(file)s(%(line)s): error %(errorcode)04X: %(msg)s\n\t%(extra)s'
-_ErrorMessageTemplateWithoutFile = '\n\n%(tool)s...\n : error %(errorcode)04X: %(msg)s\n\t%(extra)s'
-_WarningMessageTemplate = '%(tool)s...\n%(file)s(%(line)s): warning: %(msg)s'
-_WarningMessageTemplateWithoutFile = '%(tool)s: : warning: %(msg)s'
-_DebugMessageTemplate = '%(file)s(%(line)s): debug: \n %(msg)s'
-
-#
-# Flag used to take WARN as ERROR.
-# By default, only ERROR message will break the tools execution.
-#
-_WarningAsError = False
-
-## Log debug message
-#
-# @param Level DEBUG level (DEBUG0~9)
-# @param Message Debug information
-# @param ExtraData More information associated with "Message"
-#
-def debug(Level, Message, ExtraData=None):
- if _DebugLogger.level > Level:
- return
- if Level > DEBUG_9:
- return
-
- # Find out the caller method information
- CallerStack = traceback.extract_stack()[-2]
- TemplateDict = {
- "file" : CallerStack[0],
- "line" : CallerStack[1],
- "msg" : Message,
- }
-
- if ExtraData != None:
- LogText = _DebugMessageTemplate % TemplateDict + "\n %s" % ExtraData
- else:
- LogText = _DebugMessageTemplate % TemplateDict
-
- _DebugLogger.log(Level, LogText)
-
-## Log verbose message
-#
-# @param Message Verbose information
-#
-def verbose(Message):
- return _InfoLogger.log(VERBOSE, Message)
-
-## Log warning message
-#
-# Warning messages are those which might be wrong but won't fail the tool.
-#
-# @param ToolName The name of the tool. If not given, the name of caller
-# method will be used.
-# @param Message Warning information
-# @param File The name of file which caused the warning.
-# @param Line The line number in the "File" which caused the warning.
-# @param ExtraData More information associated with "Message"
-#
-def warn(ToolName, Message, File=None, Line=None, ExtraData=None):
- if _InfoLogger.level > WARN:
- return
-
- # if no tool name given, use caller's source file name as tool name
- if ToolName == None or ToolName == "":
- ToolName = os.path.basename(traceback.extract_stack()[-2][0])
-
- if Line == None:
- Line = "..."
- else:
- Line = "%d" % Line
-
- TemplateDict = {
- "tool" : ToolName,
- "file" : File,
- "line" : Line,
- "msg" : Message,
- }
-
- if File != None:
- LogText = _WarningMessageTemplate % TemplateDict
- else:
- LogText = _WarningMessageTemplateWithoutFile % TemplateDict
-
- if ExtraData != None:
- LogText += "\n %s" % ExtraData
-
- _InfoLogger.log(WARN, LogText)
-
- # Raise an execption if indicated
- if _WarningAsError == True:
- raise FatalError(WARNING_AS_ERROR)
-
-## Log INFO message
-info = _InfoLogger.info
-
-## Log ERROR message
-#
-# Once an error messages is logged, the tool's execution will be broken by raising
-# an execption. If you don't want to break the execution later, you can give
-# "RaiseError" with "False" value.
-#
-# @param ToolName The name of the tool. If not given, the name of caller
-# method will be used.
-# @param ErrorCode The error code
-# @param Message Warning information
-# @param File The name of file which caused the error.
-# @param Line The line number in the "File" which caused the warning.
-# @param ExtraData More information associated with "Message"
-# @param RaiseError Raise an exception to break the tool's executuion if
-# it's True. This is the default behavior.
-#
-def error(ToolName, ErrorCode, Message=None, File=None, Line=None, ExtraData=None, RaiseError=IsRaiseError):
- if Line == None:
- Line = "..."
- else:
- Line = "%d" % Line
-
- if Message == None:
- if ErrorCode in gErrorMessage:
- Message = gErrorMessage[ErrorCode]
- else:
- Message = gErrorMessage[UNKNOWN_ERROR]
-
- if ExtraData == None:
- ExtraData = ""
-
- TemplateDict = {
- "tool" : _ToolName,
- "file" : File,
- "line" : Line,
- "errorcode" : ErrorCode,
- "msg" : Message,
- "extra" : ExtraData
- }
-
- if File != None:
- LogText = _ErrorMessageTemplate % TemplateDict
- else:
- LogText = _ErrorMessageTemplateWithoutFile % TemplateDict
-
- _ErrorLogger.log(ERROR, LogText)
- if RaiseError:
- raise FatalError(ErrorCode)
-
-# Log information which should be always put out
-quiet = _ErrorLogger.error
-
-## Initialize log system
-def Initialize():
- #
- # Since we use different format to log different levels of message into different
- # place (stdout or stderr), we have to use different "Logger" objects to do this.
- #
- # For DEBUG level (All DEBUG_0~9 are applicable)
- _DebugLogger.setLevel(INFO)
- _DebugChannel = logging.StreamHandler(sys.stdout)
- _DebugChannel.setFormatter(_DebugFormatter)
- _DebugLogger.addHandler(_DebugChannel)
-
- # For VERBOSE, INFO, WARN level
- _InfoLogger.setLevel(INFO)
- _InfoChannel = logging.StreamHandler(sys.stdout)
- _InfoChannel.setFormatter(_InfoFormatter)
- _InfoLogger.addHandler(_InfoChannel)
-
- # For ERROR level
- _ErrorLogger.setLevel(INFO)
- _ErrorCh = logging.StreamHandler(sys.stderr)
- _ErrorCh.setFormatter(_ErrorFormatter)
- _ErrorLogger.addHandler(_ErrorCh)
-
-## Set log level
-#
-# @param Level One of log level in _LogLevel
-def SetLevel(Level):
- if Level not in _LogLevels:
- info("Not supported log level (%d). Use default level instead." % Level)
- Level = INFO
- _DebugLogger.setLevel(Level)
- _InfoLogger.setLevel(Level)
- _ErrorLogger.setLevel(Level)
-
-def InitializeForUnitTest():
- Initialize()
- SetLevel(SILENT)
-
-## Get current log level
-def GetLevel():
- return _InfoLogger.getEffectiveLevel()
-
-## Raise up warning as error
-def SetWarningAsError():
- global _WarningAsError
- _WarningAsError = True
-
-## Specify a file to store the log message as well as put on console
-#
-# @param LogFile The file path used to store the log message
-#
-def SetLogFile(LogFile):
- if os.path.exists(LogFile):
- os.remove(LogFile)
-
- _Ch = logging.FileHandler(LogFile)
- _Ch.setFormatter(_DebugFormatter)
- _DebugLogger.addHandler(_Ch)
-
- _Ch= logging.FileHandler(LogFile)
- _Ch.setFormatter(_InfoFormatter)
- _InfoLogger.addHandler(_Ch)
-
- _Ch = logging.FileHandler(LogFile)
- _Ch.setFormatter(_ErrorFormatter)
- _ErrorLogger.addHandler(_Ch)
-
-if __name__ == '__main__':
- pass
-
diff --git a/BaseTools/Source/Python/Common/Expression.py b/BaseTools/Source/Python/Common/Expression.py
deleted file mode 100644
index 6d002f5676..0000000000
--- a/BaseTools/Source/Python/Common/Expression.py
+++ /dev/null
@@ -1,652 +0,0 @@
-## @file
-# This file is used to parse and evaluate expression in directive or PCD value.
-#
-# Copyright (c) 2011 - 2017, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-## Import Modules
-#
-from Common.GlobalData import *
-from CommonDataClass.Exceptions import BadExpression
-from CommonDataClass.Exceptions import WrnExpression
-from Misc import GuidStringToGuidStructureString
-
-ERR_STRING_EXPR = 'This operator cannot be used in string expression: [%s].'
-ERR_SNYTAX = 'Syntax error, the rest of expression cannot be evaluated: [%s].'
-ERR_MATCH = 'No matching right parenthesis.'
-ERR_STRING_TOKEN = 'Bad string token: [%s].'
-ERR_MACRO_TOKEN = 'Bad macro token: [%s].'
-ERR_EMPTY_TOKEN = 'Empty token is not allowed.'
-ERR_PCD_RESOLVE = 'PCD token cannot be resolved: [%s].'
-ERR_VALID_TOKEN = 'No more valid token found from rest of string: [%s].'
-ERR_EXPR_TYPE = 'Different types found in expression.'
-ERR_OPERATOR_UNSUPPORT = 'Unsupported operator: [%s]'
-ERR_REL_NOT_IN = 'Expect "IN" after "not" operator.'
-WRN_BOOL_EXPR = 'Operand of boolean type cannot be used in arithmetic expression.'
-WRN_EQCMP_STR_OTHERS = '== Comparison between Operand of string type and Boolean/Number Type always return False.'
-WRN_NECMP_STR_OTHERS = '!= Comparison between Operand of string type and Boolean/Number Type always return True.'
-ERR_RELCMP_STR_OTHERS = 'Operator taking Operand of string type and Boolean/Number Type is not allowed: [%s].'
-ERR_STRING_CMP = 'Unicode string and general string cannot be compared: [%s %s %s]'
-ERR_ARRAY_TOKEN = 'Bad C array or C format GUID token: [%s].'
-ERR_ARRAY_ELE = 'This must be HEX value for NList or Array: [%s].'
-ERR_EMPTY_EXPR = 'Empty expression is not allowed.'
-ERR_IN_OPERAND = 'Macro after IN operator can only be: $(FAMILY), $(ARCH), $(TOOL_CHAIN_TAG) and $(TARGET).'
-
-## SplitString
-# Split string to list according double quote
-# For example: abc"de\"f"ghi"jkl"mn will be: ['abc', '"de\"f"', 'ghi', '"jkl"', 'mn']
-#
-def SplitString(String):
- # There might be escaped quote: "abc\"def\\\"ghi"
- Str = String.replace('\\\\', '//').replace('\\\"', '\\\'')
- RetList = []
- InQuote = False
- Item = ''
- for i, ch in enumerate(Str):
- if ch == '"':
- InQuote = not InQuote
- if not InQuote:
- Item += String[i]
- RetList.append(Item)
- Item = ''
- continue
- if Item:
- RetList.append(Item)
- Item = ''
- Item += String[i]
- if InQuote:
- raise BadExpression(ERR_STRING_TOKEN % Item)
- if Item:
- RetList.append(Item)
- return RetList
-
-## ReplaceExprMacro
-#
-def ReplaceExprMacro(String, Macros, ExceptionList = None):
- StrList = SplitString(String)
- for i, String in enumerate(StrList):
- InQuote = False
- if String.startswith('"'):
- InQuote = True
- MacroStartPos = String.find('$(')
- if MacroStartPos < 0:
- for Pcd in gPlatformPcds.keys():
- if Pcd in String:
- if Pcd not in gConditionalPcds:
- gConditionalPcds.append(Pcd)
- continue
- RetStr = ''
- while MacroStartPos >= 0:
- RetStr = String[0:MacroStartPos]
- MacroEndPos = String.find(')', MacroStartPos)
- if MacroEndPos < 0:
- raise BadExpression(ERR_MACRO_TOKEN % String[MacroStartPos:])
- Macro = String[MacroStartPos+2:MacroEndPos]
- if Macro not in Macros:
- # From C reference manual:
- # If an undefined macro name appears in the constant-expression of
- # !if or !elif, it is replaced by the integer constant 0.
- RetStr += '0'
- elif not InQuote:
- Tklst = RetStr.split()
- if Tklst and Tklst[-1] in ['IN', 'in'] and ExceptionList and Macro not in ExceptionList:
- raise BadExpression(ERR_IN_OPERAND)
- # Make sure the macro in exception list is encapsulated by double quote
- # For example: DEFINE ARCH = IA32 X64
- # $(ARCH) is replaced with "IA32 X64"
- if ExceptionList and Macro in ExceptionList:
- RetStr += '"' + Macros[Macro] + '"'
- elif Macros[Macro].strip():
- RetStr += Macros[Macro]
- else:
- RetStr += '""'
- else:
- RetStr += Macros[Macro]
- RetStr += String[MacroEndPos+1:]
- String = RetStr
- MacroStartPos = String.find('$(')
- StrList[i] = RetStr
- return ''.join(StrList)
-
-SupportedInMacroList = ['TARGET', 'TOOL_CHAIN_TAG', 'ARCH', 'FAMILY']
-
-class ValueExpression(object):
- # Logical operator mapping
- LogicalOperators = {
- '&&' : 'and', '||' : 'or',
- '!' : 'not', 'AND': 'and',
- 'OR' : 'or' , 'NOT': 'not',
- 'XOR': '^' , 'xor': '^',
- 'EQ' : '==' , 'NE' : '!=',
- 'GT' : '>' , 'LT' : '<',
- 'GE' : '>=' , 'LE' : '<=',
- 'IN' : 'in'
- }
-
- NonLetterOpLst = ['+', '-', '*', '/', '%', '&', '|', '^', '~', '<<', '>>', '!', '=', '>', '<']
-
- PcdPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*\.[_a-zA-Z][0-9A-Za-z_]*$')
- HexPattern = re.compile(r'0[xX][0-9a-fA-F]+$')
- RegGuidPattern = re.compile(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')
-
- SymbolPattern = re.compile("("
- "\$\([A-Z][A-Z0-9_]*\)|\$\(\w+\.\w+\)|\w+\.\w+|"
- "&&|\|\||!(?!=)|"
- "(?<=\W)AND(?=\W)|(?<=\W)OR(?=\W)|(?<=\W)NOT(?=\W)|(?<=\W)XOR(?=\W)|"
- "(?<=\W)EQ(?=\W)|(?<=\W)NE(?=\W)|(?<=\W)GT(?=\W)|(?<=\W)LT(?=\W)|(?<=\W)GE(?=\W)|(?<=\W)LE(?=\W)"
- ")")
-
- @staticmethod
- def Eval(Operator, Oprand1, Oprand2 = None):
- WrnExp = None
-
- if Operator not in ["==", "!=", ">=", "<=", ">", "<", "in", "not in"] and \
- (type(Oprand1) == type('') or type(Oprand2) == type('')):
- raise BadExpression(ERR_STRING_EXPR % Operator)
-
- TypeDict = {
- type(0) : 0,
- type(0L) : 0,
- type('') : 1,
- type(True) : 2
- }
-
- EvalStr = ''
- if Operator in ["!", "NOT", "not"]:
- if type(Oprand1) == type(''):
- raise BadExpression(ERR_STRING_EXPR % Operator)
- EvalStr = 'not Oprand1'
- elif Operator in ["~"]:
- if type(Oprand1) == type(''):
- raise BadExpression(ERR_STRING_EXPR % Operator)
- EvalStr = '~ Oprand1'
- else:
- if Operator in ["+", "-"] and (type(True) in [type(Oprand1), type(Oprand2)]):
- # Boolean in '+'/'-' will be evaluated but raise warning
- WrnExp = WrnExpression(WRN_BOOL_EXPR)
- elif type('') in [type(Oprand1), type(Oprand2)] and type(Oprand1)!= type(Oprand2):
- # == between string and number/boolean will always return False, != return True
- if Operator == "==":
- WrnExp = WrnExpression(WRN_EQCMP_STR_OTHERS)
- WrnExp.result = False
- raise WrnExp
- elif Operator == "!=":
- WrnExp = WrnExpression(WRN_NECMP_STR_OTHERS)
- WrnExp.result = True
- raise WrnExp
- else:
- raise BadExpression(ERR_RELCMP_STR_OTHERS % Operator)
- elif TypeDict[type(Oprand1)] != TypeDict[type(Oprand2)]:
- if Operator in ["==", "!=", ">=", "<=", ">", "<"] and set((TypeDict[type(Oprand1)], TypeDict[type(Oprand2)])) == set((TypeDict[type(True)], TypeDict[type(0)])):
- # comparison between number and boolean is allowed
- pass
- elif Operator in ['&', '|', '^', "and", "or"] and set((TypeDict[type(Oprand1)], TypeDict[type(Oprand2)])) == set((TypeDict[type(True)], TypeDict[type(0)])):
- # bitwise and logical operation between number and boolean is allowed
- pass
- else:
- raise BadExpression(ERR_EXPR_TYPE)
- if type(Oprand1) == type('') and type(Oprand2) == type(''):
- if (Oprand1.startswith('L"') and not Oprand2.startswith('L"')) or \
- (not Oprand1.startswith('L"') and Oprand2.startswith('L"')):
- raise BadExpression(ERR_STRING_CMP % (Oprand1, Operator, Oprand2))
- if 'in' in Operator and type(Oprand2) == type(''):
- Oprand2 = Oprand2.split()
- EvalStr = 'Oprand1 ' + Operator + ' Oprand2'
-
- # Local symbols used by built in eval function
- Dict = {
- 'Oprand1' : Oprand1,
- 'Oprand2' : Oprand2
- }
- try:
- Val = eval(EvalStr, {}, Dict)
- except Exception, Excpt:
- raise BadExpression(str(Excpt))
-
- if Operator in ['and', 'or']:
- if Val:
- Val = True
- else:
- Val = False
-
- if WrnExp:
- WrnExp.result = Val
- raise WrnExp
- return Val
-
- def __init__(self, Expression, SymbolTable={}):
- self._NoProcess = False
- if type(Expression) != type(''):
- self._Expr = Expression
- self._NoProcess = True
- return
-
- self._Expr = ReplaceExprMacro(Expression.strip(),
- SymbolTable,
- SupportedInMacroList)
-
- if not self._Expr.strip():
- raise BadExpression(ERR_EMPTY_EXPR)
-
- #
- # The symbol table including PCD and macro mapping
- #
- self._Symb = SymbolTable
- self._Symb.update(self.LogicalOperators)
- self._Idx = 0
- self._Len = len(self._Expr)
- self._Token = ''
- self._WarnExcept = None
-
- # Literal token without any conversion
- self._LiteralToken = ''
-
- # Public entry for this class
- # @param RealValue: False: only evaluate if the expression is true or false, used for conditional expression
- # True : return the evaluated str(value), used for PCD value
- #
- # @return: True or False if RealValue is False
- # Evaluated value of string format if RealValue is True
- #
- def __call__(self, RealValue=False, Depth=0):
- if self._NoProcess:
- return self._Expr
-
- self._Depth = Depth
-
- self._Expr = self._Expr.strip()
- if RealValue and Depth == 0:
- self._Token = self._Expr
- if self.__IsNumberToken():
- return self._Expr
-
- try:
- Token = self._GetToken()
- if type(Token) == type('') and Token.startswith('{') and Token.endswith('}') and self._Idx >= self._Len:
- return self._Expr
- except BadExpression:
- pass
-
- self._Idx = 0
- self._Token = ''
-
- Val = self._OrExpr()
- RealVal = Val
- if type(Val) == type(''):
- if Val == 'L""':
- Val = False
- elif not Val:
- Val = False
- RealVal = '""'
- elif not Val.startswith('L"') and not Val.startswith('{'):
- Val = True
- RealVal = '"' + RealVal + '"'
-
- # The expression has been parsed, but the end of expression is not reached
- # It means the rest does not comply EBNF of <Expression>
- if self._Idx != self._Len:
- raise BadExpression(ERR_SNYTAX % self._Expr[self._Idx:])
-
- if RealValue:
- RetVal = str(RealVal)
- elif Val:
- RetVal = True
- else:
- RetVal = False
-
- if self._WarnExcept:
- self._WarnExcept.result = RetVal
- raise self._WarnExcept
- else:
- return RetVal
-
- # Template function to parse binary operators which have same precedence
- # Expr [Operator Expr]*
- def _ExprFuncTemplate(self, EvalFunc, OpLst):
- Val = EvalFunc()
- while self._IsOperator(OpLst):
- Op = self._Token
- try:
- Val = self.Eval(Op, Val, EvalFunc())
- except WrnExpression, Warn:
- self._WarnExcept = Warn
- Val = Warn.result
- return Val
-
- # A [|| B]*
- def _OrExpr(self):
- return self._ExprFuncTemplate(self._AndExpr, ["OR", "or", "||"])
-
- # A [&& B]*
- def _AndExpr(self):
- return self._ExprFuncTemplate(self._BitOr, ["AND", "and", "&&"])
-
- # A [ | B]*
- def _BitOr(self):
- return self._ExprFuncTemplate(self._BitXor, ["|"])
-
- # A [ ^ B]*
- def _BitXor(self):
- return self._ExprFuncTemplate(self._BitAnd, ["XOR", "xor", "^"])
-
- # A [ & B]*
- def _BitAnd(self):
- return self._ExprFuncTemplate(self._EqExpr, ["&"])
-
- # A [ == B]*
- def _EqExpr(self):
- Val = self._RelExpr()
- while self._IsOperator(["==", "!=", "EQ", "NE", "IN", "in", "!", "NOT", "not"]):
- Op = self._Token
- if Op in ["!", "NOT", "not"]:
- if not self._IsOperator(["IN", "in"]):
- raise BadExpression(ERR_REL_NOT_IN)
- Op += ' ' + self._Token
- try:
- Val = self.Eval(Op, Val, self._RelExpr())
- except WrnExpression, Warn:
- self._WarnExcept = Warn
- Val = Warn.result
- return Val
-
- # A [ > B]*
- def _RelExpr(self):
- return self._ExprFuncTemplate(self._ShiftExpr, ["<=", ">=", "<", ">", "LE", "GE", "LT", "GT"])
-
- def _ShiftExpr(self):
- return self._ExprFuncTemplate(self._AddExpr, ["<<", ">>"])
-
- # A [ + B]*
- def _AddExpr(self):
- return self._ExprFuncTemplate(self._MulExpr, ["+", "-"])
-
- # A [ * B]*
- def _MulExpr(self):
- return self._ExprFuncTemplate(self._UnaryExpr, ["*", "/", "%"])
-
- # [!]*A
- def _UnaryExpr(self):
- if self._IsOperator(["!", "NOT", "not"]):
- Val = self._UnaryExpr()
- try:
- return self.Eval('not', Val)
- except WrnExpression, Warn:
- self._WarnExcept = Warn
- return Warn.result
- if self._IsOperator(["~"]):
- Val = self._UnaryExpr()
- try:
- return self.Eval('~', Val)
- except WrnExpression, Warn:
- self._WarnExcept = Warn
- return Warn.result
- return self._IdenExpr()
-
- # Parse identifier or encapsulated expression
- def _IdenExpr(self):
- Tk = self._GetToken()
- if Tk == '(':
- Val = self._OrExpr()
- try:
- # _GetToken may also raise BadExpression
- if self._GetToken() != ')':
- raise BadExpression(ERR_MATCH)
- except BadExpression:
- raise BadExpression(ERR_MATCH)
- return Val
- return Tk
-
- # Skip whitespace or tab
- def __SkipWS(self):
- for Char in self._Expr[self._Idx:]:
- if Char not in ' \t':
- break
- self._Idx += 1
-
- # Try to convert string to number
- def __IsNumberToken(self):
- Radix = 10
- if self._Token.lower()[0:2] == '0x' and len(self._Token) > 2:
- Radix = 16
- try:
- self._Token = int(self._Token, Radix)
- return True
- except ValueError:
- return False
- except TypeError:
- return False
-
- # Parse array: {...}
- def __GetArray(self):
- Token = '{'
- self._Idx += 1
- self.__GetNList(True)
- Token += self._LiteralToken
- if self._Idx >= self._Len or self._Expr[self._Idx] != '}':
- raise BadExpression(ERR_ARRAY_TOKEN % Token)
- Token += '}'
-
- # All whitespace and tabs in array are already stripped.
- IsArray = IsGuid = False
- if len(Token.split(',')) == 11 and len(Token.split(',{')) == 2 \
- and len(Token.split('},')) == 1:
- HexLen = [11,6,6,5,4,4,4,4,4,4,6]
- HexList= Token.split(',')
- if HexList[3].startswith('{') and \
- not [Index for Index, Hex in enumerate(HexList) if len(Hex) > HexLen[Index]]:
- IsGuid = True
- if Token.lstrip('{').rstrip('}').find('{') == -1:
- if not [Hex for Hex in Token.lstrip('{').rstrip('}').split(',') if len(Hex) > 4]:
- IsArray = True
- if not IsArray and not IsGuid:
- raise BadExpression(ERR_ARRAY_TOKEN % Token)
- self._Idx += 1
- self._Token = self._LiteralToken = Token
- return self._Token
-
- # Parse string, the format must be: "..."
- def __GetString(self):
- Idx = self._Idx
-
- # Skip left quote
- self._Idx += 1
-
- # Replace escape \\\", \"
- Expr = self._Expr[self._Idx:].replace('\\\\', '//').replace('\\\"', '\\\'')
- for Ch in Expr:
- self._Idx += 1
- if Ch == '"':
- break
- self._Token = self._LiteralToken = self._Expr[Idx:self._Idx]
- if not self._Token.endswith('"'):
- raise BadExpression(ERR_STRING_TOKEN % self._Token)
- self._Token = self._Token[1:-1]
- return self._Token
-
- # Get token that is comprised by alphanumeric, underscore or dot(used by PCD)
- # @param IsAlphaOp: Indicate if parsing general token or script operator(EQ, NE...)
- def __GetIdToken(self, IsAlphaOp = False):
- IdToken = ''
- for Ch in self._Expr[self._Idx:]:
- if not self.__IsIdChar(Ch):
- break
- self._Idx += 1
- IdToken += Ch
-
- self._Token = self._LiteralToken = IdToken
- if not IsAlphaOp:
- self.__ResolveToken()
- return self._Token
-
- # Try to resolve token
- def __ResolveToken(self):
- if not self._Token:
- raise BadExpression(ERR_EMPTY_TOKEN)
-
- # PCD token
- if self.PcdPattern.match(self._Token):
- if self._Token not in self._Symb:
- Ex = BadExpression(ERR_PCD_RESOLVE % self._Token)
- Ex.Pcd = self._Token
- raise Ex
- self._Token = ValueExpression(self._Symb[self._Token], self._Symb)(True, self._Depth+1)
- if type(self._Token) != type(''):
- self._LiteralToken = hex(self._Token)
- return
-
- if self._Token.startswith('"'):
- self._Token = self._Token[1:-1]
- elif self._Token in ["FALSE", "false", "False"]:
- self._Token = False
- elif self._Token in ["TRUE", "true", "True"]:
- self._Token = True
- else:
- self.__IsNumberToken()
-
- def __GetNList(self, InArray=False):
- self._GetSingleToken()
- if not self.__IsHexLiteral():
- if InArray:
- raise BadExpression(ERR_ARRAY_ELE % self._Token)
- return self._Token
-
- self.__SkipWS()
- Expr = self._Expr[self._Idx:]
- if not Expr.startswith(','):
- return self._Token
-
- NList = self._LiteralToken
- while Expr.startswith(','):
- NList += ','
- self._Idx += 1
- self.__SkipWS()
- self._GetSingleToken()
- if not self.__IsHexLiteral():
- raise BadExpression(ERR_ARRAY_ELE % self._Token)
- NList += self._LiteralToken
- self.__SkipWS()
- Expr = self._Expr[self._Idx:]
- self._Token = self._LiteralToken = NList
- return self._Token
-
- def __IsHexLiteral(self):
- if self._LiteralToken.startswith('{') and \
- self._LiteralToken.endswith('}'):
- return True
-
- if self.HexPattern.match(self._LiteralToken):
- Token = self._LiteralToken[2:]
- Token = Token.lstrip('0')
- if not Token:
- self._LiteralToken = '0x0'
- else:
- self._LiteralToken = '0x' + Token.lower()
- return True
- return False
-
- def _GetToken(self):
- return self.__GetNList()
-
- @staticmethod
- def __IsIdChar(Ch):
- return Ch in '._:' or Ch.isalnum()
-
- # Parse operand
- def _GetSingleToken(self):
- self.__SkipWS()
- Expr = self._Expr[self._Idx:]
- if Expr.startswith('L"'):
- # Skip L
- self._Idx += 1
- UStr = self.__GetString()
- self._Token = 'L"' + UStr + '"'
- return self._Token
-
- self._Token = ''
- if Expr:
- Ch = Expr[0]
- Match = self.RegGuidPattern.match(Expr)
- if Match and not Expr[Match.end():Match.end()+1].isalnum() \
- and Expr[Match.end():Match.end()+1] != '_':
- self._Idx += Match.end()
- self._Token = ValueExpression(GuidStringToGuidStructureString(Expr[0:Match.end()]))(True, self._Depth+1)
- return self._Token
- elif self.__IsIdChar(Ch):
- return self.__GetIdToken()
- elif Ch == '"':
- return self.__GetString()
- elif Ch == '{':
- return self.__GetArray()
- elif Ch == '(' or Ch == ')':
- self._Idx += 1
- self._Token = Ch
- return self._Token
-
- raise BadExpression(ERR_VALID_TOKEN % Expr)
-
- # Parse operator
- def _GetOperator(self):
- self.__SkipWS()
- LegalOpLst = ['&&', '||', '!=', '==', '>=', '<='] + self.NonLetterOpLst
-
- self._Token = ''
- Expr = self._Expr[self._Idx:]
-
- # Reach end of expression
- if not Expr:
- return ''
-
- # Script operator: LT, GT, LE, GE, EQ, NE, and, or, xor, not
- if Expr[0].isalpha():
- return self.__GetIdToken(True)
-
- # Start to get regular operator: +, -, <, > ...
- if Expr[0] not in self.NonLetterOpLst:
- return ''
-
- OpToken = ''
- for Ch in Expr:
- if Ch in self.NonLetterOpLst:
- if '!' == Ch and OpToken:
- break
- self._Idx += 1
- OpToken += Ch
- else:
- break
-
- if OpToken not in LegalOpLst:
- raise BadExpression(ERR_OPERATOR_UNSUPPORT % OpToken)
- self._Token = OpToken
- return OpToken
-
- # Check if current token matches the operators given from OpList
- def _IsOperator(self, OpList):
- Idx = self._Idx
- self._GetOperator()
- if self._Token in OpList:
- if self._Token in self.LogicalOperators:
- self._Token = self.LogicalOperators[self._Token]
- return True
- self._Idx = Idx
- return False
-
-if __name__ == '__main__':
- pass
- while True:
- input = raw_input('Input expr: ')
- if input in 'qQ':
- break
- try:
- print ValueExpression(input)(True)
- print ValueExpression(input)(False)
- except WrnExpression, Ex:
- print Ex.result
- print str(Ex)
- except Exception, Ex:
- print str(Ex)
diff --git a/BaseTools/Source/Python/Common/FdfClassObject.py b/BaseTools/Source/Python/Common/FdfClassObject.py
deleted file mode 100644
index 3e7d44954c..0000000000
--- a/BaseTools/Source/Python/Common/FdfClassObject.py
+++ /dev/null
@@ -1,116 +0,0 @@
-## @file
-# This file is used to define each component of FDF file
-#
-# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-from FdfParserLite import FdfParser
-from Table.TableFdf import TableFdf
-from CommonDataClass.DataClass import MODEL_FILE_FDF, MODEL_PCD, MODEL_META_DATA_COMPONENT
-from String import NormPath
-
-## FdfObject
-#
-# This class defined basic Fdf object which is used by inheriting
-#
-# @param object: Inherited from object class
-#
-class FdfObject(object):
- def __init__(self):
- object.__init__()
-
-## Fdf
-#
-# This class defined the structure used in Fdf object
-#
-# @param FdfObject: Inherited from FdfObject class
-# @param Filename: Input value for Ffilename of Fdf file, default is None
-# @param WorkspaceDir: Input value for current workspace directory, default is None
-#
-class Fdf(FdfObject):
- def __init__(self, Filename = None, IsToDatabase = False, WorkspaceDir = None, Database = None):
- self.WorkspaceDir = WorkspaceDir
- self.IsToDatabase = IsToDatabase
-
- self.Cur = Database.Cur
- self.TblFile = Database.TblFile
- self.TblFdf = Database.TblFdf
- self.FileID = -1
- self.FileList = {}
-
- #
- # Load Fdf file if filename is not None
- #
- if Filename != None:
- self.LoadFdfFile(Filename)
-
- #
- # Insert a FDF file record into database
- #
- def InsertFile(self, Filename):
- FileID = -1
- Filename = NormPath(Filename)
- if Filename not in self.FileList:
- FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_FDF)
- self.FileList[Filename] = FileID
-
- return self.FileList[Filename]
-
-
- ## Load Fdf file
- #
- # Load the file if it exists
- #
- # @param Filename: Input value for filename of Fdf file
- #
- def LoadFdfFile(self, Filename):
- FileList = []
- #
- # Parse Fdf file
- #
- Filename = NormPath(Filename)
- Fdf = FdfParser(Filename)
- Fdf.ParseFile()
-
- #
- # Insert inf file and pcd information
- #
- if self.IsToDatabase:
- (Model, Value1, Value2, Value3, Arch, BelongsToItem, BelongsToFile, StartLine, StartColumn, EndLine, EndColumn, Enabled) = \
- (0, '', '', '', 'COMMON', -1, -1, -1, -1, -1, -1, 0)
- for Index in range(0, len(Fdf.Profile.PcdDict)):
- pass
- for Key in Fdf.Profile.PcdDict.keys():
- Model = MODEL_PCD
- Value1 = ''
- Value2 = ".".join((Key[1], Key[0]))
- FileName = Fdf.Profile.PcdFileLineDict[Key][0]
- StartLine = Fdf.Profile.PcdFileLineDict[Key][1]
- BelongsToFile = self.InsertFile(FileName)
- self.TblFdf.Insert(Model, Value1, Value2, Value3, Arch, BelongsToItem, BelongsToFile, StartLine, StartColumn, EndLine, EndColumn, Enabled)
- for Index in range(0, len(Fdf.Profile.InfList)):
- Model = MODEL_META_DATA_COMPONENT
- Value1 = Fdf.Profile.InfList[Index]
- Value2 = ''
- FileName = Fdf.Profile.InfFileLineList[Index][0]
- StartLine = Fdf.Profile.InfFileLineList[Index][1]
- BelongsToFile = self.InsertFile(FileName)
- self.TblFdf.Insert(Model, Value1, Value2, Value3, Arch, BelongsToItem, BelongsToFile, StartLine, StartColumn, EndLine, EndColumn, Enabled)
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
-if __name__ == '__main__':
- pass
diff --git a/BaseTools/Source/Python/Common/FdfParserLite.py b/BaseTools/Source/Python/Common/FdfParserLite.py
deleted file mode 100644
index a8cce26120..0000000000
--- a/BaseTools/Source/Python/Common/FdfParserLite.py
+++ /dev/null
@@ -1,3668 +0,0 @@
-## @file
-# parse FDF file
-#
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
-#
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import re
-import Common.LongFilePathOs as os
-
-import CommonDataClass.FdfClass
-from Common.LongFilePathSupport import OpenLongFilePath as open
-from Common.MultipleWorkspace import MultipleWorkspace as mws
-
-##define T_CHAR_SPACE ' '
-##define T_CHAR_NULL '\0'
-##define T_CHAR_CR '\r'
-##define T_CHAR_TAB '\t'
-##define T_CHAR_LF '\n'
-##define T_CHAR_SLASH '/'
-##define T_CHAR_BACKSLASH '\\'
-##define T_CHAR_DOUBLE_QUOTE '\"'
-##define T_CHAR_SINGLE_QUOTE '\''
-##define T_CHAR_STAR '*'
-##define T_CHAR_HASH '#'
-
-(T_CHAR_SPACE, T_CHAR_NULL, T_CHAR_CR, T_CHAR_TAB, T_CHAR_LF, T_CHAR_SLASH, \
-T_CHAR_BACKSLASH, T_CHAR_DOUBLE_QUOTE, T_CHAR_SINGLE_QUOTE, T_CHAR_STAR, T_CHAR_HASH) = \
-(' ', '\0', '\r', '\t', '\n', '/', '\\', '\"', '\'', '*', '#')
-
-SEPERATOR_TUPLE = ('=', '|', ',', '{', '}')
-
-IncludeFileList = []
-# Macro passed from command line, which has greatest priority and can NOT be overridden by those in FDF
-InputMacroDict = {}
-# All Macro values when parsing file, not replace existing Macro
-AllMacroList = []
-
-def GetRealFileLine (File, Line):
-
- InsertedLines = 0
- for Profile in IncludeFileList:
- if Line >= Profile.InsertStartLineNumber and Line < Profile.InsertStartLineNumber + Profile.InsertAdjust + len(Profile.FileLinesList):
- return (Profile.FileName, Line - Profile.InsertStartLineNumber + 1)
- if Line >= Profile.InsertStartLineNumber + Profile.InsertAdjust + len(Profile.FileLinesList):
- InsertedLines += Profile.InsertAdjust + len(Profile.FileLinesList)
-
- return (File, Line - InsertedLines)
-
-## The exception class that used to report error messages when parsing FDF
-#
-# Currently the "ToolName" is set to be "FDF Parser".
-#
-class Warning (Exception):
- ## The constructor
- #
- # @param self The object pointer
- # @param Str The message to record
- # @param File The FDF name
- # @param Line The Line number that error occurs
- #
- def __init__(self, Str, File=None, Line=None):
-
- FileLineTuple = GetRealFileLine(File, Line)
- self.FileName = FileLineTuple[0]
- self.LineNumber = FileLineTuple[1]
- self.message = Str + str(self.LineNumber)
- self.ToolName = 'FDF Parser'
-
-## The MACRO class that used to record macro value data when parsing include file
-#
-#
-class MacroProfile :
- ## The constructor
- #
- # @param self The object pointer
- # @param FileName The file that to be parsed
- #
- def __init__(self, FileName, Line):
- self.FileName = FileName
- self.DefinedAtLine = Line
- self.MacroName = None
- self.MacroValue = None
-
-## The Include file content class that used to record file data when parsing include file
-#
-# May raise Exception when opening file.
-#
-class IncludeFileProfile :
- ## The constructor
- #
- # @param self The object pointer
- # @param FileName The file that to be parsed
- #
- def __init__(self, FileName):
- self.FileName = FileName
- self.FileLinesList = []
- try:
- fsock = open(FileName, "rb", 0)
- try:
- self.FileLinesList = fsock.readlines()
- finally:
- fsock.close()
-
- except IOError:
- raise Warning("Error when opening file %s" % FileName)
-
- self.InsertStartLineNumber = None
- self.InsertAdjust = 0
-
-## The FDF content class that used to record file data when parsing FDF
-#
-# May raise Exception when opening file.
-#
-class FileProfile :
- ## The constructor
- #
- # @param self The object pointer
- # @param FileName The file that to be parsed
- #
- def __init__(self, FileName):
- self.FileLinesList = []
- try:
- fsock = open(FileName, "rb", 0)
- try:
- self.FileLinesList = fsock.readlines()
- finally:
- fsock.close()
-
- except IOError:
- raise Warning("Error when opening file %s" % FileName)
-
- self.PcdDict = {}
- self.InfList = []
-
- self.PcdFileLineDict = {}
- self.InfFileLineList = []
-
- self.FdDict = {}
- self.FvDict = {}
- self.CapsuleList = []
-# self.VtfList = []
-# self.RuleDict = {}
-
-## The syntax parser for FDF
-#
-# PreprocessFile method should be called prior to ParseFile
-# CycleReferenceCheck method can detect cycles in FDF contents
-#
-# GetNext*** procedures mean these procedures will get next token first, then make judgement.
-# Get*** procedures mean these procedures will make judgement on current token only.
-#
-class FdfParser(object):
- ## The constructor
- #
- # @param self The object pointer
- # @param FileName The file that to be parsed
- #
- def __init__(self, FileName):
- self.Profile = FileProfile(FileName)
- self.FileName = FileName
- self.CurrentLineNumber = 1
- self.CurrentOffsetWithinLine = 0
- self.CurrentFdName = None
- self.CurrentFvName = None
- self.__Token = ""
- self.__SkippedChars = ""
-
- self.__WipeOffArea = []
-
- ## __IsWhiteSpace() method
- #
- # Whether char at current FileBufferPos is whitespace
- #
- # @param self The object pointer
- # @param Char The char to test
- # @retval True The char is a kind of white space
- # @retval False The char is NOT a kind of white space
- #
- def __IsWhiteSpace(self, Char):
- if Char in (T_CHAR_NULL, T_CHAR_CR, T_CHAR_SPACE, T_CHAR_TAB, T_CHAR_LF):
- return True
- else:
- return False
-
- ## __SkipWhiteSpace() method
- #
- # Skip white spaces from current char, return number of chars skipped
- #
- # @param self The object pointer
- # @retval Count The number of chars skipped
- #
- def __SkipWhiteSpace(self):
- Count = 0
- while not self.__EndOfFile():
- Count += 1
- if self.__CurrentChar() in (T_CHAR_NULL, T_CHAR_CR, T_CHAR_LF, T_CHAR_SPACE, T_CHAR_TAB):
- self.__SkippedChars += str(self.__CurrentChar())
- self.__GetOneChar()
-
- else:
- Count = Count - 1
- return Count
-
- ## __EndOfFile() method
- #
- # Judge current buffer pos is at file end
- #
- # @param self The object pointer
- # @retval True Current File buffer position is at file end
- # @retval False Current File buffer position is NOT at file end
- #
- def __EndOfFile(self):
- NumberOfLines = len(self.Profile.FileLinesList)
- SizeOfLastLine = len(self.Profile.FileLinesList[-1])
- if self.CurrentLineNumber == NumberOfLines and self.CurrentOffsetWithinLine >= SizeOfLastLine - 1:
- return True
- elif self.CurrentLineNumber > NumberOfLines:
- return True
- else:
- return False
-
- ## __EndOfLine() method
- #
- # Judge current buffer pos is at line end
- #
- # @param self The object pointer
- # @retval True Current File buffer position is at line end
- # @retval False Current File buffer position is NOT at line end
- #
- def __EndOfLine(self):
- if self.CurrentLineNumber > len(self.Profile.FileLinesList):
- return True
- SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
- if self.CurrentOffsetWithinLine >= SizeOfCurrentLine:
- return True
- else:
- return False
-
- ## Rewind() method
- #
- # Reset file data buffer to the initial state
- #
- # @param self The object pointer
- #
- def Rewind(self):
- self.CurrentLineNumber = 1
- self.CurrentOffsetWithinLine = 0
-
- ## __UndoOneChar() method
- #
- # Go back one char in the file buffer
- #
- # @param self The object pointer
- # @retval True Successfully go back one char
- # @retval False Not able to go back one char as file beginning reached
- #
- def __UndoOneChar(self):
-
- if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0:
- return False
- elif self.CurrentOffsetWithinLine == 0:
- self.CurrentLineNumber -= 1
- self.CurrentOffsetWithinLine = len(self.__CurrentLine()) - 1
- else:
- self.CurrentOffsetWithinLine -= 1
- return True
-
- ## __GetOneChar() method
- #
- # Move forward one char in the file buffer
- #
- # @param self The object pointer
- #
- def __GetOneChar(self):
- if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
- self.CurrentLineNumber += 1
- self.CurrentOffsetWithinLine = 0
- else:
- self.CurrentOffsetWithinLine += 1
-
- ## __CurrentChar() method
- #
- # Get the char pointed to by the file buffer pointer
- #
- # @param self The object pointer
- # @retval Char Current char
- #
- def __CurrentChar(self):
- return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine]
-
- ## __NextChar() method
- #
- # Get the one char pass the char pointed to by the file buffer pointer
- #
- # @param self The object pointer
- # @retval Char Next char
- #
- def __NextChar(self):
- if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
- return self.Profile.FileLinesList[self.CurrentLineNumber][0]
- else:
- return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]
-
- ## __SetCurrentCharValue() method
- #
- # Modify the value of current char
- #
- # @param self The object pointer
- # @param Value The new value of current char
- #
- def __SetCurrentCharValue(self, Value):
- self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value
-
    ## __CurrentLine() method
    #
    #   Get the container holding the current line's contents
    #
    #   @param  self        The object pointer
    #   @retval List        Current line contents
    #
    def __CurrentLine(self):
        # CurrentLineNumber is 1-based; FileLinesList is 0-based.
        # NOTE(review): entries are char lists after __StringToList() and
        # strings otherwise — callers must know which phase they run in.
        return self.Profile.FileLinesList[self.CurrentLineNumber - 1]
-
    ## __StringToList() method
    #
    #   Convert every profile line from a string into a mutable list of chars
    #   so individual chars can be overwritten in place (comment wiping etc.)
    #
    #   @param  self        The object pointer
    #
    def __StringToList(self):
        self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesList]
        # Sentinel space so the char pointer can advance one past the final
        # real char of the file without indexing out of range.
        self.Profile.FileLinesList[-1].append(' ')
-
    ## __ReplaceMacros() method
    #
    #   Expand every $(MACRO) reference found in Str
    #
    #   @param  self        The object pointer
    #   @param  Str         The string to expand
    #   @param  File        File name used to look up file-local DEFINEs
    #   @param  Line        Line number; only DEFINEs at or before it apply
    #   @retval string      The expanded string
    #
    def __ReplaceMacros(self, Str, File, Line):
        MacroEnd = 0
        while Str.find('$(', MacroEnd) >= 0:
            MacroStart = Str.find('$(', MacroEnd)
            if Str.find(')', MacroStart) > 0:
                MacroEnd = Str.find(')', MacroStart)
                Name = Str[MacroStart + 2 : MacroEnd]
                Value = None
                # Command-line/global macros take precedence over file DEFINEs.
                if Name in InputMacroDict:
                    Value = InputMacroDict[Name]

                else:
                    # No break: the last matching DEFINE wins.
                    # NOTE(review): assumes AllMacroList preserves definition order.
                    for Profile in AllMacroList:
                        if Profile.FileName == File and Profile.MacroName == Name and Profile.DefinedAtLine <= Line:
                            Value = Profile.MacroValue

                if Value != None:
                    Str = Str.replace('$(' + Name + ')', Value)
                    # Resume scanning just past the substituted value so the
                    # replacement itself is rescanned only from its end.
                    MacroEnd = MacroStart + len(Value)

            else:
                # '$(' without a closing ')'.
                raise Warning("Macro not complete At Line ", self.FileName, self.CurrentLineNumber)
        return Str
-
    ## __ReplaceFragment() method
    #
    #   Overwrite a region of the char-list buffer with Value, preserving
    #   '\r'/'\n' line terminators; used to blank out processed statements
    #
    #   @param  self        The object pointer
    #   @param  StartPos    (line index, char offset), line index is 0-based
    #   @param  EndPos      (line index, char offset), inclusive end
    #   @param  Value       Replacement char, defaults to a single space
    #
    def __ReplaceFragment(self, StartPos, EndPos, Value=' '):
        if StartPos[0] == EndPos[0]:
            # Whole region lies on a single line.
            Offset = StartPos[1]
            while Offset <= EndPos[1]:
                self.Profile.FileLinesList[StartPos[0]][Offset] = Value
                Offset += 1
            return

        # Wipe the tail of the first line up to its line terminator.
        Offset = StartPos[1]
        while self.Profile.FileLinesList[StartPos[0]][Offset] not in ('\r', '\n'):
            self.Profile.FileLinesList[StartPos[0]][Offset] = Value
            Offset += 1

        # Wipe every full line before the last one.
        # NOTE(review): this loop starts at StartPos[0] again, re-wiping the
        # first line from offset 0 — harmless but redundant; confirm intent.
        Line = StartPos[0]
        while Line < EndPos[0]:
            Offset = 0
            while self.Profile.FileLinesList[Line][Offset] not in ('\r', '\n'):
                self.Profile.FileLinesList[Line][Offset] = Value
                Offset += 1
            Line += 1

        # Wipe the head of the last line through EndPos inclusive.
        Offset = 0
        while Offset <= EndPos[1]:
            self.Profile.FileLinesList[EndPos[0]][Offset] = Value
            Offset += 1
-
-
- def __GetMacroName(self):
- if not self.__GetNextToken():
- raise Warning("expected Macro name", self.FileName, self.CurrentLineNumber)
- MacroName = self.__Token
- NotFlag = False
- if MacroName.startswith('!'):
- NotFlag = True
- MacroName = MacroName[1:].strip()
-
- if not MacroName.startswith('$(') or not MacroName.endswith(')'):
- raise Warning("Macro name expected(Please use '$(%(Token)s)' if '%(Token)s' is a macro.)" % {"Token" : MacroName},
- self.FileName, self.CurrentLineNumber)
- MacroName = MacroName[2:-1]
- return MacroName, NotFlag
-
    ## PreprocessFile() method
    #
    #   Preprocess file contents: replace //, '#' and /* */ comments with
    #   spaces (preserving line structure), then join the char lists back
    #   into strings and rewind the buffer pointer to the beginning.
    #   BUGBUG: No !include statement processing contained in this procedure
    #   !include statement should be expanded at the same FileLinesList[CurrentLineNumber - 1]
    #
    #   @param  self        The object pointer
    #
    def PreprocessFile(self):

        self.Rewind()
        InComment = False
        DoubleSlashComment = False
        HashComment = False
        # A '#' inside a quoted string " " is not a comment.
        InString = False

        while not self.__EndOfFile():

            if self.__CurrentChar() == T_CHAR_DOUBLE_QUOTE and not InComment:
                InString = not InString
            # A newline ends // and '#' comments (but not /* */ blocks).
            if self.__CurrentChar() == T_CHAR_LF:
                self.CurrentLineNumber += 1
                self.CurrentOffsetWithinLine = 0
                if InComment and DoubleSlashComment:
                    InComment = False
                    DoubleSlashComment = False
                if InComment and HashComment:
                    InComment = False
                    HashComment = False
            # check for */ comment end
            elif InComment and not DoubleSlashComment and not HashComment and self.__CurrentChar() == T_CHAR_STAR and self.__NextChar() == T_CHAR_SLASH:
                self.__SetCurrentCharValue(T_CHAR_SPACE)
                self.__GetOneChar()
                self.__SetCurrentCharValue(T_CHAR_SPACE)
                self.__GetOneChar()
                InComment = False
            # overwrite comment interior with spaces
            elif InComment:
                self.__SetCurrentCharValue(T_CHAR_SPACE)
                self.__GetOneChar()
            # check for // comment start
            elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_SLASH and not self.__EndOfLine():
                InComment = True
                DoubleSlashComment = True
            # check for '#' comment start (ignored inside strings)
            elif self.__CurrentChar() == T_CHAR_HASH and not self.__EndOfLine() and not InString:
                InComment = True
                HashComment = True
            # check for /* comment start
            elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_STAR:
                self.__SetCurrentCharValue( T_CHAR_SPACE)
                self.__GetOneChar()
                self.__SetCurrentCharValue( T_CHAR_SPACE)
                self.__GetOneChar()
                InComment = True
            else:
                self.__GetOneChar()

        # Restore list-of-char-lists back to list-of-strings.
        # NOTE(review): the comprehension variable shadows the builtin 'list'.
        self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
        self.Rewind()
-
    ## PreprocessIncludeFile() method
    #
    #   Replace each !include statement with the contents of the named file,
    #   record an IncludeFileProfile for line-number mapping, and comment out
    #   the processed statement.  Rewinds the buffer when done.
    #
    #   @param  self        The object pointer
    #
    def PreprocessIncludeFile(self):

        while self.__GetNextToken():

            if self.__Token == '!include':
                IncludeLine = self.CurrentLineNumber
                IncludeOffset = self.CurrentOffsetWithinLine - len('!include')
                if not self.__GetNextToken():
                    raise Warning("expected include file name At Line ", self.FileName, self.CurrentLineNumber)
                IncFileName = self.__Token
                if not os.path.isabs(IncFileName):
                    if IncFileName.startswith('$(WORKSPACE)'):
                        # Resolve against the (possibly multi-) workspace root.
                        Str = mws.handleWsMacro(IncFileName)
                        Str = Str.replace('$(WORKSPACE)', os.environ.get('WORKSPACE'))
                        if os.path.exists(Str):
                            if not os.path.isabs(Str):
                                Str = os.path.abspath(Str)
                        IncFileName = Str
                    else:
                        # file is in the same dir with FDF file
                        FullFdf = self.FileName
                        if not os.path.isabs(self.FileName):
                            FullFdf = mws.join(os.environ.get('WORKSPACE'), self.FileName)

                        IncFileName = os.path.join(os.path.dirname(FullFdf), IncFileName)

                if not os.path.exists(os.path.normpath(IncFileName)):
                    raise Warning("Include file not exists At Line ", self.FileName, self.CurrentLineNumber)

                IncFileProfile = IncludeFileProfile(os.path.normpath(IncFileName))

                CurrentLine = self.CurrentLineNumber
                CurrentOffset = self.CurrentOffsetWithinLine
                # list index of the insertion, note that line number is 'CurrentLine + 1'
                InsertAtLine = CurrentLine
                IncFileProfile.InsertStartLineNumber = InsertAtLine + 1
                # deal with remaining portions after "!include filename", if exists.
                if self.__GetNextToken():
                    if self.CurrentLineNumber == CurrentLine:
                        # Split off the trailing text into its own new line.
                        RemainingLine = self.__CurrentLine()[CurrentOffset:]
                        self.Profile.FileLinesList.insert(self.CurrentLineNumber, RemainingLine)
                        IncFileProfile.InsertAdjust += 1
                        self.CurrentLineNumber += 1
                        self.CurrentOffsetWithinLine = 0

                # Splice the included file's lines into the buffer.
                for Line in IncFileProfile.FileLinesList:
                    self.Profile.FileLinesList.insert(InsertAtLine, Line)
                    self.CurrentLineNumber += 1
                    InsertAtLine += 1

                IncludeFileList.append(IncFileProfile)

                # comment out the processed include file statement
                TempList = list(self.Profile.FileLinesList[IncludeLine - 1])
                TempList.insert(IncludeOffset, '#')
                self.Profile.FileLinesList[IncludeLine - 1] = ''.join(TempList)

        self.Rewind()
-
    ## PreprocessConditionalStatement() method
    #
    #   Evaluate DEFINE and !if/!ifdef/!ifndef/!elseif/!else/!endif
    #   directives, recording regions to blank out in self.__WipeOffArea.
    #   Rewinds the buffer pointer when done.
    #
    #   @param  self        The object pointer
    #
    def PreprocessConditionalStatement(self):
        # IfList is a stack of if branches with elements of list [Pos, CondSatisfied, BranchDetermined]
        IfList = []
        while self.__GetNextToken():
            if self.__Token == 'DEFINE':
                DefineLine = self.CurrentLineNumber - 1
                DefineOffset = self.CurrentOffsetWithinLine - len('DEFINE')
                if not self.__GetNextToken():
                    raise Warning("expected Macro name At Line ", self.FileName, self.CurrentLineNumber)
                Macro = self.__Token
                if not self.__IsToken( "="):
                    raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)

                if not self.__GetNextToken():
                    raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)

                # __GetStringData re-reads the token when it is quoted.
                if self.__GetStringData():
                    pass
                Value = self.__Token
                # Command-line macros (InputMacroDict) override file DEFINEs.
                if not Macro in InputMacroDict:
                    FileLineTuple = GetRealFileLine(self.FileName, DefineLine + 1)
                    MacProfile = MacroProfile(FileLineTuple[0], FileLineTuple[1])
                    MacProfile.MacroName = Macro
                    MacProfile.MacroValue = Value
                    AllMacroList.append(MacProfile)
                self.__WipeOffArea.append(((DefineLine, DefineOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))

            elif self.__Token in ('!ifdef', '!ifndef', '!if'):
                IfStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self.__Token))
                IfList.append([IfStartPos, None, None])
                CondLabel = self.__Token

                MacroName, NotFlag = self.__GetMacroName()
                NotDefineFlag = False
                if CondLabel == '!ifndef':
                    NotDefineFlag = True
                if CondLabel == '!ifdef' or CondLabel == '!ifndef':
                    if NotFlag:
                        raise Warning("'NOT' operation not allowed for Macro name At Line ", self.FileName, self.CurrentLineNumber)

                if CondLabel == '!if':

                    if not self.__GetNextOp():
                        raise Warning("expected !endif At Line ", self.FileName, self.CurrentLineNumber)

                    if self.__Token in ('!=', '==', '>', '<', '>=', '<='):
                        Op = self.__Token
                        if not self.__GetNextToken():
                            raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
                        if self.__GetStringData():
                            pass
                        MacroValue = self.__Token
                        ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, Op, MacroValue)
                        if NotFlag:
                            ConditionSatisfied = not ConditionSatisfied
                        BranchDetermined = ConditionSatisfied
                    else:
                        # No comparison operator: treat as a boolean test and
                        # push the token back for normal parsing.
                        self.CurrentOffsetWithinLine -= len(self.__Token)
                        ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, None, 'Bool')
                        if NotFlag:
                            ConditionSatisfied = not ConditionSatisfied
                        BranchDetermined = ConditionSatisfied
                    IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
                    if ConditionSatisfied:
                        # Wipe only the directive itself; the branch body stays.
                        self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))

                else:
                    # !ifdef / !ifndef: defined-ness test.
                    ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1)
                    if NotDefineFlag:
                        ConditionSatisfied = not ConditionSatisfied
                    BranchDetermined = ConditionSatisfied
                    IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
                    if ConditionSatisfied:
                        self.__WipeOffArea.append((IfStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))

            elif self.__Token in ('!elseif', '!else'):
                ElseStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self.__Token))
                if len(IfList) <= 0:
                    raise Warning("Missing !if statement At Line ", self.FileName, self.CurrentLineNumber)
                if IfList[-1][1]:
                    # A previous branch was taken: this branch is dead.
                    IfList[-1] = [ElseStartPos, False, True]
                    self.__WipeOffArea.append((ElseStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
                else:
                    # No branch taken yet: wipe everything up to here and
                    # tentatively enable this branch.
                    self.__WipeOffArea.append((IfList[-1][0], ElseStartPos))
                    IfList[-1] = [ElseStartPos, True, IfList[-1][2]]
                    if self.__Token == '!elseif':
                        MacroName, NotFlag = self.__GetMacroName()
                        if not self.__GetNextOp():
                            raise Warning("expected !endif At Line ", self.FileName, self.CurrentLineNumber)

                        if self.__Token in ('!=', '==', '>', '<', '>=', '<='):
                            Op = self.__Token
                            if not self.__GetNextToken():
                                raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
                            if self.__GetStringData():
                                pass
                            MacroValue = self.__Token
                            ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, Op, MacroValue)
                            if NotFlag:
                                ConditionSatisfied = not ConditionSatisfied

                        else:
                            self.CurrentOffsetWithinLine -= len(self.__Token)
                            ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, None, 'Bool')
                            if NotFlag:
                                ConditionSatisfied = not ConditionSatisfied

                        IfList[-1] = [IfList[-1][0], ConditionSatisfied, IfList[-1][2]]

                    if IfList[-1][1]:
                        if IfList[-1][2]:
                            IfList[-1][1] = False
                        else:
                            IfList[-1][2] = True
                            self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))


            elif self.__Token == '!endif':
                # NOTE(review): an unmatched !endif hits IfList[-1] on an
                # empty stack and raises IndexError, not Warning — confirm.
                if IfList[-1][1]:
                    self.__WipeOffArea.append(((self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len('!endif')), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
                else:
                    self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))

                IfList.pop()


        if len(IfList) > 0:
            raise Warning("Missing !endif At Line ", self.FileName, self.CurrentLineNumber)
        self.Rewind()
-
    ## __EvaluateConditional() method
    #
    #   Evaluate a conditional against a macro: defined-ness when Op is None,
    #   string equality for ==/!=, numeric comparison for >, >=, <, <=.
    #   InputMacroDict (command-line macros) is consulted first, then the
    #   file-local DEFINEs collected in AllMacroList.
    #
    #   @param  self        The object pointer
    #   @param  Name        Macro name
    #   @param  Line        Buffer line number of the conditional
    #   @param  Op          Comparison operator string, or None
    #   @param  Value       Right-hand value; 'Bool' requests a truth test
    #   @retval Boolean     Result of the conditional
    #
    def __EvaluateConditional(self, Name, Line, Op = None, Value = None):

        FileLineTuple = GetRealFileLine(self.FileName, Line)
        if Name in InputMacroDict:
            MacroValue = InputMacroDict[Name]
            if Op == None:
                # NOTE(review): precedence is (A and B) or C — when MacroValue
                # is None and Value != 'Bool' the .upper() call raises
                # AttributeError.  Possibly intended as A and (B or C); confirm.
                if Value == 'Bool' and MacroValue == None or MacroValue.upper() == 'FALSE':
                    return False
                return True
            elif Op == '!=':
                if Value != MacroValue:
                    return True
                else:
                    return False
            elif Op == '==':
                if Value == MacroValue:
                    return True
                else:
                    return False
            else:
                # Ordering operators require both sides numeric (hex or decimal).
                if (self.__IsHex(Value) or Value.isdigit()) and (self.__IsHex(MacroValue) or (MacroValue != None and MacroValue.isdigit())):
                    InputVal = long(Value, 0)
                    MacroVal = long(MacroValue, 0)
                    if Op == '>':
                        if MacroVal > InputVal:
                            return True
                        else:
                            return False
                    elif Op == '>=':
                        if MacroVal >= InputVal:
                            return True
                        else:
                            return False
                    elif Op == '<':
                        if MacroVal < InputVal:
                            return True
                        else:
                            return False
                    elif Op == '<=':
                        if MacroVal <= InputVal:
                            return True
                        else:
                            return False
                    else:
                        return False
                else:
                    raise Warning("Value %s is not a number At Line ", self.FileName, Line)

        # Fall back to file-local DEFINEs visible at this file/line.
        for Profile in AllMacroList:
            if Profile.FileName == FileLineTuple[0] and Profile.MacroName == Name and Profile.DefinedAtLine <= FileLineTuple[1]:
                if Op == None:
                    # NOTE(review): same precedence hazard as above.
                    if Value == 'Bool' and Profile.MacroValue == None or Profile.MacroValue.upper() == 'FALSE':
                        return False
                    return True
                elif Op == '!=':
                    if Value != Profile.MacroValue:
                        return True
                    else:
                        return False
                elif Op == '==':
                    if Value == Profile.MacroValue:
                        return True
                    else:
                        return False
                else:
                    if (self.__IsHex(Value) or Value.isdigit()) and (self.__IsHex(Profile.MacroValue) or (Profile.MacroValue != None and Profile.MacroValue.isdigit())):
                        InputVal = long(Value, 0)
                        MacroVal = long(Profile.MacroValue, 0)
                        if Op == '>':
                            if MacroVal > InputVal:
                                return True
                            else:
                                return False
                        elif Op == '>=':
                            if MacroVal >= InputVal:
                                return True
                            else:
                                return False
                        elif Op == '<':
                            if MacroVal < InputVal:
                                return True
                            else:
                                return False
                        elif Op == '<=':
                            if MacroVal <= InputVal:
                                return True
                            else:
                                return False
                        else:
                            return False
                    else:
                        raise Warning("Value %s is not a number At Line ", self.FileName, Line)

        # Macro not defined anywhere.
        return False
-
- ## __IsToken() method
- #
- # Check whether input string is found from current char position along
- # If found, the string value is put into self.__Token
- #
- # @param self The object pointer
- # @param String The string to search
- # @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
- # @retval True Successfully find string, file buffer pointer moved forward
- # @retval False Not able to find string, file buffer pointer not changed
- #
- def __IsToken(self, String, IgnoreCase = False):
- self.__SkipWhiteSpace()
-
- # Only consider the same line, no multi-line token allowed
- StartPos = self.CurrentOffsetWithinLine
- index = -1
- if IgnoreCase:
- index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(String.upper())
- else:
- index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(String)
- if index == 0:
- self.CurrentOffsetWithinLine += len(String)
- self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
- return True
- return False
-
- ## __IsKeyword() method
- #
- # Check whether input keyword is found from current char position along, whole word only!
- # If found, the string value is put into self.__Token
- #
- # @param self The object pointer
- # @param Keyword The string to search
- # @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
- # @retval True Successfully find string, file buffer pointer moved forward
- # @retval False Not able to find string, file buffer pointer not changed
- #
- def __IsKeyword(self, KeyWord, IgnoreCase = False):
- self.__SkipWhiteSpace()
-
- # Only consider the same line, no multi-line token allowed
- StartPos = self.CurrentOffsetWithinLine
- index = -1
- if IgnoreCase:
- index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(KeyWord.upper())
- else:
- index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(KeyWord)
- if index == 0:
- followingChar = self.__CurrentLine()[self.CurrentOffsetWithinLine + len(KeyWord)]
- if not str(followingChar).isspace() and followingChar not in SEPERATOR_TUPLE:
- return False
- self.CurrentOffsetWithinLine += len(KeyWord)
- self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
- return True
- return False
-
- ## __GetNextWord() method
- #
- # Get next C name from file lines
- # If found, the string value is put into self.__Token
- #
- # @param self The object pointer
- # @retval True Successfully find a C name string, file buffer pointer moved forward
- # @retval False Not able to find a C name string, file buffer pointer not changed
- #
- def __GetNextWord(self):
- self.__SkipWhiteSpace()
- if self.__EndOfFile():
- return False
-
- TempChar = self.__CurrentChar()
- StartPos = self.CurrentOffsetWithinLine
- if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_':
- self.__GetOneChar()
- while not self.__EndOfLine():
- TempChar = self.__CurrentChar()
- if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
- or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-':
- self.__GetOneChar()
-
- else:
- break
-
- self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
- return True
-
- return False
-
    ## __GetNextToken() method
    #
    #   Get the next token unit, ending at whitespace or a separator char.
    #   If found, the string value is put into self.__Token.
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully found a token; pointer moved forward
    #   @retval False       No token found; pointer unchanged
    #
    def __GetNextToken(self):
        # Skip leading spaces, if exist.
        self.__SkipWhiteSpace()
        if self.__EndOfFile():
            return False
        # Record the token start position, the position of the first non-space char.
        StartPos = self.CurrentOffsetWithinLine
        StartLine = self.CurrentLineNumber
        while not self.__EndOfLine():
            TempChar = self.__CurrentChar()
            # Try to find the end char that is not a space and not in separator tuple.
            # That is, when we got a space or any char in the tuple, we got the end of token.
            if not str(TempChar).isspace() and TempChar not in SEPERATOR_TUPLE:
                self.__GetOneChar()
            # If we happen to meet a separator as the first char, we must proceed to get it.
            # That is, we get a token that is a separator char; normally it is the boundary of other tokens.
            elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPERATOR_TUPLE:
                self.__GetOneChar()
                break
            else:
                break
#        else:
#            return False

        # If skipping whitespace crossed a line boundary, the token ends at
        # the end of its starting line.
        EndPos = self.CurrentOffsetWithinLine
        if self.CurrentLineNumber != StartLine:
            EndPos = len(self.Profile.FileLinesList[StartLine-1])
        self.__Token = self.Profile.FileLinesList[StartLine-1][StartPos : EndPos]
        if StartPos != self.CurrentOffsetWithinLine:
            return True
        else:
            return False
-
    ## __GetNextOp() method
    #
    #   Get the next whitespace-delimited chunk (used for comparison
    #   operators, which contain separator chars like '=' '<' '>').
    #   If found, the string value is put into self.__Token.
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully found a chunk; pointer moved forward
    #   @retval False       No chunk found
    #
    def __GetNextOp(self):
        # Skip leading spaces, if exist.
        self.__SkipWhiteSpace()
        if self.__EndOfFile():
            return False
        # Record the token start position, the position of the first non-space char.
        StartPos = self.CurrentOffsetWithinLine
        while not self.__EndOfLine():
            TempChar = self.__CurrentChar()
            # Try to find the end char that is not a space
            if not str(TempChar).isspace():
                self.__GetOneChar()
            else:
                break
        # NOTE(review): this is a while-ELSE — it runs when the loop reaches
        # end-of-line without 'break', returning False even though chars may
        # have been consumed.  Confirm this is the intended behavior.
        else:
            return False

        if StartPos != self.CurrentOffsetWithinLine:
            self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
            return True
        else:
            return False
- ## __GetNextGuid() method
- #
- # Get next token unit before a seperator
- # If found, the GUID string is put into self.__Token
- #
- # @param self The object pointer
- # @retval True Successfully find a registry format GUID, file buffer pointer moved forward
- # @retval False Not able to find a registry format GUID, file buffer pointer not changed
- #
- def __GetNextGuid(self):
-
- if not self.__GetNextToken():
- return False
- p = re.compile('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')
- if p.match(self.__Token) != None:
- return True
- else:
- self.__UndoToken()
- return False
-
    ## __UndoToken() method
    #
    #   Go back one token unit in the file buffer
    #
    #   @param  self        The object pointer
    #
    def __UndoToken(self):
        self.__UndoOneChar()
        # Back over the whitespace between tokens; if the start of the file
        # is reached, step forward again and give up.
        while self.__CurrentChar().isspace():
            if not self.__UndoOneChar():
                self.__GetOneChar()
                return


        StartPos = self.CurrentOffsetWithinLine
        CurrentLine = self.CurrentLineNumber
        # Walk backwards over the token chars, staying on a single line.
        while CurrentLine == self.CurrentLineNumber:

            TempChar = self.__CurrentChar()
            # Try to find the end char that is not a space and not in separator tuple.
            # That is, when we got a space or any char in the tuple, we got the end of token.
            if not str(TempChar).isspace() and not TempChar in SEPERATOR_TUPLE:
                if not self.__UndoOneChar():
                    # Hit the start of the file: the token begins here.
                    break
            # If we happen to meet a separator as the first char, we must proceed to get it.
            # That is, we get a token that is a separator char; normally it is the boundary of other tokens.
            elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPERATOR_TUPLE:
                return
            else:
                break

        # We stepped one char too far back; move forward onto the token start.
        self.__GetOneChar()
-
- ## __HexDigit() method
- #
- # Whether char input is a Hex data bit
- #
- # @param self The object pointer
- # @param TempChar The char to test
- # @retval True The char is a Hex data bit
- # @retval False The char is NOT a Hex data bit
- #
- def __HexDigit(self, TempChar):
- if (TempChar >= 'a' and TempChar <= 'f') or (TempChar >= 'A' and TempChar <= 'F') \
- or (TempChar >= '0' and TempChar <= '9'):
- return True
- else:
- return False
-
- def __IsHex(self, HexStr):
- if not HexStr.upper().startswith("0X"):
- return False
- if len(self.__Token) <= 2:
- return False
- charList = [c for c in HexStr[2 : ] if not self.__HexDigit( c)]
- if len(charList) == 0:
- return True
- else:
- return False
- ## __GetNextHexNumber() method
- #
- # Get next HEX data before a seperator
- # If found, the HEX data is put into self.__Token
- #
- # @param self The object pointer
- # @retval True Successfully find a HEX data, file buffer pointer moved forward
- # @retval False Not able to find a HEX data, file buffer pointer not changed
- #
- def __GetNextHexNumber(self):
- if not self.__GetNextToken():
- return False
- if self.__IsHex(self.__Token):
- return True
- else:
- self.__UndoToken()
- return False
-
- ## __GetNextDecimalNumber() method
- #
- # Get next decimal data before a seperator
- # If found, the decimal data is put into self.__Token
- #
- # @param self The object pointer
- # @retval True Successfully find a decimal data, file buffer pointer moved forward
- # @retval False Not able to find a decimal data, file buffer pointer not changed
- #
- def __GetNextDecimalNumber(self):
- if not self.__GetNextToken():
- return False
- if self.__Token.isdigit():
- return True
- else:
- self.__UndoToken()
- return False
-
- ## __GetNextPcdName() method
- #
- # Get next PCD token space C name and PCD C name pair before a seperator
- # If found, the decimal data is put into self.__Token
- #
- # @param self The object pointer
- # @retval Tuple PCD C name and PCD token space C name pair
- #
- def __GetNextPcdName(self):
- if not self.__GetNextWord():
- raise Warning("expected PcdTokenSpaceCName.PcdCName At Line ", self.FileName, self.CurrentLineNumber)
- pcdTokenSpaceCName = self.__Token
-
- if not self.__IsToken( "."):
- raise Warning("expected PcdTokenSpaceCName.PcdCName At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextWord():
- raise Warning("expected PcdTokenSpaceCName.PcdCName At Line ", self.FileName, self.CurrentLineNumber)
- pcdCName = self.__Token
-
- return (pcdCName, pcdTokenSpaceCName)
-
- ## __GetStringData() method
- #
- # Get string contents quoted in ""
- # If found, the decimal data is put into self.__Token
- #
- # @param self The object pointer
- # @retval True Successfully find a string data, file buffer pointer moved forward
- # @retval False Not able to find a string data, file buffer pointer not changed
- #
- def __GetStringData(self):
- if self.__Token.startswith("\"") or self.__Token.startswith("L\""):
- self.__UndoToken()
- self.__SkipToToken("\"")
- currentLineNumber = self.CurrentLineNumber
-
- if not self.__SkipToToken("\""):
- raise Warning("Missing Quote \" for String At Line ", self.FileName, self.CurrentLineNumber)
- if currentLineNumber != self.CurrentLineNumber:
- raise Warning("Missing Quote \" for String At Line ", self.FileName, self.CurrentLineNumber)
- self.__Token = self.__SkippedChars.rstrip('\"')
- return True
-
- elif self.__Token.startswith("\'") or self.__Token.startswith("L\'"):
- self.__UndoToken()
- self.__SkipToToken("\'")
- currentLineNumber = self.CurrentLineNumber
-
- if not self.__SkipToToken("\'"):
- raise Warning("Missing Quote \' for String At Line ", self.FileName, self.CurrentLineNumber)
- if currentLineNumber != self.CurrentLineNumber:
- raise Warning("Missing Quote \' for String At Line ", self.FileName, self.CurrentLineNumber)
- self.__Token = self.__SkippedChars.rstrip('\'')
- return True
-
- else:
- return False
-
- ## __SkipToToken() method
- #
- # Search forward in file buffer for the string
- # The skipped chars are put into self.__SkippedChars
- #
- # @param self The object pointer
- # @param String The string to search
- # @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
- # @retval True Successfully find the string, file buffer pointer moved forward
- # @retval False Not able to find the string, file buffer pointer not changed
- #
- def __SkipToToken(self, String, IgnoreCase = False):
- StartPos = self.GetFileBufferPos()
-
- self.__SkippedChars = ""
- while not self.__EndOfFile():
- index = -1
- if IgnoreCase:
- index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(String.upper())
- else:
- index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(String)
- if index == 0:
- self.CurrentOffsetWithinLine += len(String)
- self.__SkippedChars += String
- return True
- self.__SkippedChars += str(self.__CurrentChar())
- self.__GetOneChar()
-
- self.SetFileBufferPos( StartPos)
- self.__SkippedChars = ""
- return False
-
- ## GetFileBufferPos() method
- #
- # Return the tuple of current line and offset within the line
- #
- # @param self The object pointer
- # @retval Tuple Line number and offset pair
- #
- def GetFileBufferPos(self):
- return (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
-
- ## SetFileBufferPos() method
- #
- # Restore the file buffer position
- #
- # @param self The object pointer
- # @param Pos The new file buffer position
- #
- def SetFileBufferPos(self, Pos):
- (self.CurrentLineNumber, self.CurrentOffsetWithinLine) = Pos
-
    ## ParseFile() method
    #
    #   Parse the file profile buffer to extract fd, fv ... information.
    #   Exception will be raised if a syntax error is found.
    #   NOTE: this file is Python 2 syntax (except Warning, X).
    #
    #   @param  self        The object pointer
    #
    def ParseFile(self):

        try:
            # Pass 1: blank out comments, then splice !include files in.
            self.__StringToList()
            self.PreprocessFile()
            self.PreprocessIncludeFile()
            # Pass 2: re-blank comments (included text may contain them) and
            # evaluate the !if/!ifdef/!else/!endif regions.
            self.__StringToList()
            self.PreprocessFile()
            self.PreprocessConditionalStatement()
            self.__StringToList()
            # Wipe the regions recorded during conditional processing.
            for Pos in self.__WipeOffArea:
                self.__ReplaceFragment(Pos[0], Pos[1])
            self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]

            while self.__GetDefines():
                pass

            # Expand $(MACRO) references line by line.
            Index = 0
            while Index < len(self.Profile.FileLinesList):
                FileLineTuple = GetRealFileLine(self.FileName, Index + 1)
                self.Profile.FileLinesList[Index] = self.__ReplaceMacros(self.Profile.FileLinesList[Index], FileLineTuple[0], FileLineTuple[1])
                Index += 1

            while self.__GetFd():
                pass

            while self.__GetFv():
                pass

            while self.__GetCapsule():
                pass

#            while self.__GetVtf():
#                pass
#
#            while self.__GetRule():
#                pass


        except Warning, X:
            # Enrich the warning with the token context before re-raising.
            self.__UndoToken()
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            X.message += '\nGot Token: \"%s\" from File %s\n' % (self.__Token, FileLineTuple[0]) + \
                'Previous Token: \"%s\" At line: %d, Offset Within Line: %d\n' \
                % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :].rstrip('\n').rstrip('\r'), FileLineTuple[1], self.CurrentOffsetWithinLine)
            raise
-
    ## __GetDefines() method
    #
    #   Parse the [Defines] section, storing each MACRO = value pair into
    #   AllMacroList
    #
    #   @param  self        The object pointer
    #   @retval False       Always (see note at the end); also False when no
    #                       [Defines] section is present at this position
    #
    def __GetDefines(self):

        if not self.__GetNextToken():
            return False

        S = self.__Token.upper()
        if S.startswith("[") and not S.startswith("[DEFINES"):
            # Some other (known) section starts here: not an error, just no DEFINES.
            if not S.startswith("[FD.") and not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
                and not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
                raise Warning("Unknown section or section appear sequence error (The correct sequence should be [DEFINES], [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
            self.__UndoToken()
            return False

        self.__UndoToken()
        if not self.__IsToken("[DEFINES", True):
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            #print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
            #    % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
            raise Warning("expected [DEFINES", self.FileName, self.CurrentLineNumber)

        if not self.__IsToken( "]"):
            raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)

        while self.__GetNextWord():
            Macro = self.__Token

            if not self.__IsToken("="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken() or self.__Token.startswith('['):
                raise Warning("expected MACRO value", self.FileName, self.CurrentLineNumber)
            Value = self.__Token
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            MacProfile = MacroProfile(FileLineTuple[0], FileLineTuple[1])
            MacProfile.MacroName = Macro
            MacProfile.MacroValue = Value
            AllMacroList.append(MacProfile)

        # NOTE(review): deliberately(?) always returns False so the caller's
        # 'while self.__GetDefines()' loop executes exactly once.
        return False
-
    ## __GetFd() method
    #
    #   Parse one [FD.<name>] section and store its FDClassObject into
    #   self.Profile.FdDict.
    #   NOTE: contains Python 2 'print' statement syntax.
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully parsed an FD section
    #   @retval False       No FD section at this position
    #
    def __GetFd(self):

        if not self.__GetNextToken():
            return False

        S = self.__Token.upper()
        if S.startswith("[") and not S.startswith("[FD."):
            # A later (known) section starts here: stop looking for FDs.
            if not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
                and not S.startswith("[VTF.") and not S.startswith("[RULE."):
                raise Warning("Unknown section At Line ", self.FileName, self.CurrentLineNumber)
            self.__UndoToken()
            return False

        self.__UndoToken()
        if not self.__IsToken("[FD.", True):
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
                    % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
            raise Warning("expected [FD.] At Line ", self.FileName, self.CurrentLineNumber)

        # FD names are case-insensitive; canonicalize to upper case.
        FdName = self.__GetUiName()
        self.CurrentFdName = FdName.upper()

        if not self.__IsToken( "]"):
            raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)

        FdObj = CommonDataClass.FdfClass.FDClassObject()
        FdObj.FdUiName = self.CurrentFdName
        self.Profile.FdDict[self.CurrentFdName] = FdObj
        Status = self.__GetCreateFile(FdObj)
        if not Status:
            raise Warning("FD name error At Line ", self.FileName, self.CurrentLineNumber)

        if not self.__GetTokenStatements(FdObj):
            return False

        self.__GetDefineStatements(FdObj)

        self.__GetSetStatements(FdObj)

        # At least one region layout is mandatory.
        if not self.__GetRegionLayout(FdObj):
            raise Warning("expected region layout At Line ", self.FileName, self.CurrentLineNumber)

        while self.__GetRegionLayout(FdObj):
            pass
        return True
-
- ## __GetUiName() method
- #
- # Return the UI name of a section
- #
- # @param self The object pointer
- # @retval FdName UI name
- #
- def __GetUiName(self):
- FdName = ""
- if self.__GetNextWord():
- FdName = self.__Token
-
- return FdName
-
- ## __GetCreateFile() method
- #
- # Return the output file name of object
- #
- # @param self The object pointer
- # @param Obj object whose data will be stored in file
- # @retval FdName UI name
- #
- def __GetCreateFile(self, Obj):
-
- if self.__IsKeyword( "CREATE_FILE"):
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected file name At Line ", self.FileName, self.CurrentLineNumber)
-
- FileName = self.__Token
- Obj.CreateFileName = FileName
-
- return True
-
    ## __GetTokenStatements() method
    #
    #   Get token statements
    #
    #   @param  self        The object pointer
    #   @param  Obj         for whom token statement is got
    #   @retval True        Successfully find a token statement
    #   @retval False       Not able to find a token statement
    #
    def __GetTokenStatements(self, Obj):
        """Parse the mandatory FD header statements, in fixed order:
        BaseAddress, Size, ErasePolarity, then the block statements.

        Each value may carry an optional '| PcdName' override, which is
        recorded in self.Profile.PcdDict with its source file/line.
        """
        if not self.__IsKeyword( "BaseAddress"):
            raise Warning("BaseAddress missing At Line ", self.FileName, self.CurrentLineNumber)

        if not self.__IsToken( "="):
            raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)

        if not self.__GetNextHexNumber():
            raise Warning("expected Hex base address At Line ", self.FileName, self.CurrentLineNumber)

        # BaseAddress is kept as its textual (hex) form on the object.
        Obj.BaseAddress = self.__Token

        # Optional '| Pcd' associates the base address value with a PCD.
        if self.__IsToken( "|"):
            pcdPair = self.__GetNextPcdName()
            Obj.BaseAddressPcd = pcdPair
            self.Profile.PcdDict[pcdPair] = long(Obj.BaseAddress, 0)
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple

        if not self.__IsKeyword( "Size"):
            raise Warning("Size missing At Line ", self.FileName, self.CurrentLineNumber)

        if not self.__IsToken( "="):
            raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)

        if not self.__GetNextHexNumber():
            raise Warning("expected Hex size At Line ", self.FileName, self.CurrentLineNumber)


        # Unlike BaseAddress, Size is stored as a number (base auto-detected).
        Obj.Size = long(self.__Token, 0)

        if self.__IsToken( "|"):
            pcdPair = self.__GetNextPcdName()
            Obj.SizePcd = pcdPair
            self.Profile.PcdDict[pcdPair] = Obj.Size
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple

        if not self.__IsKeyword( "ErasePolarity"):
            raise Warning("ErasePolarity missing At Line ", self.FileName, self.CurrentLineNumber)

        if not self.__IsToken( "="):
            raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)

        if not self.__GetNextToken():
            raise Warning("expected Erase Polarity At Line ", self.FileName, self.CurrentLineNumber)

        # Only literal "1" or "0" are valid polarities; stored as a string.
        if self.__Token != "1" and self.__Token != "0":
            raise Warning("expected 1 or 0 Erase Polarity At Line ", self.FileName, self.CurrentLineNumber)

        Obj.ErasePolarity = self.__Token

        Status = self.__GetBlockStatements(Obj)
        return Status
-
- ## __GetAddressStatements() method
- #
- # Get address statements
- #
- # @param self The object pointer
- # @param Obj for whom address statement is got
- # @retval True Successfully find
- # @retval False Not able to find
- #
- def __GetAddressStatements(self, Obj):
-
- if self.__IsKeyword("BsBaseAddress"):
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
- raise Warning("expected address At Line ", self.FileName, self.CurrentLineNumber)
-
- BsAddress = long(self.__Token, 0)
- Obj.BsBaseAddress = BsAddress
-
- if self.__IsKeyword("RtBaseAddress"):
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
- raise Warning("expected address At Line ", self.FileName, self.CurrentLineNumber)
-
- RtAddress = long(self.__Token, 0)
- Obj.RtBaseAddress = RtAddress
-
- ## __GetBlockStatements() method
- #
- # Get block statements
- #
- # @param self The object pointer
- # @param Obj for whom block statement is got
- # @retval True Successfully find
- # @retval False Not able to find
- #
- def __GetBlockStatements(self, Obj):
-
- if not self.__GetBlockStatement(Obj):
- #set default block size is 1
- Obj.BlockSizeList.append((1, Obj.Size, None))
- return True
-
- while self.__GetBlockStatement(Obj):
- pass
-
- for Item in Obj.BlockSizeList:
- if Item[0] == None or Item[1] == None:
- raise Warning("expected block statement for Fd Section", self.FileName, self.CurrentLineNumber)
-
- return True
-
- ## __GetBlockStatement() method
- #
- # Get block statement
- #
- # @param self The object pointer
- # @param Obj for whom block statement is got
- # @retval True Successfully find
- # @retval False Not able to find
- #
- def __GetBlockStatement(self, Obj):
- if not self.__IsKeyword( "BlockSize"):
- return False
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextHexNumber() and not self.__GetNextDecimalNumber():
- raise Warning("expected Hex block size At Line ", self.FileName, self.CurrentLineNumber)
-
- BlockSize = long(self.__Token, 0)
- BlockSizePcd = None
- if self.__IsToken( "|"):
- PcdPair = self.__GetNextPcdName()
- BlockSizePcd = PcdPair
- self.Profile.PcdDict[PcdPair] = BlockSize
- FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
- self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
-
- BlockNumber = None
- if self.__IsKeyword( "NumBlocks"):
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
- raise Warning("expected block numbers At Line ", self.FileName, self.CurrentLineNumber)
-
- BlockNumber = long(self.__Token, 0)
-
- Obj.BlockSizeList.append((BlockSize, BlockNumber, BlockSizePcd))
- return True
-
- ## __GetDefineStatements() method
- #
- # Get define statements
- #
- # @param self The object pointer
- # @param Obj for whom define statement is got
- # @retval True Successfully find
- # @retval False Not able to find
- #
- def __GetDefineStatements(self, Obj):
- while self.__GetDefineStatement( Obj):
- pass
-
- ## __GetDefineStatement() method
- #
- # Get define statement
- #
- # @param self The object pointer
- # @param Obj for whom define statement is got
- # @retval True Successfully find
- # @retval False Not able to find
- #
- def __GetDefineStatement(self, Obj):
- if self.__IsKeyword("DEFINE"):
- self.__GetNextToken()
- Macro = self.__Token
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
-
- Value = self.__Token
- Macro = '$(' + Macro + ')'
- Obj.DefineVarDict[Macro] = Value
- return True
-
- return False
-
- ## __GetSetStatements() method
- #
- # Get set statements
- #
- # @param self The object pointer
- # @param Obj for whom set statement is got
- # @retval True Successfully find
- # @retval False Not able to find
- #
- def __GetSetStatements(self, Obj):
- while self.__GetSetStatement(Obj):
- pass
-
- ## __GetSetStatement() method
- #
- # Get set statement
- #
- # @param self The object pointer
- # @param Obj for whom set statement is got
- # @retval True Successfully find
- # @retval False Not able to find
- #
- def __GetSetStatement(self, Obj):
- if self.__IsKeyword("SET"):
- PcdPair = self.__GetNextPcdName()
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
-
- Value = self.__Token
- if Value.startswith("{"):
- # deal with value with {}
- if not self.__SkipToToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
- Value += self.__SkippedChars
-
- Obj.SetVarDict[PcdPair] = Value
- self.Profile.PcdDict[PcdPair] = Value
- FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
- self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
- return True
-
- return False
-
    ## __GetRegionLayout() method
    #
    #   Get region layout for FD
    #
    #   @param  self        The object pointer
    #   @param  Fd          for whom region is got
    #   @retval True        Successfully find
    #   @retval False       Not able to find
    #
    def __GetRegionLayout(self, Fd):
        """Parse one region line '<offset> | <size> [Pcd [| Pcd]] [data]'.

        The region is appended to Fd.RegionList; the optional trailing data
        part dispatches on its keyword (SET/FV/CAPSULE/FILE, else DATA).
        A region with no data part at all is still valid (returns True).
        """
        # A region must start with a hex offset; otherwise there is no region.
        if not self.__GetNextHexNumber():
            return False

        RegionObj = CommonDataClass.FdfClass.RegionClassObject()
        RegionObj.Offset = long(self.__Token, 0)
        Fd.RegionList.append(RegionObj)

        if not self.__IsToken( "|"):
            raise Warning("expected '|' At Line ", self.FileName, self.CurrentLineNumber)

        if not self.__GetNextHexNumber():
            raise Warning("expected Region Size At Line ", self.FileName, self.CurrentLineNumber)
        RegionObj.Size = long(self.__Token, 0)

        if not self.__GetNextWord():
            return True

        # A word that is not one of the data keywords must be a PCD name that
        # receives the region's absolute offset (and optionally its size).
        if not self.__Token in ("SET", "FV", "FILE", "DATA", "CAPSULE"):
            self.__UndoToken()
            RegionObj.PcdOffset = self.__GetNextPcdName()
            self.Profile.PcdDict[RegionObj.PcdOffset] = RegionObj.Offset + long(Fd.BaseAddress, 0)
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            self.Profile.PcdFileLineDict[RegionObj.PcdOffset] = FileLineTuple
            if self.__IsToken( "|"):
                RegionObj.PcdSize = self.__GetNextPcdName()
                self.Profile.PcdDict[RegionObj.PcdSize] = RegionObj.Size
                FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
                self.Profile.PcdFileLineDict[RegionObj.PcdSize] = FileLineTuple

            if not self.__GetNextWord():
                return True

        # SET statements may precede the actual data keyword.
        if self.__Token == "SET":
            self.__UndoToken()
            self.__GetSetStatements( RegionObj)
            if not self.__GetNextWord():
                return True

        elif self.__Token == "FV":
            self.__UndoToken()
            self.__GetRegionFvType( RegionObj)

        elif self.__Token == "CAPSULE":
            self.__UndoToken()
            self.__GetRegionCapType( RegionObj)

        elif self.__Token == "FILE":
            self.__UndoToken()
            self.__GetRegionFileType( RegionObj)

        else:
            # Anything else is expected to be a DATA statement.
            self.__UndoToken()
            self.__GetRegionDataType( RegionObj)

        return True
-
- ## __GetRegionFvType() method
- #
- # Get region fv data for region
- #
- # @param self The object pointer
- # @param RegionObj for whom region data is got
- #
- def __GetRegionFvType(self, RegionObj):
-
- if not self.__IsKeyword( "FV"):
- raise Warning("expected Keyword 'FV' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
-
- RegionObj.RegionType = "FV"
- RegionObj.RegionDataList.append(self.__Token)
-
- while self.__IsKeyword( "FV"):
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
-
- RegionObj.RegionDataList.append(self.__Token)
-
- ## __GetRegionCapType() method
- #
- # Get region capsule data for region
- #
- # @param self The object pointer
- # @param RegionObj for whom region data is got
- #
- def __GetRegionCapType(self, RegionObj):
-
- if not self.__IsKeyword("CAPSULE"):
- raise Warning("expected Keyword 'CAPSULE' at line", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' at line", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected CAPSULE name at line", self.FileName, self.CurrentLineNumber)
-
- RegionObj.RegionType = "CAPSULE"
- RegionObj.RegionDataList.append(self.__Token)
-
- while self.__IsKeyword("CAPSULE"):
-
- if not self.__IsToken("="):
- raise Warning("expected '=' at line", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected CAPSULE name at line", self.FileName, self.CurrentLineNumber)
-
- RegionObj.RegionDataList.append(self.__Token)
-
- ## __GetRegionFileType() method
- #
- # Get region file data for region
- #
- # @param self The object pointer
- # @param RegionObj for whom region data is got
- #
- def __GetRegionFileType(self, RegionObj):
-
- if not self.__IsKeyword( "FILE"):
- raise Warning("expected Keyword 'FILE' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected File name At Line ", self.FileName, self.CurrentLineNumber)
-
- RegionObj.RegionType = "FILE"
- RegionObj.RegionDataList.append( self.__Token)
-
- while self.__IsKeyword( "FILE"):
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected FILE name At Line ", self.FileName, self.CurrentLineNumber)
-
- RegionObj.RegionDataList.append(self.__Token)
-
- ## __GetRegionDataType() method
- #
- # Get region array data for region
- #
- # @param self The object pointer
- # @param RegionObj for whom region data is got
- #
- def __GetRegionDataType(self, RegionObj):
-
- if not self.__IsKeyword( "DATA"):
- raise Warning("expected Region Data type At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken( "{"):
- raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextHexNumber():
- raise Warning("expected Hex byte At Line ", self.FileName, self.CurrentLineNumber)
-
- if len(self.__Token) > 18:
- raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
-
- DataString = self.__Token
- DataString += ","
-
- while self.__IsToken(","):
- if not self.__GetNextHexNumber():
- raise Warning("Invalid Hex number At Line ", self.FileName, self.CurrentLineNumber)
- if len(self.__Token) > 4:
- raise Warning("Hex byte(must be 2 digits) too long At Line ", self.FileName, self.CurrentLineNumber)
- DataString += self.__Token
- DataString += ","
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
-
- DataString = DataString.rstrip(",")
- RegionObj.RegionType = "DATA"
- RegionObj.RegionDataList.append( DataString)
-
- while self.__IsKeyword( "DATA"):
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken( "{"):
- raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextHexNumber():
- raise Warning("expected Hex byte At Line ", self.FileName, self.CurrentLineNumber)
-
- if len(self.__Token) > 18:
- raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
-
- DataString = self.__Token
- DataString += ","
-
- while self.__IsToken(","):
- self.__GetNextHexNumber()
- if len(self.__Token) > 4:
- raise Warning("Hex byte(must be 2 digits) too long At Line ", self.FileName, self.CurrentLineNumber)
- DataString += self.__Token
- DataString += ","
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
-
- DataString = DataString.rstrip(",")
- RegionObj.RegionDataList.append( DataString)
-
- ## __GetFv() method
- #
- # Get FV section contents and store its data into FV dictionary of self.Profile
- #
- # @param self The object pointer
- # @retval True Successfully find a FV
- # @retval False Not able to find a FV
- #
- def __GetFv(self):
- if not self.__GetNextToken():
- return False
-
- S = self.__Token.upper()
- if S.startswith("[") and not S.startswith("[FV."):
- if not S.startswith("[CAPSULE.") \
- and not S.startswith("[VTF.") and not S.startswith("[RULE."):
- raise Warning("Unknown section or section appear sequence error \n(The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.]) At Line ", self.FileName, self.CurrentLineNumber)
- self.__UndoToken()
- return False
-
- self.__UndoToken()
- if not self.__IsToken("[FV.", True):
- FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
- print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
- % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
- raise Warning("Unknown Keyword At Line ", self.FileName, self.CurrentLineNumber)
-
- FvName = self.__GetUiName()
- self.CurrentFvName = FvName.upper()
-
- if not self.__IsToken( "]"):
- raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
-
- FvObj = CommonDataClass.FdfClass.FvClassObject()
- FvObj.UiFvName = self.CurrentFvName
- self.Profile.FvDict[self.CurrentFvName] = FvObj
-
- Status = self.__GetCreateFile(FvObj)
- if not Status:
- raise Warning("FV name error At Line ", self.FileName, self.CurrentLineNumber)
-
- self.__GetDefineStatements(FvObj)
-
- self.__GetAddressStatements(FvObj)
-
- self.__GetBlockStatement(FvObj)
-
- self.__GetSetStatements(FvObj)
-
- self.__GetFvAlignment(FvObj)
-
- self.__GetFvAttributes(FvObj)
-
- self.__GetFvNameGuid(FvObj)
-
- self.__GetAprioriSection(FvObj, FvObj.DefineVarDict.copy())
- self.__GetAprioriSection(FvObj, FvObj.DefineVarDict.copy())
-
- while True:
- isInf = self.__GetInfStatement(FvObj, MacroDict = FvObj.DefineVarDict.copy())
- isFile = self.__GetFileStatement(FvObj, MacroDict = FvObj.DefineVarDict.copy())
- if not isInf and not isFile:
- break
-
- return True
-
- ## __GetFvAlignment() method
- #
- # Get alignment for FV
- #
- # @param self The object pointer
- # @param Obj for whom alignment is got
- # @retval True Successfully find a alignment statement
- # @retval False Not able to find a alignment statement
- #
- def __GetFvAlignment(self, Obj):
-
- if not self.__IsKeyword( "FvAlignment"):
- return False
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected alignment value At Line ", self.FileName, self.CurrentLineNumber)
-
- if self.__Token.upper() not in ("1", "2", "4", "8", "16", "32", "64", "128", "256", "512", \
- "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", \
- "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", \
- "1G", "2G"):
- raise Warning("Unknown alignment value At Line ", self.FileName, self.CurrentLineNumber)
- Obj.FvAlignment = self.__Token
- return True
-
- ## __GetFvAttributes() method
- #
- # Get attributes for FV
- #
- # @param self The object pointer
- # @param Obj for whom attribute is got
- # @retval None
- #
- def __GetFvAttributes(self, FvObj):
-
- while self.__GetNextWord():
- name = self.__Token
- if name not in ("ERASE_POLARITY", "MEMORY_MAPPED", \
- "STICKY_WRITE", "LOCK_CAP", "LOCK_STATUS", "WRITE_ENABLED_CAP", \
- "WRITE_DISABLED_CAP", "WRITE_STATUS", "READ_ENABLED_CAP", \
- "READ_DISABLED_CAP", "READ_STATUS", "READ_LOCK_CAP", \
- "READ_LOCK_STATUS", "WRITE_LOCK_CAP", "WRITE_LOCK_STATUS", \
- "WRITE_POLICY_RELIABLE"):
- self.__UndoToken()
- return
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
- raise Warning("expected TRUE/FALSE (1/0) At Line ", self.FileName, self.CurrentLineNumber)
-
- FvObj.FvAttributeDict[name] = self.__Token
-
- return
-
- ## __GetFvNameGuid() method
- #
- # Get FV GUID for FV
- #
- # @param self The object pointer
- # @param Obj for whom GUID is got
- # @retval None
- #
- def __GetFvNameGuid(self, FvObj):
-
- if not self.__IsKeyword( "FvNameGuid"):
- return
-
- if not self.__IsToken( "="):
- raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextGuid():
- raise Warning("expected FV GUID value", self.FileName, self.CurrentLineNumber)
-
- FvObj.FvNameGuid = self.__Token
-
- return
-
- ## __GetAprioriSection() method
- #
- # Get token statements
- #
- # @param self The object pointer
- # @param FvObj for whom apriori is got
- # @param MacroDict dictionary used to replace macro
- # @retval True Successfully find apriori statement
- # @retval False Not able to find apriori statement
- #
- def __GetAprioriSection(self, FvObj, MacroDict = {}):
-
- if not self.__IsKeyword( "APRIORI"):
- return False
-
- if not self.__IsKeyword("PEI") and not self.__IsKeyword("DXE"):
- raise Warning("expected Apriori file type At Line ", self.FileName, self.CurrentLineNumber)
- AprType = self.__Token
-
- if not self.__IsToken( "{"):
- raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
-
- AprSectionObj = CommonDataClass.FdfClass.AprioriSectionClassObject()
- AprSectionObj.AprioriType = AprType
-
- self.__GetDefineStatements(AprSectionObj)
- MacroDict.update(AprSectionObj.DefineVarDict)
-
- while True:
- IsInf = self.__GetInfStatement( AprSectionObj, MacroDict = MacroDict)
- IsFile = self.__GetFileStatement( AprSectionObj)
- if not IsInf and not IsFile:
- break
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
-
- FvObj.AprioriSectionList.append(AprSectionObj)
- return True
-
- ## __GetInfStatement() method
- #
- # Get INF statements
- #
- # @param self The object pointer
- # @param Obj for whom inf statement is got
- # @param MacroDict dictionary used to replace macro
- # @retval True Successfully find inf statement
- # @retval False Not able to find inf statement
- #
- def __GetInfStatement(self, Obj, ForCapsule = False, MacroDict = {}):
-
- if not self.__IsKeyword( "INF"):
- return False
-
- ffsInf = CommonDataClass.FdfClass.FfsInfStatementClassObject()
- self.__GetInfOptions( ffsInf)
-
- if not self.__GetNextToken():
- raise Warning("expected INF file path At Line ", self.FileName, self.CurrentLineNumber)
- ffsInf.InfFileName = self.__Token
-
-# if ffsInf.InfFileName.find('$') >= 0:
-# ffsInf.InfFileName = GenFdsGlobalVariable.GenFdsGlobalVariable.MacroExtend(ffsInf.InfFileName, MacroDict)
-
- if not ffsInf.InfFileName in self.Profile.InfList:
- self.Profile.InfList.append(ffsInf.InfFileName)
- FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
- self.Profile.InfFileLineList.append(FileLineTuple)
-
- if self.__IsToken('|'):
- if self.__IsKeyword('RELOCS_STRIPPED'):
- ffsInf.KeepReloc = False
- elif self.__IsKeyword('RELOCS_RETAINED'):
- ffsInf.KeepReloc = True
- else:
- raise Warning("Unknown reloc strip flag At Line ", self.FileName, self.CurrentLineNumber)
-
- if ForCapsule:
- capsuleFfs = CapsuleData.CapsuleFfs()
- capsuleFfs.Ffs = ffsInf
- Obj.CapsuleDataList.append(capsuleFfs)
- else:
- Obj.FfsList.append(ffsInf)
- return True
-
    ## __GetInfOptions() method
    #
    #   Get options for INF
    #
    #   @param  self        The object pointer
    #   @param  FfsInfObj   for whom option is got
    #
    def __GetInfOptions(self, FfsInfObj):
        """Parse the optional INF modifiers (RuleOverride, VERSION, UI, USE)
        followed by an optional comma-separated Target_Tag_Arch key list.
        """
        if self.__IsKeyword( "RuleOverride"):
            if not self.__IsToken( "="):
                raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected Rule name At Line ", self.FileName, self.CurrentLineNumber)
            FfsInfObj.Rule = self.__Token

        if self.__IsKeyword( "VERSION"):
            if not self.__IsToken( "="):
                raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected Version At Line ", self.FileName, self.CurrentLineNumber)

            # Only a quoted string value is accepted as the version.
            if self.__GetStringData():
                FfsInfObj.Version = self.__Token

        if self.__IsKeyword( "UI"):
            if not self.__IsToken( "="):
                raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected UI name At Line ", self.FileName, self.CurrentLineNumber)

            if self.__GetStringData():
                FfsInfObj.Ui = self.__Token

        if self.__IsKeyword( "USE"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected ARCH name", self.FileName, self.CurrentLineNumber)
            FfsInfObj.UseArch = self.__Token


        if self.__GetNextToken():
            # KeyString is Target_Tag_Arch; each part may be a literal, a
            # $(...) macro, or '*'.
            p = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
            if p.match(self.__Token):
                FfsInfObj.KeyStringList.append(self.__Token)
                if not self.__IsToken(","):
                    return
            else:
                # Not a KeyString at all: push the token back and stop.
                self.__UndoToken()
                return

            # Remaining comma-separated KeyStrings after the first one.
            while self.__GetNextToken():
                if not p.match(self.__Token):
                    raise Warning("expected KeyString \"Target_Tag_Arch\" At Line ", self.FileName, self.CurrentLineNumber)
                FfsInfObj.KeyStringList.append(self.__Token)

                if not self.__IsToken(","):
                    break
-
- ## __GetFileStatement() method
- #
- # Get FILE statements
- #
- # @param self The object pointer
- # @param Obj for whom FILE statement is got
- # @param MacroDict dictionary used to replace macro
- # @retval True Successfully find FILE statement
- # @retval False Not able to find FILE statement
- #
- def __GetFileStatement(self, Obj, ForCapsule = False, MacroDict = {}):
-
- if not self.__IsKeyword( "FILE"):
- return False
-
- FfsFileObj = CommonDataClass.FdfClass.FileStatementClassObject()
-
- if not self.__GetNextWord():
- raise Warning("expected FFS type At Line ", self.FileName, self.CurrentLineNumber)
- FfsFileObj.FvFileType = self.__Token
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextGuid():
- if not self.__GetNextWord():
- raise Warning("expected File GUID", self.FileName, self.CurrentLineNumber)
- if self.__Token == 'PCD':
- if not self.__IsToken( "("):
- raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
- PcdPair = self.__GetNextPcdName()
- if not self.__IsToken( ")"):
- raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
- self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
-
- FfsFileObj.NameGuid = self.__Token
-
- self.__GetFilePart( FfsFileObj, MacroDict.copy())
-
- if ForCapsule:
- capsuleFfs = CapsuleData.CapsuleFfs()
- capsuleFfs.Ffs = FfsFileObj
- Obj.CapsuleDataList.append(capsuleFfs)
- else:
- Obj.FfsList.append(FfsFileObj)
-
- return True
-
- ## __FileCouldHaveRelocFlag() method
- #
- # Check whether reloc strip flag can be set for a file type.
- #
- # @param self The object pointer
- # @param FileType The file type to check with
- # @retval True This type could have relocation strip flag
- # @retval False No way to have it
- #
-
- def __FileCouldHaveRelocFlag (self, FileType):
- if FileType in ('SEC', 'PEI_CORE', 'PEIM', 'PEI_DXE_COMBO'):
- return True
- else:
- return False
-
- ## __SectionCouldHaveRelocFlag() method
- #
- # Check whether reloc strip flag can be set for a section type.
- #
- # @param self The object pointer
- # @param SectionType The section type to check with
- # @retval True This type could have relocation strip flag
- # @retval False No way to have it
- #
-
- def __SectionCouldHaveRelocFlag (self, SectionType):
- if SectionType in ('TE', 'PE32'):
- return True
- else:
- return False
-
- ## __GetFilePart() method
- #
- # Get components for FILE statement
- #
- # @param self The object pointer
- # @param FfsFileObj for whom component is got
- # @param MacroDict dictionary used to replace macro
- #
- def __GetFilePart(self, FfsFileObj, MacroDict = {}):
-
- self.__GetFileOpts( FfsFileObj)
-
- if not self.__IsToken("{"):
-# if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
-# if self.__FileCouldHaveRelocFlag(FfsFileObj.FvFileType):
-# if self.__Token == 'RELOCS_STRIPPED':
-# FfsFileObj.KeepReloc = False
-# else:
-# FfsFileObj.KeepReloc = True
-# else:
-# raise Warning("File type %s could not have reloc strip flag At Line %d" % (FfsFileObj.FvFileType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
-#
-# if not self.__IsToken("{"):
- raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected File name or section data At Line ", self.FileName, self.CurrentLineNumber)
-
- if self.__Token == "FV":
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
- if not self.__GetNextToken():
- raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
- FfsFileObj.FvName = self.__Token
-
- elif self.__Token == "FD":
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
- if not self.__GetNextToken():
- raise Warning("expected FD name At Line ", self.FileName, self.CurrentLineNumber)
- FfsFileObj.FdName = self.__Token
-
- elif self.__Token in ("DEFINE", "APRIORI", "SECTION"):
- self.__UndoToken()
- self.__GetSectionData( FfsFileObj, MacroDict)
- else:
- FfsFileObj.FileName = self.__Token
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
-
- ## __GetFileOpts() method
- #
- # Get options for FILE statement
- #
- # @param self The object pointer
- # @param FfsFileObj for whom options is got
- #
- def __GetFileOpts(self, FfsFileObj):
-
- if self.__GetNextToken():
- Pattern = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
- if Pattern.match(self.__Token):
- FfsFileObj.KeyStringList.append(self.__Token)
- if self.__IsToken(","):
- while self.__GetNextToken():
- if not Pattern.match(self.__Token):
- raise Warning("expected KeyString \"Target_Tag_Arch\" At Line ", self.FileName, self.CurrentLineNumber)
- FfsFileObj.KeyStringList.append(self.__Token)
-
- if not self.__IsToken(","):
- break
-
- else:
- self.__UndoToken()
-
- if self.__IsKeyword( "FIXED", True):
- FfsFileObj.Fixed = True
-
- if self.__IsKeyword( "CHECKSUM", True):
- FfsFileObj.CheckSum = True
-
- if self.__GetAlignment():
- FfsFileObj.Alignment = self.__Token
-
-
-
- ## __GetAlignment() method
- #
- # Return the alignment value
- #
- # @param self The object pointer
- # @retval True Successfully find alignment
- # @retval False Not able to find alignment
- #
- def __GetAlignment(self):
- if self.__IsKeyword( "Align", True):
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected alignment value At Line ", self.FileName, self.CurrentLineNumber)
- return True
-
- return False
-
- ## __GetFilePart() method
- #
- # Get section data for FILE statement
- #
- # @param self The object pointer
- # @param FfsFileObj for whom section is got
- # @param MacroDict dictionary used to replace macro
- #
- def __GetSectionData(self, FfsFileObj, MacroDict = {}):
- Dict = {}
- Dict.update(MacroDict)
-
- self.__GetDefineStatements(FfsFileObj)
-
- Dict.update(FfsFileObj.DefineVarDict)
- self.__GetAprioriSection(FfsFileObj, Dict.copy())
- self.__GetAprioriSection(FfsFileObj, Dict.copy())
-
- while True:
- IsLeafSection = self.__GetLeafSection(FfsFileObj, Dict)
- IsEncapSection = self.__GetEncapsulationSec(FfsFileObj)
- if not IsLeafSection and not IsEncapSection:
- break
-
- ## __GetLeafSection() method
- #
- # Get leaf section for Obj
- #
- # @param self The object pointer
- # @param Obj for whom leaf section is got
- # @param MacroDict dictionary used to replace macro
- # @retval True Successfully find section statement
- # @retval False Not able to find section statement
- #
- def __GetLeafSection(self, Obj, MacroDict = {}):
-
- OldPos = self.GetFileBufferPos()
-
- if not self.__IsKeyword( "SECTION"):
- if len(Obj.SectionList) == 0:
- raise Warning("expected SECTION At Line ", self.FileName, self.CurrentLineNumber)
- else:
- return False
-
- AlignValue = None
- if self.__GetAlignment():
- if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
- raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
- AlignValue = self.__Token
-
- BuildNum = None
- if self.__IsKeyword( "BUILD_NUM"):
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected Build number value At Line ", self.FileName, self.CurrentLineNumber)
-
- BuildNum = self.__Token
-
- if self.__IsKeyword( "VERSION"):
- if AlignValue == 'Auto':
- raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
- if not self.__GetNextToken():
- raise Warning("expected version At Line ", self.FileName, self.CurrentLineNumber)
- VerSectionObj = CommonDataClass.FdfClass.VerSectionClassObject()
- VerSectionObj.Alignment = AlignValue
- VerSectionObj.BuildNum = BuildNum
- if self.__GetStringData():
- VerSectionObj.StringData = self.__Token
- else:
- VerSectionObj.FileName = self.__Token
- Obj.SectionList.append(VerSectionObj)
-
- elif self.__IsKeyword( "UI"):
- if AlignValue == 'Auto':
- raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
- if not self.__GetNextToken():
- raise Warning("expected UI At Line ", self.FileName, self.CurrentLineNumber)
- UiSectionObj = CommonDataClass.FdfClass.UiSectionClassObject()
- UiSectionObj.Alignment = AlignValue
- if self.__GetStringData():
- UiSectionObj.StringData = self.__Token
- else:
- UiSectionObj.FileName = self.__Token
- Obj.SectionList.append(UiSectionObj)
-
- elif self.__IsKeyword( "FV_IMAGE"):
- if AlignValue == 'Auto':
- raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
- if not self.__GetNextWord():
- raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
-
- FvName = self.__Token.upper()
- FvObj = None
-
- if self.__IsToken( "{"):
- FvObj = Fv.FV()
- FvObj.UiFvName = FvName
- self.__GetDefineStatements(FvObj)
- MacroDict.update(FvObj.DefineVarDict)
- self.__GetBlockStatement(FvObj)
- self.__GetSetStatements(FvObj)
- self.__GetFvAlignment(FvObj)
- self.__GetFvAttributes(FvObj)
- self.__GetAprioriSection(FvObj, MacroDict.copy())
- self.__GetAprioriSection(FvObj, MacroDict.copy())
-
- while True:
- IsInf = self.__GetInfStatement(FvObj, MacroDict.copy())
- IsFile = self.__GetFileStatement(FvObj, MacroDict.copy())
- if not IsInf and not IsFile:
- break
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
-
- FvImageSectionObj = CommonDataClass.FdfClass.FvImageSectionClassObject()
- FvImageSectionObj.Alignment = AlignValue
- if FvObj != None:
- FvImageSectionObj.Fv = FvObj
- FvImageSectionObj.FvName = None
- else:
- FvImageSectionObj.FvName = FvName
-
- Obj.SectionList.append(FvImageSectionObj)
-
- elif self.__IsKeyword("PEI_DEPEX_EXP") or self.__IsKeyword("DXE_DEPEX_EXP") or self.__IsKeyword("SMM_DEPEX_EXP"):
- if AlignValue == 'Auto':
- raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
- DepexSectionObj = CommonDataClass.FdfClass.DepexSectionClassObject()
- DepexSectionObj.Alignment = AlignValue
- DepexSectionObj.DepexType = self.__Token
-
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
- if not self.__IsToken( "{"):
- raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
- if not self.__SkipToToken( "}"):
- raise Warning("expected Depex expression ending '}' At Line ", self.FileName, self.CurrentLineNumber)
-
- DepexSectionObj.Expression = self.__SkippedChars.rstrip('}')
- Obj.SectionList.append(DepexSectionObj)
-
- else:
-
- if not self.__GetNextWord():
- raise Warning("expected section type At Line ", self.FileName, self.CurrentLineNumber)
-
- # Encapsulation section appear, UndoToken and return
- if self.__Token == "COMPRESS" or self.__Token == "GUIDED":
- self.SetFileBufferPos(OldPos)
- return False
-
- if self.__Token not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
- "UI", "VERSION", "PEI_DEPEX", "SUBTYPE_GUID", "SMM_DEPEX"):
- raise Warning("Unknown section type '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
- if AlignValue == 'Auto'and (not self.__Token == 'PE32') and (not self.__Token == 'TE'):
- raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
- # DataSection
- DataSectionObj = CommonDataClass.FdfClass.DataSectionClassObject()
- DataSectionObj.Alignment = AlignValue
- DataSectionObj.SecType = self.__Token
-
- if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
- if self.__FileCouldHaveRelocFlag(Obj.FvFileType) and self.__SectionCouldHaveRelocFlag(DataSectionObj.SecType):
- if self.__Token == 'RELOCS_STRIPPED':
- DataSectionObj.KeepReloc = False
- else:
- DataSectionObj.KeepReloc = True
- else:
- raise Warning("File type %s, section type %s, could not have reloc strip flag At Line %d" % (Obj.FvFileType, DataSectionObj.SecType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
-
- if self.__IsToken("="):
- if not self.__GetNextToken():
- raise Warning("expected section file path At Line ", self.FileName, self.CurrentLineNumber)
- DataSectionObj.SectFileName = self.__Token
- else:
- if not self.__GetCglSection(DataSectionObj):
- return False
-
- Obj.SectionList.append(DataSectionObj)
-
- return True
-
- ## __GetCglSection() method
- #
- # Get compressed or GUIDed section for Obj
- #
- # @param self The object pointer
- # @param Obj for whom leaf section is got
- # @param AlignValue alignment value for complex section
- # @retval True Successfully find section statement
- # @retval False Not able to find section statement
- #
- def __GetCglSection(self, Obj, AlignValue = None):
-
- if self.__IsKeyword( "COMPRESS"):
- type = "PI_STD"
- if self.__IsKeyword("PI_STD") or self.__IsKeyword("PI_NONE"):
- type = self.__Token
-
- if not self.__IsToken("{"):
- raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
-
- CompressSectionObj = CommonDataClass.FdfClass.CompressSectionClassObject()
- CompressSectionObj.Alignment = AlignValue
- CompressSectionObj.CompType = type
- # Recursive sections...
- while True:
- IsLeafSection = self.__GetLeafSection(CompressSectionObj)
- IsEncapSection = self.__GetEncapsulationSec(CompressSectionObj)
- if not IsLeafSection and not IsEncapSection:
- break
-
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
- Obj.SectionList.append(CompressSectionObj)
-
-# else:
-# raise Warning("Compress type not known At Line ")
-
- return True
-
- elif self.__IsKeyword( "GUIDED"):
- GuidValue = None
- if self.__GetNextGuid():
- GuidValue = self.__Token
-
- AttribDict = self.__GetGuidAttrib()
- if not self.__IsToken("{"):
- raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
- GuidSectionObj = CommonDataClass.FdfClass.GuidSectionClassObject()
- GuidSectionObj.Alignment = AlignValue
- GuidSectionObj.NameGuid = GuidValue
- GuidSectionObj.SectionType = "GUIDED"
- GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
- GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
- # Recursive sections...
- while True:
- IsLeafSection = self.__GetLeafSection(GuidSectionObj)
- IsEncapSection = self.__GetEncapsulationSec(GuidSectionObj)
- if not IsLeafSection and not IsEncapSection:
- break
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
- Obj.SectionList.append(GuidSectionObj)
-
- return True
-
- return False
-
- ## __GetGuidAttri() method
- #
- # Get attributes for GUID section
- #
- # @param self The object pointer
- # @retval AttribDict Dictionary of key-value pair of section attributes
- #
- def __GetGuidAttrib(self):
-
- AttribDict = {}
- AttribDict["PROCESSING_REQUIRED"] = False
- AttribDict["AUTH_STATUS_VALID"] = False
- if self.__IsKeyword("PROCESSING_REQUIRED") or self.__IsKeyword("AUTH_STATUS_VALID"):
- AttribKey = self.__Token
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
- raise Warning("expected TRUE/FALSE (1/0) At Line ", self.FileName, self.CurrentLineNumber)
- AttribDict[AttribKey] = self.__Token
-
- if self.__IsKeyword("PROCESSING_REQUIRED") or self.__IsKeyword("AUTH_STATUS_VALID"):
- AttribKey = self.__Token
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ")
-
- if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
- raise Warning("expected TRUE/FALSE (1/0) At Line ", self.FileName, self.CurrentLineNumber)
- AttribDict[AttribKey] = self.__Token
-
- return AttribDict
-
- ## __GetEncapsulationSec() method
- #
- # Get encapsulation section for FILE
- #
- # @param self The object pointer
- # @param FfsFile for whom section is got
- # @retval True Successfully find section statement
- # @retval False Not able to find section statement
- #
- def __GetEncapsulationSec(self, FfsFileObj):
-
- OldPos = self.GetFileBufferPos()
- if not self.__IsKeyword( "SECTION"):
- if len(FfsFileObj.SectionList) == 0:
- raise Warning("expected SECTION At Line ", self.FileName, self.CurrentLineNumber)
- else:
- return False
-
- AlignValue = None
- if self.__GetAlignment():
- if self.__Token not in ("8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
- raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
- AlignValue = self.__Token
-
- if not self.__GetCglSection(FfsFileObj, AlignValue):
- self.SetFileBufferPos(OldPos)
- return False
- else:
- return True
-
- ## __GetCapsule() method
- #
- # Get capsule section contents and store its data into capsule list of self.Profile
- #
- # @param self The object pointer
- # @retval True Successfully find a capsule
- # @retval False Not able to find a capsule
- #
- def __GetCapsule(self):
-
- if not self.__GetNextToken():
- return False
-
- S = self.__Token.upper()
- if S.startswith("[") and not S.startswith("[CAPSULE."):
- if not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
- raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
- self.__UndoToken()
- return False
-
- self.__UndoToken()
- if not self.__IsToken("[CAPSULE.", True):
- FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
- print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
- % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
- raise Warning("expected [Capsule.] At Line ", self.FileName, self.CurrentLineNumber)
-
- CapsuleObj = CommonDataClass.FdfClass.CapsuleClassObject()
-
- CapsuleName = self.__GetUiName()
- if not CapsuleName:
- raise Warning("expected capsule name At line ", self.FileName, self.CurrentLineNumber)
-
- CapsuleObj.UiCapsuleName = CapsuleName.upper()
-
- if not self.__IsToken( "]"):
- raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
-
- if self.__IsKeyword("CREATE_FILE"):
- if not self.__IsToken( "="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected file name At Line ", self.FileName, self.CurrentLineNumber)
-
- CapsuleObj.CreateFile = self.__Token
-
- self.__GetCapsuleStatements(CapsuleObj)
- self.Profile.CapsuleList.append(CapsuleObj)
- return True
-
- ## __GetCapsuleStatements() method
- #
- # Get statements for capsule
- #
- # @param self The object pointer
- # @param Obj for whom statements are got
- #
- def __GetCapsuleStatements(self, Obj):
- self.__GetCapsuleTokens(Obj)
- self.__GetDefineStatements(Obj)
- self.__GetSetStatements(Obj)
-
- self.__GetCapsuleData(Obj)
-
- ## __GetCapsuleStatements() method
- #
- # Get token statements for capsule
- #
- # @param self The object pointer
- # @param Obj for whom token statements are got
- #
- def __GetCapsuleTokens(self, Obj):
-
- if not self.__IsKeyword("CAPSULE_GUID"):
- raise Warning("expected 'CAPSULE_GUID' At Line ", self.FileName, self.CurrentLineNumber)
-
- while self.__CurrentLine().find("=") != -1:
- NameValue = self.__CurrentLine().split("=")
- Obj.TokensDict[NameValue[0].strip()] = NameValue[1].strip()
- self.CurrentLineNumber += 1
- self.CurrentOffsetWithinLine = 0
-
- ## __GetCapsuleData() method
- #
- # Get capsule data for capsule
- #
- # @param self The object pointer
- # @param Obj for whom capsule data are got
- #
- def __GetCapsuleData(self, Obj):
-
- while True:
- IsInf = self.__GetInfStatement(Obj, True)
- IsFile = self.__GetFileStatement(Obj, True)
- IsFv = self.__GetFvStatement(Obj)
- if not IsInf and not IsFile and not IsFv:
- break
-
- ## __GetFvStatement() method
- #
- # Get FV for capsule
- #
- # @param self The object pointer
- # @param CapsuleObj for whom FV is got
- # @retval True Successfully find a FV statement
- # @retval False Not able to find a FV statement
- #
- def __GetFvStatement(self, CapsuleObj):
-
- if not self.__IsKeyword("FV"):
- return False
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
-
-# CapsuleFv = CapsuleData.CapsuleFv()
-# CapsuleFv.FvName = self.__Token
-# CapsuleObj.CapsuleDataList.append(CapsuleFv)
- return True
-
- ## __GetRule() method
- #
- # Get Rule section contents and store its data into rule list of self.Profile
- #
- # @param self The object pointer
- # @retval True Successfully find a Rule
- # @retval False Not able to find a Rule
- #
- def __GetRule(self):
-
- if not self.__GetNextToken():
- return False
-
- S = self.__Token.upper()
- if S.startswith("[") and not S.startswith("[RULE."):
- if not S.startswith("[OPTIONROM."):
- raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
- self.__UndoToken()
- return False
- self.__UndoToken()
- if not self.__IsToken("[Rule.", True):
- FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
- print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
- % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
- raise Warning("expected [Rule.] At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__SkipToToken("."):
- raise Warning("expected '.' At Line ", self.FileName, self.CurrentLineNumber)
-
- Arch = self.__SkippedChars.rstrip(".")
- if Arch.upper() not in ("IA32", "X64", "IPF", "EBC", "ARM", "AARCH64", "COMMON"):
- raise Warning("Unknown Arch '%s'" % Arch, self.FileName, self.CurrentLineNumber)
-
- ModuleType = self.__GetModuleType()
-
- TemplateName = ""
- if self.__IsToken("."):
- if not self.__GetNextWord():
- raise Warning("expected template name At Line ", self.FileName, self.CurrentLineNumber)
- TemplateName = self.__Token
-
- if not self.__IsToken( "]"):
- raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
-
- RuleObj = self.__GetRuleFileStatements()
- RuleObj.Arch = Arch.upper()
- RuleObj.ModuleType = ModuleType
- RuleObj.TemplateName = TemplateName
- if TemplateName == '' :
- self.Profile.RuleDict['RULE' + \
- '.' + \
- Arch.upper() + \
- '.' + \
- ModuleType.upper() ] = RuleObj
- else :
- self.Profile.RuleDict['RULE' + \
- '.' + \
- Arch.upper() + \
- '.' + \
- ModuleType.upper() + \
- '.' + \
- TemplateName.upper() ] = RuleObj
-# self.Profile.RuleList.append(rule)
- return True
-
- ## __GetModuleType() method
- #
- # Return the module type
- #
- # @param self The object pointer
- # @retval string module type
- #
- def __GetModuleType(self):
-
- if not self.__GetNextWord():
- raise Warning("expected Module type At Line ", self.FileName, self.CurrentLineNumber)
- if self.__Token.upper() not in ("SEC", "PEI_CORE", "PEIM", "DXE_CORE", \
- "DXE_DRIVER", "DXE_SAL_DRIVER", \
- "DXE_SMM_DRIVER", "DXE_RUNTIME_DRIVER", \
- "UEFI_DRIVER", "UEFI_APPLICATION", "USER_DEFINED", "DEFAULT", "BASE", \
- "SECURITY_CORE", "COMBINED_PEIM_DRIVER", "PIC_PEIM", "RELOCATABLE_PEIM", \
- "PE32_PEIM", "BS_DRIVER", "RT_DRIVER", "SAL_RT_DRIVER", "APPLICATION", "ACPITABLE", "SMM_CORE"):
- raise Warning("Unknown Module type At line ", self.FileName, self.CurrentLineNumber)
- return self.__Token
-
- ## __GetFileExtension() method
- #
- # Return the file extension
- #
- # @param self The object pointer
- # @retval string file name extension
- #
- def __GetFileExtension(self):
- if not self.__IsToken("."):
- raise Warning("expected '.' At Line ", self.FileName, self.CurrentLineNumber)
-
- Ext = ""
- if self.__GetNextToken():
- Pattern = re.compile(r'([a-zA-Z][a-zA-Z0-9]*)')
- if Pattern.match(self.__Token):
- Ext = self.__Token
- return '.' + Ext
- else:
- raise Warning("Unknown file extension At Line ", self.FileName, self.CurrentLineNumber)
-
- else:
- raise Warning("expected file extension At Line ", self.FileName, self.CurrentLineNumber)
-
- ## __GetRuleFileStatement() method
- #
- # Get rule contents
- #
- # @param self The object pointer
- # @retval Rule Rule object
- #
- def __GetRuleFileStatements(self):
-
- if not self.__IsKeyword("FILE"):
- raise Warning("expected FILE At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextWord():
- raise Warning("expected FFS type At Line ", self.FileName, self.CurrentLineNumber)
-
- Type = self.__Token.strip().upper()
- if Type not in ("RAW", "FREEFORM", "SEC", "PEI_CORE", "PEIM",\
- "PEI_DXE_COMBO", "DRIVER", "DXE_CORE", "APPLICATION", "FV_IMAGE", "SMM", "SMM_CORE"):
- raise Warning("Unknown FV type At line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsKeyword("$(NAMED_GUID)"):
- if not self.__GetNextWord():
- raise Warning("expected $(NAMED_GUID)", self.FileName, self.CurrentLineNumber)
- if self.__Token == 'PCD':
- if not self.__IsToken( "("):
- raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
- PcdPair = self.__GetNextPcdName()
- if not self.__IsToken( ")"):
- raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
- self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
-
- NameGuid = self.__Token
-
- KeepReloc = None
- if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
- if self.__FileCouldHaveRelocFlag(Type):
- if self.__Token == 'RELOCS_STRIPPED':
- KeepReloc = False
- else:
- KeepReloc = True
- else:
- raise Warning("File type %s could not have reloc strip flag At Line %d" % (Type, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
-
- KeyStringList = []
- if self.__GetNextToken():
- Pattern = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
- if Pattern.match(self.__Token):
- KeyStringList.append(self.__Token)
- if self.__IsToken(","):
- while self.__GetNextToken():
- if not Pattern.match(self.__Token):
- raise Warning("expected KeyString \"Target_Tag_Arch\" At Line ", self.FileName, self.CurrentLineNumber)
- KeyStringList.append(self.__Token)
-
- if not self.__IsToken(","):
- break
-
- else:
- self.__UndoToken()
-
-
- Fixed = False
- if self.__IsKeyword("Fixed", True):
- Fixed = True
-
- CheckSum = False
- if self.__IsKeyword("CheckSum", True):
- CheckSum = True
-
- AlignValue = ""
- if self.__GetAlignment():
- if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
- raise Warning("Incorrect alignment At Line ", self.FileName, self.CurrentLineNumber)
- AlignValue = self.__Token
-
- if self.__IsToken("{"):
- # Complex file rule expected
- Rule = RuleComplexFile.RuleComplexFile()
- Rule.FvFileType = Type
- Rule.NameGuid = NameGuid
- Rule.Alignment = AlignValue
- Rule.CheckSum = CheckSum
- Rule.Fixed = Fixed
- Rule.KeyStringList = KeyStringList
- if KeepReloc != None:
- Rule.KeepReloc = KeepReloc
-
- while True:
- IsEncapsulate = self.__GetRuleEncapsulationSection(Rule)
- IsLeaf = self.__GetEfiSection(Rule)
- if not IsEncapsulate and not IsLeaf:
- break
-
- if not self.__IsToken("}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
-
- return Rule
-
- elif self.__IsToken("|"):
- # Ext rule expected
- Ext = self.__GetFileExtension()
-
- Rule = RuleSimpleFile.RuleSimpleFile()
-
- Rule.FvFileType = Type
- Rule.NameGuid = NameGuid
- Rule.Alignment = AlignValue
- Rule.CheckSum = CheckSum
- Rule.Fixed = Fixed
- Rule.FileExtension = Ext
- Rule.KeyStringList = KeyStringList
- if KeepReloc != None:
- Rule.KeepReloc = KeepReloc
-
- return Rule
-
- else:
- # Simple file rule expected
- if not self.__GetNextWord():
- raise Warning("expected leaf section type At Line ", self.FileName, self.CurrentLineNumber)
-
- SectionName = self.__Token
-
- if SectionName not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
- "UI", "PEI_DEPEX", "VERSION", "SUBTYPE_GUID", "SMM_DEPEX"):
- raise Warning("Unknown leaf section name '%s'" % SectionName, self.FileName, self.CurrentLineNumber)
-
-
- if self.__IsKeyword("Fixed", True):
- Fixed = True
-
- if self.__IsKeyword("CheckSum", True):
- CheckSum = True
-
- if self.__GetAlignment():
- if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
- raise Warning("Incorrect alignment At Line ", self.FileName, self.CurrentLineNumber)
- if self.__Token == 'Auto' and (not SectionName == 'PE32') and (not SectionName == 'TE'):
- raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
- AlignValue = self.__Token
-
- if not self.__GetNextToken():
- raise Warning("expected File name At Line ", self.FileName, self.CurrentLineNumber)
-
- Rule = RuleSimpleFile.RuleSimpleFile()
- Rule.SectionType = SectionName
- Rule.FvFileType = Type
- Rule.NameGuid = NameGuid
- Rule.Alignment = AlignValue
- Rule.CheckSum = CheckSum
- Rule.Fixed = Fixed
- Rule.FileName = self.__Token
- Rule.KeyStringList = KeyStringList
- if KeepReloc != None:
- Rule.KeepReloc = KeepReloc
- return Rule
-
- ## __GetEfiSection() method
- #
- # Get section list for Rule
- #
- # @param self The object pointer
- # @param Obj for whom section is got
- # @retval True Successfully find section statement
- # @retval False Not able to find section statement
- #
- def __GetEfiSection(self, Obj):
-
- OldPos = self.GetFileBufferPos()
- if not self.__GetNextWord():
- return False
- SectionName = self.__Token
-
- if SectionName not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
- "UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
- self.__UndoToken()
- return False
-
- if SectionName == "FV_IMAGE":
- FvImageSectionObj = FvImageSection.FvImageSection()
- if self.__IsKeyword("FV_IMAGE"):
- pass
- if self.__IsToken( "{"):
- FvObj = Fv.FV()
- self.__GetDefineStatements(FvObj)
- self.__GetBlockStatement(FvObj)
- self.__GetSetStatements(FvObj)
- self.__GetFvAlignment(FvObj)
- self.__GetFvAttributes(FvObj)
- self.__GetAprioriSection(FvObj)
- self.__GetAprioriSection(FvObj)
-
- while True:
- IsInf = self.__GetInfStatement(FvObj)
- IsFile = self.__GetFileStatement(FvObj)
- if not IsInf and not IsFile:
- break
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
- FvImageSectionObj.Fv = FvObj
- FvImageSectionObj.FvName = None
-
- else:
- if not self.__IsKeyword("FV"):
- raise Warning("expected 'FV' At Line ", self.FileName, self.CurrentLineNumber)
- FvImageSectionObj.FvFileType = self.__Token
-
- if self.__GetAlignment():
- if self.__Token not in ("8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
- raise Warning("Incorrect alignment At Line ", self.FileName, self.CurrentLineNumber)
- FvImageSectionObj.Alignment = self.__Token
-
- if self.__IsToken('|'):
- FvImageSectionObj.FvFileExtension = self.__GetFileExtension()
- elif self.__GetNextToken():
- if self.__Token not in ("}", "COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
- "UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
- FvImageSectionObj.FvFileName = self.__Token
- else:
- self.__UndoToken()
- else:
- raise Warning("expected FV file name At Line ", self.FileName, self.CurrentLineNumber)
-
- Obj.SectionList.append(FvImageSectionObj)
- return True
-
- EfiSectionObj = EfiSection.EfiSection()
- EfiSectionObj.SectionType = SectionName
-
- if not self.__GetNextToken():
- raise Warning("expected file type At Line ", self.FileName, self.CurrentLineNumber)
-
- if self.__Token == "STRING":
- if not self.__RuleSectionCouldHaveString(EfiSectionObj.SectionType):
- raise Warning("%s section could NOT have string data At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken('='):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected Quoted String At Line ", self.FileName, self.CurrentLineNumber)
-
- if self.__GetStringData():
- EfiSectionObj.StringData = self.__Token
-
- if self.__IsKeyword("BUILD_NUM"):
- if not self.__RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
- raise Warning("%s section could NOT have BUILD_NUM At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
- if not self.__GetNextToken():
- raise Warning("expected Build number At Line ", self.FileName, self.CurrentLineNumber)
- EfiSectionObj.BuildNum = self.__Token
-
- else:
- EfiSectionObj.FileType = self.__Token
- self.__CheckRuleSectionFileType(EfiSectionObj.SectionType, EfiSectionObj.FileType)
-
- if self.__IsKeyword("Optional"):
- if not self.__RuleSectionCouldBeOptional(EfiSectionObj.SectionType):
- raise Warning("%s section could NOT be optional At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
- EfiSectionObj.Optional = True
-
- if self.__IsKeyword("BUILD_NUM"):
- if not self.__RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
- raise Warning("%s section could NOT have BUILD_NUM At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
- if not self.__GetNextToken():
- raise Warning("expected Build number At Line ", self.FileName, self.CurrentLineNumber)
- EfiSectionObj.BuildNum = self.__Token
-
- if self.__GetAlignment():
- if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
- raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
- if self.__Token == 'Auto' and (not SectionName == 'PE32') and (not SectionName == 'TE'):
- raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
- EfiSectionObj.Alignment = self.__Token
-
- if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
- if self.__SectionCouldHaveRelocFlag(EfiSectionObj.SectionType):
- if self.__Token == 'RELOCS_STRIPPED':
- EfiSectionObj.KeepReloc = False
- else:
- EfiSectionObj.KeepReloc = True
- if Obj.KeepReloc != None and Obj.KeepReloc != EfiSectionObj.KeepReloc:
- raise Warning("Section type %s has reloc strip flag conflict with Rule At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
- else:
- raise Warning("Section type %s could not have reloc strip flag At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
-
-
- if self.__IsToken('|'):
- EfiSectionObj.FileExtension = self.__GetFileExtension()
- elif self.__GetNextToken():
- if self.__Token not in ("}", "COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
- "UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
-
- if self.__Token.startswith('PCD'):
- self.__UndoToken()
- self.__GetNextWord()
-
- if self.__Token == 'PCD':
- if not self.__IsToken( "("):
- raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
- PcdPair = self.__GetNextPcdName()
- if not self.__IsToken( ")"):
- raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
- self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
-
- EfiSectionObj.FileName = self.__Token
-
- else:
- self.__UndoToken()
- else:
- raise Warning("expected section file name At Line ", self.FileName, self.CurrentLineNumber)
-
- Obj.SectionList.append(EfiSectionObj)
- return True
-
- ## __RuleSectionCouldBeOptional() method
- #
- # Get whether a section could be optional
- #
- # @param self The object pointer
- # @param SectionType The section type to check
- # @retval True section could be optional
- # @retval False section never optional
- #
- def __RuleSectionCouldBeOptional(self, SectionType):
- if SectionType in ("DXE_DEPEX", "UI", "VERSION", "PEI_DEPEX", "RAW", "SMM_DEPEX"):
- return True
- else:
- return False
-
- ## __RuleSectionCouldHaveBuildNum() method
- #
- # Get whether a section could have build number information
- #
- # @param self The object pointer
- # @param SectionType The section type to check
- # @retval True section could have build number information
- # @retval False section never have build number information
- #
- def __RuleSectionCouldHaveBuildNum(self, SectionType):
- if SectionType in ("VERSION"):
- return True
- else:
- return False
-
- ## __RuleSectionCouldHaveString() method
- #
- # Get whether a section could have string
- #
- # @param self The object pointer
- # @param SectionType The section type to check
- # @retval True section could have string
- # @retval False section never have string
- #
- def __RuleSectionCouldHaveString(self, SectionType):
- if SectionType in ("UI", "VERSION"):
- return True
- else:
- return False
-
- ## __CheckRuleSectionFileType() method
- #
- # Get whether a section matches a file type
- #
- # @param self The object pointer
- # @param SectionType The section type to check
- # @param FileType The file type to check
- #
- def __CheckRuleSectionFileType(self, SectionType, FileType):
- if SectionType == "COMPAT16":
- if FileType not in ("COMPAT16", "SEC_COMPAT16"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
- elif SectionType == "PE32":
- if FileType not in ("PE32", "SEC_PE32"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
- elif SectionType == "PIC":
- if FileType not in ("PIC", "PIC"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
- elif SectionType == "TE":
- if FileType not in ("TE", "SEC_TE"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
- elif SectionType == "RAW":
- if FileType not in ("BIN", "SEC_BIN", "RAW", "ASL", "ACPI"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
- elif SectionType == "DXE_DEPEX" or SectionType == "SMM_DEPEX":
- if FileType not in ("DXE_DEPEX", "SEC_DXE_DEPEX", "SMM_DEPEX"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
- elif SectionType == "UI":
- if FileType not in ("UI", "SEC_UI"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
- elif SectionType == "VERSION":
- if FileType not in ("VERSION", "SEC_VERSION"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
- elif SectionType == "PEI_DEPEX":
- if FileType not in ("PEI_DEPEX", "SEC_PEI_DEPEX"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
- elif SectionType == "GUID":
- if FileType not in ("PE32", "SEC_GUID"):
- raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
-
- ## __GetRuleEncapsulationSection() method
- #
- # Get encapsulation section for Rule
- #
- # @param self The object pointer
- # @param Rule for whom section is got
- # @retval True Successfully find section statement
- # @retval False Not able to find section statement
- #
- def __GetRuleEncapsulationSection(self, Rule):
-
- if self.__IsKeyword( "COMPRESS"):
- Type = "PI_STD"
- if self.__IsKeyword("PI_STD") or self.__IsKeyword("PI_NONE"):
- Type = self.__Token
-
- if not self.__IsToken("{"):
- raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
-
- CompressSectionObj = CompressSection.CompressSection()
-
- CompressSectionObj.CompType = Type
- # Recursive sections...
- while True:
- IsEncapsulate = self.__GetRuleEncapsulationSection(CompressSectionObj)
- IsLeaf = self.__GetEfiSection(CompressSectionObj)
- if not IsEncapsulate and not IsLeaf:
- break
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
- Rule.SectionList.append(CompressSectionObj)
-
- return True
-
- elif self.__IsKeyword( "GUIDED"):
- GuidValue = None
- if self.__GetNextGuid():
- GuidValue = self.__Token
-
- if self.__IsKeyword( "$(NAMED_GUID)"):
- GuidValue = self.__Token
-
- AttribDict = self.__GetGuidAttrib()
-
- if not self.__IsToken("{"):
- raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
- GuidSectionObj = GuidSection.GuidSection()
- GuidSectionObj.NameGuid = GuidValue
- GuidSectionObj.SectionType = "GUIDED"
- GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
- GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
-
- # Efi sections...
- while True:
- IsEncapsulate = self.__GetRuleEncapsulationSection(GuidSectionObj)
- IsLeaf = self.__GetEfiSection(GuidSectionObj)
- if not IsEncapsulate and not IsLeaf:
- break
-
- if not self.__IsToken( "}"):
- raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
- Rule.SectionList.append(GuidSectionObj)
-
- return True
-
- return False
-
- ## __GetVtf() method
- #
- # Get VTF section contents and store its data into VTF list of self.Profile
- #
- # @param self The object pointer
- # @retval True Successfully find a VTF
- # @retval False Not able to find a VTF
- #
- def __GetVtf(self):
-
- if not self.__GetNextToken():
- return False
-
- S = self.__Token.upper()
- if S.startswith("[") and not S.startswith("[VTF."):
- if not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
- raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
- self.__UndoToken()
- return False
-
- self.__UndoToken()
- if not self.__IsToken("[VTF.", True):
- FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
- print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
- % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
- raise Warning("expected [VTF.] At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__SkipToToken("."):
- raise Warning("expected '.' At Line ", self.FileName, self.CurrentLineNumber)
-
- Arch = self.__SkippedChars.rstrip(".").upper()
- if Arch not in ("IA32", "X64", "IPF", "ARM", "AARCH64"):
- raise Warning("Unknown Arch At line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextWord():
- raise Warning("expected VTF name At Line ", self.FileName, self.CurrentLineNumber)
- Name = self.__Token.upper()
-
- VtfObj = Vtf.Vtf()
- VtfObj.UiName = Name
- VtfObj.KeyArch = Arch
-
- if self.__IsToken(","):
- if not self.__GetNextWord():
- raise Warning("expected Arch list At Line ", self.FileName, self.CurrentLineNumber)
- if self.__Token.upper() not in ("IA32", "X64", "IPF", "ARM", "AARCH64"):
- raise Warning("Unknown Arch At line ", self.FileName, self.CurrentLineNumber)
- VtfObj.ArchList = self.__Token.upper()
-
- if not self.__IsToken( "]"):
- raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
-
- if self.__IsKeyword("IA32_RST_BIN"):
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected Reset file At Line ", self.FileName, self.CurrentLineNumber)
-
- VtfObj.ResetBin = self.__Token
-
- while self.__GetComponentStatement(VtfObj):
- pass
-
- self.Profile.VtfList.append(VtfObj)
- return True
-
- ## __GetComponentStatement() method
- #
- # Get components in VTF
- #
- # @param self The object pointer
- # @param VtfObj for whom component is got
- # @retval True Successfully find a component
- # @retval False Not able to find a component
- #
- def __GetComponentStatement(self, VtfObj):
-
- if not self.__IsKeyword("COMP_NAME"):
- return False
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextWord():
- raise Warning("expected Component Name At Line ", self.FileName, self.CurrentLineNumber)
-
- CompStatementObj = ComponentStatement.ComponentStatement()
- CompStatementObj.CompName = self.__Token
-
- if not self.__IsKeyword("COMP_LOC"):
- raise Warning("expected COMP_LOC At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- CompStatementObj.CompLoc = ""
- if self.__GetNextWord():
- CompStatementObj.CompLoc = self.__Token
- if self.__IsToken('|'):
- if not self.__GetNextWord():
- raise Warning("Expected Region Name At Line ", self.FileName, self.CurrentLineNumber)
-
- if self.__Token not in ("F", "N", "S"): #, "H", "L", "PH", "PL"): not support
- raise Warning("Unknown location type At line ", self.FileName, self.CurrentLineNumber)
-
- CompStatementObj.FilePos = self.__Token
- else:
- self.CurrentLineNumber += 1
- self.CurrentOffsetWithinLine = 0
-
- if not self.__IsKeyword("COMP_TYPE"):
- raise Warning("expected COMP_TYPE At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected Component type At Line ", self.FileName, self.CurrentLineNumber)
- if self.__Token not in ("FIT", "PAL_B", "PAL_A", "OEM"):
- if not self.__Token.startswith("0x") or len(self.__Token) < 3 or len(self.__Token) > 4 or \
- not self.__HexDigit(self.__Token[2]) or not self.__HexDigit(self.__Token[-1]):
- raise Warning("Unknown location type At line ", self.FileName, self.CurrentLineNumber)
- CompStatementObj.CompType = self.__Token
-
- if not self.__IsKeyword("COMP_VER"):
- raise Warning("expected COMP_VER At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected Component version At Line ", self.FileName, self.CurrentLineNumber)
-
- Pattern = re.compile('-$|[0-9]{0,1}[0-9]{1}\.[0-9]{0,1}[0-9]{1}')
- if Pattern.match(self.__Token) == None:
- raise Warning("Unknown version format At line ", self.FileName, self.CurrentLineNumber)
- CompStatementObj.CompVer = self.__Token
-
- if not self.__IsKeyword("COMP_CS"):
- raise Warning("expected COMP_CS At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected Component CS At Line ", self.FileName, self.CurrentLineNumber)
- if self.__Token not in ("1", "0"):
- raise Warning("Unknown Component CS At line ", self.FileName, self.CurrentLineNumber)
- CompStatementObj.CompCs = self.__Token
-
-
- if not self.__IsKeyword("COMP_BIN"):
- raise Warning("expected COMP_BIN At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected Component file At Line ", self.FileName, self.CurrentLineNumber)
-
- CompStatementObj.CompBin = self.__Token
-
- if not self.__IsKeyword("COMP_SYM"):
- raise Warning("expected COMP_SYM At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__GetNextToken():
- raise Warning("expected Component symbol file At Line ", self.FileName, self.CurrentLineNumber)
-
- CompStatementObj.CompSym = self.__Token
-
- if not self.__IsKeyword("COMP_SIZE"):
- raise Warning("expected COMP_SIZE At Line ", self.FileName, self.CurrentLineNumber)
-
- if not self.__IsToken("="):
- raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
-
- if self.__IsToken("-"):
- CompStatementObj.CompSize = self.__Token
- elif self.__GetNextDecimalNumber():
- CompStatementObj.CompSize = self.__Token
- elif self.__GetNextHexNumber():
- CompStatementObj.CompSize = self.__Token
- else:
- raise Warning("Unknown size At line ", self.FileName, self.CurrentLineNumber)
-
- VtfObj.ComponentStatementList.append(CompStatementObj)
- return True
-
- ## __GetFvInFd() method
- #
- # Get FV list contained in FD
- #
- # @param self The object pointer
- # @param FdName FD name
- # @retval FvList list of FV in FD
- #
- def __GetFvInFd (self, FdName):
-
- FvList = []
- if FdName.upper() in self.Profile.FdDict.keys():
- FdObj = self.Profile.FdDict[FdName.upper()]
- for elementRegion in FdObj.RegionList:
- if elementRegion.RegionType == 'FV':
- for elementRegionData in elementRegion.RegionDataList:
- if elementRegionData != None and elementRegionData.upper() not in FvList:
- FvList.append(elementRegionData.upper())
- return FvList
-
- ## __GetReferencedFdFvTuple() method
- #
- # Get FD and FV list referenced by a FFS file
- #
- # @param self The object pointer
- # @param FfsFile contains sections to be searched
- # @param RefFdList referenced FD by section
- # @param RefFvList referenced FV by section
- #
- def __GetReferencedFdFvTuple(self, FvObj, RefFdList = [], RefFvList = []):
-
- for FfsObj in FvObj.FfsList:
- if isinstance(FfsObj, FfsFileStatement.FileStatement):
- if FfsObj.FvName != None and FfsObj.FvName.upper() not in RefFvList:
- RefFvList.append(FfsObj.FvName.upper())
- elif FfsObj.FdName != None and FfsObj.FdName.upper() not in RefFdList:
- RefFdList.append(FfsObj.FdName.upper())
- else:
- self.__GetReferencedFdFvTupleFromSection(FfsObj, RefFdList, RefFvList)
-
- ## __GetReferencedFdFvTupleFromSection() method
- #
- # Get FD and FV list referenced by a FFS section
- #
- # @param self The object pointer
- # @param FfsFile contains sections to be searched
- # @param FdList referenced FD by section
- # @param FvList referenced FV by section
- #
- def __GetReferencedFdFvTupleFromSection(self, FfsFile, FdList = [], FvList = []):
-
- SectionStack = []
- SectionStack.extend(FfsFile.SectionList)
- while SectionStack != []:
- SectionObj = SectionStack.pop()
- if isinstance(SectionObj, FvImageSection.FvImageSection):
- if SectionObj.FvName != None and SectionObj.FvName.upper() not in FvList:
- FvList.append(SectionObj.FvName.upper())
- if SectionObj.Fv != None and SectionObj.Fv.UiFvName != None and SectionObj.Fv.UiFvName.upper() not in FvList:
- FvList.append(SectionObj.Fv.UiFvName.upper())
- self.__GetReferencedFdFvTuple(SectionObj.Fv, FdList, FvList)
-
- if isinstance(SectionObj, CompressSection.CompressSection) or isinstance(SectionObj, GuidSection.GuidSection):
- SectionStack.extend(SectionObj.SectionList)
-
- ## CycleReferenceCheck() method
- #
- # Check whether cycle reference exists in FDF
- #
- # @param self The object pointer
- # @retval True cycle reference exists
- # @retval False Not exists cycle reference
- #
- def CycleReferenceCheck(self):
-
- CycleRefExists = False
-
- try:
- for FvName in self.Profile.FvDict.keys():
- LogStr = "Cycle Reference Checking for FV: %s\n" % FvName
- RefFvStack = []
- RefFvStack.append(FvName)
- FdAnalyzedList = []
-
- while RefFvStack != []:
- FvNameFromStack = RefFvStack.pop()
- if FvNameFromStack.upper() in self.Profile.FvDict.keys():
- FvObj = self.Profile.FvDict[FvNameFromStack.upper()]
- else:
- continue
-
- RefFdList = []
- RefFvList = []
- self.__GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
-
- for RefFdName in RefFdList:
- if RefFdName in FdAnalyzedList:
- continue
-
- LogStr += "FD %s is referenced by FV %s\n" % (RefFdName, FvNameFromStack)
- FvInFdList = self.__GetFvInFd(RefFdName)
- if FvInFdList != []:
- LogStr += "FD %s contains FV: " % RefFdName
- for FvObj in FvInFdList:
- LogStr += FvObj
- LogStr += ' \n'
- if FvObj not in RefFvStack:
- RefFvStack.append(FvObj)
-
- if FvName in RefFvStack:
- CycleRefExists = True
- raise Warning(LogStr)
- FdAnalyzedList.append(RefFdName)
-
- for RefFvName in RefFvList:
- LogStr += "FV %s is referenced by FV %s\n" % (RefFvName, FvNameFromStack)
- if RefFvName not in RefFvStack:
- RefFvStack.append(RefFvName)
-
- if FvName in RefFvStack:
- CycleRefExists = True
- raise Warning(LogStr)
-
- except Warning:
- print LogStr
-
- finally:
- return CycleRefExists
-
-if __name__ == "__main__":
- import sys
- try:
- test_file = sys.argv[1]
- except IndexError, v:
- print "Usage: %s filename" % sys.argv[0]
- sys.exit(1)
-
- parser = FdfParser(test_file)
- try:
- parser.ParseFile()
- parser.CycleReferenceCheck()
- except Warning, X:
- print X.message
- else:
- print "Success!"
-
diff --git a/BaseTools/Source/Python/Common/GlobalData.py b/BaseTools/Source/Python/Common/GlobalData.py
deleted file mode 100644
index 667877e907..0000000000
--- a/BaseTools/Source/Python/Common/GlobalData.py
+++ /dev/null
@@ -1,89 +0,0 @@
-## @file
-# This file is used to define common static strings used by INF/DEC/DSC files
-#
-# Copyright (c) 2007 - 2016, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-import re
-
-gIsWindows = None
-
-gEdkCompatibilityPkg = "EdkCompatibilityPkg"
-gWorkspace = "."
-gEdkSource = "EdkCompatibilityPkg"
-gEfiSource = "."
-gEcpSource = "EdkCompatibilityPkg"
-
-gOptions = None
-gCaseInsensitive = False
-gAllFiles = None
-gCommand = None
-
-gGlobalDefines = {}
-gPlatformDefines = {}
-# PCD name and value pair for fixed at build and feature flag
-gPlatformPcds = {}
-# PCDs with type that are not fixed at build and feature flag
-gPlatformOtherPcds = {}
-gActivePlatform = None
-gCommandLineDefines = {}
-gEdkGlobal = {}
-gOverrideDir = {}
-gCommandMaxLength = 4096
-# for debug trace purpose when problem occurs
-gProcessingFile = ''
-gBuildingModule = ''
-
-## Regular expression for matching macro used in DSC/DEC/INF file inclusion
-gMacroRefPattern = re.compile("\$\(([A-Z][_A-Z0-9]*)\)", re.UNICODE)
-gMacroDefPattern = re.compile("^(DEFINE|EDK_GLOBAL)[ \t]+")
-gMacroNamePattern = re.compile("^[A-Z][A-Z0-9_]*$")
-# C-style wide string pattern
-gWideStringPattern = re.compile('(\W|\A)L"')
-#
-# A global variable for whether current build in AutoGen phase or not.
-#
-gAutoGenPhase = False
-
-#
-# The Conf dir outside the workspace dir
-#
-gConfDirectory = ''
-
-#
-# The relative default database file path
-#
-gDatabasePath = ".cache/build.db"
-
-#
-# Build flag for binary build
-#
-gIgnoreSource = False
-
-#
-# FDF parser
-#
-gFdfParser = None
-
-#
-# If a module is built more than once with different PCDs or library classes
-# a temporary INF file with same content is created, the temporary file is removed
-# when build exits.
-#
-gTempInfs = []
-
-BuildOptionPcd = []
-
-#
-# Mixed PCD name dict
-#
-MixedPcd = {}
-
-# Pcd name for the Pcd which used in the Conditional directives
-gConditionalPcds = []
diff --git a/BaseTools/Source/Python/Common/Identification.py b/BaseTools/Source/Python/Common/Identification.py
deleted file mode 100644
index f43150dfc1..0000000000
--- a/BaseTools/Source/Python/Common/Identification.py
+++ /dev/null
@@ -1,58 +0,0 @@
-## @file
-# This file is used to define the identification of INF/DEC/DSC files
-#
-# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-## Identification
-#
-# This class defined basic Identification information structure which is used by INF/DEC/DSC files
-#
-# @param object: Inherited from object class
-#
-# @var FileName: To store data for Filename
-# @var FileFullPath: To store data for full path of the file
-# @var FileRelativePath: To store data for relative path of the file
-# @var RunStatus: Status of build system running
-#
-class Identification(object):
- def __init__(self):
- self.FileName = ''
- self.FileFullPath = ''
- self.FileRelativePath = ''
- self.PackagePath = ''
-
- ## GetFileName
- #
- # Reserved
- #
- def GetFileName(self, FileFullPath, FileRelativePath):
- pass
-
- ## GetFileName
- #
- # Reserved
- #
- def GetFileFullPath(self, FileName, FileRelativePath):
- pass
-
- ## GetFileName
- #
- # Reserved
- #
- def GetFileRelativePath(self, FileName, FileFullPath):
- pass
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
-if __name__ == '__main__':
- id = Identification()
diff --git a/BaseTools/Source/Python/Common/InfClassObject.py b/BaseTools/Source/Python/Common/InfClassObject.py
deleted file mode 100644
index f24e4e41a0..0000000000
--- a/BaseTools/Source/Python/Common/InfClassObject.py
+++ /dev/null
@@ -1,1116 +0,0 @@
-## @file
-# This file is used to define each component of INF file
-#
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import Common.LongFilePathOs as os
-import re
-import EdkLogger
-from CommonDataClass.CommonClass import LibraryClassClass
-from CommonDataClass.ModuleClass import *
-from String import *
-from DataType import *
-from Identification import *
-from Dictionary import *
-from BuildToolError import *
-from Misc import sdict
-import GlobalData
-from Table.TableInf import TableInf
-import Database
-from Parsing import *
-from Common.LongFilePathSupport import OpenLongFilePath as open
-
-#
-# Global variable
-#
-Section = {TAB_UNKNOWN.upper() : MODEL_UNKNOWN,
- TAB_INF_DEFINES.upper() : MODEL_META_DATA_HEADER,
- TAB_BUILD_OPTIONS.upper() : MODEL_META_DATA_BUILD_OPTION,
- TAB_INCLUDES.upper() : MODEL_EFI_INCLUDE,
- TAB_LIBRARIES.upper() : MODEL_EFI_LIBRARY_INSTANCE,
- TAB_LIBRARY_CLASSES.upper() : MODEL_EFI_LIBRARY_CLASS,
- TAB_PACKAGES.upper() : MODEL_META_DATA_PACKAGE,
- TAB_NMAKE.upper() : MODEL_META_DATA_NMAKE,
- TAB_INF_FIXED_PCD.upper() : MODEL_PCD_FIXED_AT_BUILD,
- TAB_INF_PATCH_PCD.upper() : MODEL_PCD_PATCHABLE_IN_MODULE,
- TAB_INF_FEATURE_PCD.upper() : MODEL_PCD_FEATURE_FLAG,
- TAB_INF_PCD_EX.upper() : MODEL_PCD_DYNAMIC_EX,
- TAB_INF_PCD.upper() : MODEL_PCD_DYNAMIC,
- TAB_SOURCES.upper() : MODEL_EFI_SOURCE_FILE,
- TAB_GUIDS.upper() : MODEL_EFI_GUID,
- TAB_PROTOCOLS.upper() : MODEL_EFI_PROTOCOL,
- TAB_PPIS.upper() : MODEL_EFI_PPI,
- TAB_DEPEX.upper() : MODEL_EFI_DEPEX,
- TAB_BINARIES.upper() : MODEL_EFI_BINARY_FILE,
- TAB_USER_EXTENSIONS.upper() : MODEL_META_DATA_USER_EXTENSION
- }
-
-gComponentType2ModuleType = {
- "LIBRARY" : "BASE",
- "SECURITY_CORE" : "SEC",
- "PEI_CORE" : "PEI_CORE",
- "COMBINED_PEIM_DRIVER" : "PEIM",
- "PIC_PEIM" : "PEIM",
- "RELOCATABLE_PEIM" : "PEIM",
- "PE32_PEIM" : "PEIM",
- "BS_DRIVER" : "DXE_DRIVER",
- "RT_DRIVER" : "DXE_RUNTIME_DRIVER",
- "SAL_RT_DRIVER" : "DXE_SAL_DRIVER",
- "APPLICATION" : "UEFI_APPLICATION",
- "LOGO" : "BASE",
-}
-
-gNmakeFlagPattern = re.compile("(?:EBC_)?([A-Z]+)_(?:STD_|PROJ_|ARCH_)?FLAGS(?:_DLL|_ASL|_EXE)?", re.UNICODE)
-gNmakeFlagName2ToolCode = {
- "C" : "CC",
- "LIB" : "SLINK",
- "LINK" : "DLINK",
-}
-
-class InfHeader(ModuleHeaderClass):
- _Mapping_ = {
- #
- # Required Fields
- #
- TAB_INF_DEFINES_BASE_NAME : "Name",
- TAB_INF_DEFINES_FILE_GUID : "Guid",
- TAB_INF_DEFINES_MODULE_TYPE : "ModuleType",
- TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION : "UefiSpecificationVersion",
- TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION : "UefiSpecificationVersion",
- TAB_INF_DEFINES_EDK_RELEASE_VERSION : "EdkReleaseVersion",
- #
- # Optional Fields
- #
- TAB_INF_DEFINES_INF_VERSION : "InfVersion",
- TAB_INF_DEFINES_BINARY_MODULE : "BinaryModule",
- TAB_INF_DEFINES_COMPONENT_TYPE : "ComponentType",
- TAB_INF_DEFINES_MAKEFILE_NAME : "MakefileName",
- TAB_INF_DEFINES_BUILD_NUMBER : "BuildNumber",
- TAB_INF_DEFINES_BUILD_TYPE : "BuildType",
- TAB_INF_DEFINES_FFS_EXT : "FfsExt",
- TAB_INF_DEFINES_FV_EXT : "FvExt",
- TAB_INF_DEFINES_SOURCE_FV : "SourceFv",
- TAB_INF_DEFINES_VERSION_NUMBER : "VersionNumber",
- TAB_INF_DEFINES_VERSION_STRING : "VersionString",
- TAB_INF_DEFINES_VERSION : "Version",
- TAB_INF_DEFINES_PCD_IS_DRIVER : "PcdIsDriver",
- TAB_INF_DEFINES_TIANO_EDK_FLASHMAP_H : "TianoEdkFlashMap_h",
- TAB_INF_DEFINES_SHADOW : "Shadow",
-# TAB_INF_DEFINES_LIBRARY_CLASS : "LibraryClass",
-# TAB_INF_DEFINES_ENTRY_POINT : "ExternImages",
-# TAB_INF_DEFINES_UNLOAD_IMAGE : "ExternImages",
-# TAB_INF_DEFINES_CONSTRUCTOR : ,
-# TAB_INF_DEFINES_DESTRUCTOR : ,
-# TAB_INF_DEFINES_DEFINE : "Define",
-# TAB_INF_DEFINES_SPEC : "Specification",
-# TAB_INF_DEFINES_CUSTOM_MAKEFILE : "CustomMakefile",
-# TAB_INF_DEFINES_MACRO :
- }
-
- def __init__(self):
- ModuleHeaderClass.__init__(self)
- self.VersionNumber = ''
- self.VersionString = ''
- #print self.__dict__
- def __setitem__(self, key, value):
- self.__dict__[self._Mapping_[key]] = value
- def __getitem__(self, key):
- return self.__dict__[self._Mapping_[key]]
- ## "in" test support
- def __contains__(self, key):
- return key in self._Mapping_
-
-## InfObject
-#
-# This class defined basic Inf object which is used by inheriting
-#
-# @param object: Inherited from object class
-#
-class InfObject(object):
- def __init__(self):
- object.__init__()
-
-## Inf
-#
-# This class defined the structure used in Inf object
-#
-# @param InfObject: Inherited from InfObject class
-# @param Ffilename: Input value for Ffilename of Inf file, default is None
-# @param IsMergeAllArches: Input value for IsMergeAllArches
-# True is to merge all arches
-# Fales is not to merge all arches
-# default is False
-# @param IsToModule: Input value for IsToModule
-# True is to transfer to ModuleObject automatically
-# False is not to transfer to ModuleObject automatically
-# default is False
-# @param WorkspaceDir: Input value for current workspace directory, default is None
-#
-# @var Identification: To store value for Identification, it is a structure as Identification
-# @var UserExtensions: To store value for UserExtensions
-# @var Module: To store value for Module, it is a structure as ModuleClass
-# @var WorkspaceDir: To store value for WorkspaceDir
-# @var KeyList: To store value for KeyList, a list for all Keys used in Inf
-#
-class Inf(InfObject):
- def __init__(self, Filename=None, IsToDatabase=False, IsToModule=False, WorkspaceDir=None, Database=None, SupArchList=DataType.ARCH_LIST):
- self.Identification = Identification()
- self.Module = ModuleClass()
- self.UserExtensions = ''
- self.WorkspaceDir = WorkspaceDir
- self.SupArchList = SupArchList
- self.IsToDatabase = IsToDatabase
-
- self.Cur = Database.Cur
- self.TblFile = Database.TblFile
- self.TblInf = Database.TblInf
- self.FileID = -1
- #self.TblInf = TableInf(Database.Cur)
-
- self.KeyList = [
- TAB_SOURCES, TAB_BUILD_OPTIONS, TAB_BINARIES, TAB_INCLUDES, TAB_GUIDS,
- TAB_PROTOCOLS, TAB_PPIS, TAB_LIBRARY_CLASSES, TAB_PACKAGES, TAB_LIBRARIES,
- TAB_INF_FIXED_PCD, TAB_INF_PATCH_PCD, TAB_INF_FEATURE_PCD, TAB_INF_PCD,
- TAB_INF_PCD_EX, TAB_DEPEX, TAB_NMAKE, TAB_INF_DEFINES
- ]
- #
- # Upper all KEYs to ignore case sensitive when parsing
- #
- self.KeyList = map(lambda c: c.upper(), self.KeyList)
-
- #
- # Init RecordSet
- #
- self.RecordSet = {}
- for Key in self.KeyList:
- self.RecordSet[Section[Key]] = []
-
- #
- # Load Inf file if filename is not None
- #
- if Filename != None:
- self.LoadInfFile(Filename)
-
- #
- # Transfer to Module Object if IsToModule is True
- #
- if IsToModule:
- self.InfToModule()
-
- ## Transfer to Module Object
- #
- # Transfer all contents of an Inf file to a standard Module Object
- #
- def InfToModule(self):
- #
- # Init global information for the file
- #
- ContainerFile = self.Identification.FileFullPath
-
- #
- # Generate Package Header
- #
- self.GenModuleHeader(ContainerFile)
-
- #
- # Generate BuildOptions
- #
- self.GenBuildOptions(ContainerFile)
-
- #
- # Generate Includes
- #
- self.GenIncludes(ContainerFile)
-
- #
- # Generate Libraries
- #
- self.GenLibraries(ContainerFile)
-
- #
- # Generate LibraryClasses
- #
- self.GenLibraryClasses(ContainerFile)
-
- #
- # Generate Packages
- #
- self.GenPackages(ContainerFile)
-
- #
- # Generate Nmakes
- #
- self.GenNmakes(ContainerFile)
-
- #
- # Generate Pcds
- #
- self.GenPcds(ContainerFile)
-
- #
- # Generate Sources
- #
- self.GenSources(ContainerFile)
-
- #
- # Generate UserExtensions
- #
- self.GenUserExtensions(ContainerFile)
-
- #
- # Generate Guids
- #
- self.GenGuidProtocolPpis(DataType.TAB_GUIDS, ContainerFile)
-
- #
- # Generate Protocols
- #
- self.GenGuidProtocolPpis(DataType.TAB_PROTOCOLS, ContainerFile)
-
- #
- # Generate Ppis
- #
- self.GenGuidProtocolPpis(DataType.TAB_PPIS, ContainerFile)
-
- #
- # Generate Depexes
- #
- self.GenDepexes(ContainerFile)
-
- #
- # Generate Binaries
- #
- self.GenBinaries(ContainerFile)
-
- ## Parse [Defines] section
- #
- # Parse [Defines] section into InfDefines object
- #
- # @param InfFile The path of the INF file
- # @param Section The title of "Defines" section
- # @param Lines The content of "Defines" section
- #
- def ParseDefines(self, InfFile, Section, Lines):
- TokenList = Section.split(TAB_SPLIT)
- if len(TokenList) == 3:
- RaiseParserError(Section, "Defines", InfFile, "[xx.yy.%s] format (with platform) is not supported")
- if len(TokenList) == 2:
- Arch = TokenList[1].upper()
- else:
- Arch = TAB_ARCH_COMMON
-
- if Arch not in self.Defines:
- self.Defines[Arch] = InfDefines()
- GetSingleValueOfKeyFromLines(Lines, self.Defines[Arch].DefinesDictionary,
- TAB_COMMENT_SPLIT, TAB_EQUAL_SPLIT, False, None)
-
- ## Load Inf file
- #
- # Load the file if it exists
- #
- # @param Filename: Input value for filename of Inf file
- #
- def LoadInfFile(self, Filename):
- #
- # Insert a record for file
- #
- Filename = NormPath(Filename)
- self.Identification.FileFullPath = Filename
- (self.Identification.FileRelativePath, self.Identification.FileName) = os.path.split(Filename)
- self.FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_INF)
-
- #
- # Init InfTable
- #
- #self.TblInf.Table = "Inf%s" % self.FileID
- #self.TblInf.Create()
-
- #
- # Init common datas
- #
- IfDefList, SectionItemList, CurrentSection, ArchList, ThirdList, IncludeFiles = \
- [], [], TAB_UNKNOWN, [], [], []
- LineNo = 0
-
- #
- # Parse file content
- #
- IsFindBlockComment = False
- ReservedLine = ''
- for Line in open(Filename, 'r'):
- LineNo = LineNo + 1
- #
- # Remove comment block
- #
- if Line.find(TAB_COMMENT_EDK_START) > -1:
- ReservedLine = GetSplitList(Line, TAB_COMMENT_EDK_START, 1)[0]
- IsFindBlockComment = True
- if Line.find(TAB_COMMENT_EDK_END) > -1:
- Line = ReservedLine + GetSplitList(Line, TAB_COMMENT_EDK_END, 1)[1]
- ReservedLine = ''
- IsFindBlockComment = False
- if IsFindBlockComment:
- continue
-
- #
- # Remove comments at tail and remove spaces again
- #
- Line = CleanString(Line)
- if Line == '':
- continue
-
- #
- # Find a new section tab
- # First insert previous section items
- # And then parse the content of the new section
- #
- if Line.startswith(TAB_SECTION_START) and Line.endswith(TAB_SECTION_END):
- if Line[1:3] == "--":
- continue
- Model = Section[CurrentSection.upper()]
- #
- # Insert items data of previous section
- #
- InsertSectionItemsIntoDatabase(self.TblInf, self.FileID, Filename, Model, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList, self.RecordSet)
- #
- # Parse the new section
- #
- SectionItemList = []
- ArchList = []
- ThirdList = []
-
- CurrentSection = ''
- LineList = GetSplitValueList(Line[len(TAB_SECTION_START):len(Line) - len(TAB_SECTION_END)], TAB_COMMA_SPLIT)
- for Item in LineList:
- ItemList = GetSplitValueList(Item, TAB_SPLIT)
- if CurrentSection == '':
- CurrentSection = ItemList[0]
- else:
- if CurrentSection != ItemList[0]:
- EdkLogger.error("Parser", PARSER_ERROR, "Different section names '%s' and '%s' are found in one section definition, this is not allowed." % (CurrentSection, ItemList[0]), File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
- if CurrentSection.upper() not in self.KeyList:
- RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
- CurrentSection = TAB_UNKNOWN
- continue
- ItemList.append('')
- ItemList.append('')
- if len(ItemList) > 5:
- RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
- else:
- if ItemList[1] != '' and ItemList[1].upper() not in ARCH_LIST_FULL:
- EdkLogger.error("Parser", PARSER_ERROR, "Invalid Arch definition '%s' found" % ItemList[1], File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
- ArchList.append(ItemList[1].upper())
- ThirdList.append(ItemList[2])
-
- continue
-
- #
- # Not in any defined section
- #
- if CurrentSection == TAB_UNKNOWN:
- ErrorMsg = "%s is not in any defined section" % Line
- EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
-
- #
- # Add a section item
- #
- SectionItemList.append([Line, LineNo])
- # End of parse
- #End of For
-
- #
- # Insert items data of last section
- #
- Model = Section[CurrentSection.upper()]
- InsertSectionItemsIntoDatabase(self.TblInf, self.FileID, Filename, Model, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList, self.RecordSet)
-
- #
- # Replace all DEFINE macros with its actual values
- #
- ParseDefineMacro2(self.TblInf, self.RecordSet, GlobalData.gGlobalDefines)
-
- ## Show detailed information of Module
- #
- # Print all members and their values of Module class
- #
- def ShowModule(self):
- M = self.Module
- for Arch in M.Header.keys():
- print '\nArch =', Arch
- print 'Filename =', M.Header[Arch].FileName
- print 'FullPath =', M.Header[Arch].FullPath
- print 'BaseName =', M.Header[Arch].Name
- print 'Guid =', M.Header[Arch].Guid
- print 'Version =', M.Header[Arch].Version
- print 'InfVersion =', M.Header[Arch].InfVersion
- print 'UefiSpecificationVersion =', M.Header[Arch].UefiSpecificationVersion
- print 'EdkReleaseVersion =', M.Header[Arch].EdkReleaseVersion
- print 'ModuleType =', M.Header[Arch].ModuleType
- print 'BinaryModule =', M.Header[Arch].BinaryModule
- print 'ComponentType =', M.Header[Arch].ComponentType
- print 'MakefileName =', M.Header[Arch].MakefileName
- print 'BuildNumber =', M.Header[Arch].BuildNumber
- print 'BuildType =', M.Header[Arch].BuildType
- print 'FfsExt =', M.Header[Arch].FfsExt
- print 'FvExt =', M.Header[Arch].FvExt
- print 'SourceFv =', M.Header[Arch].SourceFv
- print 'PcdIsDriver =', M.Header[Arch].PcdIsDriver
- print 'TianoEdkFlashMap_h =', M.Header[Arch].TianoEdkFlashMap_h
- print 'Shadow =', M.Header[Arch].Shadow
- print 'LibraryClass =', M.Header[Arch].LibraryClass
- for Item in M.Header[Arch].LibraryClass:
- print Item.LibraryClass, DataType.TAB_VALUE_SPLIT.join(Item.SupModuleList)
- print 'CustomMakefile =', M.Header[Arch].CustomMakefile
- print 'Define =', M.Header[Arch].Define
- print 'Specification =', M.Header[Arch].Specification
- for Item in self.Module.ExternImages:
- print '\nEntry_Point = %s, UnloadImage = %s' % (Item.ModuleEntryPoint, Item.ModuleUnloadImage)
- for Item in self.Module.ExternLibraries:
- print 'Constructor = %s, Destructor = %s' % (Item.Constructor, Item.Destructor)
- print '\nBuildOptions =', M.BuildOptions
- for Item in M.BuildOptions:
- print Item.ToolChainFamily, Item.ToolChain, Item.Option, Item.SupArchList
- print '\nIncludes =', M.Includes
- for Item in M.Includes:
- print Item.FilePath, Item.SupArchList
- print '\nLibraries =', M.Libraries
- for Item in M.Libraries:
- print Item.Library, Item.SupArchList
- print '\nLibraryClasses =', M.LibraryClasses
- for Item in M.LibraryClasses:
- print Item.LibraryClass, Item.RecommendedInstance, Item.FeatureFlag, Item.SupModuleList, Item.SupArchList, Item.Define
- print '\nPackageDependencies =', M.PackageDependencies
- for Item in M.PackageDependencies:
- print Item.FilePath, Item.SupArchList, Item.FeatureFlag
- print '\nNmake =', M.Nmake
- for Item in M.Nmake:
- print Item.Name, Item.Value, Item.SupArchList
- print '\nPcds =', M.PcdCodes
- for Item in M.PcdCodes:
- print '\tCName=', Item.CName, 'TokenSpaceGuidCName=', Item.TokenSpaceGuidCName, 'DefaultValue=', Item.DefaultValue, 'ItemType=', Item.ItemType, Item.SupArchList
- print '\nSources =', M.Sources
- for Source in M.Sources:
- print Source.SourceFile, 'Fam=', Source.ToolChainFamily, 'Pcd=', Source.FeatureFlag, 'Tag=', Source.TagName, 'ToolCode=', Source.ToolCode, Source.SupArchList
- print '\nUserExtensions =', M.UserExtensions
- for UserExtension in M.UserExtensions:
- print UserExtension.UserID, UserExtension.Identifier, UserExtension.Content
- print '\nGuids =', M.Guids
- for Item in M.Guids:
- print Item.CName, Item.SupArchList, Item.FeatureFlag
- print '\nProtocols =', M.Protocols
- for Item in M.Protocols:
- print Item.CName, Item.SupArchList, Item.FeatureFlag
- print '\nPpis =', M.Ppis
- for Item in M.Ppis:
- print Item.CName, Item.SupArchList, Item.FeatureFlag
- print '\nDepex =', M.Depex
- for Item in M.Depex:
- print Item.Depex, Item.SupArchList, Item.Define
- print '\nBinaries =', M.Binaries
- for Binary in M.Binaries:
- print 'Type=', Binary.FileType, 'Target=', Binary.Target, 'Name=', Binary.BinaryFile, 'FeatureFlag=', Binary.FeatureFlag, 'SupArchList=', Binary.SupArchList
-
- ## Convert [Defines] section content to ModuleHeaderClass
- #
- # Convert [Defines] section content to ModuleHeaderClass
- #
- # @param Defines The content under [Defines] section
- # @param ModuleHeader An object of ModuleHeaderClass
- # @param Arch The supported ARCH
- #
- def GenModuleHeader(self, ContainerFile):
- EdkLogger.debug(2, "Generate ModuleHeader ...")
- File = self.Identification.FileFullPath
- #
- # Update all defines item in database
- #
- RecordSet = self.RecordSet[MODEL_META_DATA_HEADER]
- for Record in RecordSet:
- ValueList = GetSplitValueList(Record[0], TAB_EQUAL_SPLIT)
- if len(ValueList) != 2:
- RaiseParserError(Record[0], 'Defines', ContainerFile, '<Key> = <Value>', Record[2])
- ID, Value1, Value2, Arch, LineNo = Record[3], ValueList[0], ValueList[1], Record[1], Record[2]
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
- where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Value1), ConvertToSqlString2(Value2), ID)
- self.TblInf.Exec(SqlCommand)
-
- for Arch in DataType.ARCH_LIST:
- ModuleHeader = InfHeader()
- ModuleHeader.FileName = self.Identification.FileName
- ModuleHeader.FullPath = self.Identification.FileFullPath
- DefineList = QueryDefinesItem2(self.TblInf, Arch, self.FileID)
-
- NotProcessedDefineList = []
- for D in DefineList:
- if D[0] in ModuleHeader:
- ModuleHeader[D[0]] = GetSplitValueList(D[1])[0]
- else:
- NotProcessedDefineList.append(D)
-
- if ModuleHeader.ComponentType == "LIBRARY":
- Lib = LibraryClassClass()
- Lib.LibraryClass = ModuleHeader.Name
- Lib.SupModuleList = DataType.SUP_MODULE_LIST
- ModuleHeader.LibraryClass.append(Lib)
-
- # we need to make some key defines resolved first
- for D in NotProcessedDefineList:
- if D[0] == TAB_INF_DEFINES_LIBRARY_CLASS:
- List = GetSplitValueList(D[1], DataType.TAB_VALUE_SPLIT, 1)
- Lib = LibraryClassClass()
- Lib.LibraryClass = CleanString(List[0])
- if len(List) == 1:
- Lib.SupModuleList = DataType.SUP_MODULE_LIST
- elif len(List) == 2:
- Lib.SupModuleList = GetSplitValueList(CleanString(List[1]), ' ')
- ModuleHeader.LibraryClass.append(Lib)
- elif D[0] == TAB_INF_DEFINES_CUSTOM_MAKEFILE:
- List = D[1].split(DataType.TAB_VALUE_SPLIT)
- if len(List) == 2:
- ModuleHeader.CustomMakefile[CleanString(List[0])] = CleanString(List[1])
- else:
- RaiseParserError(D[1], 'CUSTOM_MAKEFILE of Defines', File, 'CUSTOM_MAKEFILE=<Family>|<Filename>', D[2])
- elif D[0] == TAB_INF_DEFINES_ENTRY_POINT:
- Image = ModuleExternImageClass()
- Image.ModuleEntryPoint = CleanString(D[1])
- self.Module.ExternImages.append(Image)
- elif D[0] == TAB_INF_DEFINES_UNLOAD_IMAGE:
- Image = ModuleExternImageClass()
- Image.ModuleUnloadImage = CleanString(D[1])
- self.Module.ExternImages.append(Image)
- elif D[0] == TAB_INF_DEFINES_CONSTRUCTOR:
- LibraryClass = ModuleExternLibraryClass()
- LibraryClass.Constructor = CleanString(D[1])
- self.Module.ExternLibraries.append(LibraryClass)
- elif D[0] == TAB_INF_DEFINES_DESTRUCTOR:
- LibraryClass = ModuleExternLibraryClass()
- LibraryClass.Destructor = CleanString(D[1])
- self.Module.ExternLibraries.append(LibraryClass)
- elif D[0] == TAB_INF_DEFINES_DEFINE:
- List = D[1].split(DataType.TAB_EQUAL_SPLIT)
- if len(List) != 2:
- RaiseParserError(Item, 'DEFINE of Defines', File, 'DEFINE <Word> = <Word>', D[2])
- else:
- ModuleHeader.Define[CleanString(List[0])] = CleanString(List[1])
- elif D[0] == TAB_INF_DEFINES_SPEC:
- List = D[1].split(DataType.TAB_EQUAL_SPLIT)
- if len(List) != 2:
- RaiseParserError(Item, 'SPEC of Defines', File, 'SPEC <Word> = <Version>', D[2])
- else:
- ModuleHeader.Specification[CleanString(List[0])] = CleanString(List[1])
-
- #
- # Get version of INF
- #
- if ModuleHeader.InfVersion != "":
- # EdkII inf
- VersionNumber = ModuleHeader.VersionNumber
- VersionString = ModuleHeader.VersionString
- if len(VersionNumber) > 0 and len(VersionString) == 0:
- EdkLogger.warn(2000, 'VERSION_NUMBER depricated; INF file %s should be modified to use VERSION_STRING instead.' % self.Identification.FileFullPath)
- ModuleHeader.Version = VersionNumber
- if len(VersionString) > 0:
- if len(VersionNumber) > 0:
- EdkLogger.warn(2001, 'INF file %s defines both VERSION_NUMBER and VERSION_STRING, using VERSION_STRING' % self.Identification.FileFullPath)
- ModuleHeader.Version = VersionString
- else:
- # Edk inf
- ModuleHeader.InfVersion = "0x00010000"
- if ModuleHeader.ComponentType in gComponentType2ModuleType:
- ModuleHeader.ModuleType = gComponentType2ModuleType[ModuleHeader.ComponentType]
- elif ModuleHeader.ComponentType != '':
- EdkLogger.error("Parser", PARSER_ERROR, "Unsupported Edk component type [%s]" % ModuleHeader.ComponentType, ExtraData=File, RaiseError=EdkLogger.IsRaiseError)
-
- self.Module.Header[Arch] = ModuleHeader
-
-
- ## GenBuildOptions
- #
- # Gen BuildOptions of Inf
- # [<Family>:]<ToolFlag>=Flag
- #
- # @param ContainerFile: The Inf file full path
- #
- def GenBuildOptions(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_BUILD_OPTIONS)
- BuildOptions = {}
- #
- # Get all BuildOptions
- #
- RecordSet = self.RecordSet[MODEL_META_DATA_BUILD_OPTION]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (Family, ToolChain, Flag) = GetBuildOption(Record[0], ContainerFile, Record[2])
- MergeArches(BuildOptions, (Family, ToolChain, Flag), Arch)
- #
- # Update to Database
- #
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
- where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Family), ConvertToSqlString2(ToolChain), ConvertToSqlString2(Flag), Record[3])
- self.TblInf.Exec(SqlCommand)
-
- for Key in BuildOptions.keys():
- BuildOption = BuildOptionClass(Key[0], Key[1], Key[2])
- BuildOption.SupArchList = BuildOptions[Key]
- self.Module.BuildOptions.append(BuildOption)
-
- ## GenIncludes
- #
- # Gen Includes of Inf
- #
- #
- # @param ContainerFile: The Inf file full path
- #
- def GenIncludes(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_INCLUDES)
- Includes = sdict()
- #
- # Get all Includes
- #
- RecordSet = self.RecordSet[MODEL_EFI_INCLUDE]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- MergeArches(Includes, Record[0], Arch)
-
- for Key in Includes.keys():
- Include = IncludeClass()
- Include.FilePath = NormPath(Key)
- Include.SupArchList = Includes[Key]
- self.Module.Includes.append(Include)
-
- ## GenLibraries
- #
- # Gen Libraries of Inf
- #
- #
- # @param ContainerFile: The Inf file full path
- #
- def GenLibraries(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARIES)
- Libraries = sdict()
- #
- # Get all Includes
- #
- RecordSet = self.RecordSet[MODEL_EFI_LIBRARY_INSTANCE]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- MergeArches(Libraries, Record[0], Arch)
-
- for Key in Libraries.keys():
- Library = ModuleLibraryClass()
- # replace macro and remove file extension
- Library.Library = Key.rsplit('.', 1)[0]
- Library.SupArchList = Libraries[Key]
- self.Module.Libraries.append(Library)
-
- ## GenLibraryClasses
- #
- # Get LibraryClass of Inf
- # <LibraryClassKeyWord>|<LibraryInstance>
- #
- # @param ContainerFile: The Inf file full path
- #
- def GenLibraryClasses(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARY_CLASSES)
- LibraryClasses = {}
- #
- # Get all LibraryClasses
- #
- RecordSet = self.RecordSet[MODEL_EFI_LIBRARY_CLASS]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (LibClassName, LibClassIns, Pcd, SupModelList) = GetLibraryClassOfInf([Record[0], Record[4]], ContainerFile, self.WorkspaceDir, Record[2])
- MergeArches(LibraryClasses, (LibClassName, LibClassIns, Pcd, SupModelList), Arch)
- #
- # Update to Database
- #
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
- where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(LibClassName), ConvertToSqlString2(LibClassIns), ConvertToSqlString2(SupModelList), Record[3])
- self.TblInf.Exec(SqlCommand)
-
- for Key in LibraryClasses.keys():
- KeyList = Key[0].split(DataType.TAB_VALUE_SPLIT)
- LibraryClass = LibraryClassClass()
- LibraryClass.LibraryClass = Key[0]
- LibraryClass.RecommendedInstance = NormPath(Key[1])
- LibraryClass.FeatureFlag = Key[2]
- LibraryClass.SupArchList = LibraryClasses[Key]
- LibraryClass.SupModuleList = GetSplitValueList(Key[3])
- self.Module.LibraryClasses.append(LibraryClass)
-
- ## GenPackages
- #
- # Gen Packages of Inf
- #
- #
- # @param ContainerFile: The Inf file full path
- #
- def GenPackages(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_PACKAGES)
- Packages = {}
- #
- # Get all Packages
- #
- RecordSet = self.RecordSet[MODEL_META_DATA_PACKAGE]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (Package, Pcd) = GetPackage(Record[0], ContainerFile, self.WorkspaceDir, Record[2])
- MergeArches(Packages, (Package, Pcd), Arch)
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
- where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Package), ConvertToSqlString2(Pcd), Record[3])
- self.TblInf.Exec(SqlCommand)
-
-
- for Key in Packages.keys():
- Package = ModulePackageDependencyClass()
- Package.FilePath = NormPath(Key[0])
- Package.SupArchList = Packages[Key]
- Package.FeatureFlag = Key[1]
- self.Module.PackageDependencies.append(Package)
-
- ## GenNmakes
- #
- # Gen Nmakes of Inf
- #
- #
- # @param ContainerFile: The Inf file full path
- #
- def GenNmakes(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_NMAKE)
- Nmakes = sdict()
- #
- # Get all Nmakes
- #
- RecordSet = self.RecordSet[MODEL_META_DATA_NMAKE]
-
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- MergeArches(Nmakes, Record[0], Arch)
-
- for Key in Nmakes.keys():
- List = GetSplitValueList(Key, DataType.TAB_EQUAL_SPLIT, MaxSplit=1)
- if len(List) != 2:
- RaiseParserError(Key, 'Nmake', ContainerFile, '<MacroName> = <Value>')
- continue
- Nmake = ModuleNmakeClass()
- Nmake.Name = List[0]
- Nmake.Value = List[1]
- Nmake.SupArchList = Nmakes[Key]
- self.Module.Nmake.append(Nmake)
-
- # convert Edk format to EdkII format
- if Nmake.Name == "IMAGE_ENTRY_POINT":
- Image = ModuleExternImageClass()
- Image.ModuleEntryPoint = Nmake.Value
- self.Module.ExternImages.append(Image)
- elif Nmake.Name == "DPX_SOURCE":
- Source = ModuleSourceFileClass(NormPath(Nmake.Value), "", "", "", "", Nmake.SupArchList)
- self.Module.Sources.append(Source)
- else:
- ToolList = gNmakeFlagPattern.findall(Nmake.Name)
- if len(ToolList) == 0 or len(ToolList) != 1:
- EdkLogger.warn("\nParser", "Don't know how to do with MACRO: %s" % Nmake.Name,
- ExtraData=ContainerFile)
- else:
- if ToolList[0] in gNmakeFlagName2ToolCode:
- Tool = gNmakeFlagName2ToolCode[ToolList[0]]
- else:
- Tool = ToolList[0]
- BuildOption = BuildOptionClass("MSFT", "*_*_*_%s_FLAGS" % Tool, Nmake.Value)
- BuildOption.SupArchList = Nmake.SupArchList
- self.Module.BuildOptions.append(BuildOption)
-
- ## GenPcds
- #
- # Gen Pcds of Inf
- # <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
- #
- # @param ContainerFile: The Dec file full path
- #
- def GenPcds(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_PCDS)
- Pcds = {}
- PcdToken = {}
-
- #
- # Get all Guids
- #
- RecordSet1 = self.RecordSet[MODEL_PCD_FIXED_AT_BUILD]
- RecordSet2 = self.RecordSet[MODEL_PCD_PATCHABLE_IN_MODULE]
- RecordSet3 = self.RecordSet[MODEL_PCD_FEATURE_FLAG]
- RecordSet4 = self.RecordSet[MODEL_PCD_DYNAMIC_EX]
- RecordSet5 = self.RecordSet[MODEL_PCD_DYNAMIC]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet1:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- if self.Module.Header[Arch].LibraryClass != {}:
- pass
- (TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_FIXED_AT_BUILD, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- for Record in RecordSet2:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_PATCHABLE_IN_MODULE, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- for Record in RecordSet3:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_FEATURE_FLAG, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- for Record in RecordSet4:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_DYNAMIC_EX, ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- for Record in RecordSet5:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], "", ContainerFile, Record[2])
- MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
- PcdToken[Record[3]] = (TokenGuidCName, TokenName)
- #
- # Update to database
- #
- if self.IsToDatabase:
- for Key in PcdToken.keys():
- SqlCommand = """update %s set Value2 = '%s' where ID = %s""" % (self.TblInf.Table, ".".join((PcdToken[Key][0], PcdToken[Key][1])), Key)
- self.TblInf.Exec(SqlCommand)
-
- for Key in Pcds.keys():
- Pcd = PcdClass()
- Pcd.CName = Key[1]
- Pcd.TokenSpaceGuidCName = Key[0]
- Pcd.DefaultValue = Key[2]
- Pcd.ItemType = Key[3]
- Pcd.SupArchList = Pcds[Key]
- self.Module.PcdCodes.append(Pcd)
-
- ## GenSources
- #
- # Gen Sources of Inf
- # <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]
- #
- # @param ContainerFile: The Dec file full path
- #
- def GenSources(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_SOURCES)
- Sources = {}
-
- #
- # Get all Nmakes
- #
- RecordSet = self.RecordSet[MODEL_EFI_SOURCE_FILE]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (Filename, Family, TagName, ToolCode, Pcd) = GetSource(Record[0], ContainerFile, self.Identification.FileRelativePath, Record[2])
- MergeArches(Sources, (Filename, Family, TagName, ToolCode, Pcd), Arch)
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s', Value4 = '%s', Value5 = '%s'
- where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Filename), ConvertToSqlString2(Family), ConvertToSqlString2(TagName), ConvertToSqlString2(ToolCode), ConvertToSqlString2(Pcd), Record[3])
- self.TblInf.Exec(SqlCommand)
-
- for Key in Sources.keys():
- Source = ModuleSourceFileClass(Key[0], Key[2], Key[3], Key[1], Key[4], Sources[Key])
- self.Module.Sources.append(Source)
-
- ## GenUserExtensions
- #
- # Gen UserExtensions of Inf
- #
- def GenUserExtensions(self, ContainerFile):
-# #
-# # UserExtensions
-# #
-# if self.UserExtensions != '':
-# UserExtension = UserExtensionsClass()
-# Lines = self.UserExtensions.splitlines()
-# List = GetSplitValueList(Lines[0], DataType.TAB_SPLIT, 2)
-# if len(List) != 3:
-# RaiseParserError(Lines[0], 'UserExtensions', File, "UserExtensions.UserId.'Identifier'")
-# else:
-# UserExtension.UserID = List[1]
-# UserExtension.Identifier = List[2][0:-1].replace("'", '').replace('\"', '')
-# for Line in Lines[1:]:
-# UserExtension.Content = UserExtension.Content + CleanString(Line) + '\n'
-# self.Module.UserExtensions.append(UserExtension)
- pass
-
- ## GenDepexes
- #
- # Gen Depex of Inf
- #
- # @param ContainerFile: The Inf file full path
- #
- def GenDepexes(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_DEPEX)
- Depex = {}
- #
- # Get all Depexes
- #
- RecordSet = self.RecordSet[MODEL_EFI_DEPEX]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- Line = ''
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- Line = Line + Record[0] + ' '
- if Line != '':
- MergeArches(Depex, Line, Arch)
-
- for Key in Depex.keys():
- Dep = ModuleDepexClass()
- Dep.Depex = Key
- Dep.SupArchList = Depex[Key]
- self.Module.Depex.append(Dep)
-
- ## GenBinaries
- #
- # Gen Binary of Inf
- # <FileType>|<Filename>|<Target>[|<TokenSpaceGuidCName>.<PcdCName>]
- #
- # @param ContainerFile: The Dec file full path
- #
- def GenBinaries(self, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % TAB_BINARIES)
- Binaries = {}
-
- #
- # Get all Guids
- #
- RecordSet = self.RecordSet[MODEL_EFI_BINARY_FILE]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (FileType, Filename, Target, Pcd) = GetBinary(Record[0], ContainerFile, self.Identification.FileRelativePath, Record[2])
- MergeArches(Binaries, (FileType, Filename, Target, Pcd), Arch)
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s', Value4 = '%s'
- where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(FileType), ConvertToSqlString2(Filename), ConvertToSqlString2(Target), ConvertToSqlString2(Pcd), Record[3])
- self.TblInf.Exec(SqlCommand)
-
- for Key in Binaries.keys():
- Binary = ModuleBinaryFileClass(NormPath(Key[1]), Key[0], Key[2], Key[3], Binaries[Key])
- self.Module.Binaries.append(Binary)
-
- ## GenGuids
- #
- # Gen Guids of Inf
- # <CName>=<GuidValue>
- #
- # @param ContainerFile: The Inf file full path
- #
- def GenGuidProtocolPpis(self, Type, ContainerFile):
- EdkLogger.debug(2, "Generate %s ..." % Type)
- Lists = {}
- #
- # Get all Items
- #
- RecordSet = self.RecordSet[Section[Type.upper()]]
-
- #
- # Go through each arch
- #
- for Arch in self.SupArchList:
- for Record in RecordSet:
- if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
- (Name, Value) = GetGuidsProtocolsPpisOfInf(Record[0], Type, ContainerFile, Record[2])
- MergeArches(Lists, (Name, Value), Arch)
- if self.IsToDatabase:
- SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
- where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Name), ConvertToSqlString2(Value), Record[3])
- self.TblInf.Exec(SqlCommand)
-
- ListMember = None
- if Type == TAB_GUIDS:
- ListMember = self.Module.Guids
- elif Type == TAB_PROTOCOLS:
- ListMember = self.Module.Protocols
- elif Type == TAB_PPIS:
- ListMember = self.Module.Ppis
-
- for Key in Lists.keys():
- ListClass = GuidProtocolPpiCommonClass()
- ListClass.CName = Key[0]
- ListClass.SupArchList = Lists[Key]
- ListClass.FeatureFlag = Key[1]
- ListMember.append(ListClass)
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
-if __name__ == '__main__':
- EdkLogger.Initialize()
- EdkLogger.SetLevel(EdkLogger.DEBUG_0)
-
- W = os.getenv('WORKSPACE')
- F = os.path.join(W, 'MdeModulePkg/Application/HelloWorld/HelloWorld.inf')
-
- Db = Database.Database('Inf.db')
- Db.InitDatabase()
-
- P = Inf(os.path.normpath(F), True, True, W, Db)
- P.ShowModule()
-
- Db.Close()
diff --git a/BaseTools/Source/Python/Common/LongFilePathOs.py b/BaseTools/Source/Python/Common/LongFilePathOs.py
deleted file mode 100644
index 2e530f9dd7..0000000000
--- a/BaseTools/Source/Python/Common/LongFilePathOs.py
+++ /dev/null
@@ -1,73 +0,0 @@
-## @file
-# Override built in module os to provide support for long file path
-#
-# Copyright (c) 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-import os
-import LongFilePathOsPath
-from Common.LongFilePathSupport import LongFilePath
-from Common.LongFilePathSupport import UniToStr
-
-path = LongFilePathOsPath
-
-def access(path, mode):
- return os.access(LongFilePath(path), mode)
-
-def remove(path):
- return os.remove(LongFilePath(path))
-
-def removedirs(name):
- return os.removedirs(LongFilePath(name))
-
-def rmdir(path):
- return os.rmdir(LongFilePath(path))
-
-def mkdir(path):
- return os.mkdir(LongFilePath(path))
-
-def makedirs(name, mode=0777):
- return os.makedirs(LongFilePath(name), mode)
-
-def rename(old, new):
- return os.rename(LongFilePath(old), LongFilePath(new))
-
-def chdir(path):
- return os.chdir(LongFilePath(path))
-
-def chmod(path, mode):
- return os.chmod(LongFilePath(path), mode)
-
-def stat(path):
- return os.stat(LongFilePath(path))
-
-def utime(path, times):
- return os.utime(LongFilePath(path), times)
-
-def listdir(path):
- List = []
- uList = os.listdir(u"%s" % LongFilePath(path))
- for Item in uList:
- List.append(UniToStr(Item))
- return List
-
-environ = os.environ
-getcwd = os.getcwd
-chdir = os.chdir
-walk = os.walk
-W_OK = os.W_OK
-F_OK = os.F_OK
-sep = os.sep
-linesep = os.linesep
-getenv = os.getenv
-pathsep = os.pathsep
-name = os.name
-SEEK_SET = os.SEEK_SET
-SEEK_END = os.SEEK_END
diff --git a/BaseTools/Source/Python/Common/LongFilePathOsPath.py b/BaseTools/Source/Python/Common/LongFilePathOsPath.py
deleted file mode 100644
index 0bba446419..0000000000
--- a/BaseTools/Source/Python/Common/LongFilePathOsPath.py
+++ /dev/null
@@ -1,53 +0,0 @@
-## @file
-# Override built in module os.path to provide support for long file path
-#
-# Copyright (c) 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-import os
-from Common.LongFilePathSupport import LongFilePath
-
-def isfile(path):
- return os.path.isfile(LongFilePath(path))
-
-def isdir(path):
- return os.path.isdir(LongFilePath(path))
-
-def exists(path):
- return os.path.exists(LongFilePath(path))
-
-def getsize(filename):
- return os.path.getsize(LongFilePath(filename))
-
-def getmtime(filename):
- return os.path.getmtime(LongFilePath(filename))
-
-def getatime(filename):
- return os.path.getatime(LongFilePath(filename))
-
-def getctime(filename):
- return os.path.getctime(LongFilePath(filename))
-
-join = os.path.join
-splitext = os.path.splitext
-splitdrive = os.path.splitdrive
-split = os.path.split
-abspath = os.path.abspath
-basename = os.path.basename
-commonprefix = os.path.commonprefix
-sep = os.path.sep
-normpath = os.path.normpath
-normcase = os.path.normcase
-dirname = os.path.dirname
-islink = os.path.islink
-isabs = os.path.isabs
-realpath = os.path.realpath
-relpath = os.path.relpath
-pardir = os.path.pardir
diff --git a/BaseTools/Source/Python/Common/LongFilePathSupport.py b/BaseTools/Source/Python/Common/LongFilePathSupport.py
deleted file mode 100644
index b3e3c8ea64..0000000000
--- a/BaseTools/Source/Python/Common/LongFilePathSupport.py
+++ /dev/null
@@ -1,63 +0,0 @@
-## @file
-# Override built in function file.open to provide support for long file path
-#
-# Copyright (c) 2014 - 2015, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-import os
-import platform
-import shutil
-import codecs
-
-##
-# OpenLongPath
-# Convert a file path to a long file path
-#
-def LongFilePath(FileName):
- FileName = os.path.normpath(FileName)
- if platform.system() == 'Windows':
- if FileName.startswith('\\\\?\\'):
- return FileName
- if FileName.startswith('\\\\'):
- return '\\\\?\\UNC\\' + FileName[2:]
- if os.path.isabs(FileName):
- return '\\\\?\\' + FileName
- return FileName
-
-##
-# OpenLongFilePath
-# wrap open to support opening a long file path
-#
-def OpenLongFilePath(FileName, Mode='r', Buffer= -1):
- return open(LongFilePath(FileName), Mode, Buffer)
-
-def CodecOpenLongFilePath(Filename, Mode='rb', Encoding=None, Errors='strict', Buffering=1):
- return codecs.open(LongFilePath(Filename), Mode, Encoding, Errors, Buffering)
-
-##
-# CopyLongFilePath
-# wrap copyfile to support copy a long file path
-#
-def CopyLongFilePath(src, dst):
- with open(LongFilePath(src), 'rb') as fsrc:
- with open(LongFilePath(dst), 'wb') as fdst:
- shutil.copyfileobj(fsrc, fdst)
-
-## Convert a python unicode string to a normal string
-#
-# Convert a python unicode string to a normal string
-# UniToStr(u'I am a string') is 'I am a string'
-#
-# @param Uni: The python unicode string
-#
-# @retval: The formatted normal string
-#
-def UniToStr(Uni):
- return repr(Uni)[2:-1]
diff --git a/BaseTools/Source/Python/Common/MigrationUtilities.py b/BaseTools/Source/Python/Common/MigrationUtilities.py
deleted file mode 100644
index e9f1cabcb7..0000000000
--- a/BaseTools/Source/Python/Common/MigrationUtilities.py
+++ /dev/null
@@ -1,568 +0,0 @@
-## @file
-# Contains several utilitities shared by migration tools.
-#
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import Common.LongFilePathOs as os
-import re
-import EdkLogger
-from optparse import OptionParser
-from Common.BuildToolError import *
-from XmlRoutines import *
-from CommonDataClass.CommonClass import *
-from Common.LongFilePathSupport import OpenLongFilePath as open
-
-## Set all fields of CommonClass object.
-#
-# Set all attributes of CommonClass object from XML Dom object of XmlCommon.
-#
-# @param Common The destine CommonClass object.
-# @param XmlCommon The source XML Dom object.
-#
-def SetCommon(Common, XmlCommon):
- XmlTag = "Usage"
- Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()
-
- XmlTag = "FeatureFlag"
- Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)
-
- XmlTag = "SupArchList"
- Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()
-
- XmlTag = XmlNodeName(XmlCommon) + "/" + "HelpText"
- Common.HelpText = XmlElement(XmlCommon, XmlTag)
-
-
-## Set some fields of CommonHeaderClass object.
-#
-# Set Name, Guid, FileName and FullPath fields of CommonHeaderClass object from
-# XML Dom object of XmlCommonHeader, NameTag and FileName.
-#
-# @param CommonHeader The destine CommonClass object.
-# @param XmlCommonHeader The source XML Dom object.
-# @param NameTag The name tag in XML Dom object.
-# @param FileName The file name of the XML file.
-#
-def SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):
- XmlParentTag = XmlNodeName(XmlCommonHeader)
-
- XmlTag = XmlParentTag + "/" + NameTag
- CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)
-
- XmlTag = XmlParentTag + "/" + "GuidValue"
- CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)
-
- XmlTag = XmlParentTag + "/" + "Version"
- CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)
-
- CommonHeader.FileName = os.path.basename(FileName)
- CommonHeader.FullPath = os.path.abspath(FileName)
-
-
-## Regular expression to match specification and value.
-mReSpecification = re.compile(r"(?P<Specification>\w+)\s+(?P<Value>\w*)")
-
-## Add specification to specification dictionary.
-#
-# Abstract specification name, value pair from Specification String and add them
-# to specification dictionary.
-#
-# @param SpecificationDict The destine Specification dictionary.
-# @param SpecificationString The source Specification String from which the
-# specification name and value pair is abstracted.
-#
-def AddToSpecificationDict(SpecificationDict, SpecificationString):
- """Abstract specification name, value pair from Specification String"""
- for SpecificationMatch in mReSpecification.finditer(SpecificationString):
- Specification = SpecificationMatch.group("Specification")
- Value = SpecificationMatch.group("Value")
- SpecificationDict[Specification] = Value
-
-## Set all fields of CommonHeaderClass object.
-#
-# Set all attributes of CommonHeaderClass object from XML Dom object of
-# XmlCommonHeader, NameTag and FileName.
-#
-# @param CommonHeader The destine CommonClass object.
-# @param XmlCommonHeader The source XML Dom object.
-# @param NameTag The name tag in XML Dom object.
-# @param FileName The file name of the XML file.
-#
-def SetCommonHeader(CommonHeader, XmlCommonHeader):
- """Set all attributes of CommonHeaderClass object from XmlCommonHeader"""
- XmlParent = XmlNodeName(XmlCommonHeader)
-
- XmlTag = XmlParent + "/" + "Abstract"
- CommonHeader.Abstract = XmlElement(XmlCommonHeader, XmlTag)
-
- XmlTag = XmlParent + "/" + "Description"
- CommonHeader.Description = XmlElement(XmlCommonHeader, XmlTag)
-
- XmlTag = XmlParent + "/" + "Copyright"
- CommonHeader.Copyright = XmlElement(XmlCommonHeader, XmlTag)
-
- XmlTag = XmlParent + "/" + "License"
- CommonHeader.License = XmlElement(XmlCommonHeader, XmlTag)
-
- XmlTag = XmlParent + "/" + "Specification"
- Specification = XmlElement(XmlCommonHeader, XmlTag)
-
- AddToSpecificationDict(CommonHeader.Specification, Specification)
-
- XmlTag = XmlParent + "/" + "ModuleType"
- CommonHeader.ModuleType = XmlElement(XmlCommonHeader, XmlTag)
-
-
-## Load a new Cloned Record class object.
-#
-# Read an input XML ClonedRecord DOM object and return an object of Cloned Record
-# contained in the DOM object.
-#
-# @param XmlCloned A child XML DOM object in a Common XML DOM.
-#
-# @retvel ClonedRecord A new Cloned Record object created by XmlCloned.
-#
-def LoadClonedRecord(XmlCloned):
- ClonedRecord = ClonedRecordClass()
-
- XmlTag = "Id"
- ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))
-
- XmlTag = "FarGuid"
- ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)
-
- XmlTag = "Cloned/PackageGuid"
- ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)
-
- XmlTag = "Cloned/PackageVersion"
- ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)
-
- XmlTag = "Cloned/ModuleGuid"
- ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)
-
- XmlTag = "Cloned/ModuleVersion"
- ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)
-
- return ClonedRecord
-
-
-## Load a new Guid/Protocol/Ppi common class object.
-#
-# Read an input XML Guid/Protocol/Ppi DOM object and return an object of
-# Guid/Protocol/Ppi contained in the DOM object.
-#
-# @param XmlGuidProtocolPpiCommon A child XML DOM object in a Common XML DOM.
-#
-# @retvel GuidProtocolPpiCommon A new GuidProtocolPpiCommon class object
-# created by XmlGuidProtocolPpiCommon.
-#
-def LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):
- GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()
-
- XmlTag = "Name"
- GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
-
- XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)
- if XmlParent == "Entry":
- XmlTag = "%s/C_Name" % XmlParent
- elif XmlParent == "GuidCNames":
- XmlTag = "%s/GuidCName" % XmlParent
- else:
- XmlTag = "%s/%sCName" % (XmlParent, XmlParent)
-
- GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
-
- XmlTag = XmlParent + "/" + "GuidValue"
- GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
-
- if XmlParent.endswith("Notify"):
- GuidProtocolPpiCommon.Notify = True
-
- XmlTag = "GuidTypeList"
- GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
- GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()
-
- XmlTag = "SupModuleList"
- SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
- GuidProtocolPpiCommon.SupModuleList = SupModules.split()
-
- SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)
-
- return GuidProtocolPpiCommon
-
-
-## Load a new Pcd class object.
-#
-# Read an input XML Pcd DOM object and return an object of Pcd
-# contained in the DOM object.
-#
-# @param XmlPcd A child XML DOM object in a Common XML DOM.
-#
-# @retvel Pcd A new Pcd object created by XmlPcd.
-#
-def LoadPcd(XmlPcd):
- """Return a new PcdClass object equivalent to XmlPcd"""
- Pcd = PcdClass()
-
- XmlTag = "PcdEntry/C_Name"
- Pcd.CName = XmlElement(XmlPcd, XmlTag)
-
- XmlTag = "PcdEntry/Token"
- Pcd.Token = XmlElement(XmlPcd, XmlTag)
-
- XmlTag = "PcdEntry/TokenSpaceGuidCName"
- Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)
-
- XmlTag = "PcdEntry/DatumType"
- Pcd.DatumType = XmlElement(XmlPcd, XmlTag)
-
- XmlTag = "PcdEntry/MaxDatumSize"
- Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)
-
- XmlTag = "PcdEntry/DefaultValue"
- Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)
-
- XmlTag = "PcdItemType"
- Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)
-
- XmlTag = "PcdEntry/ValidUsage"
- Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()
-
- XmlTag = "SupModuleList"
- Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()
-
- SetCommon(Pcd, XmlPcd)
-
- return Pcd
-
-
-## Load a new LibraryClass class object.
-#
-# Read an input XML LibraryClass DOM object and return an object of LibraryClass
-# contained in the DOM object.
-#
-# @param XmlLibraryClass A child XML DOM object in a Common XML DOM.
-#
-# @retvel LibraryClass A new LibraryClass object created by XmlLibraryClass.
-#
-def LoadLibraryClass(XmlLibraryClass):
- LibraryClass = LibraryClassClass()
-
- XmlTag = "LibraryClass/Keyword"
- LibraryClass.LibraryClass = XmlElement(XmlLibraryClass, XmlTag)
- if LibraryClass.LibraryClass == "":
- XmlTag = "Name"
- LibraryClass.LibraryClass = XmlAttribute(XmlLibraryClass, XmlTag)
-
- XmlTag = "LibraryClass/IncludeHeader"
- LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, XmlTag)
-
- XmlTag = "RecommendedInstanceVersion"
- RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, XmlTag)
- LibraryClass.RecommendedInstanceVersion = RecommendedInstanceVersion
-
- XmlTag = "RecommendedInstanceGuid"
- RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, XmlTag)
- LibraryClass.RecommendedInstanceGuid = RecommendedInstanceGuid
-
- XmlTag = "SupModuleList"
- SupModules = XmlAttribute(XmlLibraryClass, XmlTag)
- LibraryClass.SupModuleList = SupModules.split()
-
- SetCommon(LibraryClass, XmlLibraryClass)
-
- return LibraryClass
-
-
-## Load a new Build Option class object.
-#
-# Read an input XML BuildOption DOM object and return an object of Build Option
-# contained in the DOM object.
-#
-# @param XmlBuildOption A child XML DOM object in a Common XML DOM.
-#
-# @retvel BuildOption A new Build Option object created by XmlBuildOption.
-#
-def LoadBuildOption(XmlBuildOption):
- """Return a new BuildOptionClass object equivalent to XmlBuildOption"""
- BuildOption = BuildOptionClass()
-
- BuildOption.Option = XmlElementData(XmlBuildOption)
-
- XmlTag = "BuildTargets"
- BuildOption.BuildTargetList = XmlAttribute(XmlBuildOption, XmlTag).split()
-
- XmlTag = "ToolChainFamily"
- BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, XmlTag)
-
- XmlTag = "TagName"
- BuildOption.TagName = XmlAttribute(XmlBuildOption, XmlTag)
-
- XmlTag = "ToolCode"
- BuildOption.ToolCode = XmlAttribute(XmlBuildOption, XmlTag)
-
- XmlTag = "SupArchList"
- BuildOption.SupArchList = XmlAttribute(XmlBuildOption, XmlTag).split()
-
- return BuildOption
-
-
-## Load a new User Extensions class object.
-#
-# Read an input XML UserExtensions DOM object and return an object of User
-# Extensions contained in the DOM object.
-#
-# @param XmlUserExtensions A child XML DOM object in a Common XML DOM.
-#
-# @retvel UserExtensions A new User Extensions object created by
-# XmlUserExtensions.
-#
-def LoadUserExtensions(XmlUserExtensions):
- UserExtensions = UserExtensionsClass()
-
- XmlTag = "UserID"
- UserExtensions.UserID = XmlAttribute(XmlUserExtensions, XmlTag)
-
- XmlTag = "Identifier"
- UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, XmlTag)
-
- UserExtensions.Content = XmlElementData(XmlUserExtensions)
-
- return UserExtensions
-
-
-## Store content to a text file object.
-#
-# Write some text file content to a text file object. The contents may echo
-# in screen in a verbose way.
-#
-# @param TextFile The text file object.
-# @param Content The string object to be written to a text file.
-#
-def StoreTextFile(TextFile, Content):
- EdkLogger.verbose(Content)
- TextFile.write(Content)
-
-
-## Add item to a section.
-#
-# Add an Item with specific CPU architecture to section dictionary.
-# The possible duplication is ensured to be removed.
-#
-# @param Section Section dictionary indexed by CPU architecture.
-# @param Arch CPU architecture: Ia32, X64, Ipf, ARM, AARCH64, Ebc or Common.
-# @param Item The Item to be added to section dictionary.
-#
-def AddToSection(Section, Arch, Item):
- SectionArch = Section.get(Arch, [])
- if Item not in SectionArch:
- SectionArch.append(Item)
- Section[Arch] = SectionArch
-
-
-## Get section contents.
-#
-# Return the content of section named SectionName.
-# the contents is based on Methods and ObjectLists.
-#
-# @param SectionName The name of the section.
-# @param Method A function returning a string item of an object.
-# @param ObjectList The list of object.
-#
-# @retval Section The string content of a section.
-#
-def GetSection(SectionName, Method, ObjectList):
- SupportedArches = ["common", "Ia32", "X64", "Ipf", "Ebc", "ARM", "AARCH64"]
- SectionDict = {}
- for Object in ObjectList:
- Item = Method(Object)
- if Item == "":
- continue
- Item = " %s" % Item
- Arches = Object.SupArchList
- if len(Arches) == 0:
- AddToSection(SectionDict, "common", Item)
- else:
- for Arch in SupportedArches:
- if Arch.upper() in Arches:
- AddToSection(SectionDict, Arch, Item)
-
- Section = ""
- for Arch in SupportedArches:
- SectionArch = "\n".join(SectionDict.get(Arch, []))
- if SectionArch != "":
- Section += "[%s.%s]\n%s\n" % (SectionName, Arch, SectionArch)
- Section += "\n"
- if Section != "":
- Section += "\n"
- return Section
-
-
-## Store file header to a text file.
-#
-# Write standard file header to a text file. The content includes copyright,
-# abstract, description and license extracted from CommonHeader class object.
-#
-# @param TextFile The text file object.
-# @param CommonHeader The source CommonHeader class object.
-#
-def StoreHeader(TextFile, CommonHeader):
- CopyRight = CommonHeader.Copyright
- Abstract = CommonHeader.Abstract
- Description = CommonHeader.Description
- License = CommonHeader.License
-
- Header = "#/** @file\n#\n"
- Header += "# " + Abstract + "\n#\n"
- Header += "# " + Description.strip().replace("\n", "\n# ") + "\n"
- Header += "# " + CopyRight + "\n#\n"
- Header += "# " + License.replace("\n", "\n# ").replace(" ", " ")
- Header += "\n#\n#**/\n\n"
-
- StoreTextFile(TextFile, Header)
-
-## Store file header to a text file.
-#
-# Write Defines section to a text file. DefinesTupleList determines the content.
-#
-# @param TextFile The text file object.
-# @param DefinesTupleList The list of (Tag, Value) to be added as one item.
-#
-def StoreDefinesSection(TextFile, DefinesTupleList):
- Section = "[Defines]\n"
- for DefineItem in DefinesTupleList:
- Section += " %-30s = %s\n" % DefineItem
-
- Section += "\n\n"
- StoreTextFile(TextFile, Section)
-
-
-## Return one User Extension section.
-#
-# Read the input UserExtentsions class object and return one section.
-#
-# @param UserExtensions An input UserExtensions class object.
-#
-# @retval UserExtensionSection A section representing UserExtensions object.
-#
-def GetUserExtensions(UserExtensions):
- UserId = UserExtensions.UserID
- Identifier = UserExtensions.Identifier
- Content = UserExtensions.Content
-
- return "[UserExtensions.%s.%s]\n %s\n\n" % (UserId, Identifier, Content)
-
-## Regular expression to match an equation.
-mReEquation = re.compile(r"\s*(\S+)\s*=\s*(\S*)\s*")
-
-## Return a value tuple matching information in a text fle.
-#
-# Parse the text file and return a value tuple corresponding to an input tag
-# tuple. In case of any error, an tuple of empty strings is returned.
-#
-# @param FileName The file name of the text file.
-# @param TagTuple A tuple of tags as the key to the value.
-#
-# @param ValueTupe The returned tuple corresponding to the tag tuple.
-#
-def GetTextFileInfo(FileName, TagTuple):
- ValueTuple = [""] * len(TagTuple)
- try:
- for Line in open(FileName):
- Line = Line.split("#", 1)[0]
- MatchEquation = mReEquation.match(Line)
- if MatchEquation:
- Tag = MatchEquation.group(1).upper()
- Value = MatchEquation.group(2)
- for Index in range(len(TagTuple)):
- if TagTuple[Index] == Tag:
- ValueTuple[Index] = Value
- except:
- EdkLogger.info("IO Error in reading file %s" % FileName)
-
- return ValueTuple
-
-
-## Return a value tuple matching information in an XML fle.
-#
-# Parse the XML file and return a value tuple corresponding to an input tag
-# tuple. In case of any error, an tuple of empty strings is returned.
-#
-# @param FileName The file name of the XML file.
-# @param TagTuple A tuple of tags as the key to the value.
-#
-# @param ValueTupe The returned tuple corresponding to the tag tuple.
-#
-def GetXmlFileInfo(FileName, TagTuple):
- XmlDom = XmlParseFile(FileName)
- return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])
-
-
-## Parse migration command line options
-#
-# Use standard Python module optparse to parse command line option of this tool.
-#
-# @param Source The source file type.
-# @param Destinate The destinate file type.
-#
-# @retval Options A optparse object containing the parsed options.
-# @retval InputFile Path of an source file to be migrated.
-#
-def MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):
- # use clearer usage to override default usage message
- UsageString = "%s [-a] [-v|-q] [-o <output_file>] <input_file>" % ToolName
- Version = "%s Version %.2f" % (ToolName, VersionNumber)
- Copyright = "Copyright (c) 2007, Intel Corporation. All rights reserved."
-
- Parser = OptionParser(description=Copyright, version=Version, usage=UsageString)
- Parser.add_option("-o", "--output", dest="OutputFile", help="The name of the %s file to be created." % Destinate)
- Parser.add_option("-a", "--auto", dest="AutoWrite", action="store_true", default=False, help="Automatically create the %s file using the name of the %s file and replacing file extension" % (Source, Destinate))
- Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
- Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed.")
-
- Options, Args = Parser.parse_args()
-
- # Set logging level
- if Options.verbose:
- EdkLogger.setLevel(EdkLogger.VERBOSE)
- elif Options.quiet:
- EdkLogger.setLevel(EdkLogger.QUIET)
- else:
- EdkLogger.setLevel(EdkLogger.INFO)
-
- # error check
- if len(Args) == 0:
- raise MigrationError(PARAMETER_MISSING, name="Input file", usage=Parser.get_usage())
- if len(Args) > 1:
- raise MigrationError(PARAMETER_INVALID, name="Too many input files", usage=Parser.get_usage())
-
- InputFile = Args[0]
- if not os.path.exists(InputFile):
- raise MigrationError(FILE_NOT_FOUND, name=InputFile)
-
- if Options.OutputFile:
- if Options.AutoWrite:
- raise MigrationError(OPTION_CONFLICT, arg1="-o", arg2="-a", usage=Parser.get_usage())
- else:
- if Options.AutoWrite:
- Options.OutputFile = os.path.splitext(InputFile)[0] + "." + Destinate.lower()
- else:
- raise MigrationError(OPTION_MISSING, name="-o", usage=Parser.get_usage())
-
- return Options, InputFile
-
-# This acts like the main() function for the script, unless it is 'import'ed
-# into another script.
-if __name__ == '__main__':
- pass
diff --git a/BaseTools/Source/Python/Common/Misc.py b/BaseTools/Source/Python/Common/Misc.py
deleted file mode 100644
index dbb711e96c..0000000000
--- a/BaseTools/Source/Python/Common/Misc.py
+++ /dev/null
@@ -1,2098 +0,0 @@
-## @file
-# Common routines used by all tools
-#
-# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import Common.LongFilePathOs as os
-import sys
-import string
-import thread
-import threading
-import time
-import re
-import cPickle
-import array
-import shutil
-from struct import pack
-from UserDict import IterableUserDict
-from UserList import UserList
-
-from Common import EdkLogger as EdkLogger
-from Common import GlobalData as GlobalData
-from DataType import *
-from BuildToolError import *
-from CommonDataClass.DataClass import *
-from Parsing import GetSplitValueList
-from Common.LongFilePathSupport import OpenLongFilePath as open
-from Common.MultipleWorkspace import MultipleWorkspace as mws
-
-## Regular expression used to find out place holders in string template
-gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
-
-## Dictionary used to store file time stamp for quick re-access
-gFileTimeStampCache = {} # {file path : file time stamp}
-
-## Dictionary used to store dependencies of files
-gDependencyDatabase = {} # arch : {file path : [dependent files list]}
-
-def GetVariableOffset(mapfilepath, efifilepath, varnames):
- """ Parse map file to get variable offset in current EFI file
- @param mapfilepath Map file absolution path
- @param efifilepath: EFI binary file full path
- @param varnames iteratable container whose elements are variable names to be searched
-
- @return List whos elements are tuple with variable name and raw offset
- """
- lines = []
- try:
- f = open(mapfilepath, 'r')
- lines = f.readlines()
- f.close()
- except:
- return None
-
- if len(lines) == 0: return None
- firstline = lines[0].strip()
- if (firstline.startswith("Archive member included ") and
- firstline.endswith(" file (symbol)")):
- return _parseForGCC(lines, efifilepath, varnames)
- return _parseGeneral(lines, efifilepath, varnames)
-
-def _parseForGCC(lines, efifilepath, varnames):
- """ Parse map file generated by GCC linker """
- status = 0
- sections = []
- varoffset = []
- for index, line in enumerate(lines):
- line = line.strip()
- # status machine transection
- if status == 0 and line == "Memory Configuration":
- status = 1
- continue
- elif status == 1 and line == 'Linker script and memory map':
- status = 2
- continue
- elif status ==2 and line == 'START GROUP':
- status = 3
- continue
-
- # status handler
- if status == 3:
- m = re.match('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$', line)
- if m != None:
- sections.append(m.groups(0))
- for varname in varnames:
- Str = ''
- m = re.match("^.data.(%s)" % varname, line)
- if m != None:
- m = re.match(".data.(%s)$" % varname, line)
- if m != None:
- Str = lines[index + 1]
- else:
- Str = line[len(".data.%s" % varname):]
- if Str:
- m = re.match('^([\da-fA-Fx]+) +([\da-fA-Fx]+)', Str.strip())
- if m != None:
- varoffset.append((varname, int(m.groups(0)[0], 16) , int(sections[-1][1], 16), sections[-1][0]))
-
- if not varoffset:
- return []
- # get section information from efi file
- efisecs = PeImageClass(efifilepath).SectionHeaderList
- if efisecs == None or len(efisecs) == 0:
- return []
- #redirection
- redirection = 0
- for efisec in efisecs:
- for section in sections:
- if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
- redirection = int(section[1], 16) - efisec[1]
-
- ret = []
- for var in varoffset:
- for efisec in efisecs:
- if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
- ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
- return ret
-
-def _parseGeneral(lines, efifilepath, varnames):
- status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
- secs = [] # key = section name
- varoffset = []
- secRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
- symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
-
- for line in lines:
- line = line.strip()
- if re.match("^Start[' ']+Length[' ']+Name[' ']+Class", line):
- status = 1
- continue
- if re.match("^Address[' ']+Publics by Value[' ']+Rva\+Base", line):
- status = 2
- continue
- if re.match("^entry point at", line):
- status = 3
- continue
- if status == 1 and len(line) != 0:
- m = secRe.match(line)
- assert m != None, "Fail to parse the section in map file , line is %s" % line
- sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
- secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
- if status == 2 and len(line) != 0:
- for varname in varnames:
- m = symRe.match(line)
- assert m != None, "Fail to parse the symbol in map file, line is %s" % line
- sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
- sec_no = int(sec_no, 16)
- sym_offset = int(sym_offset, 16)
- vir_addr = int(vir_addr, 16)
- m2 = re.match('^[_]*(%s)' % varname, sym_name)
- if m2 != None:
- # fond a binary pcd entry in map file
- for sec in secs:
- if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
- varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
-
- if not varoffset: return []
-
- # get section information from efi file
- efisecs = PeImageClass(efifilepath).SectionHeaderList
- if efisecs == None or len(efisecs) == 0:
- return []
-
- ret = []
- for var in varoffset:
- index = 0
- for efisec in efisecs:
- index = index + 1
- if var[1].strip() == efisec[0].strip():
- ret.append((var[0], hex(efisec[2] + var[2])))
- elif var[4] == index:
- ret.append((var[0], hex(efisec[2] + var[2])))
-
- return ret
-
-## Routine to process duplicated INF
-#
-# This function is called by following two cases:
-# Case 1 in DSC:
-# [components.arch]
-# Pkg/module/module.inf
-# Pkg/module/module.inf {
-# <Defines>
-# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
-# }
-# Case 2 in FDF:
-# INF Pkg/module/module.inf
-# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
-#
-# This function copies Pkg/module/module.inf to
-# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
-#
-# @param Path Original PathClass object
-# @param BaseName New file base name
-#
-# @retval return the new PathClass object
-#
-def ProcessDuplicatedInf(Path, BaseName, Workspace):
- Filename = os.path.split(Path.File)[1]
- if '.' in Filename:
- Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
- else:
- Filename = BaseName + Path.BaseName
-
- #
- # If -N is specified on command line, cache is disabled
- # The directory has to be created
- #
- DbDir = os.path.split(GlobalData.gDatabasePath)[0]
- if not os.path.exists(DbDir):
- os.makedirs(DbDir)
- #
- # A temporary INF is copied to database path which must have write permission
- # The temporary will be removed at the end of build
- # In case of name conflict, the file name is
- # FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
- #
- TempFullPath = os.path.join(DbDir,
- Filename)
- RtPath = PathClass(Path.File, Workspace)
- #
- # Modify the full path to temporary path, keep other unchanged
- #
- # To build same module more than once, the module path with FILE_GUID overridden has
- # the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
- # in DSC which is used as relative path by C files and other files in INF.
- # A trick was used: all module paths are PathClass instances, after the initialization
- # of PathClass, the PathClass.Path is overridden by the temporary INF path.
- #
- # The reason for creating a temporary INF is:
- # Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
- # the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
- # A different key for the same module is needed to create different output directory,
- # retrieve overridden PCDs, library instances.
- #
- # The BaseName is the FILE_GUID which is also the output directory name.
- #
- #
- RtPath.Path = TempFullPath
- RtPath.BaseName = BaseName
- #
- # If file exists, compare contents
- #
- if os.path.exists(TempFullPath):
- with open(str(Path), 'rb') as f1: Src = f1.read()
- with open(TempFullPath, 'rb') as f2: Dst = f2.read()
- if Src == Dst:
- return RtPath
- GlobalData.gTempInfs.append(TempFullPath)
- shutil.copy2(str(Path), TempFullPath)
- return RtPath
-
-## Remove temporary created INFs whose paths were saved in gTempInfs
-#
-def ClearDuplicatedInf():
- for File in GlobalData.gTempInfs:
- if os.path.exists(File):
- os.remove(File)
-
-## callback routine for processing variable option
-#
-# This function can be used to process variable number of option values. The
-# typical usage of it is specify architecure list on command line.
-# (e.g. <tool> -a IA32 X64 IPF)
-#
-# @param Option Standard callback function parameter
-# @param OptionString Standard callback function parameter
-# @param Value Standard callback function parameter
-# @param Parser Standard callback function parameter
-#
-# @retval
-#
-def ProcessVariableArgument(Option, OptionString, Value, Parser):
- assert Value is None
- Value = []
- RawArgs = Parser.rargs
- while RawArgs:
- Arg = RawArgs[0]
- if (Arg[:2] == "--" and len(Arg) > 2) or \
- (Arg[:1] == "-" and len(Arg) > 1 and Arg[1] != "-"):
- break
- Value.append(Arg)
- del RawArgs[0]
- setattr(Parser.values, Option.dest, Value)
-
-## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
-#
-# @param Guid The GUID string
-#
-# @retval string The GUID string in C structure style
-#
-def GuidStringToGuidStructureString(Guid):
- GuidList = Guid.split('-')
- Result = '{'
- for Index in range(0, 3, 1):
- Result = Result + '0x' + GuidList[Index] + ', '
- Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
- for Index in range(0, 12, 2):
- Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
- Result += '}}'
- return Result
-
-## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-#
-# @param GuidValue The GUID value in byte array
-#
-# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
-#
-def GuidStructureByteArrayToGuidString(GuidValue):
- guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
- guidValueList = guidValueString.split(",")
- if len(guidValueList) != 16:
- return ''
- #EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
- try:
- return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
- int(guidValueList[3], 16),
- int(guidValueList[2], 16),
- int(guidValueList[1], 16),
- int(guidValueList[0], 16),
- int(guidValueList[5], 16),
- int(guidValueList[4], 16),
- int(guidValueList[7], 16),
- int(guidValueList[6], 16),
- int(guidValueList[8], 16),
- int(guidValueList[9], 16),
- int(guidValueList[10], 16),
- int(guidValueList[11], 16),
- int(guidValueList[12], 16),
- int(guidValueList[13], 16),
- int(guidValueList[14], 16),
- int(guidValueList[15], 16)
- )
- except:
- return ''
-
-## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-#
-# @param GuidValue The GUID value in C structure format
-#
-# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
-#
-def GuidStructureStringToGuidString(GuidValue):
- guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
- guidValueList = guidValueString.split(",")
- if len(guidValueList) != 11:
- return ''
- #EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
- try:
- return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
- int(guidValueList[0], 16),
- int(guidValueList[1], 16),
- int(guidValueList[2], 16),
- int(guidValueList[3], 16),
- int(guidValueList[4], 16),
- int(guidValueList[5], 16),
- int(guidValueList[6], 16),
- int(guidValueList[7], 16),
- int(guidValueList[8], 16),
- int(guidValueList[9], 16),
- int(guidValueList[10], 16)
- )
- except:
- return ''
-
-## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
-#
-# @param GuidValue The GUID value in C structure format
-#
-# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
-#
-def GuidStructureStringToGuidValueName(GuidValue):
- guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
- guidValueList = guidValueString.split(",")
- if len(guidValueList) != 11:
- EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
- return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
- int(guidValueList[0], 16),
- int(guidValueList[1], 16),
- int(guidValueList[2], 16),
- int(guidValueList[3], 16),
- int(guidValueList[4], 16),
- int(guidValueList[5], 16),
- int(guidValueList[6], 16),
- int(guidValueList[7], 16),
- int(guidValueList[8], 16),
- int(guidValueList[9], 16),
- int(guidValueList[10], 16)
- )
-
-## Create directories
-#
-# @param Directory The directory name
-#
-def CreateDirectory(Directory):
- if Directory == None or Directory.strip() == "":
- return True
- try:
- if not os.access(Directory, os.F_OK):
- os.makedirs(Directory)
- except:
- return False
- return True
-
-## Remove directories, including files and sub-directories in it
-#
-# @param Directory The directory name
-#
-def RemoveDirectory(Directory, Recursively=False):
- if Directory == None or Directory.strip() == "" or not os.path.exists(Directory):
- return
- if Recursively:
- CurrentDirectory = os.getcwd()
- os.chdir(Directory)
- for File in os.listdir("."):
- if os.path.isdir(File):
- RemoveDirectory(File, Recursively)
- else:
- os.remove(File)
- os.chdir(CurrentDirectory)
- os.rmdir(Directory)
-
-## Check if given file is changed or not
-#
-# This method is used to check if a file is changed or not between two build
-# actions. It makes use of a cache to store file timestamps.
-#
-# @param File The path of file
-#
-# @retval True If the given file is changed, doesn't exist, or can't be
-# found in timestamp cache
-# @retval False If the given file is not changed
-#
-def IsChanged(File):
- if not os.path.exists(File):
- return True
-
- FileState = os.stat(File)
- TimeStamp = FileState[-2]
-
- if File in gFileTimeStampCache and TimeStamp == gFileTimeStampCache[File]:
- FileChanged = False
- else:
- FileChanged = True
- gFileTimeStampCache[File] = TimeStamp
-
- return FileChanged
-
-## Store content in file
-#
-# This method is used to save file only when its content is changed. This is
-# quite useful for "make" system to decide what will be re-built and what won't.
-#
-# @param File The path of file
-# @param Content The new content of the file
-# @param IsBinaryFile The flag indicating if the file is binary file or not
-#
-# @retval True If the file content is changed and the file is renewed
-# @retval False If the file content is the same
-#
-def SaveFileOnChange(File, Content, IsBinaryFile=True):
- if not IsBinaryFile:
- Content = Content.replace("\n", os.linesep)
-
- if os.path.exists(File):
- try:
- if Content == open(File, "rb").read():
- return False
- except:
- EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
-
- DirName = os.path.dirname(File)
- if not CreateDirectory(DirName):
- EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
- else:
- if DirName == '':
- DirName = os.getcwd()
- if not os.access(DirName, os.W_OK):
- EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
-
- try:
- if GlobalData.gIsWindows:
- try:
- from PyUtility import SaveFileToDisk
- if not SaveFileToDisk(File, Content):
- EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData=File)
- except:
- Fd = open(File, "wb")
- Fd.write(Content)
- Fd.close()
- else:
- Fd = open(File, "wb")
- Fd.write(Content)
- Fd.close()
- except IOError, X:
- EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
-
- return True
-
-## Make a Python object persistent on file system
-#
-# @param Data The object to be stored in file
-# @param File The path of file to store the object
-#
-def DataDump(Data, File):
- Fd = None
- try:
- Fd = open(File, 'wb')
- cPickle.dump(Data, Fd, cPickle.HIGHEST_PROTOCOL)
- except:
- EdkLogger.error("", FILE_OPEN_FAILURE, ExtraData=File, RaiseError=False)
- finally:
- if Fd != None:
- Fd.close()
-
-## Restore a Python object from a file
-#
-# @param File The path of file stored the object
-#
-# @retval object A python object
-# @retval None If failure in file operation
-#
-def DataRestore(File):
- Data = None
- Fd = None
- try:
- Fd = open(File, 'rb')
- Data = cPickle.load(Fd)
- except Exception, e:
- EdkLogger.verbose("Failed to load [%s]\n\t%s" % (File, str(e)))
- Data = None
- finally:
- if Fd != None:
- Fd.close()
- return Data
-
-## Retrieve and cache the real path name in file system
-#
-# @param Root The root directory of path relative to
-#
-# @retval str The path string if the path exists
-# @retval None If path doesn't exist
-#
-class DirCache:
- _CACHE_ = set()
- _UPPER_CACHE_ = {}
-
- def __init__(self, Root):
- self._Root = Root
- for F in os.listdir(Root):
- self._CACHE_.add(F)
- self._UPPER_CACHE_[F.upper()] = F
-
- # =[] operator
- def __getitem__(self, Path):
- Path = Path[len(os.path.commonprefix([Path, self._Root])):]
- if not Path:
- return self._Root
- if Path and Path[0] == os.path.sep:
- Path = Path[1:]
- if Path in self._CACHE_:
- return os.path.join(self._Root, Path)
- UpperPath = Path.upper()
- if UpperPath in self._UPPER_CACHE_:
- return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
-
- IndexList = []
- LastSepIndex = -1
- SepIndex = Path.find(os.path.sep)
- while SepIndex > -1:
- Parent = UpperPath[:SepIndex]
- if Parent not in self._UPPER_CACHE_:
- break
- LastSepIndex = SepIndex
- SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
-
- if LastSepIndex == -1:
- return None
-
- Cwd = os.getcwd()
- os.chdir(self._Root)
- SepIndex = LastSepIndex
- while SepIndex > -1:
- Parent = Path[:SepIndex]
- ParentKey = UpperPath[:SepIndex]
- if ParentKey not in self._UPPER_CACHE_:
- os.chdir(Cwd)
- return None
-
- if Parent in self._CACHE_:
- ParentDir = Parent
- else:
- ParentDir = self._UPPER_CACHE_[ParentKey]
- for F in os.listdir(ParentDir):
- Dir = os.path.join(ParentDir, F)
- self._CACHE_.add(Dir)
- self._UPPER_CACHE_[Dir.upper()] = Dir
-
- SepIndex = Path.find(os.path.sep, SepIndex + 1)
-
- os.chdir(Cwd)
- if Path in self._CACHE_:
- return os.path.join(self._Root, Path)
- elif UpperPath in self._UPPER_CACHE_:
- return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
- return None
-
-## Get all files of a directory
-#
-# @param Root: Root dir
-# @param SkipList : The files that need to be skipped
-#
-# @retval A list of all files
-#
-def GetFiles(Root, SkipList=None, FullPath=True):
- OriPath = Root
- FileList = []
- for Root, Dirs, Files in os.walk(Root):
- if SkipList:
- for Item in SkipList:
- if Item in Dirs:
- Dirs.remove(Item)
-
- for File in Files:
- File = os.path.normpath(os.path.join(Root, File))
- if not FullPath:
- File = File[len(OriPath) + 1:]
- FileList.append(File)
-
- return FileList
-
-## Check if given file exists or not
-#
-# @param File File name or path to be checked
-# @param Dir The directory the file is relative to
-#
-# @retval True if file exists
-# @retval False if file doesn't exist
-#
-def ValidFile(File, Ext=None):
- if Ext != None:
- Dummy, FileExt = os.path.splitext(File)
- if FileExt.lower() != Ext.lower():
- return False
- if not os.path.exists(File):
- return False
- return True
-
-def RealPath(File, Dir='', OverrideDir=''):
- NewFile = os.path.normpath(os.path.join(Dir, File))
- NewFile = GlobalData.gAllFiles[NewFile]
- if not NewFile and OverrideDir:
- NewFile = os.path.normpath(os.path.join(OverrideDir, File))
- NewFile = GlobalData.gAllFiles[NewFile]
- return NewFile
-
-def RealPath2(File, Dir='', OverrideDir=''):
- NewFile = None
- if OverrideDir:
- NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
- if NewFile:
- if OverrideDir[-1] == os.path.sep:
- return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
- else:
- return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
- if GlobalData.gAllFiles:
- NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
- if not NewFile:
- NewFile = os.path.normpath(os.path.join(Dir, File))
- if not os.path.exists(NewFile):
- return None, None
- if NewFile:
- if Dir:
- if Dir[-1] == os.path.sep:
- return NewFile[len(Dir):], NewFile[0:len(Dir)]
- else:
- return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
- else:
- return NewFile, ''
-
- return None, None
-
-## Check if given file exists or not
-#
-#
-def ValidFile2(AllFiles, File, Ext=None, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
- NewFile = File
- if Ext != None:
- Dummy, FileExt = os.path.splitext(File)
- if FileExt.lower() != Ext.lower():
- return False, File
-
- # Replace the Edk macros
- if OverrideDir != '' and OverrideDir != None:
- if OverrideDir.find('$(EFI_SOURCE)') > -1:
- OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
- if OverrideDir.find('$(EDK_SOURCE)') > -1:
- OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
-
- # Replace the default dir to current dir
- if Dir == '.':
- Dir = os.getcwd()
- Dir = Dir[len(Workspace) + 1:]
-
- # First check if File has Edk definition itself
- if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
- NewFile = File.replace('$(EFI_SOURCE)', EfiSource)
- NewFile = NewFile.replace('$(EDK_SOURCE)', EdkSource)
- NewFile = AllFiles[os.path.normpath(NewFile)]
- if NewFile != None:
- return True, NewFile
-
- # Second check the path with override value
- if OverrideDir != '' and OverrideDir != None:
- NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
- if NewFile != None:
- return True, NewFile
-
- # Last check the path with normal definitions
- File = os.path.join(Dir, File)
- NewFile = AllFiles[os.path.normpath(File)]
- if NewFile != None:
- return True, NewFile
-
- return False, File
-
-## Check if given file exists or not
-#
-#
-def ValidFile3(AllFiles, File, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
- # Replace the Edk macros
- if OverrideDir != '' and OverrideDir != None:
- if OverrideDir.find('$(EFI_SOURCE)') > -1:
- OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
- if OverrideDir.find('$(EDK_SOURCE)') > -1:
- OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
-
- # Replace the default dir to current dir
- # Dir is current module dir related to workspace
- if Dir == '.':
- Dir = os.getcwd()
- Dir = Dir[len(Workspace) + 1:]
-
- NewFile = File
- RelaPath = AllFiles[os.path.normpath(Dir)]
- NewRelaPath = RelaPath
-
- while(True):
- # First check if File has Edk definition itself
- if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
- File = File.replace('$(EFI_SOURCE)', EfiSource)
- File = File.replace('$(EDK_SOURCE)', EdkSource)
- NewFile = AllFiles[os.path.normpath(File)]
- if NewFile != None:
- NewRelaPath = os.path.dirname(NewFile)
- File = os.path.basename(NewFile)
- #NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
- break
-
- # Second check the path with override value
- if OverrideDir != '' and OverrideDir != None:
- NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
- if NewFile != None:
- #NewRelaPath = os.path.dirname(NewFile)
- NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
- break
-
- # Last check the path with normal definitions
- NewFile = AllFiles[os.path.normpath(os.path.join(Dir, File))]
- if NewFile != None:
- break
-
- # No file found
- break
-
- return NewRelaPath, RelaPath, File
-
-
-def GetRelPath(Path1, Path2):
- FileName = os.path.basename(Path2)
- L1 = os.path.normpath(Path1).split(os.path.normpath('/'))
- L2 = os.path.normpath(Path2).split(os.path.normpath('/'))
- for Index in range(0, len(L1)):
- if L1[Index] != L2[Index]:
- FileName = '../' * (len(L1) - Index)
- for Index2 in range(Index, len(L2)):
- FileName = os.path.join(FileName, L2[Index2])
- break
- return os.path.normpath(FileName)
-
-
-## Get GUID value from given packages
-#
-# @param CName The CName of the GUID
-# @param PackageList List of packages looking-up in
-# @param Inffile The driver file
-#
-# @retval GuidValue if the CName is found in any given package
-# @retval None if the CName is not found in all given packages
-#
-def GuidValue(CName, PackageList, Inffile = None):
- for P in PackageList:
- GuidKeys = P.Guids.keys()
- if Inffile and P._PrivateGuids:
- if not Inffile.startswith(P.MetaFile.Dir):
- GuidKeys = (dict.fromkeys(x for x in P.Guids if x not in P._PrivateGuids)).keys()
- if CName in GuidKeys:
- return P.Guids[CName]
- return None
-
-## Get Protocol value from given packages
-#
-# @param CName The CName of the GUID
-# @param PackageList List of packages looking-up in
-# @param Inffile The driver file
-#
-# @retval GuidValue if the CName is found in any given package
-# @retval None if the CName is not found in all given packages
-#
-def ProtocolValue(CName, PackageList, Inffile = None):
- for P in PackageList:
- ProtocolKeys = P.Protocols.keys()
- if Inffile and P._PrivateProtocols:
- if not Inffile.startswith(P.MetaFile.Dir):
- ProtocolKeys = (dict.fromkeys(x for x in P.Protocols if x not in P._PrivateProtocols)).keys()
- if CName in ProtocolKeys:
- return P.Protocols[CName]
- return None
-
-## Get PPI value from given packages
-#
-# @param CName The CName of the GUID
-# @param PackageList List of packages looking-up in
-# @param Inffile The driver file
-#
-# @retval GuidValue if the CName is found in any given package
-# @retval None if the CName is not found in all given packages
-#
-def PpiValue(CName, PackageList, Inffile = None):
- for P in PackageList:
- PpiKeys = P.Ppis.keys()
- if Inffile and P._PrivatePpis:
- if not Inffile.startswith(P.MetaFile.Dir):
- PpiKeys = (dict.fromkeys(x for x in P.Ppis if x not in P._PrivatePpis)).keys()
- if CName in PpiKeys:
- return P.Ppis[CName]
- return None
-
-## A string template class
-#
-# This class implements a template for string replacement. A string template
-# looks like following
-#
-# ${BEGIN} other_string ${placeholder_name} other_string ${END}
-#
-# The string between ${BEGIN} and ${END} will be repeated as many times as the
-# length of "placeholder_name", which is a list passed through a dict. The
-# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
-# be omitted and, in this case, the "placeholder_name" must not be a list and it
-# will just be replaced once.
-#
-class TemplateString(object):
- _REPEAT_START_FLAG = "BEGIN"
- _REPEAT_END_FLAG = "END"
-
- class Section(object):
- _LIST_TYPES = [type([]), type(set()), type((0,))]
-
- def __init__(self, TemplateSection, PlaceHolderList):
- self._Template = TemplateSection
- self._PlaceHolderList = []
-
- # Split the section into sub-sections according to the position of placeholders
- if PlaceHolderList:
- self._SubSectionList = []
- SubSectionStart = 0
- #
- # The placeholders passed in must be in the format of
- #
- # PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
- #
- for PlaceHolder, Start, End in PlaceHolderList:
- self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
- self._SubSectionList.append(TemplateSection[Start:End])
- self._PlaceHolderList.append(PlaceHolder)
- SubSectionStart = End
- if SubSectionStart < len(TemplateSection):
- self._SubSectionList.append(TemplateSection[SubSectionStart:])
- else:
- self._SubSectionList = [TemplateSection]
-
- def __str__(self):
- return self._Template + " : " + str(self._PlaceHolderList)
-
- def Instantiate(self, PlaceHolderValues):
- RepeatTime = -1
- RepeatPlaceHolders = {}
- NonRepeatPlaceHolders = {}
-
- for PlaceHolder in self._PlaceHolderList:
- if PlaceHolder not in PlaceHolderValues:
- continue
- Value = PlaceHolderValues[PlaceHolder]
- if type(Value) in self._LIST_TYPES:
- if RepeatTime < 0:
- RepeatTime = len(Value)
- elif RepeatTime != len(Value):
- EdkLogger.error(
- "TemplateString",
- PARAMETER_INVALID,
- "${%s} has different repeat time from others!" % PlaceHolder,
- ExtraData=str(self._Template)
- )
- RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
- else:
- NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
-
- if NonRepeatPlaceHolders:
- StringList = []
- for S in self._SubSectionList:
- if S not in NonRepeatPlaceHolders:
- StringList.append(S)
- else:
- StringList.append(str(NonRepeatPlaceHolders[S]))
- else:
- StringList = self._SubSectionList
-
- if RepeatPlaceHolders:
- TempStringList = []
- for Index in range(RepeatTime):
- for S in StringList:
- if S not in RepeatPlaceHolders:
- TempStringList.append(S)
- else:
- TempStringList.append(str(RepeatPlaceHolders[S][Index]))
- StringList = TempStringList
-
- return "".join(StringList)
-
- ## Constructor
- def __init__(self, Template=None):
- self.String = ''
- self.IsBinary = False
- self._Template = Template
- self._TemplateSectionList = self._Parse(Template)
-
- ## str() operator
- #
- # @retval string The string replaced
- #
- def __str__(self):
- return self.String
-
- ## Split the template string into fragments per the ${BEGIN} and ${END} flags
- #
- # @retval list A list of TemplateString.Section objects
- #
- def _Parse(self, Template):
- SectionStart = 0
- SearchFrom = 0
- MatchEnd = 0
- PlaceHolderList = []
- TemplateSectionList = []
- while Template:
- MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
- if not MatchObj:
- if MatchEnd <= len(Template):
- TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
- TemplateSectionList.append(TemplateSection)
- break
-
- MatchString = MatchObj.group(1)
- MatchStart = MatchObj.start()
- MatchEnd = MatchObj.end()
-
- if MatchString == self._REPEAT_START_FLAG:
- if MatchStart > SectionStart:
- TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
- TemplateSectionList.append(TemplateSection)
- SectionStart = MatchEnd
- PlaceHolderList = []
- elif MatchString == self._REPEAT_END_FLAG:
- TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
- TemplateSectionList.append(TemplateSection)
- SectionStart = MatchEnd
- PlaceHolderList = []
- else:
- PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
- SearchFrom = MatchEnd
- return TemplateSectionList
-
- ## Replace the string template with dictionary of placeholders and append it to previous one
- #
- # @param AppendString The string template to append
- # @param Dictionary The placeholder dictionaries
- #
- def Append(self, AppendString, Dictionary=None):
- if Dictionary:
- SectionList = self._Parse(AppendString)
- self.String += "".join([S.Instantiate(Dictionary) for S in SectionList])
- else:
- self.String += AppendString
-
- ## Replace the string template with dictionary of placeholders
- #
- # @param Dictionary The placeholder dictionaries
- #
- # @retval str The string replaced with placeholder values
- #
- def Replace(self, Dictionary=None):
- return "".join([S.Instantiate(Dictionary) for S in self._TemplateSectionList])
-
-## Progress indicator class
-#
-# This class makes use of thread to print progress on console.
-#
-class Progressor:
- # for avoiding deadloop
- _StopFlag = None
- _ProgressThread = None
- _CheckInterval = 0.25
-
- ## Constructor
- #
- # @param OpenMessage The string printed before progress characters
- # @param CloseMessage The string printed after progress characters
- # @param ProgressChar The character used to indicate the progress
- # @param Interval The interval in seconds between two progress characters
- #
- def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
- self.PromptMessage = OpenMessage
- self.CodaMessage = CloseMessage
- self.ProgressChar = ProgressChar
- self.Interval = Interval
- if Progressor._StopFlag == None:
- Progressor._StopFlag = threading.Event()
-
- ## Start to print progress character
- #
- # @param OpenMessage The string printed before progress characters
- #
- def Start(self, OpenMessage=None):
- if OpenMessage != None:
- self.PromptMessage = OpenMessage
- Progressor._StopFlag.clear()
- if Progressor._ProgressThread == None:
- Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
- Progressor._ProgressThread.setDaemon(False)
- Progressor._ProgressThread.start()
-
- ## Stop printing progress character
- #
- # @param CloseMessage The string printed after progress characters
- #
- def Stop(self, CloseMessage=None):
- OriginalCodaMessage = self.CodaMessage
- if CloseMessage != None:
- self.CodaMessage = CloseMessage
- self.Abort()
- self.CodaMessage = OriginalCodaMessage
-
- ## Thread entry method
- def _ProgressThreadEntry(self):
- sys.stdout.write(self.PromptMessage + " ")
- sys.stdout.flush()
- TimeUp = 0.0
- while not Progressor._StopFlag.isSet():
- if TimeUp <= 0.0:
- sys.stdout.write(self.ProgressChar)
- sys.stdout.flush()
- TimeUp = self.Interval
- time.sleep(self._CheckInterval)
- TimeUp -= self._CheckInterval
- sys.stdout.write(" " + self.CodaMessage + "\n")
- sys.stdout.flush()
-
- ## Abort the progress display
- @staticmethod
- def Abort():
- if Progressor._StopFlag != None:
- Progressor._StopFlag.set()
- if Progressor._ProgressThread != None:
- Progressor._ProgressThread.join()
- Progressor._ProgressThread = None
-
-## A dict which can access its keys and/or values orderly
-#
-# The class implements a new kind of dict which its keys or values can be
-# accessed in the order they are added into the dict. It guarantees the order
-# by making use of an internal list to keep a copy of keys.
-#
-class sdict(IterableUserDict):
- ## Constructor
- def __init__(self):
- IterableUserDict.__init__(self)
- self._key_list = []
-
- ## [] operator
- def __setitem__(self, key, value):
- if key not in self._key_list:
- self._key_list.append(key)
- IterableUserDict.__setitem__(self, key, value)
-
- ## del operator
- def __delitem__(self, key):
- self._key_list.remove(key)
- IterableUserDict.__delitem__(self, key)
-
- ## used in "for k in dict" loop to ensure the correct order
- def __iter__(self):
- return self.iterkeys()
-
- ## len() support
- def __len__(self):
- return len(self._key_list)
-
- ## "in" test support
- def __contains__(self, key):
- return key in self._key_list
-
- ## indexof support
- def index(self, key):
- return self._key_list.index(key)
-
- ## insert support
- def insert(self, key, newkey, newvalue, order):
- index = self._key_list.index(key)
- if order == 'BEFORE':
- self._key_list.insert(index, newkey)
- IterableUserDict.__setitem__(self, newkey, newvalue)
- elif order == 'AFTER':
- self._key_list.insert(index + 1, newkey)
- IterableUserDict.__setitem__(self, newkey, newvalue)
-
- ## append support
- def append(self, sdict):
- for key in sdict:
- if key not in self._key_list:
- self._key_list.append(key)
- IterableUserDict.__setitem__(self, key, sdict[key])
-
- def has_key(self, key):
- return key in self._key_list
-
- ## Empty the dict
- def clear(self):
- self._key_list = []
- IterableUserDict.clear(self)
-
- ## Return a copy of keys
- def keys(self):
- keys = []
- for key in self._key_list:
- keys.append(key)
- return keys
-
- ## Return a copy of values
- def values(self):
- values = []
- for key in self._key_list:
- values.append(self[key])
- return values
-
- ## Return a copy of (key, value) list
- def items(self):
- items = []
- for key in self._key_list:
- items.append((key, self[key]))
- return items
-
- ## Iteration support
- def iteritems(self):
- return iter(self.items())
-
- ## Keys iteration support
- def iterkeys(self):
- return iter(self.keys())
-
- ## Values iteration support
- def itervalues(self):
- return iter(self.values())
-
- ## Return value related to a key, and remove the (key, value) from the dict
- def pop(self, key, *dv):
- value = None
- if key in self._key_list:
- value = self[key]
- self.__delitem__(key)
- elif len(dv) != 0 :
- value = kv[0]
- return value
-
- ## Return (key, value) pair, and remove the (key, value) from the dict
- def popitem(self):
- key = self._key_list[-1]
- value = self[key]
- self.__delitem__(key)
- return key, value
-
- def update(self, dict=None, **kwargs):
- if dict != None:
- for k, v in dict.items():
- self[k] = v
- if len(kwargs):
- for k, v in kwargs.items():
- self[k] = v
-
-## Dictionary with restricted keys
-#
-class rdict(dict):
- ## Constructor
- def __init__(self, KeyList):
- for Key in KeyList:
- dict.__setitem__(self, Key, "")
-
- ## []= operator
- def __setitem__(self, key, value):
- if key not in self:
- EdkLogger.error("RestrictedDict", ATTRIBUTE_SET_FAILURE, "Key [%s] is not allowed" % key,
- ExtraData=", ".join(dict.keys(self)))
- dict.__setitem__(self, key, value)
-
- ## =[] operator
- def __getitem__(self, key):
- if key not in self:
- return ""
- return dict.__getitem__(self, key)
-
- ## del operator
- def __delitem__(self, key):
- EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="del")
-
- ## Empty the dict
- def clear(self):
- for Key in self:
- self.__setitem__(Key, "")
-
- ## Return value related to a key, and remove the (key, value) from the dict
- def pop(self, key, *dv):
- EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="pop")
-
- ## Return (key, value) pair, and remove the (key, value) from the dict
- def popitem(self):
- EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="popitem")
-
-## Dictionary using prioritized list as key
-#
-class tdict:
- _ListType = type([])
- _TupleType = type(())
- _Wildcard = 'COMMON'
- _ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', '*', 'PLATFORM']
-
- def __init__(self, _Single_=False, _Level_=2):
- self._Level_ = _Level_
- self.data = {}
- self._Single_ = _Single_
-
- # =[] operator
- def __getitem__(self, key):
- KeyType = type(key)
- RestKeys = None
- if KeyType == self._ListType or KeyType == self._TupleType:
- FirstKey = key[0]
- if len(key) > 1:
- RestKeys = key[1:]
- elif self._Level_ > 1:
- RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
- else:
- FirstKey = key
- if self._Level_ > 1:
- RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
-
- if FirstKey == None or str(FirstKey).upper() in self._ValidWildcardList:
- FirstKey = self._Wildcard
-
- if self._Single_:
- return self._GetSingleValue(FirstKey, RestKeys)
- else:
- return self._GetAllValues(FirstKey, RestKeys)
-
- def _GetSingleValue(self, FirstKey, RestKeys):
- Value = None
- #print "%s-%s" % (FirstKey, self._Level_) ,
- if self._Level_ > 1:
- if FirstKey == self._Wildcard:
- if FirstKey in self.data:
- Value = self.data[FirstKey][RestKeys]
- if Value == None:
- for Key in self.data:
- Value = self.data[Key][RestKeys]
- if Value != None: break
- else:
- if FirstKey in self.data:
- Value = self.data[FirstKey][RestKeys]
- if Value == None and self._Wildcard in self.data:
- #print "Value=None"
- Value = self.data[self._Wildcard][RestKeys]
- else:
- if FirstKey == self._Wildcard:
- if FirstKey in self.data:
- Value = self.data[FirstKey]
- if Value == None:
- for Key in self.data:
- Value = self.data[Key]
- if Value != None: break
- else:
- if FirstKey in self.data:
- Value = self.data[FirstKey]
- elif self._Wildcard in self.data:
- Value = self.data[self._Wildcard]
- return Value
-
- def _GetAllValues(self, FirstKey, RestKeys):
- Value = []
- if self._Level_ > 1:
- if FirstKey == self._Wildcard:
- for Key in self.data:
- Value += self.data[Key][RestKeys]
- else:
- if FirstKey in self.data:
- Value += self.data[FirstKey][RestKeys]
- if self._Wildcard in self.data:
- Value += self.data[self._Wildcard][RestKeys]
- else:
- if FirstKey == self._Wildcard:
- for Key in self.data:
- Value.append(self.data[Key])
- else:
- if FirstKey in self.data:
- Value.append(self.data[FirstKey])
- if self._Wildcard in self.data:
- Value.append(self.data[self._Wildcard])
- return Value
-
- ## []= operator
- def __setitem__(self, key, value):
- KeyType = type(key)
- RestKeys = None
- if KeyType == self._ListType or KeyType == self._TupleType:
- FirstKey = key[0]
- if len(key) > 1:
- RestKeys = key[1:]
- else:
- RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
- else:
- FirstKey = key
- if self._Level_ > 1:
- RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
-
- if FirstKey in self._ValidWildcardList:
- FirstKey = self._Wildcard
-
- if FirstKey not in self.data and self._Level_ > 0:
- self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
-
- if self._Level_ > 1:
- self.data[FirstKey][RestKeys] = value
- else:
- self.data[FirstKey] = value
-
- def SetGreedyMode(self):
- self._Single_ = False
- if self._Level_ > 1:
- for Key in self.data:
- self.data[Key].SetGreedyMode()
-
- def SetSingleMode(self):
- self._Single_ = True
- if self._Level_ > 1:
- for Key in self.data:
- self.data[Key].SetSingleMode()
-
- def GetKeys(self, KeyIndex=0):
- assert KeyIndex >= 0
- if KeyIndex == 0:
- return set(self.data.keys())
- else:
- keys = set()
- for Key in self.data:
- keys |= self.data[Key].GetKeys(KeyIndex - 1)
- return keys
-
-## Boolean chain list
-#
-class Blist(UserList):
- def __init__(self, initlist=None):
- UserList.__init__(self, initlist)
- def __setitem__(self, i, item):
- if item not in [True, False]:
- if item == 0:
- item = False
- else:
- item = True
- self.data[i] = item
- def _GetResult(self):
- Value = True
- for item in self.data:
- Value &= item
- return Value
- Result = property(_GetResult)
-
-def ParseConsoleLog(Filename):
- Opr = open(os.path.normpath(Filename), 'r')
- Opw = open(os.path.normpath(Filename + '.New'), 'w+')
- for Line in Opr.readlines():
- if Line.find('.efi') > -1:
- Line = Line[Line.rfind(' ') : Line.rfind('.efi')].strip()
- Opw.write('%s\n' % Line)
-
- Opr.close()
- Opw.close()
-
-def AnalyzePcdExpression(Setting):
- Setting = Setting.strip()
- # There might be escaped quote in a string: \", \\\"
- Data = Setting.replace('\\\\', '//').replace('\\\"', '\\\'')
- # There might be '|' in string and in ( ... | ... ), replace it with '-'
- NewStr = ''
- InStr = False
- Pair = 0
- for ch in Data:
- if ch == '"':
- InStr = not InStr
- elif ch == '(' and not InStr:
- Pair += 1
- elif ch == ')' and not InStr:
- Pair -= 1
-
- if (Pair > 0 or InStr) and ch == TAB_VALUE_SPLIT:
- NewStr += '-'
- else:
- NewStr += ch
- FieldList = []
- StartPos = 0
- while True:
- Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
- if Pos < 0:
- FieldList.append(Setting[StartPos:].strip())
- break
- FieldList.append(Setting[StartPos:Pos].strip())
- StartPos = Pos + 1
-
- return FieldList
-
-## AnalyzeDscPcd
-#
-# Analyze DSC PCD value, since there is no data type info in DSC
-# This function is used to match functions (AnalyzePcdData, AnalyzeHiiPcdData, AnalyzeVpdPcdData) used for retrieving PCD value from database
-# 1. Feature flag: TokenSpace.PcdCName|PcdValue
-# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|MaxSize]
-# 3. Dynamic default:
-# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
-# TokenSpace.PcdCName|PcdValue
-# 4. Dynamic VPD:
-# TokenSpace.PcdCName|VpdOffset[|VpdValue]
-# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
-# 5. Dynamic HII:
-# TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
-# PCD value needs to be located in such kind of string, and the PCD value might be an expression in which
-# there might be a "|" operator, possibly also inside a string value.
-#
-# @param Setting: String contain information described above with "TokenSpace.PcdCName|" stripped
-# @param PcdType: PCD type: feature, fixed, dynamic default VPD HII
-# @param DataType: The datum type of PCD: VOID*, UINT, BOOL
-# @retval:
-# ValueList: A List contain fields described above
-# IsValid: True if conforming EBNF, otherwise False
-# Index: The index where PcdValue is in ValueList
-#
-def AnalyzeDscPcd(Setting, PcdType, DataType=''):
- FieldList = AnalyzePcdExpression(Setting)
-
- IsValid = True
- if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_FEATURE_FLAG):
- Value = FieldList[0]
- Size = ''
- if len(FieldList) > 1:
- Type = FieldList[1]
- # Fix the PCD type when no DataType input
- if Type == 'VOID*':
- DataType = 'VOID*'
- else:
- Size = FieldList[1]
- if len(FieldList) > 2:
- Size = FieldList[2]
- if DataType == 'VOID*':
- IsValid = (len(FieldList) <= 3)
- else:
- IsValid = (len(FieldList) <= 1)
- return [Value, '', Size], IsValid, 0
- elif PcdType in (MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
- Value = FieldList[0]
- Size = Type = ''
- if len(FieldList) > 1:
- Type = FieldList[1]
- else:
- Type = DataType
- if len(FieldList) > 2:
- Size = FieldList[2]
- else:
- if Type == 'VOID*':
- if Value.startswith("L"):
- Size = str((len(Value)- 3 + 1) * 2)
- elif Value.startswith("{"):
- Size = str(len(Value.split(",")))
- else:
- Size = str(len(Value) -2 + 1 )
- if DataType == 'VOID*':
- IsValid = (len(FieldList) <= 3)
- else:
- IsValid = (len(FieldList) <= 1)
- return [Value, Type, Size], IsValid, 0
- elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
- VpdOffset = FieldList[0]
- Value = Size = ''
- if not DataType == 'VOID*':
- if len(FieldList) > 1:
- Value = FieldList[1]
- else:
- if len(FieldList) > 1:
- Size = FieldList[1]
- if len(FieldList) > 2:
- Value = FieldList[2]
- if DataType == 'VOID*':
- IsValid = (len(FieldList) <= 3)
- else:
- IsValid = (len(FieldList) <= 2)
- return [VpdOffset, Size, Value], IsValid, 2
- elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
- HiiString = FieldList[0]
- Guid = Offset = Value = Attribute = ''
- if len(FieldList) > 1:
- Guid = FieldList[1]
- if len(FieldList) > 2:
- Offset = FieldList[2]
- if len(FieldList) > 3:
- Value = FieldList[3]
- if len(FieldList) > 4:
- Attribute = FieldList[4]
- IsValid = (3 <= len(FieldList) <= 5)
- return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
- return [], False, 0
-
-## AnalyzePcdData
-#
-# Analyze the pcd Value, Datum type and TokenNumber.
-# Used to avoid split issue while the value string contain "|" character
-#
-# @param[in] Setting: A String contain value/datum type/token number information;
-#
-# @retval ValueList: A List contain value, datum type and toke number.
-#
-def AnalyzePcdData(Setting):
- ValueList = ['', '', '']
-
- ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
- PtrValue = ValueRe.findall(Setting)
-
- ValueUpdateFlag = False
-
- if len(PtrValue) >= 1:
- Setting = re.sub(ValueRe, '', Setting)
- ValueUpdateFlag = True
-
- TokenList = Setting.split(TAB_VALUE_SPLIT)
- ValueList[0:len(TokenList)] = TokenList
-
- if ValueUpdateFlag:
- ValueList[0] = PtrValue[0]
-
- return ValueList
-
-## AnalyzeHiiPcdData
-#
-# Analyze the pcd Value, variable name, variable Guid and variable offset.
-# Used to avoid split issue while the value string contain "|" character
-#
-# @param[in] Setting: A String contain VariableName, VariableGuid, VariableOffset, DefaultValue information;
-#
-# @retval ValueList: A List contaian VariableName, VariableGuid, VariableOffset, DefaultValue.
-#
-def AnalyzeHiiPcdData(Setting):
- ValueList = ['', '', '', '']
-
- TokenList = GetSplitValueList(Setting)
- ValueList[0:len(TokenList)] = TokenList
-
- return ValueList
-
-## AnalyzeVpdPcdData
-#
-# Analyze the vpd pcd VpdOffset, MaxDatumSize and InitialValue.
-# Used to avoid split issue while the value string contain "|" character
-#
-# @param[in] Setting: A String contain VpdOffset/MaxDatumSize/InitialValue information;
-#
-# @retval ValueList: A List contain VpdOffset, MaxDatumSize and InitialValue.
-#
-def AnalyzeVpdPcdData(Setting):
- ValueList = ['', '', '']
-
- ValueRe = re.compile(r'\s*L?\".*\|.*\"\s*$')
- PtrValue = ValueRe.findall(Setting)
-
- ValueUpdateFlag = False
-
- if len(PtrValue) >= 1:
- Setting = re.sub(ValueRe, '', Setting)
- ValueUpdateFlag = True
-
- TokenList = Setting.split(TAB_VALUE_SPLIT)
- ValueList[0:len(TokenList)] = TokenList
-
- if ValueUpdateFlag:
- ValueList[2] = PtrValue[0]
-
- return ValueList
-
-## check format of PCD value against its the datum type
-#
-# For PCD value setting
-#
-def CheckPcdDatum(Type, Value):
- if Type == "VOID*":
- ValueRe = re.compile(r'\s*L?\".*\"\s*$')
- if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
- or (Value.startswith('{') and Value.endswith('}'))
- ):
- return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
- ", or \"...\" for string, or L\"...\" for unicode string" % (Value, Type)
- elif ValueRe.match(Value):
- # Check the chars in UnicodeString or CString is printable
- if Value.startswith("L"):
- Value = Value[2:-1]
- else:
- Value = Value[1:-1]
- Printset = set(string.printable)
- Printset.remove(TAB_PRINTCHAR_VT)
- Printset.add(TAB_PRINTCHAR_BS)
- Printset.add(TAB_PRINTCHAR_NUL)
- if not set(Value).issubset(Printset):
- PrintList = list(Printset)
- PrintList.sort()
- return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
- elif Type == 'BOOLEAN':
- if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
- return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
- ", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
- elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
- try:
- Value = long(Value, 0)
- except:
- return False, "Invalid value [%s] of type [%s];"\
- " must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
- else:
- return False, "Invalid type [%s]; must be one of VOID*, BOOLEAN, UINT8, UINT16, UINT32, UINT64." % (Type)
-
- return True, ""
-
-## Split command line option string to list
-#
-# subprocess.Popen needs the args to be a sequence. Otherwise there's problem
-# in non-windows platform to launch command
-#
-def SplitOption(OptionString):
- OptionList = []
- LastChar = " "
- OptionStart = 0
- QuotationMark = ""
- for Index in range(0, len(OptionString)):
- CurrentChar = OptionString[Index]
- if CurrentChar in ['"', "'"]:
- if QuotationMark == CurrentChar:
- QuotationMark = ""
- elif QuotationMark == "":
- QuotationMark = CurrentChar
- continue
- elif QuotationMark:
- continue
-
- if CurrentChar in ["/", "-"] and LastChar in [" ", "\t", "\r", "\n"]:
- if Index > OptionStart:
- OptionList.append(OptionString[OptionStart:Index - 1])
- OptionStart = Index
- LastChar = CurrentChar
- OptionList.append(OptionString[OptionStart:])
- return OptionList
-
-def CommonPath(PathList):
- P1 = min(PathList).split(os.path.sep)
- P2 = max(PathList).split(os.path.sep)
- for Index in xrange(min(len(P1), len(P2))):
- if P1[Index] != P2[Index]:
- return os.path.sep.join(P1[:Index])
- return os.path.sep.join(P1)
-
-#
-# Convert string to C format array
-#
-def ConvertStringToByteArray(Value):
- Value = Value.strip()
- if not Value:
- return None
- if Value[0] == '{':
- if not Value.endswith('}'):
- return None
- Value = Value.replace(' ', '').replace('{', '').replace('}', '')
- ValFields = Value.split(',')
- try:
- for Index in range(len(ValFields)):
- ValFields[Index] = str(int(ValFields[Index], 0))
- except ValueError:
- return None
- Value = '{' + ','.join(ValFields) + '}'
- return Value
-
- Unicode = False
- if Value.startswith('L"'):
- if not Value.endswith('"'):
- return None
- Value = Value[1:]
- Unicode = True
- elif not Value.startswith('"') or not Value.endswith('"'):
- return None
-
- Value = eval(Value) # translate escape character
- NewValue = '{'
- for Index in range(0,len(Value)):
- if Unicode:
- NewValue = NewValue + str(ord(Value[Index]) % 0x10000) + ','
- else:
- NewValue = NewValue + str(ord(Value[Index]) % 0x100) + ','
- Value = NewValue + '0}'
- return Value
-
-class PathClass(object):
- def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
- Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
- self.Arch = Arch
- self.File = str(File)
- if os.path.isabs(self.File):
- self.Root = ''
- self.AlterRoot = ''
- else:
- self.Root = str(Root)
- self.AlterRoot = str(AlterRoot)
-
- # Remove any '.' and '..' in path
- if self.Root:
- self.Root = mws.getWs(self.Root, self.File)
- self.Path = os.path.normpath(os.path.join(self.Root, self.File))
- self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
- # eliminate the side-effect of 'C:'
- if self.Root[-1] == ':':
- self.Root += os.path.sep
- # file path should not start with path separator
- if self.Root[-1] == os.path.sep:
- self.File = self.Path[len(self.Root):]
- else:
- self.File = self.Path[len(self.Root) + 1:]
- else:
- self.Path = os.path.normpath(self.File)
-
- self.SubDir, self.Name = os.path.split(self.File)
- self.BaseName, self.Ext = os.path.splitext(self.Name)
-
- if self.Root:
- if self.SubDir:
- self.Dir = os.path.join(self.Root, self.SubDir)
- else:
- self.Dir = self.Root
- else:
- self.Dir = self.SubDir
-
- if IsBinary:
- self.Type = Type
- else:
- self.Type = self.Ext.lower()
-
- self.IsBinary = IsBinary
- self.Target = Target
- self.TagName = TagName
- self.ToolCode = ToolCode
- self.ToolChainFamily = ToolChainFamily
-
- self._Key = None
-
- ## Convert the object of this class to a string
- #
- # Convert member Path of the class to a string
- #
- # @retval string Formatted String
- #
- def __str__(self):
- return self.Path
-
- ## Override __eq__ function
- #
- # Check whether PathClass are the same
- #
- # @retval False The two PathClass are different
- # @retval True The two PathClass are the same
- #
- def __eq__(self, Other):
- if type(Other) == type(self):
- return self.Path == Other.Path
- else:
- return self.Path == str(Other)
-
- ## Override __cmp__ function
- #
- # Customize the comparsion operation of two PathClass
- #
- # @retval 0 The two PathClass are different
- # @retval -1 The first PathClass is less than the second PathClass
- # @retval 1 The first PathClass is Bigger than the second PathClass
- def __cmp__(self, Other):
- if type(Other) == type(self):
- OtherKey = Other.Path
- else:
- OtherKey = str(Other)
-
- SelfKey = self.Path
- if SelfKey == OtherKey:
- return 0
- elif SelfKey > OtherKey:
- return 1
- else:
- return -1
-
- ## Override __hash__ function
- #
- # Use Path as key in hash table
- #
- # @retval string Key for hash table
- #
- def __hash__(self):
- return hash(self.Path)
-
- def _GetFileKey(self):
- if self._Key == None:
- self._Key = self.Path.upper() # + self.ToolChainFamily + self.TagName + self.ToolCode + self.Target
- return self._Key
-
- def _GetTimeStamp(self):
- return os.stat(self.Path)[8]
-
- def Validate(self, Type='', CaseSensitive=True):
- if GlobalData.gCaseInsensitive:
- CaseSensitive = False
- if Type and Type.lower() != self.Type:
- return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
-
- RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
- if not RealRoot and not RealFile:
- RealFile = self.File
- if self.AlterRoot:
- RealFile = os.path.join(self.AlterRoot, self.File)
- elif self.Root:
- RealFile = os.path.join(self.Root, self.File)
- if len (mws.getPkgPath()) == 0:
- return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
- else:
- return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
-
- ErrorCode = 0
- ErrorInfo = ''
- if RealRoot != self.Root or RealFile != self.File:
- if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
- ErrorCode = FILE_CASE_MISMATCH
- ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
-
- self.SubDir, self.Name = os.path.split(RealFile)
- self.BaseName, self.Ext = os.path.splitext(self.Name)
- if self.SubDir:
- self.Dir = os.path.join(RealRoot, self.SubDir)
- else:
- self.Dir = RealRoot
- self.File = RealFile
- self.Root = RealRoot
- self.Path = os.path.join(RealRoot, RealFile)
- return ErrorCode, ErrorInfo
-
- Key = property(_GetFileKey)
- TimeStamp = property(_GetTimeStamp)
-
-## Parse PE image to get the required PE informaion.
-#
-class PeImageClass():
- ## Constructor
- #
- # @param File FilePath of PeImage
- #
- def __init__(self, PeFile):
- self.FileName = PeFile
- self.IsValid = False
- self.Size = 0
- self.EntryPoint = 0
- self.SectionAlignment = 0
- self.SectionHeaderList = []
- self.ErrorInfo = ''
- try:
- PeObject = open(PeFile, 'rb')
- except:
- self.ErrorInfo = self.FileName + ' can not be found\n'
- return
- # Read DOS header
- ByteArray = array.array('B')
- ByteArray.fromfile(PeObject, 0x3E)
- ByteList = ByteArray.tolist()
- # DOS signature should be 'MZ'
- if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
- self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
- return
-
- # Read 4 byte PE Signature
- PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
- PeObject.seek(PeOffset)
- ByteArray = array.array('B')
- ByteArray.fromfile(PeObject, 4)
- # PE signature should be 'PE\0\0'
- if ByteArray.tostring() != 'PE\0\0':
- self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
- return
-
- # Read PE file header
- ByteArray = array.array('B')
- ByteArray.fromfile(PeObject, 0x14)
- ByteList = ByteArray.tolist()
- SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
- if SecNumber == 0:
- self.ErrorInfo = self.FileName + ' has no section header'
- return
-
- # Read PE optional header
- OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
- ByteArray = array.array('B')
- ByteArray.fromfile(PeObject, OptionalHeaderSize)
- ByteList = ByteArray.tolist()
- self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
- self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
- self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
-
- # Read each Section Header
- for Index in range(SecNumber):
- ByteArray = array.array('B')
- ByteArray.fromfile(PeObject, 0x28)
- ByteList = ByteArray.tolist()
- SecName = self._ByteListToStr(ByteList[0:8])
- SecVirtualSize = self._ByteListToInt(ByteList[8:12])
- SecRawAddress = self._ByteListToInt(ByteList[20:24])
- SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
- self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
- self.IsValid = True
- PeObject.close()
-
- def _ByteListToStr(self, ByteList):
- String = ''
- for index in range(len(ByteList)):
- if ByteList[index] == 0:
- break
- String += chr(ByteList[index])
- return String
-
- def _ByteListToInt(self, ByteList):
- Value = 0
- for index in range(len(ByteList) - 1, -1, -1):
- Value = (Value << 8) | int(ByteList[index])
- return Value
-
-
-class SkuClass():
-
- DEFAULT = 0
- SINGLE = 1
- MULTIPLE =2
-
- def __init__(self,SkuIdentifier='', SkuIds={}):
-
- self.AvailableSkuIds = sdict()
- self.SkuIdSet = []
- self.SkuIdNumberSet = []
- if SkuIdentifier == '' or SkuIdentifier is None:
- self.SkuIdSet = ['DEFAULT']
- self.SkuIdNumberSet = ['0U']
- elif SkuIdentifier == 'ALL':
- self.SkuIdSet = SkuIds.keys()
- self.SkuIdNumberSet = [num.strip() + 'U' for num in SkuIds.values()]
- else:
- r = SkuIdentifier.split('|')
- self.SkuIdSet=[r[k].strip() for k in range(len(r))]
- k = None
- try:
- self.SkuIdNumberSet = [SkuIds[k].strip() + 'U' for k in self.SkuIdSet]
- except Exception:
- EdkLogger.error("build", PARAMETER_INVALID,
- ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
- % (k, " | ".join(SkuIds.keys())))
- if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet and SkuIdentifier != 'ALL':
- self.SkuIdSet.remove('DEFAULT')
- self.SkuIdNumberSet.remove('0U')
- for each in self.SkuIdSet:
- if each in SkuIds:
- self.AvailableSkuIds[each] = SkuIds[each]
- else:
- EdkLogger.error("build", PARAMETER_INVALID,
- ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
- % (each, " | ".join(SkuIds.keys())))
-
- def __SkuUsageType(self):
-
- if len(self.SkuIdSet) == 1:
- if self.SkuIdSet[0] == 'DEFAULT':
- return SkuClass.DEFAULT
- else:
- return SkuClass.SINGLE
- else:
- return SkuClass.MULTIPLE
-
- def __GetAvailableSkuIds(self):
- return self.AvailableSkuIds
-
- def __GetSystemSkuID(self):
- if self.__SkuUsageType() == SkuClass.SINGLE:
- return self.SkuIdSet[0]
- else:
- return 'DEFAULT'
- def __GetAvailableSkuIdNumber(self):
- return self.SkuIdNumberSet
- SystemSkuId = property(__GetSystemSkuID)
- AvailableSkuIdSet = property(__GetAvailableSkuIds)
- SkuUsageType = property(__SkuUsageType)
- AvailableSkuIdNumSet = property(__GetAvailableSkuIdNumber)
-
-#
-# Pack a registry format GUID
-#
-def PackRegistryFormatGuid(Guid):
- Guid = Guid.split('-')
- return pack('=LHHBBBBBBBB',
- int(Guid[0], 16),
- int(Guid[1], 16),
- int(Guid[2], 16),
- int(Guid[3][-4:-2], 16),
- int(Guid[3][-2:], 16),
- int(Guid[4][-12:-10], 16),
- int(Guid[4][-10:-8], 16),
- int(Guid[4][-8:-6], 16),
- int(Guid[4][-6:-4], 16),
- int(Guid[4][-4:-2], 16),
- int(Guid[4][-2:], 16)
- )
-
-def BuildOptionPcdValueFormat(TokenSpaceGuidCName, TokenCName, PcdDatumType, Value):
- if PcdDatumType == 'VOID*':
- if Value.startswith('L'):
- if not Value[1]:
- EdkLogger.error("build", FORMAT_INVALID, 'For Void* type PCD, when specify the Value in the command line, please use the following format: "string", L"string", H"{...}"')
- Value = Value[0] + '"' + Value[1:] + '"'
- elif Value.startswith('H'):
- if not Value[1]:
- EdkLogger.error("build", FORMAT_INVALID, 'For Void* type PCD, when specify the Value in the command line, please use the following format: "string", L"string", H"{...}"')
- Value = Value[1:]
- else:
- if not Value[0]:
- EdkLogger.error("build", FORMAT_INVALID, 'For Void* type PCD, when specify the Value in the command line, please use the following format: "string", L"string", H"{...}"')
- Value = '"' + Value + '"'
-
- IsValid, Cause = CheckPcdDatum(PcdDatumType, Value)
- if not IsValid:
- EdkLogger.error("build", FORMAT_INVALID, Cause, ExtraData="%s.%s" % (TokenSpaceGuidCName, TokenCName))
- if PcdDatumType == 'BOOLEAN':
- Value = Value.upper()
- if Value == 'TRUE' or Value == '1':
- Value = '1'
- elif Value == 'FALSE' or Value == '0':
- Value = '0'
- return Value
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
-if __name__ == '__main__':
- pass
-
diff --git a/BaseTools/Source/Python/Common/MultipleWorkspace.py b/BaseTools/Source/Python/Common/MultipleWorkspace.py
deleted file mode 100644
index 2a76d49cc6..0000000000
--- a/BaseTools/Source/Python/Common/MultipleWorkspace.py
+++ /dev/null
@@ -1,156 +0,0 @@
-## @file
-# manage multiple workspace file.
-#
-# This file is required to make Python interpreter treat the directory
-# as containing package.
-#
-# Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-import Common.LongFilePathOs as os
-from Common.DataType import TAB_WORKSPACE
-
-## MultipleWorkspace
-#
-# This class manage multiple workspace behavior
-#
-# @param class:
-#
-# @var WORKSPACE: defined the current WORKSPACE
-# @var PACKAGES_PATH: defined the other WORKSAPCE, if current WORKSPACE is invalid, search valid WORKSPACE from PACKAGES_PATH
-#
-class MultipleWorkspace(object):
- WORKSPACE = ''
- PACKAGES_PATH = None
-
- ## convertPackagePath()
- #
- # Convert path to match workspace.
- #
- # @param cls The class pointer
- # @param Ws The current WORKSPACE
- # @param Path Path to be converted to match workspace.
- #
- @classmethod
- def convertPackagePath(cls, Ws, Path):
- if str(os.path.normcase (Path)).startswith(Ws):
- return os.path.join(Ws, os.path.relpath(Path, Ws))
- return Path
-
- ## setWs()
- #
- # set WORKSPACE and PACKAGES_PATH environment
- #
- # @param cls The class pointer
- # @param Ws initialize WORKSPACE variable
- # @param PackagesPath initialize PackagesPath variable
- #
- @classmethod
- def setWs(cls, Ws, PackagesPath=None):
- cls.WORKSPACE = Ws
- if PackagesPath:
- cls.PACKAGES_PATH = [cls.convertPackagePath (Ws, os.path.normpath(Path.strip())) for Path in PackagesPath.split(os.pathsep)]
- else:
- cls.PACKAGES_PATH = []
-
- ## join()
- #
- # rewrite os.path.join function
- #
- # @param cls The class pointer
- # @param Ws the current WORKSPACE
- # @param *p path of the inf/dec/dsc/fdf/conf file
- # @retval Path the absolute path of specified file
- #
- @classmethod
- def join(cls, Ws, *p):
- Path = os.path.join(Ws, *p)
- if not os.path.exists(Path):
- for Pkg in cls.PACKAGES_PATH:
- Path = os.path.join(Pkg, *p)
- if os.path.exists(Path):
- return Path
- Path = os.path.join(Ws, *p)
- return Path
-
- ## relpath()
- #
- # rewrite os.path.relpath function
- #
- # @param cls The class pointer
- # @param Path path of the inf/dec/dsc/fdf/conf file
- # @param Ws the current WORKSPACE
- # @retval Path the relative path of specified file
- #
- @classmethod
- def relpath(cls, Path, Ws):
- for Pkg in cls.PACKAGES_PATH:
- if Path.lower().startswith(Pkg.lower()):
- Path = os.path.relpath(Path, Pkg)
- return Path
- if Path.lower().startswith(Ws.lower()):
- Path = os.path.relpath(Path, Ws)
- return Path
-
- ## getWs()
- #
- # get valid workspace for the path
- #
- # @param cls The class pointer
- # @param Ws the current WORKSPACE
- # @param Path path of the inf/dec/dsc/fdf/conf file
- # @retval Ws the valid workspace relative to the specified file path
- #
- @classmethod
- def getWs(cls, Ws, Path):
- absPath = os.path.join(Ws, Path)
- if not os.path.exists(absPath):
- for Pkg in cls.PACKAGES_PATH:
- absPath = os.path.join(Pkg, Path)
- if os.path.exists(absPath):
- return Pkg
- return Ws
-
- ## handleWsMacro()
- #
- # handle the $(WORKSPACE) tag, if current workspace is invalid path relative the tool, replace it.
- #
- # @param cls The class pointer
- # @retval PathStr Path string include the $(WORKSPACE)
- #
- @classmethod
- def handleWsMacro(cls, PathStr):
- if TAB_WORKSPACE in PathStr:
- PathList = PathStr.split()
- if PathList:
- for i, str in enumerate(PathList):
- MacroStartPos = str.find(TAB_WORKSPACE)
- if MacroStartPos != -1:
- Substr = str[MacroStartPos:]
- Path = Substr.replace(TAB_WORKSPACE, cls.WORKSPACE).strip()
- if not os.path.exists(Path):
- for Pkg in cls.PACKAGES_PATH:
- Path = Substr.replace(TAB_WORKSPACE, Pkg).strip()
- if os.path.exists(Path):
- break
- PathList[i] = str[0:MacroStartPos] + Path
- PathStr = ' '.join(PathList)
- return PathStr
-
- ## getPkgPath()
- #
- # get all package pathes.
- #
- # @param cls The class pointer
- #
- @classmethod
- def getPkgPath(cls):
- return cls.PACKAGES_PATH
- \ No newline at end of file
diff --git a/BaseTools/Source/Python/Common/Parsing.py b/BaseTools/Source/Python/Common/Parsing.py
deleted file mode 100644
index 584fc7f3c3..0000000000
--- a/BaseTools/Source/Python/Common/Parsing.py
+++ /dev/null
@@ -1,914 +0,0 @@
-## @file
-# This file is used to define common parsing related functions used in parsing INF/DEC/DSC process
-#
-# Copyright (c) 2008 - 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-from String import *
-from CommonDataClass.DataClass import *
-from DataType import *
-
-## ParseDefineMacro
-#
-# Search whole table to find all defined Macro and replaced them with the real values
-#
-def ParseDefineMacro2(Table, RecordSets, GlobalMacro):
- Macros = {}
- #
- # Find all DEFINE macros in section [Header] and its section
- #
- SqlCommand = """select Value1, Value2, BelongsToItem, StartLine, Arch from %s
- where Model = %s
- and Enabled > -1""" % (Table.Table, MODEL_META_DATA_DEFINE)
- RecordSet = Table.Exec(SqlCommand)
- for Record in RecordSet:
- Macros[Record[0]] = Record[1]
-
- #
- # Overrided by Global Macros
- #
- for Key in GlobalMacro.keys():
- Macros[Key] = GlobalMacro[Key]
-
- #
- # Replace the Macros
- #
- for Key in RecordSets.keys():
- if RecordSets[Key] != []:
- for Item in RecordSets[Key]:
- Item[0] = ReplaceMacro(Item[0], Macros)
-
-## ParseDefineMacro
-#
-# Search whole table to find all defined Macro and replaced them with the real values
-#
-def ParseDefineMacro(Table, GlobalMacro):
- Macros = {}
- #
- # Find all DEFINE macros
- #
- SqlCommand = """select Value1, Value2, BelongsToItem, StartLine, Arch from %s
- where Model = %s
- and Enabled > -1""" % (Table.Table, MODEL_META_DATA_DEFINE)
- RecordSet = Table.Exec(SqlCommand)
- for Record in RecordSet:
-#***************************************************************************************************************************************************
-# The follow SqlCommand (expr replace) is not supported in Sqlite 3.3.4 which is used in Python 2.5 *
-# Reserved Only *
-# SqlCommand = """update %s set Value1 = replace(Value1, '%s', '%s') *
-# where ID in (select ID from %s *
-# where Model = %s *
-# and Value1 like '%%%s%%' *
-# and StartLine > %s *
-# and Enabled > -1 *
-# and Arch = '%s')""" % \ *
-# (self.TblDsc.Table, Record[0], Record[1], self.TblDsc.Table, Record[2], Record[1], Record[3], Record[4]) *
-#***************************************************************************************************************************************************
- Macros[Record[0]] = Record[1]
-
- #
- # Overrided by Global Macros
- #
- for Key in GlobalMacro.keys():
- Macros[Key] = GlobalMacro[Key]
-
- #
- # Found all defined macro and replaced
- #
- SqlCommand = """select ID, Value1 from %s
- where Model != %s
- and Value1 like '%%$(%%' and Value1 like '%%)%%'
- and Enabled > -1""" % (Table.Table, MODEL_META_DATA_DEFINE)
- FoundRecords = Table.Exec(SqlCommand)
- for FoundRecord in FoundRecords:
- NewValue = ReplaceMacro(FoundRecord[1], Macros)
- SqlCommand = """update %s set Value1 = '%s'
- where ID = %s""" % (Table.Table, ConvertToSqlString2(NewValue), FoundRecord[0])
- Table.Exec(SqlCommand)
-
-##QueryDefinesItem
-#
-# Search item of section [Defines] by name, return its values
-#
-# @param Table: The Table to be executed
-# @param Name: The Name of item of section [Defines]
-# @param Arch: The Arch of item of section [Defines]
-#
-# @retval RecordSet: A list of all matched records
-#
-def QueryDefinesItem(Table, Name, Arch, BelongsToFile):
- SqlCommand = """select Value2 from %s
- where Model = %s
- and Value1 = '%s'
- and Arch = '%s'
- and BelongsToFile = %s
- and Enabled > -1""" % (Table.Table, MODEL_META_DATA_HEADER, ConvertToSqlString2(Name), ConvertToSqlString2(Arch), BelongsToFile)
- RecordSet = Table.Exec(SqlCommand)
- if len(RecordSet) < 1:
- SqlCommand = """select Value2 from %s
- where Model = %s
- and Value1 = '%s'
- and Arch = '%s'
- and BelongsToFile = %s
- and Enabled > -1""" % (Table.Table, MODEL_META_DATA_HEADER, ConvertToSqlString2(Name), ConvertToSqlString2(TAB_ARCH_COMMON.upper()), BelongsToFile)
- RecordSet = Table.Exec(SqlCommand)
- if len(RecordSet) == 1:
- if Name == TAB_INF_DEFINES_LIBRARY_CLASS:
- return [RecordSet[0][0]]
- else:
- return GetSplitValueList(RecordSet[0][0])
- elif len(RecordSet) < 1:
- return ['']
- elif len(RecordSet) > 1:
- RetVal = []
- for Record in RecordSet:
- if Name == TAB_INF_DEFINES_LIBRARY_CLASS:
- RetVal.append(Record[0])
- else:
- Items = GetSplitValueList(Record[0])
- for Item in Items:
- RetVal.append(Item)
- return RetVal
-
-##QueryDefinesItem
-#
-# Search item of section [Defines] by name, return its values
-#
-# @param Table: The Table to be executed
-# @param Name: The Name of item of section [Defines]
-# @param Arch: The Arch of item of section [Defines]
-#
-# @retval RecordSet: A list of all matched records
-#
-def QueryDefinesItem2(Table, Arch, BelongsToFile):
- SqlCommand = """select Value1, Value2, StartLine from %s
- where Model = %s
- and Arch = '%s'
- and BelongsToFile = %s
- and Enabled > -1""" % (Table.Table, MODEL_META_DATA_HEADER, ConvertToSqlString2(Arch), BelongsToFile)
- RecordSet = Table.Exec(SqlCommand)
- if len(RecordSet) < 1:
- SqlCommand = """select Value1, Value2, StartLine from %s
- where Model = %s
- and Arch = '%s'
- and BelongsToFile = %s
- and Enabled > -1""" % (Table.Table, MODEL_META_DATA_HEADER, ConvertToSqlString2(TAB_ARCH_COMMON), BelongsToFile)
- RecordSet = Table.Exec(SqlCommand)
-
- return RecordSet
-
-##QueryDscItem
-#
-# Search all dsc item for a specific section
-#
-# @param Table: The Table to be executed
-# @param Model: The type of section
-#
-# @retval RecordSet: A list of all matched records
-#
-def QueryDscItem(Table, Model, BelongsToItem, BelongsToFile):
- SqlCommand = """select Value1, Arch, StartLine, ID, Value2 from %s
- where Model = %s
- and BelongsToItem = %s
- and BelongsToFile = %s
- and Enabled > -1""" % (Table.Table, Model, BelongsToItem, BelongsToFile)
- return Table.Exec(SqlCommand)
-
-##QueryDecItem
-#
-# Search all dec item for a specific section
-#
-# @param Table: The Table to be executed
-# @param Model: The type of section
-#
-# @retval RecordSet: A list of all matched records
-#
-def QueryDecItem(Table, Model, BelongsToItem):
- SqlCommand = """select Value1, Arch, StartLine, ID, Value2 from %s
- where Model = %s
- and BelongsToItem = %s
- and Enabled > -1""" % (Table.Table, Model, BelongsToItem)
- return Table.Exec(SqlCommand)
-
-##QueryInfItem
-#
-# Search all dec item for a specific section
-#
-# @param Table: The Table to be executed
-# @param Model: The type of section
-#
-# @retval RecordSet: A list of all matched records
-#
-def QueryInfItem(Table, Model, BelongsToItem):
- SqlCommand = """select Value1, Arch, StartLine, ID, Value2 from %s
- where Model = %s
- and BelongsToItem = %s
- and Enabled > -1""" % (Table.Table, Model, BelongsToItem)
- return Table.Exec(SqlCommand)
-
-## GetBuildOption
-#
-# Parse a string with format "[<Family>:]<ToolFlag>=Flag"
-# Return (Family, ToolFlag, Flag)
-#
-# @param String: String with BuildOption statement
-# @param File: The file which defines build option, used in error report
-#
-# @retval truple() A truple structure as (Family, ToolChain, Flag)
-#
-def GetBuildOption(String, File, LineNo = -1):
- (Family, ToolChain, Flag) = ('', '', '')
- if String.find(TAB_EQUAL_SPLIT) < 0:
- RaiseParserError(String, 'BuildOptions', File, '[<Family>:]<ToolFlag>=Flag', LineNo)
- else:
- List = GetSplitValueList(String, TAB_EQUAL_SPLIT, MaxSplit = 1)
- if List[0].find(':') > -1:
- Family = List[0][ : List[0].find(':')].strip()
- ToolChain = List[0][List[0].find(':') + 1 : ].strip()
- else:
- ToolChain = List[0].strip()
- Flag = List[1].strip()
- return (Family, ToolChain, Flag)
-
-## Get Library Class
-#
-# Get Library of Dsc as <LibraryClassKeyWord>|<LibraryInstance>
-#
-# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
-# @param ContainerFile: The file which describes the library class, used for error report
-#
-# @retval (LibraryClassKeyWord, LibraryInstance, [SUP_MODULE_LIST]) Formatted Library Item
-#
-def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo = -1):
- List = GetSplitValueList(Item[0])
- SupMod = SUP_MODULE_LIST_STRING
- if len(List) != 2:
- RaiseParserError(Item[0], 'LibraryClasses', ContainerFile, '<LibraryClassKeyWord>|<LibraryInstance>')
- else:
- CheckFileType(List[1], '.Inf', ContainerFile, 'library class instance', Item[0], LineNo)
- CheckFileExist(WorkspaceDir, List[1], ContainerFile, 'LibraryClasses', Item[0], LineNo)
- if Item[1] != '':
- SupMod = Item[1]
-
- return (List[0], List[1], SupMod)
-
-## Get Library Class
-#
-# Get Library of Dsc as <LibraryClassKeyWord>[|<LibraryInstance>][|<TokenSpaceGuidCName>.<PcdCName>]
-#
-# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
-# @param ContainerFile: The file which describes the library class, used for error report
-#
-# @retval (LibraryClassKeyWord, LibraryInstance, [SUP_MODULE_LIST]) Formatted Library Item
-#
-def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo = -1):
- ItemList = GetSplitValueList((Item[0] + DataType.TAB_VALUE_SPLIT * 2))
- SupMod = SUP_MODULE_LIST_STRING
-
- if len(ItemList) > 5:
- RaiseParserError(Item[0], 'LibraryClasses', ContainerFile, '<LibraryClassKeyWord>[|<LibraryInstance>][|<TokenSpaceGuidCName>.<PcdCName>]')
- else:
- CheckFileType(ItemList[1], '.Inf', ContainerFile, 'LibraryClasses', Item[0], LineNo)
- CheckFileExist(WorkspaceDir, ItemList[1], ContainerFile, 'LibraryClasses', Item[0], LineNo)
- if ItemList[2] != '':
- CheckPcdTokenInfo(ItemList[2], 'LibraryClasses', ContainerFile, LineNo)
- if Item[1] != '':
- SupMod = Item[1]
-
- return (ItemList[0], ItemList[1], ItemList[2], SupMod)
-
-## CheckPcdTokenInfo
-#
-# Check if PcdTokenInfo is following <TokenSpaceGuidCName>.<PcdCName>
-#
-# @param TokenInfoString: String to be checked
-# @param Section: Used for error report
-# @param File: Used for error report
-#
-# @retval True PcdTokenInfo is in correct format
-#
-def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo = -1):
- Format = '<TokenSpaceGuidCName>.<PcdCName>'
- if TokenInfoString != '' and TokenInfoString != None:
- TokenInfoList = GetSplitValueList(TokenInfoString, TAB_SPLIT)
- if len(TokenInfoList) == 2:
- return True
-
- RaiseParserError(TokenInfoString, Section, File, Format, LineNo)
-
-## Get Pcd
-#
-# Get Pcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<Type>|<MaximumDatumSize>]
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<Type>|<MaximumDatumSize>]
-# @param ContainerFile: The file which describes the pcd, used for error report
-#
-# @retval (TokenInfo[1], TokenInfo[0], List[1], List[2], List[3], Type)
-#
-def GetPcd(Item, Type, ContainerFile, LineNo = -1):
- TokenGuid, TokenName, Value, MaximumDatumSize, Token = '', '', '', '', ''
- List = GetSplitValueList(Item + TAB_VALUE_SPLIT * 2)
-
- if len(List) < 4 or len(List) > 6:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<Type>|<MaximumDatumSize>]', LineNo)
- else:
- Value = List[1]
- MaximumDatumSize = List[2]
- Token = List[3]
-
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], TAB_SPLIT)
-
- return (TokenName, TokenGuid, Value, MaximumDatumSize, Token, Type)
-
-## Get FeatureFlagPcd
-#
-# Get FeatureFlagPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
-# @param ContainerFile: The file which describes the pcd, used for error report
-#
-# @retval (TokenInfo[1], TokenInfo[0], List[1], Type)
-#
-def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo = -1):
- TokenGuid, TokenName, Value = '', '', ''
- List = GetSplitValueList(Item)
- if len(List) != 2:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, '<PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE', LineNo)
- else:
- Value = List[1]
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
-
- return (TokenName, TokenGuid, Value, Type)
-
-## Get DynamicDefaultPcd
-#
-# Get DynamicDefaultPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<DatumTyp>[|<MaxDatumSize>]]
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
-# @param ContainerFile: The file which describes the pcd, used for error report
-#
-# @retval (TokenInfo[1], TokenInfo[0], List[1], List[2], List[3], Type)
-#
-def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo = -1):
- TokenGuid, TokenName, Value, DatumTyp, MaxDatumSize = '', '', '', '', ''
- List = GetSplitValueList(Item + TAB_VALUE_SPLIT * 2)
- if len(List) < 4 or len(List) > 8:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<DatumTyp>[|<MaxDatumSize>]]', LineNo)
- else:
- Value = List[1]
- DatumTyp = List[2]
- MaxDatumSize = List[3]
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], TAB_SPLIT)
-
- return (TokenName, TokenGuid, Value, DatumTyp, MaxDatumSize, Type)
-
-## Get DynamicHiiPcd
-#
-# Get DynamicHiiPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<String>|<VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
-# @param ContainerFile: The file which describes the pcd, used for error report
-#
-# @retval (TokenInfo[1], TokenInfo[0], List[1], List[2], List[3], List[4], List[5], Type)
-#
-def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo = -1):
- TokenGuid, TokenName, L1, L2, L3, L4, L5 = '', '', '', '', '', '', ''
- List = GetSplitValueList(Item + TAB_VALUE_SPLIT * 2)
- if len(List) < 6 or len(List) > 8:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, '<PcdTokenSpaceGuidCName>.<TokenCName>|<String>|<VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]', LineNo)
- else:
- L1, L2, L3, L4, L5 = List[1], List[2], List[3], List[4], List[5]
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
-
- return (TokenName, TokenGuid, L1, L2, L3, L4, L5, Type)
-
-## Get DynamicVpdPcd
-#
-# Get DynamicVpdPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>[|<MaximumDatumSize>]
-#
-# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
-# @param ContainerFile: The file which describes the pcd, used for error report
-#
-# @retval (TokenInfo[1], TokenInfo[0], List[1], List[2], Type)
-#
-def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo = -1):
- TokenGuid, TokenName, L1, L2 = '', '', '', ''
- List = GetSplitValueList(Item + TAB_VALUE_SPLIT)
- if len(List) < 3 or len(List) > 4:
- RaiseParserError(Item, 'Pcds' + Type, ContainerFile, '<PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>[|<MaximumDatumSize>]', LineNo)
- else:
- L1, L2 = List[1], List[2]
- if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
- (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
-
- return (TokenName, TokenGuid, L1, L2, Type)
-
-## GetComponent
-#
-# Parse block of the components defined in dsc file
-# Set KeyValues as [ ['component name', [lib1, lib2, lib3], [bo1, bo2, bo3], [pcd1, pcd2, pcd3]], ...]
-#
-# @param Lines: The content to be parsed
-# @param KeyValues: To store data after parsing
-#
-# @retval True Get component successfully
-#
-def GetComponent(Lines, KeyValues):
- (findBlock, findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, False, False, False, False)
- ListItem = None
- LibraryClassItem = []
- BuildOption = []
- Pcd = []
-
- for Line in Lines:
- Line = Line[0]
-
- #
- # Ignore !include statement
- #
- if Line.upper().find(TAB_INCLUDE.upper() + ' ') > -1 or Line.upper().find(TAB_DEFINE + ' ') > -1:
- continue
-
- if findBlock == False:
- ListItem = Line
- #
- # find '{' at line tail
- #
- if Line.endswith('{'):
- findBlock = True
- ListItem = CleanString(Line.rsplit('{', 1)[0], DataType.TAB_COMMENT_SPLIT)
-
- #
- # Parse a block content
- #
- if findBlock:
- if Line.find('<LibraryClasses>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (True, False, False, False, False, False, False)
- continue
- if Line.find('<BuildOptions>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, True, False, False, False, False, False)
- continue
- if Line.find('<PcdsFeatureFlag>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, True, False, False, False, False)
- continue
- if Line.find('<PcdsPatchableInModule>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, True, False, False, False)
- continue
- if Line.find('<PcdsFixedAtBuild>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, True, False, False)
- continue
- if Line.find('<PcdsDynamic>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, False, True, False)
- continue
- if Line.find('<PcdsDynamicEx>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, False, False, True)
- continue
- if Line.endswith('}'):
- #
- # find '}' at line tail
- #
- KeyValues.append([ListItem, LibraryClassItem, BuildOption, Pcd])
- (findBlock, findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, False, False, False, False)
- LibraryClassItem, BuildOption, Pcd = [], [], []
- continue
-
- if findBlock:
- if findLibraryClass:
- LibraryClassItem.append(Line)
- elif findBuildOption:
- BuildOption.append(Line)
- elif findPcdsFeatureFlag:
- Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG_NULL, Line))
- elif findPcdsPatchableInModule:
- Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE_NULL, Line))
- elif findPcdsFixedAtBuild:
- Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD_NULL, Line))
- elif findPcdsDynamic:
- Pcd.append((DataType.TAB_PCDS_DYNAMIC_DEFAULT_NULL, Line))
- elif findPcdsDynamicEx:
- Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, Line))
- else:
- KeyValues.append([ListItem, [], [], []])
-
- return True
-
-## GetExec
-#
-# Parse a string with format "InfFilename [EXEC = ExecFilename]"
-# Return (InfFilename, ExecFilename)
-#
-# @param String: String with EXEC statement
-#
-# @retval truple() A pair as (InfFilename, ExecFilename)
-#
-def GetExec(String):
- InfFilename = ''
- ExecFilename = ''
- if String.find('EXEC') > -1:
- InfFilename = String[ : String.find('EXEC')].strip()
- ExecFilename = String[String.find('EXEC') + len('EXEC') : ].strip()
- else:
- InfFilename = String.strip()
-
- return (InfFilename, ExecFilename)
-
-## GetComponents
-#
-# Parse block of the components defined in dsc file
-# Set KeyValues as [ ['component name', [lib1, lib2, lib3], [bo1, bo2, bo3], [pcd1, pcd2, pcd3]], ...]
-#
-# @param Lines: The content to be parsed
-# @param Key: Reserved
-# @param KeyValues: To store data after parsing
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-# @retval True Get component successfully
-#
-def GetComponents(Lines, Key, KeyValues, CommentCharacter):
- if Lines.find(DataType.TAB_SECTION_END) > -1:
- Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
- (findBlock, findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, False, False, False, False)
- ListItem = None
- LibraryClassItem = []
- BuildOption = []
- Pcd = []
-
- LineList = Lines.split('\n')
- for Line in LineList:
- Line = CleanString(Line, CommentCharacter)
- if Line == None or Line == '':
- continue
-
- if findBlock == False:
- ListItem = Line
- #
- # find '{' at line tail
- #
- if Line.endswith('{'):
- findBlock = True
- ListItem = CleanString(Line.rsplit('{', 1)[0], CommentCharacter)
-
- #
- # Parse a block content
- #
- if findBlock:
- if Line.find('<LibraryClasses>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (True, False, False, False, False, False, False)
- continue
- if Line.find('<BuildOptions>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, True, False, False, False, False, False)
- continue
- if Line.find('<PcdsFeatureFlag>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, True, False, False, False, False)
- continue
- if Line.find('<PcdsPatchableInModule>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, True, False, False, False)
- continue
- if Line.find('<PcdsFixedAtBuild>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, True, False, False)
- continue
- if Line.find('<PcdsDynamic>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, False, True, False)
- continue
- if Line.find('<PcdsDynamicEx>') != -1:
- (findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, False, False, True)
- continue
- if Line.endswith('}'):
- #
- # find '}' at line tail
- #
- KeyValues.append([ListItem, LibraryClassItem, BuildOption, Pcd])
- (findBlock, findLibraryClass, findBuildOption, findPcdsFeatureFlag, findPcdsPatchableInModule, findPcdsFixedAtBuild, findPcdsDynamic, findPcdsDynamicEx) = (False, False, False, False, False, False, False, False)
- LibraryClassItem, BuildOption, Pcd = [], [], []
- continue
-
- if findBlock:
- if findLibraryClass:
- LibraryClassItem.append(Line)
- elif findBuildOption:
- BuildOption.append(Line)
- elif findPcdsFeatureFlag:
- Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG, Line))
- elif findPcdsPatchableInModule:
- Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE, Line))
- elif findPcdsFixedAtBuild:
- Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD, Line))
- elif findPcdsDynamic:
- Pcd.append((DataType.TAB_PCDS_DYNAMIC, Line))
- elif findPcdsDynamicEx:
- Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX, Line))
- else:
- KeyValues.append([ListItem, [], [], []])
-
- return True
-
-## Get Source
-#
-# Get Source of Inf as <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]
-#
-# @param Item: String as <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]
-# @param ContainerFile: The file which describes the library class, used for error report
-#
-# @retval (List[0], List[1], List[2], List[3], List[4])
-#
-def GetSource(Item, ContainerFile, FileRelativePath, LineNo = -1):
- ItemNew = Item + DataType.TAB_VALUE_SPLIT * 4
- List = GetSplitValueList(ItemNew)
- if len(List) < 5 or len(List) > 9:
- RaiseParserError(Item, 'Sources', ContainerFile, '<Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]', LineNo)
- List[0] = NormPath(List[0])
- CheckFileExist(FileRelativePath, List[0], ContainerFile, 'Sources', Item, LineNo)
- if List[4] != '':
- CheckPcdTokenInfo(List[4], 'Sources', ContainerFile, LineNo)
-
- return (List[0], List[1], List[2], List[3], List[4])
-
-## Get Binary
-#
-# Get Binary of Inf as <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]
-#
-# @param Item: String as <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]
-# @param ContainerFile: The file which describes the library class, used for error report
-#
-# @retval (List[0], List[1], List[2], List[3])
-# @retval List
-#
-def GetBinary(Item, ContainerFile, FileRelativePath, LineNo = -1):
- ItemNew = Item + DataType.TAB_VALUE_SPLIT
- List = GetSplitValueList(ItemNew)
- if len(List) != 4 and len(List) != 5:
- RaiseParserError(Item, 'Binaries', ContainerFile, "<FileType>|<Filename>|<Target>[|<TokenSpaceGuidCName>.<PcdCName>]", LineNo)
- else:
- if List[3] != '':
- CheckPcdTokenInfo(List[3], 'Binaries', ContainerFile, LineNo)
-
- if len(List) == 4:
- return (List[0], List[1], List[2], List[3])
- elif len(List) == 3:
- return (List[0], List[1], List[2], '')
- elif len(List) == 2:
- return (List[0], List[1], '', '')
- elif len(List) == 1:
- return (List[0], '', '', '')
-
-## Get Guids/Protocols/Ppis
-#
-# Get Guids/Protocols/Ppis of Inf as <GuidCName>[|<PcdFeatureFlag>]
-#
-# @param Item: String as <GuidCName>[|<PcdFeatureFlag>]
-# @param Type: Type of parsing string
-# @param ContainerFile: The file which describes the library class, used for error report
-#
-# @retval (List[0], List[1])
-#
-def GetGuidsProtocolsPpisOfInf(Item, Type, ContainerFile, LineNo = -1):
- ItemNew = Item + TAB_VALUE_SPLIT
- List = GetSplitValueList(ItemNew)
- if List[1] != '':
- CheckPcdTokenInfo(List[1], Type, ContainerFile, LineNo)
-
- return (List[0], List[1])
-
-## Get Guids/Protocols/Ppis
-#
-# Get Guids/Protocols/Ppis of Dec as <GuidCName>=<GuidValue>
-#
-# @param Item: String as <GuidCName>=<GuidValue>
-# @param Type: Type of parsing string
-# @param ContainerFile: The file which describes the library class, used for error report
-#
-# @retval (List[0], List[1])
-#
-def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo = -1):
- List = GetSplitValueList(Item, DataType.TAB_EQUAL_SPLIT)
- if len(List) != 2:
- RaiseParserError(Item, Type, ContainerFile, '<CName>=<GuidValue>', LineNo)
-
- return (List[0], List[1])
-
-## GetPackage
-#
-# Get Package of Inf as <PackagePath>[|<PcdFeatureFlag>]
-#
-# @param Item: String as <PackagePath>[|<PcdFeatureFlag>]
-# @param Type: Type of parsing string
-# @param ContainerFile: The file which describes the library class, used for error report
-#
-# @retval (List[0], List[1])
-#
-def GetPackage(Item, ContainerFile, FileRelativePath, LineNo = -1):
- ItemNew = Item + TAB_VALUE_SPLIT
- List = GetSplitValueList(ItemNew)
- CheckFileType(List[0], '.Dec', ContainerFile, 'package', List[0], LineNo)
- CheckFileExist(FileRelativePath, List[0], ContainerFile, 'Packages', List[0], LineNo)
-
- if List[1] != '':
- CheckPcdTokenInfo(List[1], 'Packages', ContainerFile, LineNo)
-
- return (List[0], List[1])
-
-## Get Pcd Values of Inf
-#
-# Get Pcd of Inf as <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
-#
-# @param Item: The string describes pcd
-# @param Type: The type of Pcd
-# @param File: The file which describes the pcd, used for error report
-#
-# @retval (TokenSpcCName, TokenCName, Value, ItemType) Formatted Pcd Item
-#
-def GetPcdOfInf(Item, Type, File, LineNo):
- Format = '<TokenSpaceGuidCName>.<PcdCName>[|<Value>]'
- TokenGuid, TokenName, Value, InfType = '', '', '', ''
-
- if Type == TAB_PCDS_FIXED_AT_BUILD:
- InfType = TAB_INF_FIXED_PCD
- elif Type == TAB_PCDS_PATCHABLE_IN_MODULE:
- InfType = TAB_INF_PATCH_PCD
- elif Type == TAB_PCDS_FEATURE_FLAG:
- InfType = TAB_INF_FEATURE_PCD
- elif Type == TAB_PCDS_DYNAMIC_EX:
- InfType = TAB_INF_PCD_EX
- elif Type == TAB_PCDS_DYNAMIC:
- InfType = TAB_INF_PCD
- List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
- if len(List) < 2 or len(List) > 3:
- RaiseParserError(Item, InfType, File, Format, LineNo)
- else:
- Value = List[1]
- TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
- if len(TokenInfo) != 2:
- RaiseParserError(Item, InfType, File, Format, LineNo)
- else:
- TokenGuid = TokenInfo[0]
- TokenName = TokenInfo[1]
-
- return (TokenGuid, TokenName, Value, Type)
-
-
-## Get Pcd Values of Dec
-#
-# Get Pcd of Dec as <TokenSpcCName>.<TokenCName>|<Value>|<DatumType>|<Token>
-# @retval (TokenSpcCName, TokenCName, Value, DatumType, Token, ItemType) Formatted Pcd Item
-#
-def GetPcdOfDec(Item, Type, File, LineNo = -1):
- Format = '<TokenSpaceGuidCName>.<PcdCName>|<Value>|<DatumType>|<Token>'
- TokenGuid, TokenName, Value, DatumType, Token = '', '', '', '', ''
- List = GetSplitValueList(Item)
- if len(List) != 4:
- RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
- else:
- Value = List[1]
- DatumType = List[2]
- Token = List[3]
- TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
- if len(TokenInfo) != 2:
- RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
- else:
- TokenGuid = TokenInfo[0]
- TokenName = TokenInfo[1]
-
- return (TokenGuid, TokenName, Value, DatumType, Token, Type)
-
-## Parse DEFINE statement
-#
-# Get DEFINE macros
-#
-# 1. Insert a record into TblDec
-# Value1: Macro Name
-# Value2: Macro Value
-#
-def ParseDefine(LineValue, StartLine, Table, FileID, Filename, SectionName, SectionModel, Arch):
- EdkLogger.debug(EdkLogger.DEBUG_2, "DEFINE statement '%s' found in section %s" % (LineValue, SectionName))
- Define = GetSplitValueList(CleanString(LineValue[LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') + len(DataType.TAB_DEFINE + ' ') : ]), TAB_EQUAL_SPLIT, 1)
- Table.Insert(MODEL_META_DATA_DEFINE, Define[0], Define[1], '', '', '', Arch, SectionModel, FileID, StartLine, -1, StartLine, -1, 0)
-
-## InsertSectionItems
-#
-# Insert item data of a section to a dict
-#
-def InsertSectionItems(Model, CurrentSection, SectionItemList, ArchList, ThirdList, RecordSet):
- # Insert each item data of a section
- for Index in range(0, len(ArchList)):
- Arch = ArchList[Index]
- Third = ThirdList[Index]
- if Arch == '':
- Arch = TAB_ARCH_COMMON
-
- Records = RecordSet[Model]
- for SectionItem in SectionItemList:
- BelongsToItem, EndLine, EndColumn = -1, -1, -1
- LineValue, StartLine, EndLine, Comment = SectionItem[0], SectionItem[1], SectionItem[1], SectionItem[2]
-
- EdkLogger.debug(4, "Parsing %s ..." %LineValue)
- # And then parse DEFINE statement
- if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1:
- continue
-
- # At last parse other sections
- ID = -1
- Records.append([LineValue, Arch, StartLine, ID, Third, Comment])
-
- if RecordSet != {}:
- RecordSet[Model] = Records
-
-## Insert records to database
-#
-# Insert item data of a section to database
-# @param Table: The Table to be inserted
-# @param FileID: The ID of belonging file
-# @param Filename: The name of belonging file
-# @param CurrentSection: The name of currect section
-# @param SectionItemList: A list of items of the section
-# @param ArchList: A list of arches
-# @param ThirdList: A list of third parameters, ModuleType for LibraryClass and SkuId for Dynamic Pcds
-# @param IfDefList: A list of all conditional statements
-# @param RecordSet: A dict of all parsed records
-#
-def InsertSectionItemsIntoDatabase(Table, FileID, Filename, Model, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList, RecordSet):
- #
- # Insert each item data of a section
- #
- for Index in range(0, len(ArchList)):
- Arch = ArchList[Index]
- Third = ThirdList[Index]
- if Arch == '':
- Arch = TAB_ARCH_COMMON
-
- Records = RecordSet[Model]
- for SectionItem in SectionItemList:
- BelongsToItem, EndLine, EndColumn = -1, -1, -1
- LineValue, StartLine, EndLine = SectionItem[0], SectionItem[1], SectionItem[1]
-
- EdkLogger.debug(4, "Parsing %s ..." %LineValue)
- #
- # And then parse DEFINE statement
- #
- if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1:
- ParseDefine(LineValue, StartLine, Table, FileID, Filename, CurrentSection, Model, Arch)
- continue
-
- #
- # At last parse other sections
- #
- ID = Table.Insert(Model, LineValue, Third, Third, '', '', Arch, -1, FileID, StartLine, -1, StartLine, -1, 0)
- Records.append([LineValue, Arch, StartLine, ID, Third])
-
- if RecordSet != {}:
- RecordSet[Model] = Records
-
-## GenMetaDatSectionItem
-def GenMetaDatSectionItem(Key, Value, List):
- if Key not in List:
- List[Key] = [Value]
- else:
- List[Key].append(Value)
-
-## IsValidWord
-#
-# Check whether the word is valid.
-# <Word> ::= (a-zA-Z0-9_)(a-zA-Z0-9_-){0,} Alphanumeric characters with
-# optional
-# dash "-" and/or underscore "_" characters. No whitespace
-# characters are permitted.
-#
-# @param Word: The word string need to be checked.
-#
-def IsValidWord(Word):
- if not Word:
- return False
- #
- # The first char should be alpha, _ or Digit.
- #
- if not Word[0].isalnum() and \
- not Word[0] == '_' and \
- not Word[0].isdigit():
- return False
-
- LastChar = ''
- for Char in Word[1:]:
- if (not Char.isalpha()) and \
- (not Char.isdigit()) and \
- Char != '-' and \
- Char != '_' and \
- Char != '.':
- return False
- if Char == '.' and LastChar == '.':
- return False
- LastChar = Char
-
- return True
diff --git a/BaseTools/Source/Python/Common/PyUtility.pyd b/BaseTools/Source/Python/Common/PyUtility.pyd
deleted file mode 100644
index 856b508e4e..0000000000
--- a/BaseTools/Source/Python/Common/PyUtility.pyd
+++ /dev/null
Binary files differ
diff --git a/BaseTools/Source/Python/Common/RangeExpression.py b/BaseTools/Source/Python/Common/RangeExpression.py
deleted file mode 100644
index b6c929fd88..0000000000
--- a/BaseTools/Source/Python/Common/RangeExpression.py
+++ /dev/null
@@ -1,737 +0,0 @@
-# # @file
-# This file is used to parse and evaluate range expression in Pcd declaration.
-#
-# Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-# # Import Modules
-#
-from Common.GlobalData import *
-from CommonDataClass.Exceptions import BadExpression
-from CommonDataClass.Exceptions import WrnExpression
-import uuid
-
-ERR_STRING_EXPR = 'This operator cannot be used in string expression: [%s].'
-ERR_SNYTAX = 'Syntax error, the rest of expression cannot be evaluated: [%s].'
-ERR_MATCH = 'No matching right parenthesis.'
-ERR_STRING_TOKEN = 'Bad string token: [%s].'
-ERR_MACRO_TOKEN = 'Bad macro token: [%s].'
-ERR_EMPTY_TOKEN = 'Empty token is not allowed.'
-ERR_PCD_RESOLVE = 'PCD token cannot be resolved: [%s].'
-ERR_VALID_TOKEN = 'No more valid token found from rest of string: [%s].'
-ERR_EXPR_TYPE = 'Different types found in expression.'
-ERR_OPERATOR_UNSUPPORT = 'Unsupported operator: [%s]'
-ERR_REL_NOT_IN = 'Expect "IN" after "not" operator.'
-WRN_BOOL_EXPR = 'Operand of boolean type cannot be used in arithmetic expression.'
-WRN_EQCMP_STR_OTHERS = '== Comparison between Operand of string type and Boolean/Number Type always return False.'
-WRN_NECMP_STR_OTHERS = '!= Comparison between Operand of string type and Boolean/Number Type always return True.'
-ERR_RELCMP_STR_OTHERS = 'Operator taking Operand of string type and Boolean/Number Type is not allowed: [%s].'
-ERR_STRING_CMP = 'Unicode string and general string cannot be compared: [%s %s %s]'
-ERR_ARRAY_TOKEN = 'Bad C array or C format GUID token: [%s].'
-ERR_ARRAY_ELE = 'This must be HEX value for NList or Array: [%s].'
-ERR_EMPTY_EXPR = 'Empty expression is not allowed.'
-ERR_IN_OPERAND = 'Macro after IN operator can only be: $(FAMILY), $(ARCH), $(TOOL_CHAIN_TAG) and $(TARGET).'
-
-def MaxOfType(DataType):
- if DataType == 'UINT8':
- return int('0xFF', 16)
- if DataType == 'UINT16':
- return int('0xFFFF', 16)
- if DataType == 'UINT32':
- return int('0xFFFFFFFF', 16)
- if DataType == 'UINT64':
- return int('0xFFFFFFFFFFFFFFFF', 16)
-
-class RangeObject(object):
- def __init__(self, start, end, empty = False):
-
- if int(start) < int(end):
- self.start = int(start)
- self.end = int(end)
- else:
- self.start = int(end)
- self.end = int(start)
- self.empty = empty
-
-class RangeContainer(object):
- def __init__(self):
- self.rangelist = []
-
- def push(self, RangeObject):
- self.rangelist.append(RangeObject)
- self.rangelist = sorted(self.rangelist, key = lambda rangeobj : rangeobj.start)
- self.merge()
-
- def pop(self):
- for item in self.rangelist:
- yield item
-
- def __clean__(self):
- newrangelist = []
- for rangeobj in self.rangelist:
- if rangeobj.empty == True:
- continue
- else:
- newrangelist.append(rangeobj)
- self.rangelist = newrangelist
- def merge(self):
- self.__clean__()
- for i in range(0, len(self.rangelist) - 1):
- if self.rangelist[i + 1].start > self.rangelist[i].end:
- continue
- else:
- self.rangelist[i + 1].start = self.rangelist[i].start
- self.rangelist[i + 1].end = self.rangelist[i + 1].end > self.rangelist[i].end and self.rangelist[i + 1].end or self.rangelist[i].end
- self.rangelist[i].empty = True
-
- self.__clean__()
-
- def dump(self):
- print "----------------------"
- rangelist = ""
- for object in self.rangelist:
- rangelist = rangelist + "[%d , %d]" % (object.start, object.end)
- print rangelist
-
-
-class XOROperatorObject(object):
- def __init__(self):
- pass
- def Calculate(self, Operand, DataType, SymbolTable):
- if type(Operand) == type('') and not Operand.isalnum():
- Expr = "XOR ..."
- raise BadExpression(ERR_SNYTAX % Expr)
- rangeId = str(uuid.uuid1())
- rangeContainer = RangeContainer()
- rangeContainer.push(RangeObject(0, int(Operand) - 1))
- rangeContainer.push(RangeObject(int(Operand) + 1, MaxOfType(DataType)))
- SymbolTable[rangeId] = rangeContainer
- return rangeId
-
-class LEOperatorObject(object):
- def __init__(self):
- pass
- def Calculate(self, Operand, DataType, SymbolTable):
- if type(Operand) == type('') and not Operand.isalnum():
- Expr = "LE ..."
- raise BadExpression(ERR_SNYTAX % Expr)
- rangeId1 = str(uuid.uuid1())
- rangeContainer = RangeContainer()
- rangeContainer.push(RangeObject(0, int(Operand)))
- SymbolTable[rangeId1] = rangeContainer
- return rangeId1
-class LTOperatorObject(object):
- def __init__(self):
- pass
- def Calculate(self, Operand, DataType, SymbolTable):
- if type(Operand) == type('') and not Operand.isalnum():
- Expr = "LT ..."
- raise BadExpression(ERR_SNYTAX % Expr)
- rangeId1 = str(uuid.uuid1())
- rangeContainer = RangeContainer()
- rangeContainer.push(RangeObject(0, int(Operand) - 1))
- SymbolTable[rangeId1] = rangeContainer
- return rangeId1
-
-class GEOperatorObject(object):
- def __init__(self):
- pass
- def Calculate(self, Operand, DataType, SymbolTable):
- if type(Operand) == type('') and not Operand.isalnum():
- Expr = "GE ..."
- raise BadExpression(ERR_SNYTAX % Expr)
- rangeId1 = str(uuid.uuid1())
- rangeContainer = RangeContainer()
- rangeContainer.push(RangeObject(int(Operand), MaxOfType(DataType)))
- SymbolTable[rangeId1] = rangeContainer
- return rangeId1
-
-class GTOperatorObject(object):
- def __init__(self):
- pass
- def Calculate(self, Operand, DataType, SymbolTable):
- if type(Operand) == type('') and not Operand.isalnum():
- Expr = "GT ..."
- raise BadExpression(ERR_SNYTAX % Expr)
- rangeId1 = str(uuid.uuid1())
- rangeContainer = RangeContainer()
- rangeContainer.push(RangeObject(int(Operand) + 1, MaxOfType(DataType)))
- SymbolTable[rangeId1] = rangeContainer
- return rangeId1
-
-class EQOperatorObject(object):
- def __init__(self):
- pass
- def Calculate(self, Operand, DataType, SymbolTable):
- if type(Operand) == type('') and not Operand.isalnum():
- Expr = "EQ ..."
- raise BadExpression(ERR_SNYTAX % Expr)
- rangeId1 = str(uuid.uuid1())
- rangeContainer = RangeContainer()
- rangeContainer.push(RangeObject(int(Operand) , int(Operand)))
- SymbolTable[rangeId1] = rangeContainer
- return rangeId1
-
-def GetOperatorObject(Operator):
- if Operator == '>':
- return GTOperatorObject()
- elif Operator == '>=':
- return GEOperatorObject()
- elif Operator == '<':
- return LTOperatorObject()
- elif Operator == '<=':
- return LEOperatorObject()
- elif Operator == '==':
- return EQOperatorObject()
- elif Operator == '^':
- return XOROperatorObject()
- else:
- raise BadExpression("Bad Operator")
-
-class RangeExpression(object):
- # Logical operator mapping
- LogicalOperators = {
- '&&' : 'and', '||' : 'or',
- '!' : 'not', 'AND': 'and',
- 'OR' : 'or' , 'NOT': 'not',
- 'XOR': '^' , 'xor': '^',
- 'EQ' : '==' , 'NE' : '!=',
- 'GT' : '>' , 'LT' : '<',
- 'GE' : '>=' , 'LE' : '<=',
- 'IN' : 'in'
- }
-
- NonLetterOpLst = ['+', '-', '&', '|', '^', '!', '=', '>', '<']
-
- PcdPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*\.[_a-zA-Z][0-9A-Za-z_]*$')
- HexPattern = re.compile(r'0[xX][0-9a-fA-F]+')
- RegGuidPattern = re.compile(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')
- ExRegGuidPattern = re.compile(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$')
-
- SymbolPattern = re.compile("("
- "\$\([A-Z][A-Z0-9_]*\)|\$\(\w+\.\w+\)|\w+\.\w+|"
- "&&|\|\||!(?!=)|"
- "(?<=\W)AND(?=\W)|(?<=\W)OR(?=\W)|(?<=\W)NOT(?=\W)|(?<=\W)XOR(?=\W)|"
- "(?<=\W)EQ(?=\W)|(?<=\W)NE(?=\W)|(?<=\W)GT(?=\W)|(?<=\W)LT(?=\W)|(?<=\W)GE(?=\W)|(?<=\W)LE(?=\W)"
- ")")
-
- RangePattern = re.compile(r'[0-9]+ - [0-9]+')
-
- def preProcessRangeExpr(self, expr):
- # convert hex to int
- # convert interval to object index. ex. 1 - 10 to a GUID
- expr = expr.strip()
- NumberDict = {}
- for HexNumber in self.HexPattern.findall(expr):
- Number = str(int(HexNumber, 16))
- NumberDict[HexNumber] = Number
- for HexNum in NumberDict:
- expr = expr.replace(HexNum, NumberDict[HexNum])
-
- rangedict = {}
- for validrange in self.RangePattern.findall(expr):
- start, end = validrange.split(" - ")
- start = start.strip()
- end = end.strip()
- rangeid = str(uuid.uuid1())
- rangeContainer = RangeContainer()
- rangeContainer.push(RangeObject(start, end))
- self.operanddict[str(rangeid)] = rangeContainer
- rangedict[validrange] = str(rangeid)
-
- for validrange in rangedict:
- expr = expr.replace(validrange, rangedict[validrange])
-
- self._Expr = expr
- return expr
-
-
- def EvalRange(self, Operator, Oprand):
-
- operatorobj = GetOperatorObject(Operator)
- return operatorobj.Calculate(Oprand, self.PcdDataType, self.operanddict)
-
- def Rangeintersection(self, Oprand1, Oprand2):
- rangeContainer1 = self.operanddict[Oprand1]
- rangeContainer2 = self.operanddict[Oprand2]
- rangeContainer = RangeContainer()
- for range1 in rangeContainer1.pop():
- for range2 in rangeContainer2.pop():
- start1 = range1.start
- end1 = range1.end
- start2 = range2.start
- end2 = range2.end
- if start1 >= start2:
- start1, start2 = start2, start1
- end1, end2 = end2, end1
- if range1.empty:
- rangeid = str(uuid.uuid1())
- rangeContainer.push(RangeObject(0, 0, True))
- if end1 < start2:
- rangeid = str(uuid.uuid1())
- rangeContainer.push(RangeObject(0, 0, True))
- elif end1 == start2:
- rangeid = str(uuid.uuid1())
- rangeContainer.push(RangeObject(end1, end1))
- elif end1 <= end2 and end1 > start2:
- rangeid = str(uuid.uuid1())
- rangeContainer.push(RangeObject(start2, end1))
- elif end1 >= end2:
- rangeid = str(uuid.uuid1())
- rangeContainer.push(RangeObject(start2, end2))
-
- self.operanddict[rangeid] = rangeContainer
-# rangeContainer.dump()
- return rangeid
-
- def Rangecollections(self, Oprand1, Oprand2):
-
- rangeContainer1 = self.operanddict[Oprand1]
- rangeContainer2 = self.operanddict[Oprand2]
- rangeContainer = RangeContainer()
-
- for rangeobj in rangeContainer2.pop():
- rangeContainer.push(rangeobj)
- for rangeobj in rangeContainer1.pop():
- rangeContainer.push(rangeobj)
-
- rangeid = str(uuid.uuid1())
- self.operanddict[rangeid] = rangeContainer
-
-# rangeContainer.dump()
- return rangeid
-
-
- def NegtiveRange(self, Oprand1):
- rangeContainer1 = self.operanddict[Oprand1]
-
-
- rangeids = []
-
- for rangeobj in rangeContainer1.pop():
- rangeContainer = RangeContainer()
- rangeid = str(uuid.uuid1())
- if rangeobj.empty:
- rangeContainer.push(RangeObject(0, MaxOfType(self.PcdDataType)))
- else:
- if rangeobj.start > 0:
- rangeContainer.push(RangeObject(0, rangeobj.start - 1))
- if rangeobj.end < MaxOfType(self.PcdDataType):
- rangeContainer.push(RangeObject(rangeobj.end + 1, MaxOfType(self.PcdDataType)))
- self.operanddict[rangeid] = rangeContainer
- rangeids.append(rangeid)
-
- if len(rangeids) == 0:
- rangeContainer = RangeContainer()
- rangeContainer.push(RangeObject(0, MaxOfType(self.PcdDataType)))
- rangeid = str(uuid.uuid1())
- self.operanddict[rangeid] = rangeContainer
- return rangeid
-
- if len(rangeids) == 1:
- return rangeids[0]
-
- re = self.Rangeintersection(rangeids[0], rangeids[1])
- for i in range(2, len(rangeids)):
- re = self.Rangeintersection(re, rangeids[i])
-
- rangeid2 = str(uuid.uuid1())
- self.operanddict[rangeid2] = self.operanddict[re]
- return rangeid2
-
- def Eval(self, Operator, Oprand1, Oprand2 = None):
-
- if Operator in ["!", "NOT", "not"]:
- if not self.RegGuidPattern.match(Oprand1.strip()):
- raise BadExpression(ERR_STRING_EXPR % Operator)
- return self.NegtiveRange(Oprand1)
- else:
- if Operator in ["==", ">=", "<=", ">", "<", '^']:
- return self.EvalRange(Operator, Oprand1)
- elif Operator == 'and' :
- if not self.ExRegGuidPattern.match(Oprand1.strip()) or not self.ExRegGuidPattern.match(Oprand2.strip()):
- raise BadExpression(ERR_STRING_EXPR % Operator)
- return self.Rangeintersection(Oprand1, Oprand2)
- elif Operator == 'or':
- if not self.ExRegGuidPattern.match(Oprand1.strip()) or not self.ExRegGuidPattern.match(Oprand2.strip()):
- raise BadExpression(ERR_STRING_EXPR % Operator)
- return self.Rangecollections(Oprand1, Oprand2)
- else:
- raise BadExpression(ERR_STRING_EXPR % Operator)
-
-
- def __init__(self, Expression, PcdDataType, SymbolTable = {}):
- self._NoProcess = False
- if type(Expression) != type(''):
- self._Expr = Expression
- self._NoProcess = True
- return
-
- self._Expr = Expression.strip()
-
- if not self._Expr.strip():
- raise BadExpression(ERR_EMPTY_EXPR)
-
- #
- # The symbol table including PCD and macro mapping
- #
- self._Symb = SymbolTable
- self._Symb.update(self.LogicalOperators)
- self._Idx = 0
- self._Len = len(self._Expr)
- self._Token = ''
- self._WarnExcept = None
-
-
- # Literal token without any conversion
- self._LiteralToken = ''
-
- # store the operand object
- self.operanddict = {}
- # The Pcd max value depends on PcdDataType
- self.PcdDataType = PcdDataType
-
- # Public entry for this class
- # @param RealValue: False: only evaluate if the expression is true or false, used for conditional expression
- # True : return the evaluated str(value), used for PCD value
- #
- # @return: True or False if RealValue is False
- # Evaluated value of string format if RealValue is True
- #
- def __call__(self, RealValue = False, Depth = 0):
- if self._NoProcess:
- return self._Expr
-
- self._Depth = Depth
-
- self._Expr = self._Expr.strip()
-
- self.preProcessRangeExpr(self._Expr)
-
- # check if the expression does not need to evaluate
- if RealValue and Depth == 0:
- self._Token = self._Expr
- if self.ExRegGuidPattern.match(self._Expr):
- return [self.operanddict[self._Expr] ]
-
- self._Idx = 0
- self._Token = ''
-
- Val = self._OrExpr()
- RealVal = Val
-
- RangeIdList = RealVal.split("or")
- RangeList = []
- for rangeid in RangeIdList:
- RangeList.append(self.operanddict[rangeid.strip()])
-
- return RangeList
-
- # Template function to parse binary operators which have same precedence
- # Expr [Operator Expr]*
- def _ExprFuncTemplate(self, EvalFunc, OpLst):
- Val = EvalFunc()
- while self._IsOperator(OpLst):
- Op = self._Token
- try:
- Val = self.Eval(Op, Val, EvalFunc())
- except WrnExpression, Warn:
- self._WarnExcept = Warn
- Val = Warn.result
- return Val
-
- # A [|| B]*
- def _OrExpr(self):
- return self._ExprFuncTemplate(self._AndExpr, ["OR", "or"])
-
- # A [&& B]*
- def _AndExpr(self):
- return self._ExprFuncTemplate(self._NeExpr, ["AND", "and"])
-
- def _NeExpr(self):
- Val = self._RelExpr()
- while self._IsOperator([ "!=", "NOT", "not"]):
- Op = self._Token
- if Op in ["!", "NOT", "not"]:
- if not self._IsOperator(["IN", "in"]):
- raise BadExpression(ERR_REL_NOT_IN)
- Op += ' ' + self._Token
- try:
- Val = self.Eval(Op, Val, self._RelExpr())
- except WrnExpression, Warn:
- self._WarnExcept = Warn
- Val = Warn.result
- return Val
-
- # [!]*A
- def _RelExpr(self):
- if self._IsOperator(["NOT" , "LE", "GE", "LT", "GT", "EQ", "XOR"]):
- Token = self._Token
- Val = self._NeExpr()
- try:
- return self.Eval(Token, Val)
- except WrnExpression, Warn:
- self._WarnExcept = Warn
- return Warn.result
- return self._IdenExpr()
-
- # Parse identifier or encapsulated expression
- def _IdenExpr(self):
- Tk = self._GetToken()
- if Tk == '(':
- Val = self._OrExpr()
- try:
- # _GetToken may also raise BadExpression
- if self._GetToken() != ')':
- raise BadExpression(ERR_MATCH)
- except BadExpression:
- raise BadExpression(ERR_MATCH)
- return Val
- return Tk
-
- # Skip whitespace or tab
- def __SkipWS(self):
- for Char in self._Expr[self._Idx:]:
- if Char not in ' \t':
- break
- self._Idx += 1
-
- # Try to convert string to number
- def __IsNumberToken(self):
- Radix = 10
- if self._Token.lower()[0:2] == '0x' and len(self._Token) > 2:
- Radix = 16
- try:
- self._Token = int(self._Token, Radix)
- return True
- except ValueError:
- return False
- except TypeError:
- return False
-
- # Parse array: {...}
- def __GetArray(self):
- Token = '{'
- self._Idx += 1
- self.__GetNList(True)
- Token += self._LiteralToken
- if self._Idx >= self._Len or self._Expr[self._Idx] != '}':
- raise BadExpression(ERR_ARRAY_TOKEN % Token)
- Token += '}'
-
- # All whitespace and tabs in array are already stripped.
- IsArray = IsGuid = False
- if len(Token.split(',')) == 11 and len(Token.split(',{')) == 2 \
- and len(Token.split('},')) == 1:
- HexLen = [11, 6, 6, 5, 4, 4, 4, 4, 4, 4, 6]
- HexList = Token.split(',')
- if HexList[3].startswith('{') and \
- not [Index for Index, Hex in enumerate(HexList) if len(Hex) > HexLen[Index]]:
- IsGuid = True
- if Token.lstrip('{').rstrip('}').find('{') == -1:
- if not [Hex for Hex in Token.lstrip('{').rstrip('}').split(',') if len(Hex) > 4]:
- IsArray = True
- if not IsArray and not IsGuid:
- raise BadExpression(ERR_ARRAY_TOKEN % Token)
- self._Idx += 1
- self._Token = self._LiteralToken = Token
- return self._Token
-
- # Parse string, the format must be: "..."
- def __GetString(self):
- Idx = self._Idx
-
- # Skip left quote
- self._Idx += 1
-
- # Replace escape \\\", \"
- Expr = self._Expr[self._Idx:].replace('\\\\', '//').replace('\\\"', '\\\'')
- for Ch in Expr:
- self._Idx += 1
- if Ch == '"':
- break
- self._Token = self._LiteralToken = self._Expr[Idx:self._Idx]
- if not self._Token.endswith('"'):
- raise BadExpression(ERR_STRING_TOKEN % self._Token)
- self._Token = self._Token[1:-1]
- return self._Token
-
- # Get token that is comprised by alphanumeric, underscore or dot(used by PCD)
- # @param IsAlphaOp: Indicate if parsing general token or script operator(EQ, NE...)
- def __GetIdToken(self, IsAlphaOp = False):
- IdToken = ''
- for Ch in self._Expr[self._Idx:]:
- if not self.__IsIdChar(Ch):
- break
- self._Idx += 1
- IdToken += Ch
-
- self._Token = self._LiteralToken = IdToken
- if not IsAlphaOp:
- self.__ResolveToken()
- return self._Token
-
- # Try to resolve token
- def __ResolveToken(self):
- if not self._Token:
- raise BadExpression(ERR_EMPTY_TOKEN)
-
- # PCD token
- if self.PcdPattern.match(self._Token):
- if self._Token not in self._Symb:
- Ex = BadExpression(ERR_PCD_RESOLVE % self._Token)
- Ex.Pcd = self._Token
- raise Ex
- self._Token = RangeExpression(self._Symb[self._Token], self._Symb)(True, self._Depth + 1)
- if type(self._Token) != type(''):
- self._LiteralToken = hex(self._Token)
- return
-
- if self._Token.startswith('"'):
- self._Token = self._Token[1:-1]
- elif self._Token in ["FALSE", "false", "False"]:
- self._Token = False
- elif self._Token in ["TRUE", "true", "True"]:
- self._Token = True
- else:
- self.__IsNumberToken()
-
- def __GetNList(self, InArray = False):
- self._GetSingleToken()
- if not self.__IsHexLiteral():
- if InArray:
- raise BadExpression(ERR_ARRAY_ELE % self._Token)
- return self._Token
-
- self.__SkipWS()
- Expr = self._Expr[self._Idx:]
- if not Expr.startswith(','):
- return self._Token
-
- NList = self._LiteralToken
- while Expr.startswith(','):
- NList += ','
- self._Idx += 1
- self.__SkipWS()
- self._GetSingleToken()
- if not self.__IsHexLiteral():
- raise BadExpression(ERR_ARRAY_ELE % self._Token)
- NList += self._LiteralToken
- self.__SkipWS()
- Expr = self._Expr[self._Idx:]
- self._Token = self._LiteralToken = NList
- return self._Token
-
- def __IsHexLiteral(self):
- if self._LiteralToken.startswith('{') and \
- self._LiteralToken.endswith('}'):
- return True
-
- if self.HexPattern.match(self._LiteralToken):
- Token = self._LiteralToken[2:]
- Token = Token.lstrip('0')
- if not Token:
- self._LiteralToken = '0x0'
- else:
- self._LiteralToken = '0x' + Token.lower()
- return True
- return False
-
- def _GetToken(self):
- return self.__GetNList()
-
- @staticmethod
- def __IsIdChar(Ch):
- return Ch in '._/:' or Ch.isalnum()
-
- # Parse operand
- def _GetSingleToken(self):
- self.__SkipWS()
- Expr = self._Expr[self._Idx:]
- if Expr.startswith('L"'):
- # Skip L
- self._Idx += 1
- UStr = self.__GetString()
- self._Token = 'L"' + UStr + '"'
- return self._Token
-
- self._Token = ''
- if Expr:
- Ch = Expr[0]
- Match = self.RegGuidPattern.match(Expr)
- if Match and not Expr[Match.end():Match.end() + 1].isalnum() \
- and Expr[Match.end():Match.end() + 1] != '_':
- self._Idx += Match.end()
- self._Token = Expr[0:Match.end()]
- return self._Token
- elif self.__IsIdChar(Ch):
- return self.__GetIdToken()
- elif Ch == '(' or Ch == ')':
- self._Idx += 1
- self._Token = Ch
- return self._Token
-
- raise BadExpression(ERR_VALID_TOKEN % Expr)
-
- # Parse operator
- def _GetOperator(self):
- self.__SkipWS()
- LegalOpLst = ['&&', '||', '!=', '==', '>=', '<='] + self.NonLetterOpLst
-
- self._Token = ''
- Expr = self._Expr[self._Idx:]
-
- # Reach end of expression
- if not Expr:
- return ''
-
- # Script operator: LT, GT, LE, GE, EQ, NE, and, or, xor, not
- if Expr[0].isalpha():
- return self.__GetIdToken(True)
-
- # Start to get regular operator: +, -, <, > ...
- if Expr[0] not in self.NonLetterOpLst:
- return ''
-
- OpToken = ''
- for Ch in Expr:
- if Ch in self.NonLetterOpLst:
- if '!' == Ch and OpToken:
- break
- self._Idx += 1
- OpToken += Ch
- else:
- break
-
- if OpToken not in LegalOpLst:
- raise BadExpression(ERR_OPERATOR_UNSUPPORT % OpToken)
- self._Token = OpToken
- return OpToken
-
- # Check if current token matches the operators given from OpList
- def _IsOperator(self, OpList):
- Idx = self._Idx
- self._GetOperator()
- if self._Token in OpList:
- if self._Token in self.LogicalOperators:
- self._Token = self.LogicalOperators[self._Token]
- return True
- self._Idx = Idx
- return False
-
-
-
-
-
-
-
-
-
-
-# UTRangeList()
diff --git a/BaseTools/Source/Python/Common/String.py b/BaseTools/Source/Python/Common/String.py
deleted file mode 100644
index 5c8d1e0ded..0000000000
--- a/BaseTools/Source/Python/Common/String.py
+++ /dev/null
@@ -1,868 +0,0 @@
-## @file
-# This file is used to define common string related functions used in parsing process
-#
-# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import re
-import DataType
-import Common.LongFilePathOs as os
-import string
-import EdkLogger as EdkLogger
-
-import GlobalData
-from BuildToolError import *
-from CommonDataClass.Exceptions import *
-from Common.LongFilePathSupport import OpenLongFilePath as open
-from Common.MultipleWorkspace import MultipleWorkspace as mws
-
-gHexVerPatt = re.compile('0x[a-f0-9]{4}[a-f0-9]{4}$', re.IGNORECASE)
-gHumanReadableVerPatt = re.compile(r'([1-9][0-9]*|0)\.[0-9]{1,2}$')
-
-## GetSplitValueList
-#
-# Get a value list from a string with multiple values splited with SplitTag
-# The default SplitTag is DataType.TAB_VALUE_SPLIT
-# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
-#
-# @param String: The input string to be splitted
-# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
-# @param MaxSplit: The max number of split values, default is -1
-#
-# @retval list() A list for splitted string
-#
-def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
- ValueList = []
- Last = 0
- Escaped = False
- InString = False
- for Index in range(0, len(String)):
- Char = String[Index]
-
- if not Escaped:
- # Found a splitter not in a string, split it
- if not InString and Char == SplitTag:
- ValueList.append(String[Last:Index].strip())
- Last = Index + 1
- if MaxSplit > 0 and len(ValueList) >= MaxSplit:
- break
-
- if Char == '\\' and InString:
- Escaped = True
- elif Char == '"':
- if not InString:
- InString = True
- else:
- InString = False
- else:
- Escaped = False
-
- if Last < len(String):
- ValueList.append(String[Last:].strip())
- elif Last == len(String):
- ValueList.append('')
-
- return ValueList
-
-## GetSplitList
-#
-# Get a value list from a string with multiple values splited with SplitString
-# The default SplitTag is DataType.TAB_VALUE_SPLIT
-# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
-#
-# @param String: The input string to be splitted
-# @param SplitStr: The split key, default is DataType.TAB_VALUE_SPLIT
-# @param MaxSplit: The max number of split values, default is -1
-#
-# @retval list() A list for splitted string
-#
-def GetSplitList(String, SplitStr=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
- return map(lambda l: l.strip(), String.split(SplitStr, MaxSplit))
-
-## MergeArches
-#
-# Find a key's all arches in dict, add the new arch to the list
-# If not exist any arch, set the arch directly
-#
-# @param Dict: The input value for Dict
-# @param Key: The input value for Key
-# @param Arch: The Arch to be added or merged
-#
-def MergeArches(Dict, Key, Arch):
- if Key in Dict.keys():
- Dict[Key].append(Arch)
- else:
- Dict[Key] = Arch.split()
-
-## GenDefines
-#
-# Parse a string with format "DEFINE <VarName> = <PATH>"
-# Generate a map Defines[VarName] = PATH
-# Return False if invalid format
-#
-# @param String: String with DEFINE statement
-# @param Arch: Supportted Arch
-# @param Defines: DEFINE statement to be parsed
-#
-# @retval 0 DEFINE statement found, and valid
-# @retval 1 DEFINE statement found, but not valid
-# @retval -1 DEFINE statement not found
-#
-def GenDefines(String, Arch, Defines):
- if String.find(DataType.TAB_DEFINE + ' ') > -1:
- List = String.replace(DataType.TAB_DEFINE + ' ', '').split(DataType.TAB_EQUAL_SPLIT)
- if len(List) == 2:
- Defines[(CleanString(List[0]), Arch)] = CleanString(List[1])
- return 0
- else:
- return -1
-
- return 1
-
-## GenInclude
-#
-# Parse a string with format "!include <Filename>"
-# Return the file path
-# Return False if invalid format or NOT FOUND
-#
-# @param String: String with INCLUDE statement
-# @param IncludeFiles: INCLUDE statement to be parsed
-# @param Arch: Supportted Arch
-#
-# @retval True
-# @retval False
-#
-def GenInclude(String, IncludeFiles, Arch):
- if String.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1:
- IncludeFile = CleanString(String[String.upper().find(DataType.TAB_INCLUDE.upper() + ' ') + len(DataType.TAB_INCLUDE + ' ') : ])
- MergeArches(IncludeFiles, IncludeFile, Arch)
- return True
- else:
- return False
-
-## GetLibraryClassesWithModuleType
-#
-# Get Library Class definition when no module type defined
-#
-# @param Lines: The content to be parsed
-# @param Key: Reserved
-# @param KeyValues: To store data after parsing
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-# @retval True Get library classes successfully
-#
-def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
- newKey = SplitModuleType(Key)
- Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
- LineList = Lines.splitlines()
- for Line in LineList:
- Line = CleanString(Line, CommentCharacter)
- if Line != '' and Line[0] != CommentCharacter:
- KeyValues.append([CleanString(Line, CommentCharacter), newKey[1]])
-
- return True
-
-## GetDynamics
-#
-# Get Dynamic Pcds
-#
-# @param Lines: The content to be parsed
-# @param Key: Reserved
-# @param KeyValues: To store data after parsing
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-# @retval True Get Dynamic Pcds successfully
-#
-def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
- #
- # Get SkuId Name List
- #
- SkuIdNameList = SplitModuleType(Key)
-
- Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
- LineList = Lines.splitlines()
- for Line in LineList:
- Line = CleanString(Line, CommentCharacter)
- if Line != '' and Line[0] != CommentCharacter:
- KeyValues.append([CleanString(Line, CommentCharacter), SkuIdNameList[1]])
-
- return True
-
-## SplitModuleType
-#
-# Split ModuleType out of section defien to get key
-# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [ 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
-#
-# @param Key: String to be parsed
-#
-# @retval ReturnValue A list for module types
-#
-def SplitModuleType(Key):
- KeyList = Key.split(DataType.TAB_SPLIT)
- #
- # Fill in for arch
- #
- KeyList.append('')
- #
- # Fill in for moduletype
- #
- KeyList.append('')
- ReturnValue = []
- KeyValue = KeyList[0]
- if KeyList[1] != '':
- KeyValue = KeyValue + DataType.TAB_SPLIT + KeyList[1]
- ReturnValue.append(KeyValue)
- ReturnValue.append(GetSplitValueList(KeyList[2]))
-
- return ReturnValue
-
-## Replace macro in strings list
-#
-# This method replace macros used in a given string list. The macros are
-# given in a dictionary.
-#
-# @param StringList StringList to be processed
-# @param MacroDefinitions The macro definitions in the form of dictionary
-# @param SelfReplacement To decide whether replace un-defined macro to ''
-#
-# @retval NewList A new string list whose macros are replaced
-#
-def ReplaceMacros(StringList, MacroDefinitions={}, SelfReplacement=False):
- NewList = []
- for String in StringList:
- if type(String) == type(''):
- NewList.append(ReplaceMacro(String, MacroDefinitions, SelfReplacement))
- else:
- NewList.append(String)
-
- return NewList
-
-## Replace macro in string
-#
-# This method replace macros used in given string. The macros are given in a
-# dictionary.
-#
-# @param String String to be processed
-# @param MacroDefinitions The macro definitions in the form of dictionary
-# @param SelfReplacement To decide whether replace un-defined macro to ''
-#
-# @retval string The string whose macros are replaced
-#
-def ReplaceMacro(String, MacroDefinitions={}, SelfReplacement=False, RaiseError=False):
- LastString = String
- while String and MacroDefinitions:
- MacroUsed = GlobalData.gMacroRefPattern.findall(String)
- # no macro found in String, stop replacing
- if len(MacroUsed) == 0:
- break
-
- for Macro in MacroUsed:
- if Macro not in MacroDefinitions:
- if RaiseError:
- raise SymbolNotFound("%s not defined" % Macro)
- if SelfReplacement:
- String = String.replace("$(%s)" % Macro, '')
- continue
- if "$(%s)" % Macro not in MacroDefinitions[Macro]:
- String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
- # in case there's macro not defined
- if String == LastString:
- break
- LastString = String
-
- return String
-
-## NormPath
-#
-# Create a normal path
-# And replace DFEINE in the path
-#
-# @param Path: The input value for Path to be converted
-# @param Defines: A set for DEFINE statement
-#
-# @retval Path Formatted path
-#
-def NormPath(Path, Defines={}):
- IsRelativePath = False
- if Path:
- if Path[0] == '.':
- IsRelativePath = True
- #
- # Replace with Define
- #
- if Defines:
- Path = ReplaceMacro(Path, Defines)
- #
- # To local path format
- #
- Path = os.path.normpath(Path)
- if Path.startswith(GlobalData.gWorkspace) and not os.path.exists(Path):
- Path = Path[len (GlobalData.gWorkspace):]
- if Path[0] == os.path.sep:
- Path = Path[1:]
- Path = mws.join(GlobalData.gWorkspace, Path)
-
- if IsRelativePath and Path[0] != '.':
- Path = os.path.join('.', Path)
-
- return Path
-
-## CleanString
-#
-# Remove comments in a string
-# Remove spaces
-#
-# @param Line: The string to be cleaned
-# @param CommentCharacter: Comment char, used to ignore comment content, default is DataType.TAB_COMMENT_SPLIT
-#
-# @retval Path Formatted path
-#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False, BuildOption=False):
    """Strip comments and surrounding whitespace from a single line.

    A comment character inside a double-quoted string does not terminate the
    line.  When BuildOption is set and a comment character was seen inside a
    string, the quotes are removed and every remaining '#' is prefixed with
    '^' (Windows) or '\\' (elsewhere) — presumably to escape it for the
    build tools; confirm against callers.

    @param Line:                 The string to be cleaned
    @param CommentCharacter:     Comment marker, default DataType.TAB_COMMENT_SPLIT
    @param AllowCppStyleComment: Also treat TAB_COMMENT_EDK_SPLIT as a comment
    @param BuildOption:          Apply the '#'-escaping described above

    @retval Line                 The cleaned line
    """
    #
    # remove whitespace
    #
    Line = Line.strip();
    #
    # Replace Edk's comment character
    #
    if AllowCppStyleComment:
        Line = Line.replace(DataType.TAB_COMMENT_EDK_SPLIT, CommentCharacter)
    #
    # remove comments, but we should escape comment character in string
    #
    InString = False
    CommentInString = False
    for Index in range(0, len(Line)):
        if Line[Index] == '"':
            InString = not InString
        elif Line[Index] == CommentCharacter and InString :
            # Remember that a comment char occurred inside quotes; used by
            # the BuildOption escaping pass below.
            CommentInString = True
        elif Line[Index] == CommentCharacter and not InString :
            Line = Line[0: Index]
            break

    if CommentInString and BuildOption:
        Line = Line.replace('"', '')
        ChIndex = Line.find('#')
        while ChIndex >= 0:
            if GlobalData.gIsWindows:
                if ChIndex == 0 or Line[ChIndex - 1] != '^':
                    Line = Line[0:ChIndex] + '^' + Line[ChIndex:]
                    ChIndex = Line.find('#', ChIndex + 2)
                else:
                    ChIndex = Line.find('#', ChIndex + 1)
            else:
                if ChIndex == 0 or Line[ChIndex - 1] != '\\':
                    Line = Line[0:ChIndex] + '\\' + Line[ChIndex:]
                    ChIndex = Line.find('#', ChIndex + 2)
                else:
                    ChIndex = Line.find('#', ChIndex + 1)
    #
    # remove whitespace again
    #
    Line = Line.strip();

    return Line
-
-## CleanString2
-#
-# Split statement with comments in a string
-# Remove spaces
-#
-# @param Line: The string to be cleaned
-# @param CommentCharacter: Comment char, used to ignore comment content, default is DataType.TAB_COMMENT_SPLIT
-#
# @retval (Line, Comment) Statement and comment split from the input line
-#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
    """Split a line into its statement and its trailing comment.

    A comment character inside a double-quoted string does not start a
    comment.  Fix: the original tracked a CommentInString flag that was set
    but never read — the dead local and its dead branch are removed; the
    split behavior is unchanged.

    @param Line:                 The string to be cleaned
    @param CommentCharacter:     Comment marker, default DataType.TAB_COMMENT_SPLIT
    @param AllowCppStyleComment: Also treat TAB_COMMENT_EDK_SPLIT as a comment

    @retval (Line, Comment)      Stripped statement and stripped comment
                                 (comment is '' when none is present)
    """
    #
    # remove whitespace
    #
    Line = Line.strip()
    #
    # Replace Edk's comment character
    #
    if AllowCppStyleComment:
        Line = Line.replace(DataType.TAB_COMMENT_EDK_SPLIT, CommentCharacter)
    #
    # separate comments and statements, but we should escape comment character in string
    #
    InString = False
    Comment = ''
    for Index in range(0, len(Line)):
        if Line[Index] == '"':
            InString = not InString
        elif Line[Index] == CommentCharacter and not InString:
            Comment = Line[Index:].strip()
            Line = Line[0:Index].strip()
            break

    return Line, Comment
-
-## GetMultipleValuesOfKeyFromLines
-#
-# Parse multiple strings to clean comment and spaces
-# The result is saved to KeyValues
-#
-# @param Lines: The content to be parsed
-# @param Key: Reserved
-# @param KeyValues: To store data after parsing
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-# @retval True Successfully executed
-#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
    """Collect the non-comment lines of a section into KeyValues (in place).

    Everything up to and including the first section-end marker is discarded;
    each remaining line is stripped of comments and, when non-empty, appended
    to the KeyValues list.

    @param Lines:            The content to be parsed
    @param Key:              Reserved (unused)
    @param KeyValues:        List mutated in place with the cleaned lines
    @param CommentCharacter: Comment marker to strip

    @retval True             Always
    """
    Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    LineList = Lines.split('\n')
    for Line in LineList:
        Line = CleanString(Line, CommentCharacter)
        if Line != '' and Line[0] != CommentCharacter:
            KeyValues += [Line]

    return True
-
-## GetDefineValue
-#
-# Parse a DEFINE statement to get defined value
-# DEFINE Key Value
-#
-# @param String: The content to be parsed
-# @param Key: The key of DEFINE statement
-# @param CommentCharacter: Comment char, used to ignore comment content
-#
-# @retval string The defined value
-#
def GetDefineValue(String, Key, CommentCharacter):
    """Extract the value of a 'DEFINE Key Value' statement.

    Fix: CommentCharacter is now forwarded to CleanString — the original
    accepted the parameter but silently dropped it, always stripping with
    the default comment character.

    @param String:           The content to be parsed
    @param Key:              The key of the DEFINE statement
    @param CommentCharacter: Comment marker to strip before extracting

    @retval string           Everything after 'Key ' in the cleaned string
    """
    String = CleanString(String, CommentCharacter)
    return String[String.find(Key + ' ') + len(Key + ' '):]
-
-## GetHexVerValue
-#
-# Get a Hex Version Value
-#
-# @param VerString: The version string to be parsed
-#
-#
-# @retval: If VerString is incorrectly formatted, return "None" which will break the build.
-# If VerString is correctly formatted, return a Hex value of the Version Number (0xmmmmnnnn)
-# where mmmm is the major number and nnnn is the adjusted minor number.
-#
def GetHexVerValue(VerString):
    """Convert a 'major.minor' version string into 0xMMMMNNNN hex form.

    Relies on the module-level gHumanReadableVerPatt / gHexVerPatt patterns.
    A one-digit minor is right-padded with '0' ('1.5' -> minor 50) —
    presumably so '1.1' and '1.10' order consistently; confirm upstream.

    @param VerString: The version string to be parsed

    @retval string    "0xmmmmnnnn" for dotted input, or the input itself
                      when it is already hex
    @retval None      When VerString matches neither pattern (breaks the build)
    """
    VerString = CleanString(VerString)

    if gHumanReadableVerPatt.match(VerString):
        ValueList = VerString.split('.')
        Major = ValueList[0]
        Minor = ValueList[1]
        if len(Minor) == 1:
            Minor += '0'
        DeciValue = (int(Major) << 16) + int(Minor);
        return "0x%08x" % DeciValue
    elif gHexVerPatt.match(VerString):
        return VerString
    else:
        return None
-
-
-## GetSingleValueOfKeyFromLines
-#
-# Parse multiple strings as below to get value of each definition line
-# Key1 = Value1
-# Key2 = Value2
-# The result is saved to Dictionary
-#
-# @param Lines: The content to be parsed
-# @param Dictionary: To store data after parsing
-# @param CommentCharacter: Comment char, be used to ignore comment content
-# @param KeySplitCharacter: Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
-# @param ValueSplitFlag: Value split flag, be used to decide if has multiple values
-# @param ValueSplitCharacter: Value split char, be used to split multiple values. Key1 = Value1|Value2, '|' is the value split char
-#
-# @retval True Successfully executed
-#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
    """Parse 'Key = Value' definition lines into Dictionary (in place).

    DEFINE/SPEC statements are accumulated under the
    TAB_INF_DEFINES_DEFINE / TAB_INF_DEFINES_SPEC entries; for ordinary
    lines, a key already present in Dictionary is overwritten on first
    sight and extended on later sights, while an unknown single-word key
    is stored in the TAB_INF_DEFINES_MACRO sub-dictionary.

    @param Lines:               The content to be parsed
    @param Dictionary:          Receives the parsed data (mutated in place)
    @param CommentCharacter:    Comment marker to strip
    @param KeySplitCharacter:   Separator between key name and key value
    @param ValueSplitFlag:      When True, split values on ValueSplitCharacter
    @param ValueSplitCharacter: Separator between multiple values

    @retval True                Always
    """
    Lines = Lines.split('\n')
    Keys = []
    Value = ''
    DefineValues = ['']
    SpecValues = ['']

    for Line in Lines:
        #
        # Handle DEFINE and SPEC
        #
        if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
            if '' in DefineValues:
                DefineValues.remove('')
            DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
            continue
        if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
            if '' in SpecValues:
                SpecValues.remove('')
            SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
            continue

        #
        # Handle Others
        #
        LineList = Line.split(KeySplitCharacter, 1)
        if len(LineList) >= 2:
            Key = LineList[0].split()
            if len(Key) == 1 and Key[0][0] != CommentCharacter:
                #
                # Remove comments and white spaces
                #
                LineList[1] = CleanString(LineList[1], CommentCharacter)
                if ValueSplitFlag:
                    # NOTE: string.strip as a function is Python 2 only; map()
                    # also returns a list only under Python 2.
                    Value = map(string.strip, LineList[1].split(ValueSplitCharacter))
                else:
                    Value = CleanString(LineList[1], CommentCharacter).splitlines()

                if Key[0] in Dictionary:
                    if Key[0] not in Keys:
                        # First sighting in this parse: replace any old value.
                        Dictionary[Key[0]] = Value
                        Keys.append(Key[0])
                    else:
                        Dictionary[Key[0]].extend(Value)
                else:
                    # Unknown key: treat it as a user-defined macro.
                    Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]

    if DefineValues == []:
        DefineValues = ['']
    if SpecValues == []:
        SpecValues = ['']
    Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
    Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues

    return True
-
-## The content to be parsed
-#
-# Do pre-check for a file before it is parsed
-# Check $()
-# Check []
-#
-# @param FileName: Used for error report
-# @param FileContent: File content to be parsed
-# @param SupSectionTag: Used for error report
-#
def PreCheck(FileName, FileContent, SupSectionTag):
    """Pre-check a file's content before full parsing.

    Validates that '$' only appears as part of a complete '$(...)' macro
    reference and that '[' and ']' only appear together on a line; any
    violation is reported through EdkLogger.error.

    @param FileName:      Used for error report
    @param FileContent:   File content to be checked
    @param SupSectionTag: Used for error report (not referenced in this body)

    @retval string        The cleaned content, lines re-joined with CRLF
    """
    LineNo = 0
    IsFailed = False  # NOTE(review): never set True below, so the final error branch is dead
    NewFileContent = ''
    for Line in FileContent.splitlines():
        LineNo = LineNo + 1
        #
        # Clean current line
        #
        Line = CleanString(Line)

        #
        # Remove commented line
        #
        # NOTE(review): this tests TAB_COMMA_SPLIT although the comment says
        # "commented line" — confirm whether TAB_COMMENT_SPLIT was intended.
        if Line.find(DataType.TAB_COMMA_SPLIT) == 0:
            Line = ''
        #
        # Check $()
        #
        if Line.find('$') > -1:
            if Line.find('$(') < 0 or Line.find(')') < 0:
                EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)

        #
        # Check []
        #
        if Line.find('[') > -1 or Line.find(']') > -1:
            #
            # Only get one '[' or one ']'
            #
            if not (Line.find('[') > -1 and Line.find(']') > -1):
                EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)

        #
        # Regenerate FileContent
        #
        NewFileContent = NewFileContent + Line + '\r\n'

    if IsFailed:
        EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)

    return NewFileContent
-
-## CheckFileType
-#
-# Check if the Filename is including ExtName
-# Return True if it exists
-# Raise a error message if it not exists
-#
-# @param CheckFilename: Name of the file to be checked
-# @param ExtName: Ext name of the file to be checked
-# @param ContainerFilename: The container file which describes the file to be checked, used for error report
-# @param SectionName: Used for error report
-# @param Line: The line in container file which defines the file to be checked
-#
-# @retval True The file type is correct
-#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo= -1):
    """Verify that CheckFilename carries the extension ExtName.

    Raises a parser error (via EdkLogger.error) when the extension differs;
    the container file is read only to locate the offending line number for
    the report.  Fix: the container file handle is now closed via a context
    manager — the original leaked the handle returned by open().

    @param CheckFilename:     Name of the file to be checked
    @param ExtName:           Expected extension of the file
    @param ContainerFilename: File that references CheckFilename (for reporting)
    @param SectionName:       Used for error report
    @param Line:              The referencing line in the container file
    @param LineNo:            Known line number, or -1 to search for it

    @retval True              The file type is correct (or no name was given)
    """
    if CheckFilename != '' and CheckFilename != None:
        (Root, Ext) = os.path.splitext(CheckFilename)
        if Ext.upper() != ExtName.upper():
            with open(ContainerFilename, 'r') as ContainerFileObj:
                ContainerFile = ContainerFileObj.read()
            if LineNo == -1:
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = "Invalid %s. '%s' is found, but '%s' file is needed" % (SectionName, CheckFilename, ExtName)
            EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo,
                            File=ContainerFilename, RaiseError=EdkLogger.IsRaiseError)

    return True
-
-## CheckFileExist
-#
-# Check if the file exists
-# Return True if it exists
-# Raise a error message if it not exists
-#
-# @param CheckFilename: Name of the file to be checked
-# @param WorkspaceDir: Current workspace dir
-# @param ContainerFilename: The container file which describes the file to be checked, used for error report
-# @param SectionName: Used for error report
-# @param Line: The line in container file which defines the file to be checked
-#
-# @retval The file full path if the file exists
-#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo= -1):
    """Check that CheckFilename exists inside the workspace.

    Raises a parser error (via EdkLogger.error) when the resolved file is
    missing; the container file is read only to locate the offending line
    number.  Fix: the container file handle is now closed via a context
    manager — the original leaked the handle returned by open().

    @param WorkspaceDir:      Current workspace dir
    @param CheckFilename:     Name of the file to be checked
    @param ContainerFilename: File that references CheckFilename (for reporting)
    @param SectionName:       Used for error report
    @param Line:              The referencing line in the container file
    @param LineNo:            Known line number, or -1 to search for it

    @retval string            Full path of the file ('' when no name given)
    """
    CheckFile = ''
    if CheckFilename != '' and CheckFilename != None:
        CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
        if not os.path.isfile(CheckFile):
            with open(ContainerFilename, 'r') as ContainerFileObj:
                ContainerFile = ContainerFileObj.read()
            if LineNo == -1:
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = "Can't find file '%s' defined in section '%s'" % (CheckFile, SectionName)
            EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg,
                            File=ContainerFilename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)

    return CheckFile
-
-## GetLineNo
-#
-# Find the index of a line in a file
-#
-# @param FileContent: Search scope
-# @param Line: Search key
-#
-# @retval int Index of the line
-# @retval -1 The line is not found
-#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
    """Find the 1-based index of the first line containing the string Line.

    Fix: a matching line that is empty or whitespace-only made the original
    raise IndexError (it indexed strip()[0] unconditionally); such lines are
    now skipped like comments when IsIgnoreComment is set.

    @param FileContent:     Search scope
    @param Line:            Search key (substring match)
    @param IsIgnoreComment: Skip lines whose first non-blank character is the
                            comment marker

    @retval int             1-based index of the line
    @retval -1              The line is not found
    """
    LineList = FileContent.splitlines()
    for Index, Text in enumerate(LineList):
        if Text.find(Line) > -1:
            #
            # Ignore statement in comment
            #
            if IsIgnoreComment:
                Stripped = Text.strip()
                if not Stripped or Stripped[0] == DataType.TAB_COMMENT_SPLIT:
                    continue
            return Index + 1

    return -1
-
-## RaiseParserError
-#
-# Raise a parser error
-#
-# @param Line: String which has error
-# @param Section: Used for error report
-# @param File: File which has the string
-# @param Format: Correct format
-#
def RaiseParserError(Line, Section, File, Format='', LineNo= -1):
    """Report a parser error through EdkLogger.error.

    When LineNo is -1 the file is scanned to locate the offending line.
    Fix: the scan now closes the file handle via a context manager — the
    original leaked the handle returned by open().

    @param Line:    String which has the error
    @param Section: Used for error report
    @param File:    File which contains the string
    @param Format:  Correct format, appended to the report when non-empty
    @param LineNo:  Known line number, or -1 to search for it
    """
    if LineNo == -1:
        with open(os.path.normpath(File), 'r') as FileObj:
            LineNo = GetLineNo(FileObj.read(), Line)
    ErrorMsg = "Invalid statement '%s' is found in section '%s'" % (Line, Section)
    if Format != '':
        Format = "Correct format is " + Format
    EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, ExtraData=Format, RaiseError=EdkLogger.IsRaiseError)
-
-## WorkspaceFile
-#
-# Return a full path with workspace dir
-#
-# @param WorkspaceDir: Workspace dir
-# @param Filename: Relative file name
-#
-# @retval string A full path
-#
def WorkspaceFile(WorkspaceDir, Filename):
    """Return Filename resolved against the (multiple-)workspace directory.

    Both inputs are normalized with NormPath before mws.join resolves the
    file against the workspace / packages-path search order.

    @param WorkspaceDir: Workspace dir
    @param Filename:     Relative file name

    @retval string       A full path
    """
    return mws.join(NormPath(WorkspaceDir), NormPath(Filename))
-
-## Split string
-#
# Remove a leading and a trailing '"' from the string
-#
# @param String: The string to be split
-#
-# @retval String: The string after removed '""'
-#
def SplitString(String):
    """Strip one leading and one trailing double-quote (if present) from String."""
    Result = String
    if Result[:1] == '"':
        Result = Result[1:]
    if Result[-1:] == '"':
        Result = Result[:-1]
    return Result
-
-## Convert To Sql String
-#
-# 1. Replace "'" with "''" in each item of StringList
-#
-# @param StringList: A list for strings to be converted
-#
def ConvertToSqlString(StringList):
    """Escape single quotes for SQL: replace each "'" with "''" in every item.

    Returns a list explicitly — the original relied on Python 2's map()
    returning a list, which would silently become a lazy iterator under
    Python 3.

    @param StringList: A list of strings to be converted

    @retval list       The converted strings
    """
    return [Item.replace("'", "''") for Item in StringList]
-
-## Convert To Sql String
-#
-# 1. Replace "'" with "''" in the String
-#
-# @param String: A String to be converted
-#
def ConvertToSqlString2(String):
    """Escape single quotes for SQL by doubling each "'" in String."""
    Escaped = String.replace("'", "''")
    return Escaped
-
-#
-# Remove comment block
-#
def RemoveBlockComment(Lines):
    """Blank out EDK block comments from a list of lines.

    Lines lying inside a block comment are replaced with '' so the line
    count (and therefore line numbering) stays stable; text before the
    opening marker is held in ReservedLine and re-joined with the text
    after the closing marker.  Fix: the unused IsFindBlockCode local from
    the original is removed; behavior is unchanged.

    @param Lines: Iterable of source lines

    @retval list  The lines with block comments blanked
    """
    IsFindBlockComment = False
    ReservedLine = ''
    NewLines = []

    for Line in Lines:
        Line = Line.strip()
        #
        # Remove comment block
        #
        if Line.find(DataType.TAB_COMMENT_EDK_START) > -1:
            ReservedLine = GetSplitList(Line, DataType.TAB_COMMENT_EDK_START, 1)[0]
            IsFindBlockComment = True
        if Line.find(DataType.TAB_COMMENT_EDK_END) > -1:
            Line = ReservedLine + GetSplitList(Line, DataType.TAB_COMMENT_EDK_END, 1)[1]
            ReservedLine = ''
            IsFindBlockComment = False
        if IsFindBlockComment:
            NewLines.append('')
            continue

        NewLines.append(Line)
    return NewLines
-
-#
-# Get String of a List
-#
def GetStringOfList(List, Split=' '):
    """Join the items of List into one string separated by Split.

    Non-list inputs are returned unchanged (legacy passthrough behavior).
    Fix: the original appended Split after every item and relied on
    str.strip(), which only removed the trailing separator when Split was
    whitespace — a non-space Split produced a stray trailing separator.
    Output for the default space separator is unchanged.

    @param List:  The list of strings to join (or a passthrough value)
    @param Split: Separator placed between items

    @retval string The joined, stripped string
    """
    if not isinstance(List, list):
        return List
    return Split.join(List).strip()
-
-#
-# Get HelpTextList from HelpTextClassList
-#
def GetHelpTextList(HelpTextClassList):
    """Flatten HelpText objects into a list of individual text lines.

    Each object's String attribute loses one trailing newline in place
    (matching the original's side effect) and is then split on '\n'.
    """
    Result = []
    if HelpTextClassList:
        for Item in HelpTextClassList:
            Text = Item.String
            if Text.endswith('\n'):
                Text = Text[:-1]
                Item.String = Text
            Result.extend(Text.split('\n'))

    return Result
-
def StringToArray(String):
    """Convert a string or byte-list literal into a C array initializer string.

    Accepts a Python 2 unicode object, an L"..." wide-string literal, a
    "..." ASCII literal, a {..} byte list, or bare space-separated byte
    values; the result is always NUL-terminated (double NUL for wide data).
    Fix: the unicode branch computed len(unicode) — len() of the *type* —
    which raised TypeError for every unicode input; it now tests the
    argument's own length.

    @param String: The value to convert (unicode is Python 2 only)

    @retval string C array initializer, e.g. "{0x41, 0x00, 0x00}"
    """
    if isinstance(String, unicode):
        if len(String) == 0:
            return "{0x00, 0x00}"
        return "{%s, 0x00, 0x00}" % ", ".join(["0x%02x, 0x00" % ord(C) for C in String])
    elif String.startswith('L"'):
        if String == "L\"\"":
            return "{0x00, 0x00}"
        else:
            return "{%s, 0x00, 0x00}" % ", ".join(["0x%02x, 0x00" % ord(C) for C in String[2:-1]])
    elif String.startswith('"'):
        if String == "\"\"":
            return "{0x00,0x00}"
        else:
            StringLen = len(String[1:-1])
            # Pad to an even element count so the array length stays even.
            if StringLen % 2:
                return "{%s, 0x00}" % ", ".join(["0x%02x" % ord(C) for C in String[1:-1]])
            else:
                return "{%s, 0x00,0x00}" % ", ".join(["0x%02x" % ord(C) for C in String[1:-1]])
    elif String.startswith('{'):
        StringLen = len(String.split(","))
        if StringLen % 2:
            return "{%s, 0x00}" % ", ".join([C for C in String[1:-1].split(',')])
        else:
            return "{%s}" % ", ".join([C for C in String[1:-1].split(',')])

    else:
        if len(String.split()) % 2:
            return '{%s, 0}' % ', '.join(String.split())
        else:
            return '{%s, 0,0}' % ', '.join(String.split())
-
def StringArrayLength(String):
    """Return the element count of the C array StringToArray would emit.

    NOTE: isinstance(String, unicode) makes this Python 2 only.

    @param String: unicode object, L"..."/"..." literal, or space-separated bytes

    @retval int    Number of array elements including NUL termination
    """
    if isinstance(String, unicode):
        # Two bytes per character plus a wide NUL.
        # NOTE(review): the extra '+ 1' makes the wide length odd — confirm intent.
        return (len(String) + 1) * 2 + 1;
    elif String.startswith('L"'):
        # Drop the 'L' and both quotes; two bytes per char plus wide NUL.
        return (len(String) - 3 + 1) * 2
    elif String.startswith('"'):
        # Drop both quotes; one byte per char plus NUL.
        return (len(String) - 2 + 1)
    else:
        # Bare space-separated bytes plus NUL.
        return len(String.split()) + 1
-
def RemoveDupOption(OptionString, Which="/I", Against=None):
    """Blank out duplicate occurrences of options starting with Which.

    The first occurrence of each option value is kept; values listed in
    Against are treated as already seen and removed entirely.  Removed
    tokens become empty strings, so the returned string may contain the
    resulting extra spaces (same as the original implementation).
    """
    SeenValues = list(Against) if Against else []
    Tokens = OptionString.split()
    for Position, Token in enumerate(Tokens):
        if not Token.startswith(Which):
            continue
        Payload = Token[len(Which):]
        if Payload in SeenValues:
            Tokens[Position] = ""
        else:
            SeenValues.append(Payload)
    return " ".join(Tokens)
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
# Module is import-only; nothing runs when executed directly.
if __name__ == '__main__':
    pass
-
diff --git a/BaseTools/Source/Python/Common/TargetTxtClassObject.py b/BaseTools/Source/Python/Common/TargetTxtClassObject.py
deleted file mode 100644
index 387e515230..0000000000
--- a/BaseTools/Source/Python/Common/TargetTxtClassObject.py
+++ /dev/null
@@ -1,190 +0,0 @@
-## @file
-# This file is used to define each component of Target.txt file
-#
-# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import Common.LongFilePathOs as os
-import EdkLogger
-import DataType
-from BuildToolError import *
-import GlobalData
-from Common.LongFilePathSupport import OpenLongFilePath as open
-
-gDefaultTargetTxtFile = "target.txt"
-
-## TargetTxtClassObject
-#
-# This class defined content used in file target.txt
-#
-# @param object: Inherited from object class
-# @param Filename: Input value for full path of target.txt
-#
-# @var TargetTxtDictionary: To store keys and values defined in target.txt
-#
class TargetTxtClassObject(object):
    """Parsed contents of a target.txt build-configuration file.

    TargetTxtDictionary maps the TAB_TAT_DEFINES_* keys to '' (scalar keys)
    or [] (list keys) until LoadTargetTxtFile fills them in.
    """
    def __init__(self, Filename = None):
        # Scalar-valued keys default to '', list-valued keys to [].
        self.TargetTxtDictionary = {
            DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM : '',
            DataType.TAB_TAT_DEFINES_ACTIVE_MODULE : '',
            DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF : '',
            DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER : '',
            DataType.TAB_TAT_DEFINES_TARGET : [],
            DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG : [],
            DataType.TAB_TAT_DEFINES_TARGET_ARCH : [],
            DataType.TAB_TAT_DEFINES_BUILD_RULE_CONF : '',
        }
        # Directory holding the parsed file; used to resolve Conf/-relative paths.
        self.ConfDirectoryPath = ""
        if Filename != None:
            self.LoadTargetTxtFile(Filename)

    ## LoadTargetTxtFile
    #
    # Load target.txt file and parse it, return a set structure to store keys and values
    #
    # @param Filename: Input value for full path of target.txt
    #
    # @retval set() A set structure to store keys and values
    # @retval 1 Error happenes in parsing
    #
    def LoadTargetTxtFile(self, Filename):
        if os.path.exists(Filename) and os.path.isfile(Filename):
            return self.ConvertTextFileToDict(Filename, '#', '=')
        else:
            EdkLogger.error("Target.txt Parser", FILE_NOT_FOUND, ExtraData=Filename)
            return 1

    ## ConvertTextFileToDict
    #
    # Convert a text file to a dictionary of (name:value) pairs.
    # The data is saved to self.TargetTxtDictionary
    #
    # @param FileName: Text filename
    # @param CommentCharacter: Comment char, be used to ignore comment content
    # @param KeySplitCharacter: Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
    #
    # @retval 0 Convert successfully
    # @retval 1 Open file failed
    #
    def ConvertTextFileToDict(self, FileName, CommentCharacter, KeySplitCharacter):
        F = None
        try:
            F = open(FileName, 'r')
            self.ConfDirectoryPath = os.path.dirname(FileName)
        except:
            EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=FileName)
            if F != None:
                F.close()

        for Line in F:
            Line = Line.strip()
            if Line.startswith(CommentCharacter) or Line == '':
                continue

            LineList = Line.split(KeySplitCharacter, 1)
            Key = LineList[0].strip()
            if len(LineList) == 2:
                Value = LineList[1].strip()
            else:
                Value = ""

            # Path-valued keys: normalize to forward slashes, then resolve
            # TOOL_CHAIN_CONF / BUILD_RULE_CONF relative to ConfDirectoryPath.
            if Key in [DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM, DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF, \
                       DataType.TAB_TAT_DEFINES_ACTIVE_MODULE, DataType.TAB_TAT_DEFINES_BUILD_RULE_CONF]:
                self.TargetTxtDictionary[Key] = Value.replace('\\', '/')
                if Key == DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF and self.TargetTxtDictionary[Key]:
                    if self.TargetTxtDictionary[Key].startswith("Conf/"):
                        Tools_Def = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].strip())
                        if not os.path.exists(Tools_Def) or not os.path.isfile(Tools_Def):
                            # If Conf/Conf does not exist, try just the Conf/ directory
                            Tools_Def = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].replace("Conf/", "", 1).strip())
                    else:
                        # The File pointed to by TOOL_CHAIN_CONF is not in a Conf/ directory
                        Tools_Def = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].strip())
                    self.TargetTxtDictionary[Key] = Tools_Def
                if Key == DataType.TAB_TAT_DEFINES_BUILD_RULE_CONF and self.TargetTxtDictionary[Key]:
                    if self.TargetTxtDictionary[Key].startswith("Conf/"):
                        Build_Rule = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].strip())
                        if not os.path.exists(Build_Rule) or not os.path.isfile(Build_Rule):
                            # If Conf/Conf does not exist, try just the Conf/ directory
                            Build_Rule = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].replace("Conf/", "", 1).strip())
                    else:
                        # The File pointed to by BUILD_RULE_CONF is not in a Conf/ directory
                        Build_Rule = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].strip())
                    self.TargetTxtDictionary[Key] = Build_Rule
            elif Key in [DataType.TAB_TAT_DEFINES_TARGET, DataType.TAB_TAT_DEFINES_TARGET_ARCH, \
                         DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]:
                # List-valued keys: whitespace-separated values.
                self.TargetTxtDictionary[Key] = Value.split()
            elif Key == DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER:
                # Validate as an integer (any base), but store the raw string.
                try:
                    V = int(Value, 0)
                except:
                    EdkLogger.error("build", FORMAT_INVALID, "Invalid number of [%s]: %s." % (Key, Value),
                                    File=FileName)
                self.TargetTxtDictionary[Key] = Value
            #elif Key not in GlobalData.gGlobalDefines:
            #    GlobalData.gGlobalDefines[Key] = Value

        F.close()
        return 0

    ## Print the dictionary
    #
    # Print all items of dictionary one by one
    #
    # @param Dict: The dictionary to be printed
    #
    # NOTE(review): defined without self/cls — calling it on an instance
    # would pass the instance as Dict; looks like debug-only code.  Python 2
    # print statements.
    def printDict(Dict):
        if Dict != None:
            KeyList = Dict.keys()
            for Key in KeyList:
                if Dict[Key] != '':
                    print Key + ' = ' + str(Dict[Key])

    ## Print the dictionary
    #
    # Print the items of dictionary which matched with input key
    #
    # @param list: The dictionary to be printed
    # @param key: The key of the item to be printed
    #
    # NOTE(review): also defined without self/cls — same caveat as printDict.
    def printList(Key, List):
        if type(List) == type([]):
            if len(List) > 0:
                if Key.find(TAB_SPLIT) != -1:
                    print "\n" + Key
                for Item in List:
                    print Item
-## TargetTxtDict
-#
-# Load target.txt in input Conf dir
-#
-# @param ConfDir: Conf dir
-#
-# @retval Target An instance of TargetTxtClassObject() with loaded target.txt
-#
def TargetTxtDict(ConfDir):
    """Load <ConfDir>/target.txt and return the populated TargetTxtClassObject.

    @param ConfDir: Conf dir containing target.txt

    @retval Target  An instance with TargetTxtDictionary filled in
    """
    Target = TargetTxtClassObject()
    Target.LoadTargetTxtFile(os.path.normpath(os.path.join(ConfDir, gDefaultTargetTxtFile)))
    return Target
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
if __name__ == '__main__':
    pass
    # NOTE(review): the statements below still execute — 'pass' does not end
    # the block.  Python 2 print statements; smoke test against $WORKSPACE.
    Target = TargetTxtDict(os.getenv("WORKSPACE"))
    print Target.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
    print Target.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
    print Target.TargetTxtDictionary
diff --git a/BaseTools/Source/Python/Common/ToolDefClassObject.py b/BaseTools/Source/Python/Common/ToolDefClassObject.py
deleted file mode 100644
index c65cb8a36d..0000000000
--- a/BaseTools/Source/Python/Common/ToolDefClassObject.py
+++ /dev/null
@@ -1,286 +0,0 @@
-## @file
-# This file is used to define each component of tools_def.txt file
-#
-# Copyright (c) 2007 - 2016, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-##
-# Import Modules
-#
-import Common.LongFilePathOs as os
-import re
-import EdkLogger
-
-from Dictionary import *
-from BuildToolError import *
-from TargetTxtClassObject import *
-from Common.LongFilePathSupport import OpenLongFilePath as open
-from Common.Misc import PathClass
-from Common.String import NormPath
-import Common.GlobalData as GlobalData
-from Common import GlobalData
-from Common.MultipleWorkspace import MultipleWorkspace as mws
-
-##
-# Static variables used for pattern
-#
-gMacroRefPattern = re.compile('(DEF\([^\(\)]+\))')
-gEnvRefPattern = re.compile('(ENV\([^\(\)]+\))')
-gMacroDefPattern = re.compile("DEFINE\s+([^\s]+)")
-gDefaultToolsDefFile = "tools_def.txt"
-
-## ToolDefClassObject
-#
-# This class defined content used in file tools_def.txt
-#
-# @param object: Inherited from object class
-# @param Filename: Input value for full path of tools_def.txt
-#
-# @var ToolsDefTxtDictionary: To store keys and values defined in target.txt
-# @var MacroDictionary: To store keys and values defined in DEFINE statement
-#
class ToolDefClassObject(object):
    """Parsed contents of a tools_def.txt file.

    ToolsDefTxtDictionary maps TARGET_TOOLCHAIN_ARCH_COMMANDTYPE_ATTRIBUTE
    names to values; MacroDictionary holds DEF(...) macros plus every
    ENV(...) environment variable captured at construction time.
    ToolsDefTxtDatabase is created by LoadToolDefFile, not here.
    """
    def __init__(self, FileName=None):
        self.ToolsDefTxtDictionary = {}
        self.MacroDictionary = {}
        # Snapshot the environment so ENV(name) references can be expanded.
        for Env in os.environ:
            self.MacroDictionary["ENV(%s)" % Env] = os.environ[Env]

        if FileName != None:
            self.LoadToolDefFile(FileName)

    ## LoadToolDefFile
    #
    # Load target.txt file and parse it
    #
    # @param Filename: Input value for full path of tools_def.txt
    #
    def LoadToolDefFile(self, FileName):
        # set multiple workspace
        PackagesPath = os.getenv("PACKAGES_PATH")
        mws.setWs(GlobalData.gWorkspace, PackagesPath)

        self.ToolsDefTxtDatabase = {
            TAB_TOD_DEFINES_TARGET : [],
            TAB_TOD_DEFINES_TOOL_CHAIN_TAG : [],
            TAB_TOD_DEFINES_TARGET_ARCH : [],
            TAB_TOD_DEFINES_COMMAND_TYPE : []
        }

        self.IncludeToolDefFile(FileName)

        # De-duplicate and sort the value sets collected during parsing.
        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET] = list(set(self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET]))
        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG] = list(set(self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]))
        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET_ARCH] = list(set(self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET_ARCH]))

        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_COMMAND_TYPE] = list(set(self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_COMMAND_TYPE]))

        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET].sort()
        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG].sort()
        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET_ARCH].sort()
        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_COMMAND_TYPE].sort()

        # Expand '*' wildcards field by field (command type first, target
        # last): each wildcard key is replaced by one concrete key per known
        # value, without overwriting explicitly defined keys; keys naming an
        # unknown value are dropped.
        KeyList = [TAB_TOD_DEFINES_TARGET, TAB_TOD_DEFINES_TOOL_CHAIN_TAG, TAB_TOD_DEFINES_TARGET_ARCH, TAB_TOD_DEFINES_COMMAND_TYPE]
        for Index in range(3, -1, -1):
            # Iterate over a copy since keys are added/deleted in the loop.
            for Key in dict(self.ToolsDefTxtDictionary):
                List = Key.split('_')
                if List[Index] == '*':
                    for String in self.ToolsDefTxtDatabase[KeyList[Index]]:
                        List[Index] = String
                        NewKey = '%s_%s_%s_%s_%s' % tuple(List)
                        if NewKey not in self.ToolsDefTxtDictionary:
                            self.ToolsDefTxtDictionary[NewKey] = self.ToolsDefTxtDictionary[Key]
                        continue
                    del self.ToolsDefTxtDictionary[Key]
                elif List[Index] not in self.ToolsDefTxtDatabase[KeyList[Index]]:
                    del self.ToolsDefTxtDictionary[Key]


    ## IncludeToolDefFile
    #
    # Load target.txt file and parse it as if it's contents were inside the main file
    #
    # @param Filename: Input value for full path of tools_def.txt
    #
    def IncludeToolDefFile(self, FileName):
        FileContent = []
        if os.path.isfile(FileName):
            try:
                F = open(FileName, 'r')
                FileContent = F.readlines()
            except:
                EdkLogger.error("tools_def.txt parser", FILE_OPEN_FAILURE, ExtraData=FileName)
        else:
            EdkLogger.error("tools_def.txt parser", FILE_NOT_FOUND, ExtraData=FileName)

        for Index in range(len(FileContent)):
            Line = FileContent[Index].strip()
            if Line == "" or Line[0] == '#':
                continue

            # !include directive: resolve the path (macros, then WORKSPACE,
            # then PACKAGES_PATH, then the current file's directory) and
            # recurse into the included file.
            if Line.startswith("!include"):
                IncFile = Line[8:].strip()
                Done, IncFile = self.ExpandMacros(IncFile)
                if not Done:
                    EdkLogger.error("tools_def.txt parser", ATTRIBUTE_NOT_AVAILABLE,
                                    "Macro or Environment has not been defined",
                                    ExtraData=IncFile[4:-1], File=FileName, Line=Index+1)
                IncFile = NormPath(IncFile)

                if not os.path.isabs(IncFile):
                    #
                    # try WORKSPACE
                    #
                    IncFileTmp = PathClass(IncFile, GlobalData.gWorkspace)
                    ErrorCode = IncFileTmp.Validate()[0]
                    if ErrorCode != 0:
                        #
                        # try PACKAGES_PATH
                        #
                        IncFileTmp = mws.join(GlobalData.gWorkspace, IncFile)
                        if not os.path.exists(IncFileTmp):
                            #
                            # try directory of current file
                            #
                            IncFileTmp = PathClass(IncFile, os.path.dirname(FileName))
                            ErrorCode = IncFileTmp.Validate()[0]
                            if ErrorCode != 0:
                                EdkLogger.error("tools_def.txt parser", FILE_NOT_FOUND, ExtraData=IncFile)

                    if type(IncFileTmp) is PathClass:
                        IncFile = IncFileTmp.Path
                    else:
                        IncFile = IncFileTmp

                self.IncludeToolDefFile(IncFile)
                continue

            NameValuePair = Line.split("=", 1)
            if len(NameValuePair) != 2:
                EdkLogger.warn("tools_def.txt parser", "Line %d: not correct assignment statement, skipped" % (Index + 1))
                continue

            Name = NameValuePair[0].strip()
            Value = NameValuePair[1].strip()

            if Name == "IDENTIFIER":
                EdkLogger.debug(EdkLogger.DEBUG_8, "Line %d: Found identifier statement, skipped: %s" % ((Index + 1), Value))
                continue

            # DEFINE statement: record the macro after expanding its value.
            MacroDefinition = gMacroDefPattern.findall(Name)
            if MacroDefinition != []:
                Done, Value = self.ExpandMacros(Value)
                if not Done:
                    EdkLogger.error("tools_def.txt parser", ATTRIBUTE_NOT_AVAILABLE,
                                    "Macro or Environment has not been defined",
                                    ExtraData=Value[4:-1], File=FileName, Line=Index+1)

                MacroName = MacroDefinition[0].strip()
                self.MacroDictionary["DEF(%s)" % MacroName] = Value
                EdkLogger.debug(EdkLogger.DEBUG_8, "Line %d: Found macro: %s = %s" % ((Index + 1), MacroName, Value))
                continue

            Done, Value = self.ExpandMacros(Value)
            if not Done:
                EdkLogger.error("tools_def.txt parser", ATTRIBUTE_NOT_AVAILABLE,
                                "Macro or Environment has not been defined",
                                ExtraData=Value[4:-1], File=FileName, Line=Index+1)

            # Ordinary assignment: the name must have exactly 5 '_'-separated
            # fields and the attribute field may not be a wildcard.
            List = Name.split('_')
            if len(List) != 5:
                EdkLogger.verbose("Line %d: Not a valid name of definition: %s" % ((Index + 1), Name))
                continue
            elif List[4] == '*':
                EdkLogger.verbose("Line %d: '*' is not allowed in last field: %s" % ((Index + 1), Name))
                continue
            else:
                self.ToolsDefTxtDictionary[Name] = Value
                if List[0] != '*':
                    self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET] += [List[0]]
                if List[1] != '*':
                    self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG] += [List[1]]
                if List[2] != '*':
                    self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET_ARCH] += [List[2]]
                if List[3] != '*':
                    self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_COMMAND_TYPE] += [List[3]]
                # FAMILY definitions also seed BUILDRULEFAMILY; the family of
                # a tool chain may not be overridden once set.
                if List[4] == TAB_TOD_DEFINES_FAMILY and List[2] == '*' and List[3] == '*':
                    if TAB_TOD_DEFINES_FAMILY not in self.ToolsDefTxtDatabase:
                        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY] = {}
                        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY][List[1]] = Value
                        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY] = {}
                        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY][List[1]] = Value
                    elif List[1] not in self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY]:
                        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY][List[1]] = Value
                        self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY][List[1]] = Value
                    elif self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY][List[1]] != Value:
                        EdkLogger.verbose("Line %d: No override allowed for the family of a tool chain: %s" % ((Index + 1), Name))
                if List[4] == TAB_TOD_DEFINES_BUILDRULEFAMILY and List[2] == '*' and List[3] == '*':
                    if TAB_TOD_DEFINES_BUILDRULEFAMILY not in self.ToolsDefTxtDatabase \
                       or List[1] not in self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY]:
                        EdkLogger.verbose("Line %d: The family is not specified, but BuildRuleFamily is specified for the tool chain: %s" % ((Index + 1), Name))
                    self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY][List[1]] = Value

    ## ExpandMacros
    #
    # Replace defined macros with real value
    #
    # @param Value: The string with unreplaced macros
    #
    # @retval Value: The string which has been replaced with real value
    #
    def ExpandMacros(self, Value):
        # os.environ contains all environment variables uppercase on Windows which cause the key in the self.MacroDictionary is uppercase, but Ref may not
        EnvReference = gEnvRefPattern.findall(Value)
        for Ref in EnvReference:
            # Unknown environment references expand to '' rather than failing.
            if Ref not in self.MacroDictionary and Ref.upper() not in self.MacroDictionary:
                Value = Value.replace(Ref, "")
            else:
                if Ref in self.MacroDictionary:
                    Value = Value.replace(Ref, self.MacroDictionary[Ref])
                else:
                    Value = Value.replace(Ref, self.MacroDictionary[Ref.upper()])

        # Unknown DEF(...) references are a hard failure: return the
        # offending reference so the caller can report it.
        MacroReference = gMacroRefPattern.findall(Value)
        for Ref in MacroReference:
            if Ref not in self.MacroDictionary:
                return False, Ref
            Value = Value.replace(Ref, self.MacroDictionary[Ref])

        return True, Value
-
-## ToolDefDict
-#
-# Load tools_def.txt in input Conf dir
-#
-# @param ConfDir: Conf dir
-#
-# @retval ToolDef An instance of ToolDefClassObject() with loaded tools_def.txt
-#
def ToolDefDict(ConfDir):
    """Load tools_def.txt and return the populated ToolDefClassObject.

    The file path is taken from target.txt's TOOL_CHAIN_CONF entry when
    present and non-empty, otherwise <ConfDir>/tools_def.txt is used.

    @param ConfDir: Conf dir

    @retval ToolDef An instance of ToolDefClassObject() with loaded tools_def.txt
    """
    Target = TargetTxtDict(ConfDir)
    ToolDef = ToolDefClassObject()
    if DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF in Target.TargetTxtDictionary:
        ToolsDefFile = Target.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
        if ToolsDefFile:
            ToolDef.LoadToolDefFile(os.path.normpath(ToolsDefFile))
        else:
            ToolDef.LoadToolDefFile(os.path.normpath(os.path.join(ConfDir, gDefaultToolsDefFile)))
    else:
        ToolDef.LoadToolDefFile(os.path.normpath(os.path.join(ConfDir, gDefaultToolsDefFile)))
    return ToolDef
-
-##
-#
-# This acts like the main() function for the script, unless it is 'import'ed into another
-# script.
-#
if __name__ == '__main__':
    # Smoke test: load tools_def.txt relative to $WORKSPACE.
    ToolDef = ToolDefDict(os.getenv("WORKSPACE"))
    pass
diff --git a/BaseTools/Source/Python/Common/VariableAttributes.py b/BaseTools/Source/Python/Common/VariableAttributes.py
deleted file mode 100644
index a2e22ca040..0000000000
--- a/BaseTools/Source/Python/Common/VariableAttributes.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# # @file
-#
-# This file is used to handle the variable attributes and property information
-#
-#
-# Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-class VariableAttributes(object):
- EFI_VARIABLE_NON_VOLATILE = 0x00000001
- EFI_VARIABLE_BOOTSERVICE_ACCESS = 0x00000002
- EFI_VARIABLE_RUNTIME_ACCESS = 0x00000004
- VAR_CHECK_VARIABLE_PROPERTY_READ_ONLY = 0x00000001
- VarAttributesMap = {
- "NV":EFI_VARIABLE_NON_VOLATILE,
- "BS":EFI_VARIABLE_BOOTSERVICE_ACCESS,
- "RT":EFI_VARIABLE_RUNTIME_ACCESS,
- "RO":VAR_CHECK_VARIABLE_PROPERTY_READ_ONLY
- }
-
- def __init__(self):
- pass
-
- @staticmethod
- def GetVarAttributes(var_attr_str):
- VarAttr = 0x00000000
- VarProp = 0x00000000
-
- attr_list = var_attr_str.split(",")
- for attr in attr_list:
- attr = attr.strip()
- if attr == 'RO':
- VarProp = VariableAttributes.VAR_CHECK_VARIABLE_PROPERTY_READ_ONLY
- else:
- VarAttr = VarAttr | VariableAttributes.VarAttributesMap.get(attr, 0x00000000)
- return VarAttr, VarProp
- @staticmethod
- def ValidateVarAttributes(var_attr_str):
- if not var_attr_str:
- return True, ""
- attr_list = var_attr_str.split(",")
- attr_temp = []
- for attr in attr_list:
- attr = attr.strip()
- attr_temp.append(attr)
- if attr not in VariableAttributes.VarAttributesMap:
- return False, "The variable attribute %s is not support to be specified in dsc file. Supported variable attribute are ['BS','NV','RT','RO'] "
- if 'RT' in attr_temp and 'BS' not in attr_temp:
- return False, "the RT attribute need the BS attribute to be present"
- return True, ""
diff --git a/BaseTools/Source/Python/Common/VpdInfoFile.py b/BaseTools/Source/Python/Common/VpdInfoFile.py
deleted file mode 100644
index d45fb4cf03..0000000000
--- a/BaseTools/Source/Python/Common/VpdInfoFile.py
+++ /dev/null
@@ -1,258 +0,0 @@
-## @file
-#
-# This package manage the VPD PCD information file which will be generated
-# by build tool's autogen.
-# The VPD PCD information file will be input for third-party BPDG tool which
-# is pointed by *_*_*_VPD_TOOL_GUID in conf/tools_def.txt
-#
-#
-# Copyright (c) 2010 - 2016, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-import Common.LongFilePathOs as os
-import re
-import Common.EdkLogger as EdkLogger
-import Common.BuildToolError as BuildToolError
-import subprocess
-import Common.GlobalData as GlobalData
-from Common.LongFilePathSupport import OpenLongFilePath as open
-from Common.Misc import SaveFileOnChange
-
-FILE_COMMENT_TEMPLATE = \
-"""
-## @file
-#
-# THIS IS AUTO-GENERATED FILE BY BUILD TOOLS AND PLEASE DO NOT MAKE MODIFICATION.
-#
-# This file lists all VPD informations for a platform collected by build.exe.
-#
-# Copyright (c) 2010, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-
-"""
-
-## The class manage VpdInfoFile.
-#
-# This file contains an ordered (based on position in the DSC file) list of the PCDs specified in the platform description file (DSC). The Value field that will be assigned to the PCD comes from the DSC file, INF file (if not defined in the DSC file) or the DEC file (if not defined in the INF file). This file is used as an input to the BPDG tool.
-# Format for this file (using EBNF notation) is:
-# <File> :: = [<CommentBlock>]
-# [<PcdEntry>]*
-# <CommentBlock> ::= ["#" <String> <EOL>]*
-# <PcdEntry> ::= <PcdName> "|" <Offset> "|" <Size> "|" <Value> <EOL>
-# <PcdName> ::= <TokenSpaceCName> "." <PcdCName>
-# <TokenSpaceCName> ::= C Variable Name of the Token Space GUID
-# <PcdCName> ::= C Variable Name of the PCD
-# <Offset> ::= {"*"} {<HexNumber>}
-# <HexNumber> ::= "0x" (a-fA-F0-9){1,8}
-# <Size> ::= <HexNumber>
-# <Value> ::= {<HexNumber>} {<NonNegativeInt>} {<QString>} {<Array>}
-# <NonNegativeInt> ::= (0-9)+
-# <QString> ::= ["L"] <DblQuote> <String> <DblQuote>
-# <DblQuote> ::= 0x22
-# <Array> ::= {<CArray>} {<NList>}
-# <CArray> ::= "{" <HexNumber> ["," <HexNumber>]* "}"
-# <NList> ::= <HexNumber> ["," <HexNumber>]*
-#
-class VpdInfoFile:
-
- ## The mapping dictionary from datum type to size string.
- _MAX_SIZE_TYPE = {"BOOLEAN":"1", "UINT8":"1", "UINT16":"2", "UINT32":"4", "UINT64":"8"}
- _rVpdPcdLine = None
- ## Constructor
- def __init__(self):
- ## Dictionary for VPD in following format
- #
- # Key : PcdClassObject instance.
- # @see BuildClassObject.PcdClassObject
- # Value : offset in different SKU such as [sku1_offset, sku2_offset]
- self._VpdArray = {}
-
- ## Add a VPD PCD collected from platform's autogen when building.
- #
- # @param vpds The list of VPD PCD collected for a platform.
- # @see BuildClassObject.PcdClassObject
- #
- # @param offset integer value for VPD's offset in specific SKU.
- #
- def Add(self, Vpd, Offset):
- if (Vpd == None):
- EdkLogger.error("VpdInfoFile", BuildToolError.ATTRIBUTE_UNKNOWN_ERROR, "Invalid VPD PCD entry.")
-
- if not (Offset >= 0 or Offset == "*"):
- EdkLogger.error("VpdInfoFile", BuildToolError.PARAMETER_INVALID, "Invalid offset parameter: %s." % Offset)
-
- if Vpd.DatumType == "VOID*":
- if Vpd.MaxDatumSize <= 0:
- EdkLogger.error("VpdInfoFile", BuildToolError.PARAMETER_INVALID,
- "Invalid max datum size for VPD PCD %s.%s" % (Vpd.TokenSpaceGuidCName, Vpd.TokenCName))
- elif Vpd.DatumType in ["BOOLEAN", "UINT8", "UINT16", "UINT32", "UINT64"]:
- if Vpd.MaxDatumSize == None or Vpd.MaxDatumSize == "":
- Vpd.MaxDatumSize = VpdInfoFile._MAX_SIZE_TYPE[Vpd.DatumType]
- else:
- EdkLogger.error("VpdInfoFile", BuildToolError.PARAMETER_INVALID,
- "Invalid DatumType %s for VPD PCD %s.%s" % (Vpd.DatumType, Vpd.TokenSpaceGuidCName, Vpd.TokenCName))
-
- if Vpd not in self._VpdArray.keys():
- #
- # If there is no Vpd instance in dict, that imply this offset for a given SKU is a new one
- #
- self._VpdArray[Vpd] = [Offset]
- else:
- #
- # If there is an offset for a specific SKU in dict, then append this offset for other sku to array.
- #
- self._VpdArray[Vpd].append(Offset)
-
-
- ## Generate VPD PCD information into a text file
- #
- # If parameter FilePath is invalid, then assert.
- # If
- # @param FilePath The given file path which would hold VPD information
- def Write(self, FilePath):
- if not (FilePath != None or len(FilePath) != 0):
- EdkLogger.error("VpdInfoFile", BuildToolError.PARAMETER_INVALID,
- "Invalid parameter FilePath: %s." % FilePath)
-
- Content = FILE_COMMENT_TEMPLATE
- Pcds = self._VpdArray.keys()
- Pcds.sort()
- for Pcd in Pcds:
- i = 0
- PcdTokenCName = Pcd.TokenCName
- for PcdItem in GlobalData.MixedPcd:
- if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdItem]:
- PcdTokenCName = PcdItem[0]
- for Offset in self._VpdArray[Pcd]:
- PcdValue = str(Pcd.SkuInfoList[Pcd.SkuInfoList.keys()[i]].DefaultValue).strip()
- if PcdValue == "" :
- PcdValue = Pcd.DefaultValue
-
- Content += "%s.%s|%s|%s|%s|%s \n" % (Pcd.TokenSpaceGuidCName, PcdTokenCName, str(Pcd.SkuInfoList.keys()[i]),str(Offset).strip(), str(Pcd.MaxDatumSize).strip(),PcdValue)
- i += 1
-
- return SaveFileOnChange(FilePath, Content, False)
-
- ## Read an existing VPD PCD info file.
- #
- # This routine will read VPD PCD information from existing file and construct
- # internal PcdClassObject array.
- # This routine could be used by third-party tool to parse VPD info file content.
- #
- # @param FilePath The full path string for existing VPD PCD info file.
- def Read(self, FilePath):
- try:
- fd = open(FilePath, "r")
- except:
- EdkLogger.error("VpdInfoFile",
- BuildToolError.FILE_OPEN_FAILURE,
- "Fail to open file %s for written." % FilePath)
- Lines = fd.readlines()
- for Line in Lines:
- Line = Line.strip()
- if len(Line) == 0 or Line.startswith("#"):
- continue
-
- #
- # the line must follow output format defined in BPDG spec.
- #
- try:
- PcdName, SkuId,Offset, Size, Value = Line.split("#")[0].split("|")
- PcdName, SkuId,Offset, Size, Value = PcdName.strip(), SkuId.strip(),Offset.strip(), Size.strip(), Value.strip()
- TokenSpaceName, PcdTokenName = PcdName.split(".")
- except:
- EdkLogger.error("BPDG", BuildToolError.PARSER_ERROR, "Fail to parse VPD information file %s" % FilePath)
-
- Found = False
-
- for VpdObject in self._VpdArray.keys():
- VpdObjectTokenCName = VpdObject.TokenCName
- for PcdItem in GlobalData.MixedPcd:
- if (VpdObject.TokenCName, VpdObject.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdItem]:
- VpdObjectTokenCName = PcdItem[0]
- for sku in VpdObject.SkuInfoList.keys():
- if VpdObject.TokenSpaceGuidCName == TokenSpaceName and VpdObjectTokenCName == PcdTokenName.strip() and sku == SkuId:
- if self._VpdArray[VpdObject][VpdObject.SkuInfoList.keys().index(sku)] == "*":
- if Offset == "*":
- EdkLogger.error("BPDG", BuildToolError.FORMAT_INVALID, "The offset of %s has not been fixed up by third-party BPDG tool." % PcdName)
- self._VpdArray[VpdObject][VpdObject.SkuInfoList.keys().index(sku)] = Offset
- Found = True
- if not Found:
- EdkLogger.error("BPDG", BuildToolError.PARSER_ERROR, "Can not find PCD defined in VPD guid file.")
-
- ## Get count of VPD PCD collected from platform's autogen when building.
- #
- # @return The integer count value
- def GetCount(self):
- Count = 0
- for OffsetList in self._VpdArray.values():
- Count += len(OffsetList)
-
- return Count
-
- ## Get an offset value for a given VPD PCD
- #
- # Because BPDG only support one Sku, so only return offset for SKU default.
- #
- # @param vpd A given VPD PCD
- def GetOffset(self, vpd):
- if not self._VpdArray.has_key(vpd):
- return None
-
- if len(self._VpdArray[vpd]) == 0:
- return None
-
- return self._VpdArray[vpd]
-
-## Call external BPDG tool to process VPD file
-#
-# @param ToolPath The string path name for BPDG tool
-# @param VpdFileName The string path name for VPD information guid.txt
-#
-def CallExtenalBPDGTool(ToolPath, VpdFileName):
- assert ToolPath != None, "Invalid parameter ToolPath"
- assert VpdFileName != None and os.path.exists(VpdFileName), "Invalid parameter VpdFileName"
-
- OutputDir = os.path.dirname(VpdFileName)
- FileName = os.path.basename(VpdFileName)
- BaseName, ext = os.path.splitext(FileName)
- OutputMapFileName = os.path.join(OutputDir, "%s.map" % BaseName)
- OutputBinFileName = os.path.join(OutputDir, "%s.bin" % BaseName)
-
- try:
- PopenObject = subprocess.Popen(' '.join([ToolPath,
- '-o', OutputBinFileName,
- '-m', OutputMapFileName,
- '-q',
- '-f',
- VpdFileName]),
- stdout=subprocess.PIPE,
- stderr= subprocess.PIPE,
- shell=True)
- except Exception, X:
- EdkLogger.error("BPDG", BuildToolError.COMMAND_FAILURE, ExtraData="%s" % (str(X)))
- (out, error) = PopenObject.communicate()
- print out
- while PopenObject.returncode == None :
- PopenObject.wait()
-
- if PopenObject.returncode != 0:
- if PopenObject.returncode != 0:
- EdkLogger.debug(EdkLogger.DEBUG_1, "Fail to call BPDG tool", str(error))
- EdkLogger.error("BPDG", BuildToolError.COMMAND_FAILURE, "Fail to execute BPDG tool with exit code: %d, the error message is: \n %s" % \
- (PopenObject.returncode, str(error)))
-
- return PopenObject.returncode
diff --git a/BaseTools/Source/Python/Common/__init__.py b/BaseTools/Source/Python/Common/__init__.py
deleted file mode 100644
index d8a96f9bfb..0000000000
--- a/BaseTools/Source/Python/Common/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-## @file
-# Python 'Common' package initialization file.
-#
-# This file is required to make Python interpreter treat the directory
-# as containing package.
-#
-# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#