diff --git a/aosp_diff/base_aaos/build/make/0002-WA-Replaced-python-files-from-A14.patch b/aosp_diff/base_aaos/build/make/0002-WA-Replaced-python-files-from-A14.patch deleted file mode 100644 index 5185a6cb2e..0000000000 --- a/aosp_diff/base_aaos/build/make/0002-WA-Replaced-python-files-from-A14.patch +++ /dev/null @@ -1,2066 +0,0 @@ -From 80b4c03fca9560dfd38abe957f00aa1eab4cc0f5 Mon Sep 17 00:00:00 2001 -From: Ankit Agarwal -Date: Thu, 20 Jun 2024 10:38:57 +0530 -Subject: [PATCH] [WA] Replaced python files from A14. - -Facing some build issues due to python. -So replacing files from A14 for now. - -Tests: Prepared EB and it is successfull. - -Tracked-On: NA -Signed-off-by: Ankit Agarwal ---- - tools/releasetools/add_img_to_target_files.py | 137 +++-- - tools/releasetools/common.py | 557 ++++++++---------- - .../releasetools/merge/merge_target_files.py | 51 +- - tools/releasetools/test_common.py | 261 ++++---- - tools/releasetools/validate_target_files.py | 24 +- - 5 files changed, 486 insertions(+), 544 deletions(-) - -diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py -index b39a82cf45..3b6fd08412 100644 ---- a/tools/releasetools/add_img_to_target_files.py -+++ b/tools/releasetools/add_img_to_target_files.py -@@ -42,10 +42,6 @@ Usage: add_img_to_target_files [flag] target_files - --is_signing - Skip building & adding the images for "userdata" and "cache" if we - are signing the target files. -- -- --avb-resolve-rollback-index-location-conflict -- If provided, resolve the conflict AVB rollback index location when -- necessary. - """ - - from __future__ import print_function -@@ -69,10 +65,9 @@ import verity_utils - import ota_metadata_pb2 - import rangelib - import sparse_img --from concurrent.futures import ThreadPoolExecutor -+ - from apex_utils import GetApexInfoFromTargetFiles - from common import ZipDelete, PARTITIONS_WITH_CARE_MAP, ExternalError, RunAndCheckOutput, IsSparseImage, MakeTempFile, ZipWrite --from build_image import FIXED_FILE_TIMESTAMP - - if sys.hexversion < 0x02070000: - print("Python 2.7 or newer is required.", file=sys.stderr) -@@ -85,7 +80,12 @@ OPTIONS.add_missing = False - OPTIONS.rebuild_recovery = False - OPTIONS.replace_updated_files_list = [] - OPTIONS.is_signing = False --OPTIONS.avb_resolve_rollback_index_location_conflict = False -+ -+# Use a fixed timestamp (01/01/2009 00:00:00 UTC) for files when packaging -+# images. (b/24377993, b/80600931) -+FIXED_FILE_TIMESTAMP = int(( -+ datetime.datetime(2009, 1, 1, 0, 0, 0, 0, None) - -+ datetime.datetime.utcfromtimestamp(0)).total_seconds()) - - - def ParseAvbFooter(img_path) -> avbtool.AvbFooter: -@@ -522,14 +522,12 @@ def AddPvmfw(output_zip): - return img.name - - --def AddCustomImages(output_zip, partition_name, image_list): -- """Adds and signs avb custom images as needed in IMAGES/. -+def AddCustomImages(output_zip, partition_name): -+ """Adds and signs custom images in IMAGES/. - - Args: - output_zip: The output zip file (needs to be already open), or None to - write images to OPTIONS.input_tmp/. -- partition_name: The custom image partition name. -- image_list: The image list of the custom image partition. - - Uses the image under IMAGES/ if it already exists. Otherwise looks for the - image under PREBUILT_IMAGES/, signs it as needed, and returns the image name. -@@ -538,20 +536,19 @@ def AddCustomImages(output_zip, partition_name, image_list): - AssertionError: If image can't be found. 
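Editor's note: the FIXED_FILE_TIMESTAMP computation reinstated in this hunk resolves to 2009-01-01 00:00:00 UTC. A standalone check of the arithmetic (standard library only; a sketch for reference, not releasetools code):

import datetime

# 01/01/2009 00:00:00 UTC expressed as seconds since the Unix epoch.
FIXED_FILE_TIMESTAMP = int((
    datetime.datetime(2009, 1, 1, 0, 0, 0, 0, None) -
    datetime.datetime.utcfromtimestamp(0)).total_seconds())

assert FIXED_FILE_TIMESTAMP == 1230768000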
- """ - -- builder = None - key_path = OPTIONS.info_dict.get("avb_{}_key_path".format(partition_name)) -- if key_path is not None: -- algorithm = OPTIONS.info_dict.get("avb_{}_algorithm".format(partition_name)) -- extra_args = OPTIONS.info_dict.get( -- "avb_{}_add_hashtree_footer_args".format(partition_name)) -- partition_size = OPTIONS.info_dict.get( -- "avb_{}_partition_size".format(partition_name)) -- -- builder = verity_utils.CreateCustomImageBuilder( -- OPTIONS.info_dict, partition_name, partition_size, -- key_path, algorithm, extra_args) -- -- for img_name in image_list: -+ algorithm = OPTIONS.info_dict.get("avb_{}_algorithm".format(partition_name)) -+ extra_args = OPTIONS.info_dict.get( -+ "avb_{}_add_hashtree_footer_args".format(partition_name)) -+ partition_size = OPTIONS.info_dict.get( -+ "avb_{}_partition_size".format(partition_name)) -+ -+ builder = verity_utils.CreateCustomImageBuilder( -+ OPTIONS.info_dict, partition_name, partition_size, -+ key_path, algorithm, extra_args) -+ -+ for img_name in OPTIONS.info_dict.get( -+ "avb_{}_image_list".format(partition_name)).split(): - custom_image = OutputFile( - output_zip, OPTIONS.input_tmp, "IMAGES", img_name) - if os.path.exists(custom_image.name): -@@ -597,6 +594,15 @@ def CreateImage(input_dir, info_dict, what, output_file, block_list=None): - if block_list: - image_props["block_list"] = block_list.name - -+ # Use repeatable ext4 FS UUID and hash_seed UUID (based on partition name and -+ # build fingerprint). Also use the legacy build id, because the vbmeta digest -+ # isn't available at this point. -+ build_info = common.BuildInfo(info_dict, use_legacy_id=True) -+ uuid_seed = what + "-" + build_info.GetPartitionFingerprint(what) -+ image_props["uuid"] = str(uuid.uuid5(uuid.NAMESPACE_URL, uuid_seed)) -+ hash_seed = "hash_seed-" + uuid_seed -+ image_props["hash_seed"] = str(uuid.uuid5(uuid.NAMESPACE_URL, hash_seed)) -+ - build_image.BuildImage( - os.path.join(input_dir, what.upper()), image_props, output_file.name) - -@@ -687,12 +693,39 @@ def AddVBMeta(output_zip, partitions, name, needed_partitions): - logger.info("%s.img already exists; not rebuilding...", name) - return img.name - -- common.BuildVBMeta(img.name, partitions, name, needed_partitions, -- OPTIONS.avb_resolve_rollback_index_location_conflict) -+ common.BuildVBMeta(img.name, partitions, name, needed_partitions) - img.Write() - return img.name - - -+def AddPartitionTable(output_zip): -+ """Create a partition table image and store it in output_zip.""" -+ -+ img = OutputFile( -+ output_zip, OPTIONS.input_tmp, "IMAGES", "partition-table.img") -+ bpt = OutputFile( -+ output_zip, OPTIONS.input_tmp, "META", "partition-table.bpt") -+ -+ # use BPTTOOL from environ, or "bpttool" if empty or not set. 
-+ bpttool = os.getenv("BPTTOOL") or "bpttool" -+ cmd = [bpttool, "make_table", "--output_json", bpt.name, -+ "--output_gpt", img.name] -+ input_files_str = OPTIONS.info_dict["board_bpt_input_files"] -+ input_files = input_files_str.split() -+ for i in input_files: -+ cmd.extend(["--input", i]) -+ disk_size = OPTIONS.info_dict.get("board_bpt_disk_size") -+ if disk_size: -+ cmd.extend(["--disk_size", disk_size]) -+ args = OPTIONS.info_dict.get("board_bpt_make_table_args") -+ if args: -+ cmd.extend(shlex.split(args)) -+ common.RunAndCheckOutput(cmd) -+ -+ img.Write() -+ bpt.Write() -+ -+ - def AddCache(output_zip): - """Create an empty cache image and store it in output_zip.""" - -@@ -1049,15 +1082,8 @@ def AddImagesToTargetFiles(filename): - ("system_dlkm", has_system_dlkm, AddSystemDlkm, []), - ("system_other", has_system_other, AddSystemOther, []), - ) -- # If output_zip exists, each add_partition_calls writes bytes to the same output_zip, -- # which is not thread-safe. So, run them in serial if output_zip exists. -- if output_zip: -- for call in add_partition_calls: -- add_partition(*call) -- else: -- with ThreadPoolExecutor(max_workers=len(add_partition_calls)) as executor: -- for future in [executor.submit(add_partition, *call) for call in add_partition_calls]: -- future.result() -+ for call in add_partition_calls: -+ add_partition(*call) - - AddApexInfo(output_zip) - -@@ -1067,6 +1093,10 @@ def AddImagesToTargetFiles(filename): - banner("cache") - AddCache(output_zip) - -+ if OPTIONS.info_dict.get("board_bpt_enable") == "true": -+ banner("partition-table") -+ AddPartitionTable(output_zip) -+ - add_partition("dtbo", - OPTIONS.info_dict.get("has_dtbo") == "true", AddDtbo, []) - add_partition("pvmfw", -@@ -1074,29 +1104,18 @@ def AddImagesToTargetFiles(filename): - - # Custom images. - custom_partitions = OPTIONS.info_dict.get( -- "custom_images_partition_list", "").strip().split() -+ "avb_custom_images_partition_list", "").strip().split() - for partition_name in custom_partitions: - partition_name = partition_name.strip() - banner("custom images for " + partition_name) -- image_list = OPTIONS.info_dict.get( -- "{}_image_list".format(partition_name)).split() -- partitions[partition_name] = AddCustomImages(output_zip, partition_name, image_list) -- -- avb_custom_partitions = OPTIONS.info_dict.get( -- "avb_custom_images_partition_list", "").strip().split() -- for partition_name in avb_custom_partitions: -- partition_name = partition_name.strip() -- banner("avb custom images for " + partition_name) -- image_list = OPTIONS.info_dict.get( -- "avb_{}_image_list".format(partition_name)).split() -- partitions[partition_name] = AddCustomImages(output_zip, partition_name, image_list) -+ partitions[partition_name] = AddCustomImages(output_zip, partition_name) - - if OPTIONS.info_dict.get("avb_enable") == "true": - # vbmeta_partitions includes the partitions that should be included into - # top-level vbmeta.img, which are the ones that are not included in any - # chained VBMeta image plus the chained VBMeta images themselves. -- # Currently avb_custom_partitions are all chained to VBMeta image. -- vbmeta_partitions = common.AVB_PARTITIONS[:] + tuple(avb_custom_partitions) -+ # Currently custom_partitions are all chained to VBMeta image. 
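Editor's note: the custom-vbmeta handling in this hunk performs the same list surgery for each chained image: drop the partitions folded into the chained vbmeta, then append the chained partition itself. Summarized in isolation (chain_custom_vbmeta is a hypothetical helper):

def chain_custom_vbmeta(vbmeta_partitions, avb_part, included_partitions):
    # Partitions covered by vbmeta_<avb_part> leave the top-level vbmeta and
    # are represented by the chained vbmeta partition instead.
    name = "vbmeta_" + avb_part
    remaining = [p for p in vbmeta_partitions if p not in included_partitions]
    remaining.append(name)
    return remaining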
-+ vbmeta_partitions = common.AVB_PARTITIONS[:] + tuple(custom_partitions) - - vbmeta_system = OPTIONS.info_dict.get("avb_vbmeta_system", "").strip() - if vbmeta_system: -@@ -1117,18 +1136,14 @@ def AddImagesToTargetFiles(filename): - item for item in vbmeta_partitions - if item not in vbmeta_vendor.split()] - vbmeta_partitions.append("vbmeta_vendor") -- custom_avb_partitions = OPTIONS.info_dict.get( -- "avb_custom_vbmeta_images_partition_list", "").strip().split() -+ custom_avb_partitions = OPTIONS.info_dict.get("avb_custom_vbmeta_images_partition_list", "").strip().split() - if custom_avb_partitions: - for avb_part in custom_avb_partitions: - partition_name = "vbmeta_" + avb_part -- included_partitions = OPTIONS.info_dict.get( -- "avb_vbmeta_{}".format(avb_part), "").strip().split() -- assert included_partitions, "Custom vbmeta partition {0} missing avb_vbmeta_{0} prop".format( -- avb_part) -+ included_partitions = OPTIONS.info_dict.get("avb_vbmeta_{}".format(avb_part), "").strip().split() -+ assert included_partitions, "Custom vbmeta partition {0} missing avb_vbmeta_{0} prop".format(avb_part) - banner(partition_name) -- logger.info("VBMeta partition {} needs {}".format( -- partition_name, included_partitions)) -+ logger.info("VBMeta partition {} needs {}".format(partition_name, included_partitions)) - partitions[partition_name] = AddVBMeta( - output_zip, partitions, partition_name, included_partitions) - vbmeta_partitions = [ -@@ -1136,6 +1151,7 @@ def AddImagesToTargetFiles(filename): - if item not in included_partitions] - vbmeta_partitions.append(partition_name) - -+ - if OPTIONS.info_dict.get("avb_building_vbmeta_image") == "true": - banner("vbmeta") - AddVBMeta(output_zip, partitions, "vbmeta", vbmeta_partitions) -@@ -1229,8 +1245,6 @@ def main(argv): - " please switch to AVB") - elif o == "--is_signing": - OPTIONS.is_signing = True -- elif o == "--avb_resolve_rollback_index_location_conflict": -- OPTIONS.avb_resolve_rollback_index_location_conflict = True - else: - return False - return True -@@ -1240,8 +1254,7 @@ def main(argv): - extra_long_opts=["add_missing", "rebuild_recovery", - "replace_verity_public_key=", - "replace_verity_private_key=", -- "is_signing", -- "avb_resolve_rollback_index_location_conflict"], -+ "is_signing"], - extra_option_handler=option_handler) - - if len(args) != 1: -diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py -index 0317cff897..a7b39c9684 100644 ---- a/tools/releasetools/common.py -+++ b/tools/releasetools/common.py -@@ -20,6 +20,7 @@ import copy - import datetime - import errno - import fnmatch -+from genericpath import isdir - import getopt - import getpass - import gzip -@@ -33,29 +34,21 @@ import re - import shlex - import shutil - import subprocess --import stat - import sys - import tempfile - import threading - import time - import zipfile -- --from typing import Iterable, Callable --from dataclasses import dataclass - from hashlib import sha1, sha256 - - import images -+import rangelib - import sparse_img - from blockimgdiff import BlockImageDiff - - logger = logging.getLogger(__name__) - - --@dataclass --class OptionHandler: -- extra_long_opts: Iterable[str] -- handler: Callable -- - class Options(object): - - def __init__(self): -@@ -80,7 +73,9 @@ class Options(object): - if "ANDROID_HOST_OUT" in os.environ: - self.search_path = os.environ["ANDROID_HOST_OUT"] - self.signapk_shared_library_path = "lib64" # Relative to search_path -+ self.sign_sepolicy_path = None - self.extra_signapk_args = [] -+ 
self.extra_sign_sepolicy_args = [] - self.aapt2_path = "aapt2" - self.java_path = "java" # Use the one on the path by default. - self.java_args = ["-Xmx4096m"] # The default JVM args. -@@ -100,6 +95,8 @@ class Options(object): - self.cache_size = None - self.stash_threshold = 0.8 - self.logfile = None -+ self.host_tools = {} -+ self.sepolicy_name = 'sepolicy.apex' - - - OPTIONS = Options() -@@ -115,18 +112,13 @@ SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL") - # descriptor into vbmeta.img. When adding a new entry here, the - # AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks need to be updated - # accordingly. --AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw', -- 'recovery', 'system', 'system_ext', 'vendor', 'vendor_boot', -- 'vendor_kernel_boot', 'vendor_dlkm', 'odm_dlkm', -- 'system_dlkm') -+AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw', 'recovery', -+ 'system', 'system_ext', 'vendor', 'vendor_boot', 'vendor_kernel_boot', -+ 'vendor_dlkm', 'odm_dlkm', 'system_dlkm') - - # Chained VBMeta partitions. - AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor') - --# avbtool arguments name --AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG = '--include_descriptors_from_image' --AVB_ARG_NAME_CHAIN_PARTITION = '--chain_partition' -- - # Partitions that should have their care_map added to META/care_map.pb - PARTITIONS_WITH_CARE_MAP = [ - 'system', -@@ -147,19 +139,6 @@ PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot', 'init_boot'] - RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop'] - - --@dataclass --class AvbChainedPartitionArg: -- """The required arguments for avbtool --chain_partition.""" -- partition: str -- rollback_index_location: int -- pubkey_path: str -- -- def to_string(self): -- """Convert to string command arguments.""" -- return '{}:{}:{}'.format( -- self.partition, self.rollback_index_location, self.pubkey_path) -- -- - class ErrorCode(object): - """Define error_codes for failures that happen during the actual - update package installation. -@@ -215,7 +194,7 @@ def InitLogging(): - '': { - 'handlers': ['default'], - 'propagate': True, -- 'level': 'NOTSET', -+ 'level': 'INFO', - } - } - } -@@ -245,15 +224,23 @@ def InitLogging(): - logging.config.dictConfig(config) - - -+def SetHostToolLocation(tool_name, location): -+ OPTIONS.host_tools[tool_name] = location -+ -+ - def FindHostToolPath(tool_name): - """Finds the path to the host tool. - - Args: - tool_name: name of the tool to find - Returns: -- path to the tool if found under the same directory as this binary is located at. If not found, -- tool_name is returned. -+ path to the tool if found under either one of the host_tools map or under -+ the same directory as this binary is located at. If not found, tool_name -+ is returned. 
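Editor's note: a condensed sketch of the lookup order FindHostToolPath implements once the host_tools map is restored above — explicit registration wins, then the directory containing the running script, then the bare name (left to PATH resolution by the caller):

import os
import sys

def find_host_tool(tool_name, host_tools):
    # 1. Explicitly registered location (see SetHostToolLocation).
    if tool_name in host_tools:
        return host_tools[tool_name]
    # 2. Same directory as the running binary/script.
    my_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    tool_path = os.path.join(my_dir, tool_name)
    if os.path.exists(tool_path):
        return tool_path
    # 3. Fall back to the bare tool name.
    return tool_name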
- """ -+ if tool_name in OPTIONS.host_tools: -+ return OPTIONS.host_tools[tool_name] -+ - my_dir = os.path.dirname(os.path.realpath(sys.argv[0])) - tool_path = os.path.join(my_dir, tool_name) - if os.path.exists(tool_path): -@@ -463,26 +450,20 @@ class BuildInfo(object): - - @property - def is_vabc(self): -- return self.info_dict.get("virtual_ab_compression") == "true" -+ vendor_prop = self.info_dict.get("vendor.build.prop") -+ vabc_enabled = vendor_prop and \ -+ vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true" -+ return vabc_enabled - - @property - def is_android_r(self): - system_prop = self.info_dict.get("system.build.prop") - return system_prop and system_prop.GetProp("ro.build.version.release") == "11" - -- @property -- def is_release_key(self): -- system_prop = self.info_dict.get("build.prop") -- return system_prop and system_prop.GetProp("ro.build.tags") == "release-key" -- - @property - def vabc_compression_param(self): - return self.get("virtual_ab_compression_method", "") - -- @property -- def vabc_cow_version(self): -- return self.get("virtual_ab_cow_version", "") -- - @property - def vendor_api_level(self): - vendor_prop = self.info_dict.get("vendor.build.prop") -@@ -490,15 +471,16 @@ class BuildInfo(object): - return -1 - - props = [ -+ "ro.board.api_level", - "ro.board.first_api_level", - "ro.product.first_api_level", - ] - for prop in props: - value = vendor_prop.GetProp(prop) - try: -- return int(value) -+ return int(value) - except: -- pass -+ pass - return -1 - - @property -@@ -779,33 +761,6 @@ def ReadFromInputFile(input_file, fn): - return ReadBytesFromInputFile(input_file, fn).decode() - - --def WriteBytesToInputFile(input_file, fn, data): -- """Write bytes |data| contents to fn of input zipfile or directory.""" -- if isinstance(input_file, zipfile.ZipFile): -- with input_file.open(fn, "w") as entry_fp: -- return entry_fp.write(data) -- elif zipfile.is_zipfile(input_file): -- with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp: -- with zfp.open(fn, "w") as entry_fp: -- return entry_fp.write(data) -- else: -- if not os.path.isdir(input_file): -- raise ValueError( -- "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file) -- path = os.path.join(input_file, *fn.split("/")) -- try: -- with open(path, "wb") as f: -- return f.write(data) -- except IOError as e: -- if e.errno == errno.ENOENT: -- raise KeyError(fn) -- -- --def WriteToInputFile(input_file, fn, str: str): -- """Write str content to fn of input file or directory""" -- return WriteBytesToInputFile(input_file, fn, str.encode()) -- -- - def ExtractFromInputFile(input_file, fn): - """Extracts the contents of fn from input zipfile or directory into a file.""" - if isinstance(input_file, zipfile.ZipFile): -@@ -953,14 +908,20 @@ def LoadInfoDict(input_file, repacking=False): - input_file, partition, ramdisk_format=ramdisk_format) - d["build.prop"] = d["system.build.prop"] - -+ # Set up the salt (based on fingerprint) that will be used when adding AVB -+ # hash / hashtree footers. 
- if d.get("avb_enable") == "true": - build_info = BuildInfo(d, use_legacy_id=True) -+ for partition in PARTITIONS_WITH_BUILD_PROP: -+ fingerprint = build_info.GetPartitionFingerprint(partition) -+ if fingerprint: -+ d["avb_{}_salt".format(partition)] = sha256( -+ fingerprint.encode()).hexdigest() -+ - # Set up the salt for partitions without build.prop - if build_info.fingerprint: -- if "fingerprint" not in d: -- d["fingerprint"] = build_info.fingerprint -- if "avb_salt" not in d: -- d["avb_salt"] = sha256(build_info.fingerprint.encode()).hexdigest() -+ d["avb_salt"] = sha256(build_info.fingerprint.encode()).hexdigest() -+ - # Set the vbmeta digest if exists - try: - d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip() -@@ -1166,7 +1127,8 @@ class PartitionBuildProps(object): - return self.build_props.get(prop) - - --def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path): -+def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path, -+ system_root_image=False): - class Partition(object): - def __init__(self, mount_point, fs_type, device, length, context, slotselect): - self.mount_point = mount_point -@@ -1225,6 +1187,12 @@ def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path): - device=pieces[0], length=length, context=context, - slotselect=slotselect) - -+ # / is used for the system mount point when the root directory is included in -+ # system. Other areas assume system is always at "/system" so point /system -+ # at /. -+ if system_root_image: -+ assert '/system' not in d and '/' in d -+ d["/system"] = d["/"] - return d - - -@@ -1240,19 +1208,32 @@ def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper): - # ../RAMDISK/system/etc/recovery.fstab. This function has to handle both - # cases, since it may load the info_dict from an old build (e.g. when - # generating incremental OTAs from that build). 
-+ system_root_image = info_dict.get('system_root_image') == 'true' - if info_dict.get('no_recovery') != 'true': - recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab' -- if not DoesInputFileContain(input_file, recovery_fstab_path): -- recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab' -+ if isinstance(input_file, zipfile.ZipFile): -+ if recovery_fstab_path not in input_file.namelist(): -+ recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab' -+ else: -+ path = os.path.join(input_file, *recovery_fstab_path.split('/')) -+ if not os.path.exists(path): -+ recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab' - return LoadRecoveryFSTab( -- read_helper, info_dict['fstab_version'], recovery_fstab_path) -+ read_helper, info_dict['fstab_version'], recovery_fstab_path, -+ system_root_image) - - if info_dict.get('recovery_as_boot') == 'true': - recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab' -- if not DoesInputFileContain(input_file, recovery_fstab_path): -- recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab' -+ if isinstance(input_file, zipfile.ZipFile): -+ if recovery_fstab_path not in input_file.namelist(): -+ recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab' -+ else: -+ path = os.path.join(input_file, *recovery_fstab_path.split('/')) -+ if not os.path.exists(path): -+ recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab' - return LoadRecoveryFSTab( -- read_helper, info_dict['fstab_version'], recovery_fstab_path) -+ read_helper, info_dict['fstab_version'], recovery_fstab_path, -+ system_root_image) - - return None - -@@ -1319,11 +1300,7 @@ def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict): - key = "super_%s_partition_list" % partition_group - merged_dict[key] = uniq_concat( - framework_dict.get(key, ""), vendor_dict.get(key, "")) -- # in the case that vendor is on s build, but is taking a v3 -> v3 vabc ota, we want to fallback to v2 -- if "vabc_cow_version" not in vendor_dict or "vabc_cow_version" not in framework_dict: -- merged_dict["vabc_cow_version"] = '2' -- else: -- merged_dict["vabc_cow_version"] = min(vendor_dict["vabc_cow_version"], framework_dict["vabc_cow_version"]) -+ - # Various other flags should be copied from the vendor dict, if defined. - for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake", - "super_metadata_device", "super_partition_error_limit", -@@ -1420,16 +1397,14 @@ def RunHostInitVerifier(product_out, partition_map): - return RunAndCheckOutput(cmd) - - --def AppendAVBSigningArgs(cmd, partition, avb_salt=None): -+def AppendAVBSigningArgs(cmd, partition): - """Append signing arguments for avbtool.""" - # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096" -- key_path = ResolveAVBSigningPathArgs( -- OPTIONS.info_dict.get("avb_" + partition + "_key_path")) -+ key_path = ResolveAVBSigningPathArgs(OPTIONS.info_dict.get("avb_" + partition + "_key_path")) - algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm") - if key_path and algorithm: - cmd.extend(["--key", key_path, "--algorithm", algorithm]) -- if avb_salt is None: -- avb_salt = OPTIONS.info_dict.get("avb_salt") -+ avb_salt = OPTIONS.info_dict.get("avb_salt") - # make_vbmeta_image doesn't like "--salt" (and it's not needed). 
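Editor's note: both fstab-path fallbacks above repeat the same existence check against either an opened zip or an extracted directory (the deleted A15 code factored this into DoesInputFileContain). As a sketch:

import os
import zipfile

def entry_exists(input_file, name):
    # input_file is either an opened ZipFile or the path to an extracted
    # target-files directory.
    if isinstance(input_file, zipfile.ZipFile):
        return name in input_file.namelist()
    return os.path.exists(os.path.join(input_file, *name.split('/')))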
- if avb_salt and not partition.startswith("vbmeta"): - cmd.extend(["--salt", avb_salt]) -@@ -1440,12 +1415,11 @@ def ResolveAVBSigningPathArgs(split_args): - def ResolveBinaryPath(path): - if os.path.exists(path): - return path -- if OPTIONS.search_path: -- new_path = os.path.join(OPTIONS.search_path, path) -- if os.path.exists(new_path): -- return new_path -+ new_path = os.path.join(OPTIONS.search_path, path) -+ if os.path.exists(new_path): -+ return new_path - raise ExternalError( -- "Failed to find {}".format(path)) -+ "Failed to find {}".format(new_path)) - - if not split_args: - return split_args -@@ -1463,7 +1437,7 @@ def ResolveAVBSigningPathArgs(split_args): - - - def GetAvbPartitionArg(partition, image, info_dict=None): -- """Returns the VBMeta arguments for one partition. -+ """Returns the VBMeta arguments for partition. - - It sets up the VBMeta argument by including the partition descriptor from the - given 'image', or by configuring the partition as a chained partition. -@@ -1475,7 +1449,7 @@ def GetAvbPartitionArg(partition, image, info_dict=None): - OPTIONS.info_dict if None has been given. - - Returns: -- A list of VBMeta arguments for one partition. -+ A list of VBMeta arguments. - """ - if info_dict is None: - info_dict = OPTIONS.info_dict -@@ -1483,7 +1457,7 @@ def GetAvbPartitionArg(partition, image, info_dict=None): - # Check if chain partition is used. - key_path = info_dict.get("avb_" + partition + "_key_path") - if not key_path: -- return [AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image] -+ return ["--include_descriptors_from_image", image] - - # For a non-A/B device, we don't chain /recovery nor include its descriptor - # into vbmeta.img. The recovery image will be configured on an independent -@@ -1495,62 +1469,7 @@ def GetAvbPartitionArg(partition, image, info_dict=None): - - # Otherwise chain the partition into vbmeta. - chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict) -- return [AVB_ARG_NAME_CHAIN_PARTITION, chained_partition_arg] -- -- --def GetAvbPartitionsArg(partitions, -- resolve_rollback_index_location_conflict=False, -- info_dict=None): -- """Returns the VBMeta arguments for all AVB partitions. -- -- It sets up the VBMeta argument by calling GetAvbPartitionArg of all -- partitions. -- -- Args: -- partitions: A dict of all AVB partitions. -- resolve_rollback_index_location_conflict: If true, resolve conflicting avb -- rollback index locations by assigning the smallest unused value. -- info_dict: A dict returned by common.LoadInfoDict(). -- -- Returns: -- A list of VBMeta arguments for all partitions. -- """ -- # An AVB partition will be linked into a vbmeta partition by either -- # AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG or AVB_ARG_NAME_CHAIN_PARTITION, there -- # should be no other cases. -- valid_args = { -- AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG: [], -- AVB_ARG_NAME_CHAIN_PARTITION: [] -- } -- -- for partition, path in sorted(partitions.items()): -- avb_partition_arg = GetAvbPartitionArg(partition, path, info_dict) -- if not avb_partition_arg: -- continue -- arg_name, arg_value = avb_partition_arg -- assert arg_name in valid_args -- valid_args[arg_name].append(arg_value) -- -- # Copy the arguments for non-chained AVB partitions directly without -- # intervention. -- avb_args = [] -- for image in valid_args[AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG]: -- avb_args.extend([AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image]) -- -- # Handle chained AVB partitions. The rollback index location might be -- # adjusted if two partitions use the same value. 
This may happen when mixing -- # a shared system image with other vendor images. -- used_index_loc = set() -- for chained_partition_arg in valid_args[AVB_ARG_NAME_CHAIN_PARTITION]: -- if resolve_rollback_index_location_conflict: -- while chained_partition_arg.rollback_index_location in used_index_loc: -- chained_partition_arg.rollback_index_location += 1 -- -- used_index_loc.add(chained_partition_arg.rollback_index_location) -- avb_args.extend([AVB_ARG_NAME_CHAIN_PARTITION, -- chained_partition_arg.to_string()]) -- -- return avb_args -+ return ["--chain_partition", chained_partition_arg] - - - def GetAvbChainedPartitionArg(partition, info_dict, key=None): -@@ -1564,8 +1483,8 @@ def GetAvbChainedPartitionArg(partition, info_dict, key=None): - the key listed in info_dict. - - Returns: -- An AvbChainedPartitionArg object with rollback_index_location and -- pubkey_path that can be used to build or verify vbmeta image. -+ A string of form "partition:rollback_index_location:key" that can be used to -+ build or verify vbmeta image. - """ - if key is None: - key = info_dict["avb_" + partition + "_key_path"] -@@ -1573,14 +1492,54 @@ def GetAvbChainedPartitionArg(partition, info_dict, key=None): - pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key) - rollback_index_location = info_dict[ - "avb_" + partition + "_rollback_index_location"] -- return AvbChainedPartitionArg( -- partition=partition, -- rollback_index_location=int(rollback_index_location), -- pubkey_path=pubkey_path) -+ return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path) -+ -+ -+def _HasGkiCertificationArgs(): -+ return ("gki_signing_key_path" in OPTIONS.info_dict and -+ "gki_signing_algorithm" in OPTIONS.info_dict) -+ -+ -+def _GenerateGkiCertificate(image, image_name): -+ key_path = OPTIONS.info_dict.get("gki_signing_key_path") -+ algorithm = OPTIONS.info_dict.get("gki_signing_algorithm") -+ -+ key_path = ResolveAVBSigningPathArgs(key_path) -+ -+ # Checks key_path exists, before processing --gki_signing_* args. -+ if not os.path.exists(key_path): -+ raise ExternalError( -+ 'gki_signing_key_path: "{}" not found'.format(key_path)) -+ -+ output_certificate = tempfile.NamedTemporaryFile() -+ cmd = [ -+ "generate_gki_certificate", -+ "--name", image_name, -+ "--algorithm", algorithm, -+ "--key", key_path, -+ "--output", output_certificate.name, -+ image, -+ ] -+ -+ signature_args = OPTIONS.info_dict.get("gki_signing_signature_args", "") -+ signature_args = signature_args.strip() -+ if signature_args: -+ cmd.extend(["--additional_avb_args", signature_args]) -+ -+ args = OPTIONS.info_dict.get("avb_boot_add_hash_footer_args", "") -+ args = args.strip() -+ if args: -+ cmd.extend(["--additional_avb_args", args]) -+ -+ RunAndCheckOutput(cmd) -+ -+ output_certificate.seek(os.SEEK_SET, 0) -+ data = output_certificate.read() -+ output_certificate.close() -+ return data - - --def BuildVBMeta(image_path, partitions, name, needed_partitions, -- resolve_rollback_index_location_conflict=False): -+def BuildVBMeta(image_path, partitions, name, needed_partitions): - """Creates a VBMeta image. - - It generates the requested VBMeta image. The requested image could be for -@@ -1595,8 +1554,6 @@ def BuildVBMeta(image_path, partitions, name, needed_partitions, - name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'. - needed_partitions: Partitions whose descriptors should be included into the - generated VBMeta image. 
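Editor's note: for contrast, the conflict resolution the deleted A15 GetAvbPartitionsArg performed (and which this A14 replacement drops) reduces to bumping each colliding rollback index location to the smallest unused value:

def resolve_rollback_index_locations(locations):
    # locations: rollback index locations of chained partitions, in order.
    used, resolved = set(), []
    for loc in locations:
        while loc in used:
            loc += 1
        used.add(loc)
        resolved.append(loc)
    return resolved

# resolve_rollback_index_locations([1, 1, 2]) -> [1, 2, 3]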
-- resolve_rollback_index_location_conflict: If true, resolve conflicting avb -- rollback index locations by assigning the smallest unused value. - - Raises: - AssertionError: On invalid input args. -@@ -1610,8 +1567,7 @@ def BuildVBMeta(image_path, partitions, name, needed_partitions, - custom_avb_partitions = ["vbmeta_" + part for part in OPTIONS.info_dict.get( - "avb_custom_vbmeta_images_partition_list", "").strip().split()] - -- avb_partitions = {} -- for partition, path in sorted(partitions.items()): -+ for partition, path in partitions.items(): - if partition not in needed_partitions: - continue - assert (partition in AVB_PARTITIONS or -@@ -1621,9 +1577,7 @@ def BuildVBMeta(image_path, partitions, name, needed_partitions, - 'Unknown partition: {}'.format(partition) - assert os.path.exists(path), \ - 'Failed to find {} for {}'.format(path, partition) -- avb_partitions[partition] = path -- cmd.extend(GetAvbPartitionsArg(avb_partitions, -- resolve_rollback_index_location_conflict)) -+ cmd.extend(GetAvbPartitionArg(partition, path)) - - args = OPTIONS.info_dict.get("avb_{}_args".format(name)) - if args and args.strip(): -@@ -1634,7 +1588,7 @@ def BuildVBMeta(image_path, partitions, name, needed_partitions, - # same location when running this script (we have the input target_files - # zip only). For such cases, we additionally scan other locations (e.g. - # IMAGES/, RADIO/, etc) before bailing out. -- if arg == AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG: -+ if arg == '--include_descriptors_from_image': - chained_image = split_args[index + 1] - if os.path.exists(chained_image): - continue -@@ -1801,6 +1755,28 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, - - RunAndCheckOutput(cmd) - -+ if _HasGkiCertificationArgs(): -+ if not os.path.exists(img.name): -+ raise ValueError("Cannot find GKI boot.img") -+ if kernel_path is None or not os.path.exists(kernel_path): -+ raise ValueError("Cannot find GKI kernel.img") -+ -+ # Certify GKI images. -+ boot_signature_bytes = b'' -+ boot_signature_bytes += _GenerateGkiCertificate(img.name, "boot") -+ boot_signature_bytes += _GenerateGkiCertificate( -+ kernel_path, "generic_kernel") -+ -+ BOOT_SIGNATURE_SIZE = 16 * 1024 -+ if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE: -+ raise ValueError('GKI boot_signature size must be <= {}'.format(BOOT_SIGNATURE_SIZE)) -+ boot_signature_bytes += ( -+ b'\0' * (BOOT_SIGNATURE_SIZE - len(boot_signature_bytes))) -+ assert len(boot_signature_bytes) == BOOT_SIGNATURE_SIZE -+ -+ with open(img.name, 'ab') as f: -+ f.write(boot_signature_bytes) -+ - # Sign the image if vboot is non-empty. 
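Editor's note: the GKI certification block added in this hunk pads the concatenated certificates to a fixed 16 KiB blob before appending it to boot.img. The size handling in isolation:

BOOT_SIGNATURE_SIZE = 16 * 1024

def pad_boot_signature(boot_signature_bytes):
    # Zero-pad to exactly BOOT_SIGNATURE_SIZE; larger signatures are an error.
    if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE:
        raise ValueError(
            'GKI boot_signature size must be <= {}'.format(BOOT_SIGNATURE_SIZE))
    return boot_signature_bytes + b'\0' * (
        BOOT_SIGNATURE_SIZE - len(boot_signature_bytes))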
- if info_dict.get("vboot"): - path = "/" + partition_name -@@ -1833,11 +1809,7 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, - cmd = [avbtool, "add_hash_footer", "--image", img.name, - "--partition_size", str(part_size), "--partition_name", - partition_name] -- salt = None -- if kernel_path is not None: -- with open(kernel_path, "rb") as fp: -- salt = sha256(fp.read()).hexdigest() -- AppendAVBSigningArgs(cmd, partition_name, salt) -+ AppendAVBSigningArgs(cmd, partition_name) - args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args") - if args and args.strip(): - split_args = ResolveAVBSigningPathArgs(shlex.split(args)) -@@ -1879,18 +1851,7 @@ def _SignBootableImage(image_path, prebuilt_name, partition_name, - cmd = [avbtool, "add_hash_footer", "--image", image_path, - "--partition_size", str(part_size), "--partition_name", - partition_name] -- # Use sha256 of the kernel as salt for reproducible builds -- with tempfile.TemporaryDirectory() as tmpdir: -- RunAndCheckOutput(["unpack_bootimg", "--boot_img", image_path, "--out", tmpdir]) -- for filename in ["kernel", "ramdisk", "vendor_ramdisk00"]: -- path = os.path.join(tmpdir, filename) -- if os.path.exists(path) and os.path.getsize(path): -- print("Using {} as salt for avb footer of {}".format( -- filename, partition_name)) -- with open(path, "rb") as fp: -- salt = sha256(fp.read()).hexdigest() -- break -- AppendAVBSigningArgs(cmd, partition_name, salt) -+ AppendAVBSigningArgs(cmd, partition_name) - args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args") - if args and args.strip(): - split_args = ResolveAVBSigningPathArgs(shlex.split(args)) -@@ -1917,6 +1878,11 @@ def HasRamdisk(partition_name, info_dict=None): - if info_dict.get("gki_boot_image_without_ramdisk") == "true": - return False # A GKI boot.img has no ramdisk since Android-13. - -+ if info_dict.get("system_root_image") == "true": -+ # The ramdisk content is merged into the system.img, so there is NO -+ # ramdisk in the boot.img or boot-.img. -+ return False -+ - if info_dict.get("init_boot") == "true": - # The ramdisk is moved to the init_boot.img, so there is NO - # ramdisk in the boot.img or boot-.img. -@@ -1971,7 +1937,7 @@ def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir, - return None - - --def _BuildVendorBootImage(sourcedir, fs_config_file, partition_name, info_dict=None): -+def _BuildVendorBootImage(sourcedir, partition_name, info_dict=None): - """Build a vendor boot image from the specified sourcedir. - - Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and -@@ -1987,7 +1953,7 @@ def _BuildVendorBootImage(sourcedir, fs_config_file, partition_name, info_dict=N - img = tempfile.NamedTemporaryFile() - - ramdisk_format = GetRamdiskFormat(info_dict) -- ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file=fs_config_file, ramdisk_format=ramdisk_format) -+ ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format) - - # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set - mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" -@@ -2063,11 +2029,11 @@ def _BuildVendorBootImage(sourcedir, fs_config_file, partition_name, info_dict=N - # AVB: if enabled, calculate and add hash. 
- if info_dict.get("avb_enable") == "true": - avbtool = info_dict["avb_avbtool"] -- part_size = info_dict[f'{partition_name}_size'] -+ part_size = info_dict['{}_size'.format(partition_name)] - cmd = [avbtool, "add_hash_footer", "--image", img.name, - "--partition_size", str(part_size), "--partition_name", partition_name] - AppendAVBSigningArgs(cmd, partition_name) -- args = info_dict.get(f'avb_{partition_name}_add_hash_footer_args') -+ args = info_dict.get('avb_{}_add_hash_footer_args'.format(partition_name)) - if args and args.strip(): - split_args = ResolveAVBSigningPathArgs(shlex.split(args)) - cmd.extend(split_args) -@@ -2101,9 +2067,8 @@ def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir, - if info_dict is None: - info_dict = OPTIONS.info_dict - -- fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt" - data = _BuildVendorBootImage( -- os.path.join(unpack_dir, tree_subdir), os.path.join(unpack_dir, fs_config), "vendor_boot", info_dict) -+ os.path.join(unpack_dir, tree_subdir), "vendor_boot", info_dict) - if data: - return File(name, data) - return None -@@ -2127,7 +2092,7 @@ def GetVendorKernelBootImage(name, prebuilt_name, unpack_dir, tree_subdir, - info_dict = OPTIONS.info_dict - - data = _BuildVendorBootImage( -- os.path.join(unpack_dir, tree_subdir), None, "vendor_kernel_boot", info_dict) -+ os.path.join(unpack_dir, tree_subdir), "vendor_kernel_boot", info_dict) - if data: - return File(name, data) - return None -@@ -2140,39 +2105,6 @@ def Gunzip(in_filename, out_filename): - shutil.copyfileobj(in_file, out_file) - - --def UnzipSingleFile(input_zip: zipfile.ZipFile, info: zipfile.ZipInfo, dirname: str): -- # According to https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/6297838#6297838 -- # higher bits of |external_attr| are unix file permission and types -- unix_filetype = info.external_attr >> 16 -- file_perm = unix_filetype & 0o777 -- -- def CheckMask(a, mask): -- return (a & mask) == mask -- -- def IsSymlink(a): -- return CheckMask(a, stat.S_IFLNK) -- -- def IsDir(a): -- return CheckMask(a, stat.S_IFDIR) -- # python3.11 zipfile implementation doesn't handle symlink correctly -- if not IsSymlink(unix_filetype): -- target = input_zip.extract(info, dirname) -- # We want to ensure that the file is at least read/writable by owner and readable by all users -- if IsDir(unix_filetype): -- os.chmod(target, file_perm | 0o755) -- else: -- os.chmod(target, file_perm | 0o644) -- return target -- if dirname is None: -- dirname = os.getcwd() -- target = os.path.join(dirname, info.filename) -- os.makedirs(os.path.dirname(target), exist_ok=True) -- if os.path.exists(target): -- os.unlink(target) -- os.symlink(input_zip.read(info).decode(), target) -- return target -- -- - def UnzipToDir(filename, dirname, patterns=None): - """Unzips the archive to the given directory. - -@@ -2183,32 +2115,20 @@ def UnzipToDir(filename, dirname, patterns=None): - archvie. Non-matching patterns will be filtered out. If there's no match - after the filtering, no file will be unzipped. - """ -- with zipfile.ZipFile(filename, allowZip64=True, mode="r") as input_zip: -+ cmd = ["unzip", "-o", "-q", filename, "-d", dirname] -+ if patterns is not None: - # Filter out non-matching patterns. unzip will complain otherwise. 
-- entries = input_zip.infolist() -- # b/283033491 -- # Per https://en.wikipedia.org/wiki/ZIP_(file_format)#Central_directory_file_header -- # In zip64 mode, central directory record's header_offset field might be -- # set to 0xFFFFFFFF if header offset is > 2^32. In this case, the extra -- # fields will contain an 8 byte little endian integer at offset 20 -- # to indicate the actual local header offset. -- # As of python3.11, python does not handle zip64 central directories -- # correctly, so we will manually do the parsing here. -- for entry in entries: -- if entry.header_offset == 0xFFFFFFFF and len(entry.extra) >= 28: -- entry.header_offset = int.from_bytes(entry.extra[20:28], "little") -- if patterns is not None: -- filtered = [info for info in entries if any( -- [fnmatch.fnmatch(info.filename, p) for p in patterns])] -- -- # There isn't any matching files. Don't unzip anything. -- if not filtered: -- return -- for info in filtered: -- UnzipSingleFile(input_zip, info, dirname) -- else: -- for info in entries: -- UnzipSingleFile(input_zip, info, dirname) -+ with zipfile.ZipFile(filename, allowZip64=True) as input_zip: -+ names = input_zip.namelist() -+ filtered = [ -+ pattern for pattern in patterns if fnmatch.filter(names, pattern)] -+ -+ # There isn't any matching files. Don't unzip anything. -+ if not filtered: -+ return -+ cmd.extend(filtered) -+ -+ RunAndCheckOutput(cmd) - - - def UnzipTemp(filename, patterns=None): -@@ -2448,12 +2368,11 @@ def GetMinSdkVersion(apk_name): - apk_name, proc.returncode, stdoutdata, stderrdata)) - - for line in stdoutdata.split("\n"): -- # Due to ag/24161708, looking for lines such as minSdkVersion:'23',minSdkVersion:'M' -- # or sdkVersion:'23', sdkVersion:'M'. -- m = re.match(r'(?:minSdkVersion|sdkVersion):\'([^\']*)\'', line) -+ # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'. -+ m = re.match(r'sdkVersion:\'([^\']*)\'', line) - if m: - return m.group(1) -- raise ExternalError("No minSdkVersion returned by aapt2 for apk: {}".format(apk_name)) -+ raise ExternalError("No minSdkVersion returned by aapt2") - - - def GetMinSdkVersionInt(apk_name, codename_to_api_level_map): -@@ -2551,6 +2470,38 @@ def SignFile(input_name, output_name, key, password, min_api_level=None, - proc.returncode, stdoutdata)) - - -+def SignSePolicy(sepolicy, key, password): -+ """Sign the sepolicy zip, producing an fsverity .fsv_sig and -+ an RSA .sig signature files. -+ """ -+ -+ if OPTIONS.sign_sepolicy_path is None: -+ logger.info("No sign_sepolicy_path specified, %s was not signed", sepolicy) -+ return False -+ -+ java_library_path = os.path.join( -+ OPTIONS.search_path, OPTIONS.signapk_shared_library_path) -+ -+ cmd = ([OPTIONS.java_path] + OPTIONS.java_args + -+ ["-Djava.library.path=" + java_library_path, -+ "-jar", os.path.join(OPTIONS.search_path, OPTIONS.sign_sepolicy_path)] + -+ OPTIONS.extra_sign_sepolicy_args) -+ -+ cmd.extend([key + OPTIONS.public_key_suffix, -+ key + OPTIONS.private_key_suffix, -+ sepolicy, os.path.dirname(sepolicy)]) -+ -+ proc = Run(cmd, stdin=subprocess.PIPE) -+ if password is not None: -+ password += "\n" -+ stdoutdata, _ = proc.communicate(password) -+ if proc.returncode != 0: -+ raise ExternalError( -+ "Failed to run sign sepolicy: return code {}:\n{}".format( -+ proc.returncode, stdoutdata)) -+ return True -+ -+ - def CheckSize(data, target, info_dict): - """Checks the data string passed against the max size limit. 
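Editor's note: the A14 UnzipToDir restored above shells out to unzip after pre-filtering the glob patterns, since unzip exits non-zero when a pattern matches nothing. A self-contained sketch of that flow (subprocess standing in for common.RunAndCheckOutput):

import fnmatch
import subprocess
import zipfile

def unzip_to_dir(filename, dirname, patterns=None):
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if patterns is not None:
        # Keep only patterns that match at least one archive entry.
        with zipfile.ZipFile(filename, allowZip64=True) as input_zip:
            names = input_zip.namelist()
        filtered = [p for p in patterns if fnmatch.filter(names, p)]
        if not filtered:
            return  # no matches at all: unzip nothing
        cmd.extend(filtered)
    subprocess.run(cmd, check=True)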
- -@@ -2578,9 +2529,7 @@ def CheckSize(data, target, info_dict): - device = p.device - if "/" in device: - device = device[device.rfind("/")+1:] -- limit = info_dict.get(device + "_size", 0) -- if isinstance(limit, str): -- limit = int(limit, 0) -+ limit = info_dict.get(device + "_size") - if not fs_type or not limit: - return - -@@ -2717,25 +2666,19 @@ def Usage(docstring): - def ParseOptions(argv, - docstring, - extra_opts="", extra_long_opts=(), -- extra_option_handler: Iterable[OptionHandler] = None): -+ extra_option_handler=None): - """Parse the options in argv and return any arguments that aren't - flags. docstring is the calling module's docstring, to be displayed - for errors and -h. extra_opts and extra_long_opts are for flags - defined by the caller, which are processed by passing them to - extra_option_handler.""" -- extra_long_opts = list(extra_long_opts) -- if not isinstance(extra_option_handler, Iterable): -- extra_option_handler = [extra_option_handler] -- -- for handler in extra_option_handler: -- if isinstance(handler, OptionHandler): -- extra_long_opts.extend(handler.extra_long_opts) - - try: - opts, args = getopt.getopt( - argv, "hvp:s:x:" + extra_opts, - ["help", "verbose", "path=", "signapk_path=", -- "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=", -+ "signapk_shared_library_path=", "extra_signapk_args=", -+ "sign_sepolicy_path=", "extra_sign_sepolicy_args=", "aapt2_path=", - "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=", - "private_key_suffix=", "boot_signer_path=", "boot_signer_args=", - "verity_signer_path=", "verity_signer_args=", "device_specific=", -@@ -2759,6 +2702,10 @@ def ParseOptions(argv, - OPTIONS.signapk_shared_library_path = a - elif o in ("--extra_signapk_args",): - OPTIONS.extra_signapk_args = shlex.split(a) -+ elif o in ("--sign_sepolicy_path",): -+ OPTIONS.sign_sepolicy_path = a -+ elif o in ("--extra_sign_sepolicy_args",): -+ OPTIONS.extra_sign_sepolicy_args = shlex.split(a) - elif o in ("--aapt2_path",): - OPTIONS.aapt2_path = a - elif o in ("--java_path",): -@@ -2791,19 +2738,8 @@ def ParseOptions(argv, - elif o in ("--logfile",): - OPTIONS.logfile = a - else: -- if extra_option_handler is None: -- raise ValueError("unknown option \"%s\"" % (o,)) -- success = False -- for handler in extra_option_handler: -- if isinstance(handler, OptionHandler): -- if handler.handler(o, a): -- success = True -- break -- elif handler(o, a): -- success = True -- if not success: -- raise ValueError("unknown option \"%s\"" % (o,)) -- -+ if extra_option_handler is None or not extra_option_handler(o, a): -+ assert False, "unknown option \"%s\"" % (o,) - - if OPTIONS.search_path: - os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") + -@@ -2834,8 +2770,6 @@ def MakeTempDir(prefix='tmp', suffix=''): - - def Cleanup(): - for i in OPTIONS.tempfiles: -- if not os.path.exists(i): -- continue - if os.path.isdir(i): - shutil.rmtree(i, ignore_errors=True) - else: -@@ -3039,7 +2973,8 @@ def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None, - zip_file.writestr(zinfo, data) - zipfile.ZIP64_LIMIT = saved_zip64_limit - --def ZipExclude(input_zip, output_zip, entries, force=False): -+ -+def ZipDelete(zip_filename, entries, force=False): - """Deletes entries from a ZIP file. 
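Editor's note: the CheckSize hunk above drops the A15 string coercion of the size limit. For reference, the removed coercion used int(x, 0), which accepts decimal, hex, and octal spellings:

def parse_size_limit(info_dict, device):
    # A15 behavior: tolerate string limits such as "0x10000" or "65536".
    limit = info_dict.get(device + "_size", 0)
    if isinstance(limit, str):
        limit = int(limit, 0)  # base 0 infers the radix from the prefix
    return limit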
- - Args: -@@ -3050,38 +2985,22 @@ def ZipExclude(input_zip, output_zip, entries, force=False): - entries = [entries] - # If list is empty, nothing to do - if not entries: -- shutil.copy(input_zip, output_zip) - return - -- with zipfile.ZipFile(input_zip, 'r') as zin: -+ with zipfile.ZipFile(zip_filename, 'r') as zin: - if not force and len(set(zin.namelist()).intersection(entries)) == 0: - raise ExternalError( - "Failed to delete zip entries, name not matched: %s" % entries) - -- fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(input_zip)) -+ fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(zip_filename)) - os.close(fd) -- cmd = ["zip2zip", "-i", input_zip, "-o", new_zipfile] -+ cmd = ["zip2zip", "-i", zip_filename, "-o", new_zipfile] - for entry in entries: - cmd.append("-x") - cmd.append(entry) - RunAndCheckOutput(cmd) -- os.replace(new_zipfile, output_zip) - -- --def ZipDelete(zip_filename, entries, force=False): -- """Deletes entries from a ZIP file. -- -- Args: -- zip_filename: The name of the ZIP file. -- entries: The name of the entry, or the list of names to be deleted. -- """ -- if isinstance(entries, str): -- entries = [entries] -- # If list is empty, nothing to do -- if not entries: -- return -- -- ZipExclude(zip_filename, zip_filename, entries, force) -+ os.replace(new_zipfile, zip_filename) - - - def ZipClose(zip_file): -@@ -3787,11 +3706,14 @@ def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img, - output_sink(recovery_img_path, recovery_img.data) - - else: -+ system_root_image = info_dict.get("system_root_image") == "true" - include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true" - include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true" - path = os.path.join(input_dir, recovery_resource_dat_path) -- # Use bsdiff to handle mismatching entries (Bug: 72731506) -- if include_recovery_dtbo or include_recovery_acpio: -+ # With system-root-image, boot and recovery images will have mismatching -+ # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff -+ # to handle such a case. 
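Editor's note: whether through the A15 ZipExclude wrapper or the A14 direct form restored above, entry deletion is delegated to the zip2zip host tool with -x excludes, followed by an atomic rename. A condensed sketch (RuntimeError standing in for common.ExternalError):

import os
import subprocess
import tempfile
import zipfile

def zip_delete(zip_filename, entries, force=False):
    if isinstance(entries, str):
        entries = [entries]
    if not entries:
        return
    with zipfile.ZipFile(zip_filename, 'r') as zin:
        if not force and not set(zin.namelist()).intersection(entries):
            raise RuntimeError("no zip entries matched: %s" % entries)
    fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(zip_filename))
    os.close(fd)
    cmd = ["zip2zip", "-i", zip_filename, "-o", new_zipfile]
    for entry in entries:
        cmd.extend(["-x", entry])
    subprocess.run(cmd, check=True)
    os.replace(new_zipfile, zip_filename)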
-+ if system_root_image or include_recovery_dtbo or include_recovery_acpio: - diff_program = ["bsdiff"] - bonus_args = "" - assert not os.path.exists(path) -@@ -4183,18 +4105,7 @@ def IsSparseImage(filepath): - return fp.read(4) == b'\x3A\xFF\x26\xED' - - --def UnsparseImage(filepath, target_path=None): -- if not IsSparseImage(filepath): -- return -- if target_path is None: -- tmp_img = MakeTempFile(suffix=".img") -- RunAndCheckOutput(["simg2img", filepath, tmp_img]) -- os.rename(tmp_img, filepath) -- else: -- RunAndCheckOutput(["simg2img", filepath, target_path]) -- -- --def ParseUpdateEngineConfig(path: str): -+def ParseUpdateEngineConfig(path): - """Parse the update_engine config stored in file `path` - Args - path: Path to update_engine_config.txt file in target_files -@@ -4210,9 +4121,9 @@ def ParseUpdateEngineConfig(path: str): - major = re.search(r"PAYLOAD_MAJOR_VERSION=(\d+)", data) - if not major: - raise ValueError( -- f"{path} is an invalid update_engine config, missing PAYLOAD_MAJOR_VERSION {data}") -+ "{path} is an invalid update_engine config, missing PAYLOAD_MAJOR_VERSION {data}" + path) - minor = re.search(r"PAYLOAD_MINOR_VERSION=(\d+)", data) - if not minor: - raise ValueError( -- f"{path} is an invalid update_engine config, missing PAYLOAD_MINOR_VERSION {data}") -+ "{path} is an invalid update_engine config, missing PAYLOAD_MINOR_VERSION {data}" + path) - return (int(major.group(1)), int(minor.group(1))) -diff --git a/tools/releasetools/merge/merge_target_files.py b/tools/releasetools/merge/merge_target_files.py -index fdba927db9..ba2b14f014 100755 ---- a/tools/releasetools/merge/merge_target_files.py -+++ b/tools/releasetools/merge/merge_target_files.py -@@ -46,10 +46,6 @@ Usage: merge_target_files [args] - The optional path to a newline-separated config file of items that - are extracted as-is from the vendor target files package. - -- --boot-image-dir-path -- The input boot image directory path. This path contains IMAGES/boot.img -- file. -- - --output-target-files output-target-files-package - If provided, the output merged target files package. Also a zip archive. - -@@ -94,14 +90,6 @@ Usage: merge_target_files [args] - --keep-tmp - Keep tempoary files for debugging purposes. - -- --avb-resolve-rollback-index-location-conflict -- If provided, resolve the conflict AVB rollback index location when -- necessary. -- -- --allow-partial-ab -- If provided, allow merging non-AB framework target files with AB vendor -- target files, which means that only the vendor has AB partitions. -- - The following only apply when using the VSDK to perform dexopt on vendor apps: - - --framework-dexpreopt-config -@@ -144,7 +132,6 @@ OPTIONS.framework_item_list = [] - OPTIONS.framework_misc_info_keys = [] - OPTIONS.vendor_target_files = None - OPTIONS.vendor_item_list = [] --OPTIONS.boot_image_dir_path = None - OPTIONS.output_target_files = None - OPTIONS.output_dir = None - OPTIONS.output_item_list = [] -@@ -157,8 +144,6 @@ OPTIONS.allow_duplicate_apkapex_keys = False - OPTIONS.vendor_otatools = None - OPTIONS.rebuild_sepolicy = False - OPTIONS.keep_tmp = False --OPTIONS.avb_resolve_rollback_index_location_conflict = False --OPTIONS.allow_partial_ab = False - OPTIONS.framework_dexpreopt_config = None - OPTIONS.framework_dexpreopt_tools = None - OPTIONS.vendor_dexpreopt_config = None -@@ -180,24 +165,17 @@ def remove_file_if_exists(file_name): - pass - - --def include_extra_in_list(item_list): -- """ -- 1. Include all `META/*` files in the item list. 
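Editor's note: the ParseUpdateEngineConfig hunk above also replaces f-string error messages with a concatenation that no longer interpolates {path} or {data}. The function's logic, standalone and with a working message:

import re

def parse_update_engine_config(path):
    # Returns (PAYLOAD_MAJOR_VERSION, PAYLOAD_MINOR_VERSION) as ints.
    with open(path) as fp:
        data = fp.read()
    major = re.search(r"PAYLOAD_MAJOR_VERSION=(\d+)", data)
    minor = re.search(r"PAYLOAD_MINOR_VERSION=(\d+)", data)
    if not major or not minor:
        raise ValueError(
            "%s is an invalid update_engine config: %r" % (path, data))
    return (int(major.group(1)), int(minor.group(1)))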
-+def include_meta_in_list(item_list): -+ """Include all `META/*` files in the item list. - - To ensure that `AddImagesToTargetFiles` can still be used with vendor item - list that do not specify all of the required META/ files, those files should - be included by default. This preserves the backward compatibility of - `rebuild_image_with_sepolicy`. -- -- 2. Include `SYSTEM/build.prop` file in the item list. -- -- To ensure that `AddImagesToTargetFiles` for GRF vendor images, can still -- access SYSTEM/build.prop to pass GetPartitionFingerprint check in BuildInfo -- constructor. - """ - if not item_list: - return None -- return list(item_list) + ['META/*'] + ['SYSTEM/build.prop'] -+ return list(item_list) + ['META/*'] - - - def create_merged_package(temp_dir): -@@ -220,19 +198,11 @@ def create_merged_package(temp_dir): - output_dir=output_target_files_temp_dir, - item_list=OPTIONS.vendor_item_list) - -- if OPTIONS.boot_image_dir_path: -- merge_utils.CollectTargetFiles( -- input_zipfile_or_dir=OPTIONS.boot_image_dir_path, -- output_dir=output_target_files_temp_dir, -- item_list=['IMAGES/boot.img']) -- - # Perform special case processing on META/* items. - # After this function completes successfully, all the files we need to create - # the output target files package are in place. - merge_meta.MergeMetaFiles( -- temp_dir=temp_dir, -- merged_dir=output_target_files_temp_dir, -- framework_partitions=OPTIONS.framework_partition_set) -+ temp_dir=temp_dir, merged_dir=output_target_files_temp_dir) - - merge_dexopt.MergeDexopt( - temp_dir=temp_dir, output_target_files_dir=output_target_files_temp_dir) -@@ -251,8 +221,6 @@ def generate_missing_images(target_files_dir): - ] - if OPTIONS.rebuild_recovery: - add_img_args.append('--rebuild_recovery') -- if OPTIONS.avb_resolve_rollback_index_location_conflict: -- add_img_args.append('--avb_resolve_rollback_index_location_conflict') - add_img_args.append(target_files_dir) - - add_img_to_target_files.main(add_img_args) -@@ -321,7 +289,7 @@ def rebuild_image_with_sepolicy(target_files_dir): - merge_utils.CollectTargetFiles( - input_zipfile_or_dir=OPTIONS.vendor_target_files, - output_dir=vendor_target_files_dir, -- item_list=include_extra_in_list(OPTIONS.vendor_item_list)) -+ item_list=include_meta_in_list(OPTIONS.vendor_item_list)) - - # Copy the partition contents from the merged target-files archive to the - # vendor target-files archive. 
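Editor's note: the reverted include_meta_in_list is small enough to show whole; appending the META/* glob keeps AddImagesToTargetFiles working with vendor item lists that omit required META/ files:

def include_meta_in_list(item_list):
    # None/empty means "no filtering", so preserve that.
    if not item_list:
        return None
    return list(item_list) + ['META/*']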
-@@ -555,8 +523,6 @@ def main(): - OPTIONS.vendor_item_list = a - elif o == '--vendor-item-list': - OPTIONS.vendor_item_list = a -- elif o == '--boot-image-dir-path': -- OPTIONS.boot_image_dir_path = a - elif o == '--output-target-files': - OPTIONS.output_target_files = a - elif o == '--output-dir': -@@ -579,10 +545,6 @@ def main(): - OPTIONS.rebuild_sepolicy = True - elif o == '--keep-tmp': - OPTIONS.keep_tmp = True -- elif o == '--avb-resolve-rollback-index-location-conflict': -- OPTIONS.avb_resolve_rollback_index_location_conflict = True -- elif o == '--allow-partial-ab': -- OPTIONS.allow_partial_ab = True - elif o == '--framework-dexpreopt-config': - OPTIONS.framework_dexpreopt_config = a - elif o == '--framework-dexpreopt-tools': -@@ -607,7 +569,6 @@ def main(): - 'vendor-target-files=', - 'other-item-list=', - 'vendor-item-list=', -- 'boot-image-dir-path=', - 'output-target-files=', - 'output-dir=', - 'output-item-list=', -@@ -623,8 +584,6 @@ def main(): - 'vendor-otatools=', - 'rebuild-sepolicy', - 'keep-tmp', -- 'avb-resolve-rollback-index-location-conflict', -- 'allow-partial-ab', - ], - extra_option_handler=option_handler) - -diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py -index 89933a00fc..2dfd8c714a 100644 ---- a/tools/releasetools/test_common.py -+++ b/tools/releasetools/test_common.py -@@ -15,13 +15,14 @@ - # - - import copy -+import json - import os - import subprocess - import tempfile -+import time - import unittest - import zipfile - from hashlib import sha1 --from typing import BinaryIO - - import common - import test_utils -@@ -35,24 +36,14 @@ MiB = 1024 * KiB - GiB = 1024 * MiB - - --def get_2gb_file(): -+def get_2gb_string(): - size = int(2 * GiB + 1) - block_size = 4 * KiB - step_size = 4 * MiB -- tmpfile = tempfile.NamedTemporaryFile() -- tmpfile.truncate(size) -+ # Generate a long string with holes, e.g. 'xyz\x00abc\x00...'. 
- for _ in range(0, size, step_size): -- tmpfile.write(os.urandom(block_size)) -- tmpfile.seek(step_size - block_size, os.SEEK_CUR) -- return tmpfile -- -- --def hash_file(filename): -- sha1_hash = sha1() -- with open(filename, "rb") as fp: -- for data in iter(lambda: fp.read(4*MiB), b''): -- sha1_hash.update(data) -- return sha1_hash -+ yield os.urandom(block_size) -+ yield b'\0' * (step_size - block_size) - - - class BuildInfoTest(test_utils.ReleaseToolsTestCase): -@@ -231,17 +222,17 @@ class BuildInfoTest(test_utils.ReleaseToolsTestCase): - info_dict = copy.deepcopy(self.TEST_INFO_FINGERPRINT_DICT) - build_info = common.BuildInfo(info_dict) - self.assertEqual( -- 'product-brand/product-name/product-device:version-release/build-id/' -- 'version-incremental:build-type/build-tags', build_info.fingerprint) -+ 'product-brand/product-name/product-device:version-release/build-id/' -+ 'version-incremental:build-type/build-tags', build_info.fingerprint) - - build_props = info_dict['build.prop'].build_props - del build_props['ro.build.id'] - build_props['ro.build.legacy.id'] = 'legacy-build-id' - build_info = common.BuildInfo(info_dict, use_legacy_id=True) - self.assertEqual( -- 'product-brand/product-name/product-device:version-release/' -- 'legacy-build-id/version-incremental:build-type/build-tags', -- build_info.fingerprint) -+ 'product-brand/product-name/product-device:version-release/' -+ 'legacy-build-id/version-incremental:build-type/build-tags', -+ build_info.fingerprint) - - self.assertRaises(common.ExternalError, common.BuildInfo, info_dict, None, - False) -@@ -250,9 +241,9 @@ class BuildInfoTest(test_utils.ReleaseToolsTestCase): - info_dict['vbmeta_digest'] = 'abcde12345' - build_info = common.BuildInfo(info_dict, use_legacy_id=False) - self.assertEqual( -- 'product-brand/product-name/product-device:version-release/' -- 'legacy-build-id.abcde123/version-incremental:build-type/build-tags', -- build_info.fingerprint) -+ 'product-brand/product-name/product-device:version-release/' -+ 'legacy-build-id.abcde123/version-incremental:build-type/build-tags', -+ build_info.fingerprint) - - def test___getitem__(self): - target_info = common.BuildInfo(self.TEST_INFO_DICT, None) -@@ -385,7 +376,7 @@ class BuildInfoTest(test_utils.ReleaseToolsTestCase): - info_dict['build.prop'].build_props[ - 'ro.product.property_source_order'] = 'bad-source' - with self.assertRaisesRegexp(common.ExternalError, -- 'Invalid ro.product.property_source_order'): -+ 'Invalid ro.product.property_source_order'): - info = common.BuildInfo(info_dict, None) - info.GetBuildProp('ro.product.device') - -@@ -438,13 +429,6 @@ class CommonZipTest(test_utils.ReleaseToolsTestCase): - self.assertIsNone(zip_file.testzip()) - - def _test_ZipWrite(self, contents, extra_zipwrite_args=None): -- with tempfile.NamedTemporaryFile() as test_file: -- test_file_name = test_file.name -- for data in contents: -- test_file.write(bytes(data)) -- return self._test_ZipWriteFile(test_file_name, extra_zipwrite_args) -- -- def _test_ZipWriteFile(self, test_file_name, extra_zipwrite_args=None): - extra_zipwrite_args = dict(extra_zipwrite_args or {}) - - test_file = tempfile.NamedTemporaryFile(delete=False) -@@ -457,12 +441,17 @@ class CommonZipTest(test_utils.ReleaseToolsTestCase): - arcname = extra_zipwrite_args.get("arcname", test_file_name) - if arcname[0] == "/": - arcname = arcname[1:] -- sha1_hash = hash_file(test_file_name) - - zip_file.close() - zip_file = zipfile.ZipFile(zip_file_name, "w", allowZip64=True) - - try: -+ sha1_hash = sha1() -+ for 
data in contents: -+ sha1_hash.update(bytes(data)) -+ test_file.write(bytes(data)) -+ test_file.close() -+ - expected_mode = extra_zipwrite_args.get("perms", 0o644) - expected_compress_type = extra_zipwrite_args.get("compress_type", - zipfile.ZIP_STORED) -@@ -478,6 +467,7 @@ class CommonZipTest(test_utils.ReleaseToolsTestCase): - test_file_name, expected_stat, expected_mode, - expected_compress_type) - finally: -+ os.remove(test_file_name) - os.remove(zip_file_name) - - def _test_ZipWriteStr(self, zinfo_or_arcname, contents, extra_args=None): -@@ -512,13 +502,14 @@ class CommonZipTest(test_utils.ReleaseToolsTestCase): - finally: - os.remove(zip_file_name) - -- def _test_ZipWriteStr_large_file(self, large_file: BinaryIO, small, extra_args=None): -+ def _test_ZipWriteStr_large_file(self, large, small, extra_args=None): - extra_args = dict(extra_args or {}) - - zip_file = tempfile.NamedTemporaryFile(delete=False) - zip_file_name = zip_file.name - -- test_file_name = large_file.name -+ test_file = tempfile.NamedTemporaryFile(delete=False) -+ test_file_name = test_file.name - - arcname_large = test_file_name - arcname_small = "bar" -@@ -531,7 +522,11 @@ class CommonZipTest(test_utils.ReleaseToolsTestCase): - zip_file = zipfile.ZipFile(zip_file_name, "w", allowZip64=True) - - try: -- sha1_hash = hash_file(test_file_name) -+ sha1_hash = sha1() -+ for data in large: -+ sha1_hash.update(data) -+ test_file.write(data) -+ test_file.close() - - # Arbitrary timestamp, just to make sure common.ZipWrite() restores - # the timestamp after writing. -@@ -556,6 +551,7 @@ class CommonZipTest(test_utils.ReleaseToolsTestCase): - expected_compress_type=expected_compress_type) - finally: - os.remove(zip_file_name) -+ os.remove(test_file_name) - - def _test_reset_ZIP64_LIMIT(self, func, *args): - default_limit = (1 << 31) - 1 -@@ -581,10 +577,10 @@ class CommonZipTest(test_utils.ReleaseToolsTestCase): - }) - - def test_ZipWrite_large_file(self): -- with get_2gb_file() as tmpfile: -- self._test_ZipWriteFile(tmpfile.name, { -- "compress_type": zipfile.ZIP_DEFLATED, -- }) -+ file_contents = get_2gb_string() -+ self._test_ZipWrite(file_contents, { -+ "compress_type": zipfile.ZIP_DEFLATED, -+ }) - - def test_ZipWrite_resets_ZIP64_LIMIT(self): - self._test_reset_ZIP64_LIMIT(self._test_ZipWrite, "") -@@ -631,11 +627,11 @@ class CommonZipTest(test_utils.ReleaseToolsTestCase): - # zipfile.writestr() doesn't work when the str size is over 2GiB even with - # the workaround. We will only test the case of writing a string into a - # large archive. 
-+ long_string = get_2gb_string() - short_string = os.urandom(1024) -- with get_2gb_file() as large_file: -- self._test_ZipWriteStr_large_file(large_file, short_string, { -- "compress_type": zipfile.ZIP_DEFLATED, -- }) -+ self._test_ZipWriteStr_large_file(long_string, short_string, { -+ "compress_type": zipfile.ZIP_DEFLATED, -+ }) - - def test_ZipWriteStr_resets_ZIP64_LIMIT(self): - self._test_reset_ZIP64_LIMIT(self._test_ZipWriteStr, 'foo', b'') -@@ -825,9 +821,9 @@ class CommonApkUtilsTest(test_utils.ReleaseToolsTestCase): - ) - - APKCERTS_CERTMAP1 = { -- 'RecoveryLocalizer.apk': 'certs/devkey', -- 'Settings.apk': 'build/make/target/product/security/platform', -- 'TV.apk': 'PRESIGNED', -+ 'RecoveryLocalizer.apk' : 'certs/devkey', -+ 'Settings.apk' : 'build/make/target/product/security/platform', -+ 'TV.apk' : 'PRESIGNED', - } - - APKCERTS_TXT2 = ( -@@ -842,10 +838,10 @@ class CommonApkUtilsTest(test_utils.ReleaseToolsTestCase): - ) - - APKCERTS_CERTMAP2 = { -- 'Compressed1.apk': 'certs/compressed1', -- 'Compressed2a.apk': 'certs/compressed2', -- 'Compressed2b.apk': 'certs/compressed2', -- 'Compressed3.apk': 'certs/compressed3', -+ 'Compressed1.apk' : 'certs/compressed1', -+ 'Compressed2a.apk' : 'certs/compressed2', -+ 'Compressed2b.apk' : 'certs/compressed2', -+ 'Compressed3.apk' : 'certs/compressed3', - } - - APKCERTS_TXT3 = ( -@@ -854,7 +850,7 @@ class CommonApkUtilsTest(test_utils.ReleaseToolsTestCase): - ) - - APKCERTS_CERTMAP3 = { -- 'Compressed4.apk': 'certs/compressed4', -+ 'Compressed4.apk' : 'certs/compressed4', - } - - # Test parsing with no optional fields, both optional fields, and only the -@@ -871,9 +867,9 @@ class CommonApkUtilsTest(test_utils.ReleaseToolsTestCase): - ) - - APKCERTS_CERTMAP4 = { -- 'RecoveryLocalizer.apk': 'certs/devkey', -- 'Settings.apk': 'build/make/target/product/security/platform', -- 'TV.apk': 'PRESIGNED', -+ 'RecoveryLocalizer.apk' : 'certs/devkey', -+ 'Settings.apk' : 'build/make/target/product/security/platform', -+ 'TV.apk' : 'PRESIGNED', - } - - def setUp(self): -@@ -977,7 +973,7 @@ class CommonApkUtilsTest(test_utils.ReleaseToolsTestCase): - extracted_from_privkey = common.ExtractAvbPublicKey('avbtool', privkey) - extracted_from_pubkey = common.ExtractAvbPublicKey('avbtool', pubkey) - with open(extracted_from_privkey, 'rb') as privkey_fp, \ -- open(extracted_from_pubkey, 'rb') as pubkey_fp: -+ open(extracted_from_pubkey, 'rb') as pubkey_fp: - self.assertEqual(privkey_fp.read(), pubkey_fp.read()) - - def test_ParseCertificate(self): -@@ -1241,8 +1237,7 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - self.assertEqual( - '1-5 9-10', - sparse_image.file_map['//system/file1'].extra['text_str']) -- self.assertTrue( -- sparse_image.file_map['//system/file2'].extra['incomplete']) -+ self.assertTrue(sparse_image.file_map['//system/file2'].extra['incomplete']) - self.assertTrue( - sparse_image.file_map['/system/app/file3'].extra['incomplete']) - -@@ -1299,11 +1294,11 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - 'avb_system_key_path': pubkey, - 'avb_system_rollback_index_location': 2, - } -- chained_partition_args = common.GetAvbChainedPartitionArg( -- 'system', info_dict) -- self.assertEqual('system', chained_partition_args.partition) -- self.assertEqual(2, chained_partition_args.rollback_index_location) -- self.assertTrue(os.path.exists(chained_partition_args.pubkey_path)) -+ args = common.GetAvbChainedPartitionArg('system', info_dict).split(':') -+ self.assertEqual(3, len(args)) -+ self.assertEqual('system', args[0]) -+ 
self.assertEqual('2', args[1]) -+ self.assertTrue(os.path.exists(args[2])) - - @test_utils.SkipIfExternalToolsUnavailable() - def test_GetAvbChainedPartitionArg_withPrivateKey(self): -@@ -1313,11 +1308,11 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - 'avb_product_key_path': key, - 'avb_product_rollback_index_location': 2, - } -- chained_partition_args = common.GetAvbChainedPartitionArg( -- 'product', info_dict) -- self.assertEqual('product', chained_partition_args.partition) -- self.assertEqual(2, chained_partition_args.rollback_index_location) -- self.assertTrue(os.path.exists(chained_partition_args.pubkey_path)) -+ args = common.GetAvbChainedPartitionArg('product', info_dict).split(':') -+ self.assertEqual(3, len(args)) -+ self.assertEqual('product', args[0]) -+ self.assertEqual('2', args[1]) -+ self.assertTrue(os.path.exists(args[2])) - - @test_utils.SkipIfExternalToolsUnavailable() - def test_GetAvbChainedPartitionArg_withSpecifiedKey(self): -@@ -1327,11 +1322,12 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - 'avb_system_rollback_index_location': 2, - } - pubkey = os.path.join(self.testdata_dir, 'testkey.pubkey.pem') -- chained_partition_args = common.GetAvbChainedPartitionArg( -- 'system', info_dict, pubkey) -- self.assertEqual('system', chained_partition_args.partition) -- self.assertEqual(2, chained_partition_args.rollback_index_location) -- self.assertTrue(os.path.exists(chained_partition_args.pubkey_path)) -+ args = common.GetAvbChainedPartitionArg( -+ 'system', info_dict, pubkey).split(':') -+ self.assertEqual(3, len(args)) -+ self.assertEqual('system', args[0]) -+ self.assertEqual('2', args[1]) -+ self.assertTrue(os.path.exists(args[2])) - - @test_utils.SkipIfExternalToolsUnavailable() - def test_GetAvbChainedPartitionArg_invalidKey(self): -@@ -1348,7 +1344,8 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - INFO_DICT_DEFAULT = { - 'recovery_api_version': 3, - 'fstab_version': 2, -- 'no_recovery': 'true', -+ 'system_root_image': 'true', -+ 'no_recovery' : 'true', - 'recovery_as_boot': 'true', - } - -@@ -1376,8 +1373,14 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - info_values = ''.join( - ['{}={}\n'.format(k, v) for k, v in sorted(info_dict.items())]) - common.ZipWriteStr(target_files_zip, 'META/misc_info.txt', info_values) -- common.ZipWriteStr(target_files_zip, fstab_path, -- "/dev/block/system /system ext4 ro,barrier=1 defaults") -+ -+ FSTAB_TEMPLATE = "/dev/block/system {} ext4 ro,barrier=1 defaults" -+ if info_dict.get('system_root_image') == 'true': -+ fstab_values = FSTAB_TEMPLATE.format('/') -+ else: -+ fstab_values = FSTAB_TEMPLATE.format('/system') -+ common.ZipWriteStr(target_files_zip, fstab_path, fstab_values) -+ - common.ZipWriteStr( - target_files_zip, 'META/file_contexts', 'file-contexts') - return target_files -@@ -1390,6 +1393,7 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - loaded_dict = common.LoadInfoDict(target_files_zip) - self.assertEqual(3, loaded_dict['recovery_api_version']) - self.assertEqual(2, loaded_dict['fstab_version']) -+ self.assertIn('/', loaded_dict['fstab']) - self.assertIn('/system', loaded_dict['fstab']) - - def test_LoadInfoDict_legacyRecoveryFstabPath(self): -@@ -1400,6 +1404,7 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - loaded_dict = common.LoadInfoDict(target_files_zip) - self.assertEqual(3, loaded_dict['recovery_api_version']) - self.assertEqual(2, loaded_dict['fstab_version']) -+ self.assertIn('/', loaded_dict['fstab']) - self.assertIn('/system', 
loaded_dict['fstab']) - - @test_utils.SkipIfExternalToolsUnavailable() -@@ -1411,6 +1416,7 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - loaded_dict = common.LoadInfoDict(unzipped) - self.assertEqual(3, loaded_dict['recovery_api_version']) - self.assertEqual(2, loaded_dict['fstab_version']) -+ self.assertIn('/', loaded_dict['fstab']) - self.assertIn('/system', loaded_dict['fstab']) - - @test_utils.SkipIfExternalToolsUnavailable() -@@ -1422,11 +1428,15 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - loaded_dict = common.LoadInfoDict(unzipped) - self.assertEqual(3, loaded_dict['recovery_api_version']) - self.assertEqual(2, loaded_dict['fstab_version']) -+ self.assertIn('/', loaded_dict['fstab']) - self.assertIn('/system', loaded_dict['fstab']) - -- def test_LoadInfoDict_recoveryAsBootFalse(self): -+ def test_LoadInfoDict_systemRootImageFalse(self): -+ # Devices not using system-as-root nor recovery-as-boot. Non-A/B devices -+ # launched prior to P will likely have this config. - info_dict = copy.copy(self.INFO_DICT_DEFAULT) - del info_dict['no_recovery'] -+ del info_dict['system_root_image'] - del info_dict['recovery_as_boot'] - target_files = self._test_LoadInfoDict_createTargetFiles( - info_dict, -@@ -1438,6 +1448,22 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - self.assertNotIn('/', loaded_dict['fstab']) - self.assertIn('/system', loaded_dict['fstab']) - -+ def test_LoadInfoDict_recoveryAsBootFalse(self): -+ # Devices using system-as-root, but with standalone recovery image. Non-A/B -+ # devices launched since P will likely have this config. -+ info_dict = copy.copy(self.INFO_DICT_DEFAULT) -+ del info_dict['no_recovery'] -+ del info_dict['recovery_as_boot'] -+ target_files = self._test_LoadInfoDict_createTargetFiles( -+ info_dict, -+ 'RECOVERY/RAMDISK/system/etc/recovery.fstab') -+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as target_files_zip: -+ loaded_dict = common.LoadInfoDict(target_files_zip) -+ self.assertEqual(3, loaded_dict['recovery_api_version']) -+ self.assertEqual(2, loaded_dict['fstab_version']) -+ self.assertIn('/', loaded_dict['fstab']) -+ self.assertIn('/system', loaded_dict['fstab']) -+ - def test_LoadInfoDict_noRecoveryTrue(self): - # Device doesn't have a recovery partition at all. 
- info_dict = copy.copy(self.INFO_DICT_DEFAULT) -@@ -1469,6 +1495,7 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - loaded_dict = common.LoadInfoDict(unzipped, True) - self.assertEqual(3, loaded_dict['recovery_api_version']) - self.assertEqual(2, loaded_dict['fstab_version']) -+ self.assertIn('/', loaded_dict['fstab']) - self.assertIn('/system', loaded_dict['fstab']) - self.assertEqual( - os.path.join(unzipped, 'ROOT'), loaded_dict['root_dir']) -@@ -1515,7 +1542,6 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - 'super_group_a_group_size': '1000', - 'super_group_b_partition_list': 'product', - 'super_group_b_group_size': '2000', -- 'vabc_cow_version': '2', - } - self.assertEqual(merged_dict, expected_merged_dict) - -@@ -1526,7 +1552,6 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - 'dynamic_partition_list': 'system', - 'super_group_a_partition_list': 'system', - 'super_group_a_group_size': '5000', -- 'vabc_cow_version': '3', - } - vendor_dict = { - 'use_dynamic_partitions': 'true', -@@ -1548,7 +1573,6 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - 'super_group_a_group_size': '1000', - 'super_group_b_partition_list': 'product', - 'super_group_b_group_size': '2000', -- 'vabc_cow_version': '2', - } - self.assertEqual(merged_dict, expected_merged_dict) - -@@ -1556,8 +1580,7 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - info_dict = {} - cmd = common.GetAvbPartitionArg('system', '/path/to/system.img', info_dict) - self.assertEqual( -- [common.AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, '/path/to/system.img'], -- cmd) -+ ['--include_descriptors_from_image', '/path/to/system.img'], cmd) - - @test_utils.SkipIfExternalToolsUnavailable() - def test_AppendVBMetaArgsForPartition_vendorAsChainedPartition(self): -@@ -1570,11 +1593,12 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - } - cmd = common.GetAvbPartitionArg('vendor', '/path/to/vendor.img', info_dict) - self.assertEqual(2, len(cmd)) -- self.assertEqual(common.AVB_ARG_NAME_CHAIN_PARTITION, cmd[0]) -- chained_partition_args = cmd[1] -- self.assertEqual('vendor', chained_partition_args.partition) -- self.assertEqual(5, chained_partition_args.rollback_index_location) -- self.assertTrue(os.path.exists(chained_partition_args.pubkey_path)) -+ self.assertEqual('--chain_partition', cmd[0]) -+ chained_partition_args = cmd[1].split(':') -+ self.assertEqual(3, len(chained_partition_args)) -+ self.assertEqual('vendor', chained_partition_args[0]) -+ self.assertEqual('5', chained_partition_args[1]) -+ self.assertTrue(os.path.exists(chained_partition_args[2])) - - @test_utils.SkipIfExternalToolsUnavailable() - def test_AppendVBMetaArgsForPartition_recoveryAsChainedPartition_nonAb(self): -@@ -1602,12 +1626,45 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase): - cmd = common.GetAvbPartitionArg( - 'recovery', '/path/to/recovery.img', info_dict) - self.assertEqual(2, len(cmd)) -- self.assertEqual(common.AVB_ARG_NAME_CHAIN_PARTITION, cmd[0]) -- chained_partition_args = cmd[1] -- self.assertEqual('recovery', chained_partition_args.partition) -- self.assertEqual(3, chained_partition_args.rollback_index_location) -- self.assertTrue(os.path.exists(chained_partition_args.pubkey_path)) -- -+ self.assertEqual('--chain_partition', cmd[0]) -+ chained_partition_args = cmd[1].split(':') -+ self.assertEqual(3, len(chained_partition_args)) -+ self.assertEqual('recovery', chained_partition_args[0]) -+ self.assertEqual('3', chained_partition_args[1]) -+ 
self.assertTrue(os.path.exists(chained_partition_args[2])) -+ -+ def test_GenerateGkiCertificate_KeyPathNotFound(self): -+ pubkey = os.path.join(self.testdata_dir, 'no_testkey_gki.pem') -+ self.assertFalse(os.path.exists(pubkey)) -+ -+ common.OPTIONS.info_dict = { -+ 'gki_signing_key_path': pubkey, -+ 'gki_signing_algorithm': 'SHA256_RSA4096', -+ 'gki_signing_signature_args': '--prop foo:bar', -+ } -+ test_file = tempfile.NamedTemporaryFile() -+ self.assertRaises(common.ExternalError, common._GenerateGkiCertificate, -+ test_file.name, 'generic_kernel') -+ -+ def test_GenerateGkiCertificate_SearchKeyPathNotFound(self): -+ pubkey = 'no_testkey_gki.pem' -+ self.assertFalse(os.path.exists(pubkey)) -+ -+ # Tests it should raise ExternalError if no key found under -+ # OPTIONS.search_path. -+ search_path_dir = common.MakeTempDir() -+ search_pubkey = os.path.join(search_path_dir, pubkey) -+ self.assertFalse(os.path.exists(search_pubkey)) -+ -+ common.OPTIONS.search_path = search_path_dir -+ common.OPTIONS.info_dict = { -+ 'gki_signing_key_path': pubkey, -+ 'gki_signing_algorithm': 'SHA256_RSA4096', -+ 'gki_signing_signature_args': '--prop foo:bar', -+ } -+ test_file = tempfile.NamedTemporaryFile() -+ self.assertRaises(common.ExternalError, common._GenerateGkiCertificate, -+ test_file.name, 'generic_kernel') - - class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase): - """Checks the format of install-recovery.sh. -@@ -1618,7 +1675,7 @@ class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase): - def setUp(self): - self._tempdir = common.MakeTempDir() - # Create a fake dict that contains the fstab info for boot&recovery. -- self._info = {"fstab": {}} -+ self._info = {"fstab" : {}} - fake_fstab = [ - "/dev/soc.0/by-name/boot /boot emmc defaults defaults", - "/dev/soc.0/by-name/recovery /recovery emmc defaults defaults"] -@@ -1965,11 +2022,11 @@ class PartitionBuildPropsTest(test_utils.ReleaseToolsTestCase): - input_zip, 'odm', placeholder_values) - - self.assertEqual({ -- 'ro.odm.build.date.utc': '1578430045', -- 'ro.odm.build.fingerprint': -- 'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys', -- 'ro.product.odm.device': 'coral', -- 'ro.product.odm.name': 'product1', -+ 'ro.odm.build.date.utc': '1578430045', -+ 'ro.odm.build.fingerprint': -+ 'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys', -+ 'ro.product.odm.device': 'coral', -+ 'ro.product.odm.name': 'product1', - }, partition_props.build_props) - - with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip: -@@ -2152,8 +2209,8 @@ class PartitionBuildPropsTest(test_utils.ReleaseToolsTestCase): - - copied_props = copy.deepcopy(partition_props) - self.assertEqual({ -- 'ro.odm.build.date.utc': '1578430045', -- 'ro.odm.build.fingerprint': -- 'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys', -- 'ro.product.odm.device': 'coral', -+ 'ro.odm.build.date.utc': '1578430045', -+ 'ro.odm.build.fingerprint': -+ 'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys', -+ 'ro.product.odm.device': 'coral', - }, copied_props.build_props) -diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py -index 88fd8929e5..beb9e75dfd 100755 ---- a/tools/releasetools/validate_target_files.py -+++ b/tools/releasetools/validate_target_files.py -@@ -132,7 +132,7 @@ def ValidateFileConsistency(input_zip, input_tmp, info_dict): - return - - # Verify IMAGES/system.img if applicable. -- # Some targets are system.img-less. 
-+ # Some targets, e.g., gki_arm64, gki_x86_64, etc., are system.img-less. - if 'IMAGES/system.img' in input_zip.namelist(): - CheckAllFiles('system') - -@@ -361,15 +361,18 @@ def ValidateVerifiedBootImages(input_tmp, info_dict, options): - "Mismatching mincrypt verity key files" - logging.info('Verified the content of /verity_key') - -- verity_key_ramdisk = os.path.join( -- input_tmp, 'BOOT', 'RAMDISK', 'verity_key') -- assert os.path.exists( -- verity_key_ramdisk), 'Missing verity_key in ramdisk' -+ # For devices with a separate ramdisk (i.e. non-system-as-root), there must -+ # be a copy in ramdisk. -+ if info_dict.get("system_root_image") != "true": -+ verity_key_ramdisk = os.path.join( -+ input_tmp, 'BOOT', 'RAMDISK', 'verity_key') -+ assert os.path.exists( -+ verity_key_ramdisk), 'Missing verity_key in ramdisk' - -- assert filecmp.cmp( -- verity_key_mincrypt, verity_key_ramdisk, shallow=False), \ -- 'Mismatching verity_key files in root and ramdisk' -- logging.info('Verified the content of /verity_key in ramdisk') -+ assert filecmp.cmp( -+ verity_key_mincrypt, verity_key_ramdisk, shallow=False), \ -+ 'Mismatching verity_key files in root and ramdisk' -+ logging.info('Verified the content of /verity_key in ramdisk') - - # Then verify the verity signed system/vendor/product images, against the - # verity pubkey in mincrypt format. -@@ -427,8 +430,7 @@ def ValidateVerifiedBootImages(input_tmp, info_dict, options): - key_file = options.get(key_name, info_dict[key_name]) - chained_partition_arg = common.GetAvbChainedPartitionArg( - partition, info_dict, key_file) -- cmd.extend(['--expected_chain_partition', -- chained_partition_arg.to_string()]) -+ cmd.extend(['--expected_chain_partition', chained_partition_arg]) - - # Handle the boot image with a non-default name, e.g. boot-5.4.img - boot_images = info_dict.get("boot_images") --- -2.34.1 - diff --git a/aosp_diff/base_aaos/prebuilts/build-tools/01_0001-WA-Fixed-Build-Error-For-Python.patch b/aosp_diff/base_aaos/prebuilts/build-tools/01_0001-WA-Fixed-Build-Error-For-Python.patch deleted file mode 100644 index da743357cc..0000000000 --- a/aosp_diff/base_aaos/prebuilts/build-tools/01_0001-WA-Fixed-Build-Error-For-Python.patch +++ /dev/null @@ -1,25 +0,0 @@ -From f4f107df96f7ebfd4916a45e211fc73af481a3e1 Mon Sep 17 00:00:00 2001 -From: Ankit Agrawal -Date: Thu, 18 May 2023 16:32:49 +0530 -Subject: [PATCH] WA-Fixed Build Error For Python. - -Configuring python2 for now to resolve build issue. - -Tracked-On: OAM-OAM-106853 -Signed-off-by: Tanuj Tekriwal ---- - path/linux-x86/python | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/path/linux-x86/python b/path/linux-x86/python -index 94246bc..8ec5dcd 120000 ---- a/path/linux-x86/python -+++ b/path/linux-x86/python -@@ -1 +1 @@ --../../linux-x86/bin/py3-cmd -\ No newline at end of file -+../../linux-x86/bin/py2-cmd -\ No newline at end of file --- -2.17.1 - diff --git a/bsp_diff/base_aaos/device/intel/build/0002-Fixed-build-error-due-to-python3.patch b/bsp_diff/base_aaos/device/intel/build/0002-Fixed-build-error-due-to-python3.patch deleted file mode 100644 index 53cd82665a..0000000000 --- a/bsp_diff/base_aaos/device/intel/build/0002-Fixed-build-error-due-to-python3.patch +++ /dev/null @@ -1,195 +0,0 @@ -From a423ef544e72847fd682d3d7691919322722a249 Mon Sep 17 00:00:00 2001 -From: Ankit Agarwal -Date: Tue, 18 Jun 2024 11:05:36 +0530 -Subject: [PATCH] Fixed build error due to python3. - -Observed errors when compiling using python3. -Fixed python3 based issues. 
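-
-For reference, the conversions are of this form (an illustrative sketch;
-these exact lines are placeholders, not quotes from the files below):
-
-    print "Variant flashfile = %s" % path          # python2
-    print("Variant flashfile = {}".format(path))   # python3
-
-    except common.ExternalError, e:                # python2
-    except common.ExternalError as e:              # python3
-
-    (info.external_attr >> 16L) & 0xFFFF           # python2 long literal
-    (info.external_attr >> 16) & 0xFFFF            # python3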
- -Tests: Prepared EB, there is no issue. - -Tracked-On: NA -Signed-off-by: Ankit Agarwal ---- - releasetools/flashfiles_from_target_files | 44 +++++++++++------------ - releasetools/intel_common.py | 2 +- - 2 files changed, 23 insertions(+), 23 deletions(-) - -diff --git a/releasetools/flashfiles_from_target_files b/releasetools/flashfiles_from_target_files -index 02de5ff..70ac96e 100755 ---- a/releasetools/flashfiles_from_target_files -+++ b/releasetools/flashfiles_from_target_files -@@ -97,14 +97,14 @@ class VariantIpGenerator: - - def __add_variant_flashfile(self, ip, variant): - variant_flashfile = self.flashfile + "_" + variant + ".ini" -- print "Variant flashfile = %s"%variant_flashfile -+ print("Variant flashfile = {}".format(variant_flashfile)) - # Sanity check to avoid future silent removal - eg = self.empty_groups(ip) - if eg: - raise AssertionError("Unexpected malformed section %s" % eg[0]) - - if os.path.isfile(variant_flashfile): -- print "Reading INI configuration for %s ..."%variant -+ print("Reading INI configuration for {} ...".format(variant)) - with open(variant_flashfile, "r") as f: - ip.parse(f) - self.variant_files = self.variant_files_common -@@ -125,7 +125,7 @@ class VariantIpGenerator: - # This may happen when a mixin (platform level) disables a feature, while - # local flashfile.ini (variant level) is kept and customizes this feature. - for s in self.empty_groups(ip): -- print "Removing malformed section : ", s -+ print("Removing malformed section : {}".format(s)) - ip.delete_section(s) - - def empty_groups(self, ip): -@@ -214,7 +214,7 @@ def getFromZip(zip_path, filename): - with zipfile.ZipFile(zip_path, "r") as zf: - data = zf.open(filename).read() - info = zf.getinfo(filename) -- return (common.File(filename, data), (info.external_attr >> 16L) & 0xFFFF) -+ return (common.File(filename, data), (info.external_attr >> 16) & 0xFFFF) - - def getProvdataVariants(unpack_dir): - variants = [] -@@ -250,7 +250,7 @@ def process_image(unpack_dir, dest_zip, source, target, configs, variant=None, t - if target_out in flashfile_content: - return - else: -- print "-- Adding", target_out -+ print("-- Adding {}".format(target_out)) - # Default is no special permissions - perms = None - # retrieve file from target file package based on source & target strings -@@ -310,7 +310,7 @@ def process_image_fast(product_out, flashfiles_out, source, target, variant=None - if target_out in flashfile_content: - return - -- print "-- Adding", target_out -+ print("-- Adding {}".format(target_out)) - outfile = os.path.join(flashfiles_out, target_out) - if not os.path.exists(os.path.dirname(outfile)): - os.mkdir(os.path.dirname(outfile)) -@@ -374,7 +374,7 @@ def main(argv): - - flashfile = getIntermediates(product_out, "flashfiles", "flashfiles") - else: -- print "Unzipping target-files..." -+ print("Unzipping target-files...") - unpack_dir = common.UnzipTemp(args[0]) - if OPTIONS.add_image: - input_super = os.path.join(unpack_dir, "IMAGES") -@@ -392,13 +392,13 @@ def main(argv): - - # Retrieve "generic" PFT instructions from target file package - if os.path.isfile(flashfile + ".ini"): -- print "Reading INI configuration..." -+ print("Reading INI configuration...") - with open(flashfile + ".ini", "r") as f: - ip = iniparser.IniParser() - ip.parse(f) - configs, files = flash_cmd_generator.parse_config([ip], build_type, platform) - elif os.path.isfile(flashfile + ".json") and not OPTIONS.unified_variants: -- print "Reading JSON configuration..." 
-+ print("Reading JSON configuration...") - with open(flashfile + ".json", "r") as f: - conf = json.loads(f.read()) - configs, files = flashxml.parse_config(conf, build_type, platform) -@@ -406,25 +406,25 @@ def main(argv): - if not OPTIONS.mv_config_default: - common.Usage(__doc__) - sys.exit(1) -- print "Reading JSON FLS configuration..." -+ print("Reading JSON FLS configuration...") - with open(flashfile + "_fls.json", "r") as f: - conf = json.loads(f.read()) - configs, files = flashflsxml.parse_config(conf, build_type, platform, OPTIONS.mv_config_default, system) - else: -- print "Exiting, Missing correct flashfile configuration for generating Flashfiles." -+ print("Exiting, Missing correct flashfile configuration for generating Flashfiles.") - sys.exit(1) - - if OPTIONS.fast: - fastff_dir = args[1] - # If mega flashfile is enabled, create multi-variant version of PFT instructions - if OPTIONS.unified_variants or OPTIONS.variants : -- print "Adding variant specific configurations to ip..." -+ print("Adding variant specific configurations to ip...") - vip = VariantIpGenerator(ip, configs, OPTIONS.variants, variant_files, flashfile) - vip.generate_variant_ip() - configs, cmd_files = flash_cmd_generator.parse_config(vip.variant_ips, build_type, platform) - cmd_files = set([i for _,i in cmd_files]) - -- print "Adding required binaries..." -+ print("Adding required binaries...") - for src, target in files: - if OPTIONS.variants: - for variant in OPTIONS.variants: -@@ -442,7 +442,7 @@ def main(argv): - src,target = file.split(":") - process_image_fast(product_out, fastff_dir, src, target, variant, variantFilename(target, variant)) - -- print "Generating JSON flash configuration files..." -+ print("Generating JSON flash configuration files...") - for fn, data in configs: - with open(os.path.join(fastff_dir,fn), 'w') as file: - file.write(data) -@@ -450,14 +450,14 @@ def main(argv): - with zipfile.ZipFile(args[1], "w", zipfile.ZIP_DEFLATED,allowZip64=True) as dest_zip: - # If mega flashfile is enabled, create multi-variant version of PFT instructions - if OPTIONS.unified_variants or OPTIONS.variants : -- print "Adding variant specific configurations to ip..." -+ print("Adding variant specific configurations to ip...") - vip = VariantIpGenerator(ip, configs, OPTIONS.variants, variant_files, flashfile) - vip.generate_variant_ip() - configs, cmd_files = flash_cmd_generator.parse_config(vip.variant_ips, build_type, platform) - cmd_files = set([i for _,i in cmd_files]) - - # Using "generic" instructions as reference, grab required files & insert into flashfile zip -- print "Adding required binaries..." -+ print("Adding required binaries...") - for src, target in files: - if OPTIONS.variants: - for variant in OPTIONS.variants: -@@ -477,20 +477,20 @@ def main(argv): - process_image(unpack_dir, dest_zip, src, target, configs, variant, variantFilename(target, variant)) - - # Write flash_cmd_generator parsed PFT flashing instructions to file & insert into flashfile zip -- print "Generating JSON flash configuration files..." -+ print("Generating JSON flash configuration files...") - for fn, data in configs: - ifile = common.File(fn, data) - ifile.AddToZip(dest_zip) -- print "All done." 
-+ print("All done.") - - if __name__ == '__main__': - try: - common.CloseInheritedPipes() - main(sys.argv[1:]) -- except common.ExternalError, e: -- print -- print " ERROR: %s" % (e,) -- print -+ except common.ExternalError as e: -+ print("") -+ print(" ERROR: {}".format(e)) -+ print("") - sys.exit(1) - finally: - common.Cleanup() -diff --git a/releasetools/intel_common.py b/releasetools/intel_common.py -index c2267e9..32184f6 100644 ---- a/releasetools/intel_common.py -+++ b/releasetools/intel_common.py -@@ -1,4 +1,4 @@ -- -+#!/usr/bin/env python - # - # Copyright (C) 2014 The Android Open Source Project - # --- -2.34.1 - diff --git a/bsp_diff/caas/device/intel/build/0001-Migrate-python2-scripts-to-python3.patch b/bsp_diff/caas/device/intel/build/0001-Migrate-python2-scripts-to-python3.patch deleted file mode 100644 index bc7bd61d50..0000000000 --- a/bsp_diff/caas/device/intel/build/0001-Migrate-python2-scripts-to-python3.patch +++ /dev/null @@ -1,514 +0,0 @@ -From 887e6545aa7397d1c7b46194511bdcc24cfbcc6b Mon Sep 17 00:00:00 2001 -From: Salini Venate -Date: Mon, 22 Aug 2023 09:57:49 +0530 -Subject: [PATCH] Migrate python2 scripts to python3 - -Tests Done: Boot check - -Tracked-On: OAM-122386 -Signed-off-by: Salini Venate ---- - bootloader_from_zip | 8 ++--- - create_gpt_image.py | 37 ++++++++++---------- - generate_factory_images | 34 +++++++++--------- - releasetools/bootloader_from_target_files | 6 ++-- - releasetools/flash_cmd_generator.py | 8 ++++- - releasetools/flashfiles_from_target_files | 42 ++++++++++++----------- - releasetools/intel_common.py | 4 +-- - tasks/checkvendor.py | 2 +- - 8 files changed, 75 insertions(+), 66 deletions(-) - -diff --git a/bootloader_from_zip b/bootloader_from_zip -index d24af8f..d2cbed0 100755 ---- a/bootloader_from_zip -+++ b/bootloader_from_zip -@@ -83,12 +83,12 @@ def main(argv): - sys.exit(1) - - if not OPTIONS.zipfile: -- print "--zipfile is required" -+ print ("--zipfile is required") - common.Usage(__doc__) - sys.exit(1) - - tf = tempfile.NamedTemporaryFile() -- tf.write("foo") -+ tf.write(b"foo") - tf.flush() - - extra_files = OPTIONS.bootimage -@@ -106,9 +106,9 @@ if __name__ == '__main__': - try: - common.CloseInheritedPipes() - main(sys.argv[1:]) -- except common.ExternalError, e: -+ except (common.ExternalError,e): - print -- print " ERROR: %s" % (e,) -+ print (" ERROR: %s" % (e,)) - print - sys.exit(1) - -diff --git a/create_gpt_image.py b/create_gpt_image.py -index 16ad850..a925a1e 100755 ---- a/create_gpt_image.py -+++ b/create_gpt_image.py -@@ -17,7 +17,7 @@ - Script to create a GPT/UEFI image or to show information it contains. 
- """ - --from sys import version_info -+from sys import version_info, exit - - if version_info < (2, 7, 3): - exit('Python version must be 2.7.3 or higher') -@@ -35,7 +35,7 @@ from collections import namedtuple - if version_info < (3, 0, 1): - from ConfigParser import SafeConfigParser, ParsingError, NoOptionError, NoSectionError - else: -- from configparser import SafeConfigParser, ParsingError, NoOptionError, NoSectionError -+ from configparser import ConfigParser, ParsingError, NoOptionError, NoSectionError - from math import floor, log - - -@@ -135,8 +135,8 @@ class MBRInfos(object): - Used to write MBR in an image file - """ - self.raw = pack(MBRInfos._FMT, self.boot, self.os_type, -- self.lba_start, self.lba_size, '', -- MBRInfos._PART_ENTRY, '', self.sign) -+ self.lba_start, self.lba_size, b'', -+ MBRInfos._PART_ENTRY.encode('utf-8'), b'', self.sign.encode('utf-8')) - img_file.seek(offset) - img_file.write(self.raw) - -@@ -284,15 +284,15 @@ class GPTHeaderInfos(object): - """ - Used to write GPT header and backup in an image file - """ -- self.raw = pack(GPTHeaderInfos._FMT, self.sign, self.rev, -- self.size, 0, 1, self.lba_backup, -- self.lba_first, self.lba_last, self.uuid, -+ self.raw = pack(GPTHeaderInfos._FMT, self.sign.encode('utf-8'), self.rev.encode('utf-8'), -+ self.size, 0, 1, int(self.lba_backup), -+ int(self.lba_first), int(self.lba_last), self.uuid, - 2, self.table_length, self.entry_size, 0) - -- backup_raw = pack(GPTHeaderInfos._FMT, self.sign, self.rev, -- self.size, 0, self.lba_backup, 1, -- self.lba_first, self.lba_last, self.uuid, -- self.lba_start, self.table_length, -+ backup_raw = pack(GPTHeaderInfos._FMT, self.sign.encode('utf-8'), self.rev.encode('utf-8'), -+ self.size, 0, int(self.lba_backup), 1, -+ int(self.lba_first), int(self.lba_last), self.uuid, -+ int(self.lba_start), self.table_length, - self.entry_size, 0) - - # writes a new GPT header -@@ -300,14 +300,14 @@ class GPTHeaderInfos(object): - img_file.write(self.raw) - - # writes zero on unused blocks of GPT header -- raw_stuffing = '\x00' * (block_size - len(self.raw)) -+ raw_stuffing = b'\x00' * (block_size - len(self.raw)) - img_file.write(raw_stuffing) - - # saves the end of the GPT header - gpt_header_end = img_file.tell() - - # writes a new GPT backup -- backup_position = self.lba_backup * block_size -+ backup_position = int(self.lba_backup) * block_size - img_file.seek(backup_position) - img_file.write(backup_raw) - -@@ -357,6 +357,7 @@ class PartTableInfos(list): - """ - # erases the partition table entries - self = [] -+ offset=int(offset) - - # writes all new partition entries in GPT header - current_offset = offset -@@ -371,7 +372,7 @@ class PartTableInfos(list): - img_file.seek(offset) - raw_entries_size = current_offset - offset - raw_entries = img_file.read(raw_entries_size) -- img_file.seek(last_usable + 1) -+ img_file.seek(int(last_usable) + 1) - img_file.write(raw_entries) - - img_file.seek(current_offset) -@@ -738,7 +739,7 @@ class TLBInfos(list): - Used to read a INI TLB partition file - """ - # sets a parser to read the INI TLB partition file -- cfg = SafeConfigParser() -+ cfg = ConfigParser(strict=False) - try: - cfg.read(self.path) - -@@ -965,7 +966,7 @@ class GPTImage(object): - img_file.seek(2 * self.block_size) - raw_table = img_file.read(self.gpt_header.table_length * - self.gpt_header.entry_size) -- img_file.seek((self.gpt_header.lba_backup - 32) * self.block_size) -+ img_file.seek((int(self.gpt_header.lba_backup) - 32) * self.block_size) - raw_backup_table = 
img_file.read(self.gpt_header.table_length * - self.gpt_header.entry_size) - -@@ -1035,7 +1036,7 @@ class GPTImage(object): - # no binary file used to build the partition or slot_b case - label = tlb_part.label[0:] - if bin_path == 'none' or label[len(label)-2:] == '_b': -- line = '\0' -+ line = b'\0' - img_file.seek(offset) - img_file.write(line) - bin_size = 0 -@@ -1081,7 +1082,7 @@ class GPTImage(object): - - # fill output image header with 0x00: MBR size + GPT header size + - # (partition table length * entry size) -- zero = '\x00' * (2 * self.block_size + -+ zero = b'\x00' * (2 * self.block_size + - self.gpt_header.table_length * - self.gpt_header.entry_size) - img_file.seek(0) -diff --git a/generate_factory_images b/generate_factory_images -index 4987e81..92b2479 100755 ---- a/generate_factory_images -+++ b/generate_factory_images -@@ -32,7 +32,7 @@ import os - - _FLASHALL_FILENAME = "flash-all.sh" - # chmod (octal) -rwxr-x--x --_PERMS = 0751 -+_PERMS = 0o751 - _FLASH_HEADER = """#!/bin/bash - - # Copyright 2012 The Android Open Source Project -@@ -103,23 +103,23 @@ def ConvertToDOSFormat(filename): - - - def AddFlashScript(filename, tar, commands, windows): -- print "Archiving", filename -+ print ("Archiving", filename) - tf = tempfile.NamedTemporaryFile(delete=False) - if (windows): -- tf.write(_WIN_FLASH_HEADER) -+ tf.write(_WIN_FLASH_HEADER.encode('utf-8')) - else: -- tf.write(_FLASH_HEADER) -+ tf.write(_FLASH_HEADER.encode('utf-8')) - - for c in commands: - if windows: -- tf.write(c.get_windows_command()) -+ tf.write(c.get_windows_command().encode('utf-8')) - else: -- tf.write(c.get_linux_command()) -+ tf.write(c.get_linux_command().encode('utf-8')) - - if (windows): -- tf.write(_WIN_FLASH_FOOTER) -+ tf.write(_WIN_FLASH_FOOTER.encode('utf-8')) - else: -- tf.write(_FLASH_FOOTER) -+ tf.write(_FLASH_FOOTER.encode('utf-8')) - - tf.close() - if (windows): -@@ -146,8 +146,8 @@ class CommandlineParser(ArgumentParser): - self.description = __doc__ - - def error(self, message): -- print >>stderr, "ERROR: {}".format(message) -- print >>stderr, "\n------\n" -+ print ("ERROR: {}".format(message), file=sys.stderr) -+ print ("\n------\n", file=sys.stderr) - self.print_help() - exit(2) - -@@ -230,18 +230,18 @@ def main(): - archive_name = args.output - - # Create Archive -- print "Creating archive: " + archive_name -+ print ("Creating archive: " + archive_name) - tar = TarOpen(archive_name, "w:gz") - - for src_path, dst_path in files: -- print "Archiving " + src_path -+ print ("Archiving " + src_path) - RequireFile(src_path) - tar.add(src_path, arcname=dst_path) - - # 'fastboot update' covers the additional AOSP pieces, add this to the - # command list now - commands.append(UpdateCommand(update_fn, True)) -- print "Archiving " + args.update_archive -+ print ("Archiving " + args.update_archive) - RequireFile(args.update_archive) - tar.add(args.update_archive, update_fn) - AddFlashScript(_FLASHALL_FILENAME, tar, commands, windows=False) -@@ -249,12 +249,12 @@ def main(): - - tar.close() - -- print "Done." 
-+ print ("Done.") - - if __name__ == "__main__": - try: - exit(main()) -- except Usage, err: -- print >>stderr, "ERROR: {}".format(err.msg) -- print >>stderr, " for help use --help" -+ except Usage as err: -+ print ("ERROR: {}".format(err.msg), file=sys.stderr) -+ print (" for help use --help", file=sys.stderr) - exit(2) -diff --git a/releasetools/bootloader_from_target_files b/releasetools/bootloader_from_target_files -index 4162b00..af5f96b 100755 ---- a/releasetools/bootloader_from_target_files -+++ b/releasetools/bootloader_from_target_files -@@ -61,7 +61,7 @@ def main(argv): - common.Usage(__doc__) - sys.exit(1) - -- print "unzipping target-files..." -+ print ("unzipping target-files...") - #OPTIONS.input_tmp = common.UnzipTemp(args[0]) - OPTIONS.input_tmp = args[0] - #input_zip = zipfile.ZipFile(args[0], "r") -@@ -89,9 +89,9 @@ if __name__ == '__main__': - try: - common.CloseInheritedPipes() - main(sys.argv[1:]) -- except common.ExternalError, e: -+ except (common.ExternalError, e): - print -- print " ERROR: %s" % (e,) -+ print (" ERROR: %s" % (e,)) - print - sys.exit(1) - finally: -diff --git a/releasetools/flash_cmd_generator.py b/releasetools/flash_cmd_generator.py -index bd50b17..64b56a0 100755 ---- a/releasetools/flash_cmd_generator.py -+++ b/releasetools/flash_cmd_generator.py -@@ -330,7 +330,13 @@ def parse_config(ips, variant, platform): - results_list = [] - for k,v in results.items(): - results_list.append((k,v)) -- flist = [f.rsplit(':', 1) for f in set(files)] -+ unique_files = [] -+ for file in files: -+ # If the number is not already in the unique_numbers list, add it -+ if file not in unique_files: -+ unique_files.append(file) -+ -+ flist = [f.rsplit(':', 1) for f in unique_files] - return results_list, flist - - -diff --git a/releasetools/flashfiles_from_target_files b/releasetools/flashfiles_from_target_files -index 02de5ff..cc9b68e 100755 ---- a/releasetools/flashfiles_from_target_files -+++ b/releasetools/flashfiles_from_target_files -@@ -97,14 +97,14 @@ class VariantIpGenerator: - - def __add_variant_flashfile(self, ip, variant): - variant_flashfile = self.flashfile + "_" + variant + ".ini" -- print "Variant flashfile = %s"%variant_flashfile -+ print ("Variant flashfile = %s"%variant_flashfile) - # Sanity check to avoid future silent removal - eg = self.empty_groups(ip) - if eg: - raise AssertionError("Unexpected malformed section %s" % eg[0]) - - if os.path.isfile(variant_flashfile): -- print "Reading INI configuration for %s ..."%variant -+ print ("Reading INI configuration for %s ..."%variant) - with open(variant_flashfile, "r") as f: - ip.parse(f) - self.variant_files = self.variant_files_common -@@ -125,7 +125,7 @@ class VariantIpGenerator: - # This may happen when a mixin (platform level) disables a feature, while - # local flashfile.ini (variant level) is kept and customizes this feature. 
- for s in self.empty_groups(ip): -- print "Removing malformed section : ", s -+ print ("Removing malformed section : ", s) - ip.delete_section(s) - - def empty_groups(self, ip): -@@ -214,7 +214,7 @@ def getFromZip(zip_path, filename): - with zipfile.ZipFile(zip_path, "r") as zf: - data = zf.open(filename).read() - info = zf.getinfo(filename) -- return (common.File(filename, data), (info.external_attr >> 16L) & 0xFFFF) -+ return (common.File(filename, data), (info.external_attr >> 16) & 0xFFFF) - - def getProvdataVariants(unpack_dir): - variants = [] -@@ -250,7 +250,7 @@ def process_image(unpack_dir, dest_zip, source, target, configs, variant=None, t - if target_out in flashfile_content: - return - else: -- print "-- Adding", target_out -+ print ("-- Adding", target_out) - # Default is no special permissions - perms = None - # retrieve file from target file package based on source & target strings -@@ -310,7 +310,7 @@ def process_image_fast(product_out, flashfiles_out, source, target, variant=None - if target_out in flashfile_content: - return - -- print "-- Adding", target_out -+ print ("-- Adding", target_out) - outfile = os.path.join(flashfiles_out, target_out) - if not os.path.exists(os.path.dirname(outfile)): - os.mkdir(os.path.dirname(outfile)) -@@ -374,7 +374,7 @@ def main(argv): - - flashfile = getIntermediates(product_out, "flashfiles", "flashfiles") - else: -- print "Unzipping target-files..." -+ print ("Unzipping target-files...") - unpack_dir = common.UnzipTemp(args[0]) - if OPTIONS.add_image: - input_super = os.path.join(unpack_dir, "IMAGES") -@@ -392,13 +392,13 @@ def main(argv): - - # Retrieve "generic" PFT instructions from target file package - if os.path.isfile(flashfile + ".ini"): -- print "Reading INI configuration..." -+ print ("Reading INI configuration...") - with open(flashfile + ".ini", "r") as f: - ip = iniparser.IniParser() - ip.parse(f) - configs, files = flash_cmd_generator.parse_config([ip], build_type, platform) - elif os.path.isfile(flashfile + ".json") and not OPTIONS.unified_variants: -- print "Reading JSON configuration..." -+ print ("Reading JSON configuration...") - with open(flashfile + ".json", "r") as f: - conf = json.loads(f.read()) - configs, files = flashxml.parse_config(conf, build_type, platform) -@@ -406,25 +406,25 @@ def main(argv): - if not OPTIONS.mv_config_default: - common.Usage(__doc__) - sys.exit(1) -- print "Reading JSON FLS configuration..." -+ print ("Reading JSON FLS configuration...") - with open(flashfile + "_fls.json", "r") as f: - conf = json.loads(f.read()) - configs, files = flashflsxml.parse_config(conf, build_type, platform, OPTIONS.mv_config_default, system) - else: -- print "Exiting, Missing correct flashfile configuration for generating Flashfiles." -+ print ("Exiting, Missing correct flashfile configuration for generating Flashfiles.") - sys.exit(1) - - if OPTIONS.fast: - fastff_dir = args[1] - # If mega flashfile is enabled, create multi-variant version of PFT instructions - if OPTIONS.unified_variants or OPTIONS.variants : -- print "Adding variant specific configurations to ip..." -+ print ("Adding variant specific configurations to ip...") - vip = VariantIpGenerator(ip, configs, OPTIONS.variants, variant_files, flashfile) - vip.generate_variant_ip() - configs, cmd_files = flash_cmd_generator.parse_config(vip.variant_ips, build_type, platform) - cmd_files = set([i for _,i in cmd_files]) - -- print "Adding required binaries..." 
-+ print ("Adding required binaries...") - for src, target in files: - if OPTIONS.variants: - for variant in OPTIONS.variants: -@@ -442,7 +442,7 @@ def main(argv): - src,target = file.split(":") - process_image_fast(product_out, fastff_dir, src, target, variant, variantFilename(target, variant)) - -- print "Generating JSON flash configuration files..." -+ print ("Generating JSON flash configuration files...") - for fn, data in configs: - with open(os.path.join(fastff_dir,fn), 'w') as file: - file.write(data) -@@ -450,14 +450,14 @@ def main(argv): - with zipfile.ZipFile(args[1], "w", zipfile.ZIP_DEFLATED,allowZip64=True) as dest_zip: - # If mega flashfile is enabled, create multi-variant version of PFT instructions - if OPTIONS.unified_variants or OPTIONS.variants : -- print "Adding variant specific configurations to ip..." -+ print ("Adding variant specific configurations to ip...") - vip = VariantIpGenerator(ip, configs, OPTIONS.variants, variant_files, flashfile) - vip.generate_variant_ip() - configs, cmd_files = flash_cmd_generator.parse_config(vip.variant_ips, build_type, platform) - cmd_files = set([i for _,i in cmd_files]) - - # Using "generic" instructions as reference, grab required files & insert into flashfile zip -- print "Adding required binaries..." -+ print ("Adding required binaries...") - for src, target in files: - if OPTIONS.variants: - for variant in OPTIONS.variants: -@@ -477,19 +477,21 @@ def main(argv): - process_image(unpack_dir, dest_zip, src, target, configs, variant, variantFilename(target, variant)) - - # Write flash_cmd_generator parsed PFT flashing instructions to file & insert into flashfile zip -- print "Generating JSON flash configuration files..." -+ print ("Generating JSON flash configuration files...") - for fn, data in configs: -+ if isinstance(data, str): -+ data = data.encode('utf-8') - ifile = common.File(fn, data) - ifile.AddToZip(dest_zip) -- print "All done." 
-+ print ("All done.") - - if __name__ == '__main__': - try: - common.CloseInheritedPipes() - main(sys.argv[1:]) -- except common.ExternalError, e: -+ except common.ExternalError as e: - print -- print " ERROR: %s" % (e,) -+ print (" ERROR: %s" % (e,)) - print - sys.exit(1) - finally: -diff --git a/releasetools/intel_common.py b/releasetools/intel_common.py -index c2267e9..6c617ba 100644 ---- a/releasetools/intel_common.py -+++ b/releasetools/intel_common.py -@@ -394,7 +394,7 @@ def GetBootloaderImageFromTFP(unpack_dir, autosize=False, extra_files=None, vari - block_size=info["block_size"], - extra_files=extra_files) - -- bootloader = open(filename) -+ bootloader = open(filename,'rb') - data = bootloader.read() - bootloader.close() - os.unlink(filename) -@@ -482,7 +482,7 @@ def MakeVFATFilesystem(root_zip, filename, title="ANDROIDIA", size=0, block_size - cmd = ["mkdosfs"] - if block_size: - cmd.extend(["-S", str(block_size)]) -- cmd.extend(["-n", title, "-C", filename, str(size / 1024)]) -+ cmd.extend(["-n", title, "-C", filename, str(size // 1024)]) - try: - p = common.Run(cmd) - except Exception as exc: -diff --git a/tasks/checkvendor.py b/tasks/checkvendor.py -index 86e3ba2..b1483c3 100755 ---- a/tasks/checkvendor.py -+++ b/tasks/checkvendor.py -@@ -186,4 +186,4 @@ def main(): - return 0 - - if __name__ == "__main__": -- exit(main()) -+ sys.exit(main()) --- -2.34.1 - diff --git a/bsp_diff/caas/device/intel/common/0001-Migrate-gpt_ini2bin.py-to-python3.patch b/bsp_diff/caas/device/intel/common/0001-Migrate-gpt_ini2bin.py-to-python3.patch deleted file mode 100644 index 93cb1f183d..0000000000 --- a/bsp_diff/caas/device/intel/common/0001-Migrate-gpt_ini2bin.py-to-python3.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 85368cb0b890a42e72204383fad801db1809428f Mon Sep 17 00:00:00 2001 -From: Salini Venate -Date: Wed, 20 Jul 2024 10:55:21 +0000 -Subject: [PATCH] Migrate gpt_ini2bin.py to python3 - -Test Done: Boot check -Tracked-On: OAM-122386 -Signed-off-by: Salini Venate ---- - gpt_bin/gpt_ini2bin.py | 9 +++++---- - 1 file changed, 5 insertions(+), 4 deletions(-) - -diff --git a/gpt_bin/gpt_ini2bin.py b/gpt_bin/gpt_ini2bin.py -index 2153056..16bcae0 100755 ---- a/gpt_bin/gpt_ini2bin.py -+++ b/gpt_bin/gpt_ini2bin.py -@@ -2,6 +2,7 @@ - - import uuid - import struct -+import os - import sys - if sys.version_info < (3, 0, 1): - import ConfigParser -@@ -27,7 +28,7 @@ type_2_guid = { - def zero_pad(s, size): - if (len(s) > size): - print('error', len(s)) -- s += '\0' * (size - len(s)) -+ s += b'\0' * (size - len(s)) - return s - - def copy_section(cfg, a, b): -@@ -139,9 +140,9 @@ def main(): - gpt_in = sys.argv[1] - - if sys.version_info < (3, 0, 1): -- cfg = ConfigParser.SafeConfigParser() -+ cfg = ConfigParser.ConfigParser() - else: -- cfg = configparser.SafeConfigParser(strict=False) -+ cfg = configparser.ConfigParser(strict=False) - - cfg.read(gpt_in) - -@@ -154,7 +155,7 @@ def main(): - start_lba = cfg.getint('base', 'start_lba') - npart = len(part) - -- out = sys.stdout -+ out = os.fdopen(sys.stdout.fileno(), 'wb') - out.write(struct.pack('