From 21c2b762895111e5648ff53e5bd36b522500c561 Mon Sep 17 00:00:00 2001
From: Olivier Gayot
Date: Tue, 16 Jul 2024 21:42:34 +0200
Subject: [PATCH 1/4] storage: add TODO to anticipate guided scenario issues

Except for manual partitioning, we should ideally suggest guided
scenarios only if we are strongly confident that they can be applied.

Currently, we only perform some basic checks before suggesting
scenarios. Going forward, I think we should be more cautious by trying
to apply the scenarios in a throwaway filesystem model. If a scenario
fails to apply, we should not suggest it.

There is a lot of work involved, so this is marked as a TODO for now.

Signed-off-by: Olivier Gayot
---
 subiquity/server/controllers/filesystem.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/subiquity/server/controllers/filesystem.py b/subiquity/server/controllers/filesystem.py
index c807a8f9e..2c5799218 100644
--- a/subiquity/server/controllers/filesystem.py
+++ b/subiquity/server/controllers/filesystem.py
@@ -1154,6 +1154,11 @@ async def v2_guided_GET(self, wait: bool = False) -> GuidedStorageResponseV2:
         if probe_resp is not None:
             return probe_resp
 
+        # TODO We should probably use temporary copies of the storage model and
+        # try to apply each scenario that we want to return. If an error is
+        # encountered when applying a scenario, we should skip it and log why.
+        # This should avoid problems such as "Exceeded number of available
+        # partitions" that we struggle to manually anticipate.
         scenarios = []
         install_min = self.calculate_suggested_install_min()
 

From 797f21ec4a24911f869e0a974ccf7613ed395034 Mon Sep 17 00:00:00 2001
From: Olivier Gayot
Date: Wed, 17 Jul 2024 12:18:43 +0200
Subject: [PATCH 2/4] storage: add method to tell how many parts needed for a boot plan

Signed-off-by: Olivier Gayot
---
 subiquity/common/filesystem/boot.py           | 11 ++++
 .../common/filesystem/tests/test_boot.py      | 61 +++++++++++++++++--
 2 files changed, 68 insertions(+), 4 deletions(-)

diff --git a/subiquity/common/filesystem/boot.py b/subiquity/common/filesystem/boot.py
index 86aea952d..dd5915191 100644
--- a/subiquity/common/filesystem/boot.py
+++ b/subiquity/common/filesystem/boot.py
@@ -63,6 +63,9 @@ class MakeBootDevicePlan(abc.ABC):
     this a boot device" in sync.
     """
 
+    def new_partition_count(self) -> int:
+        return 0
+
     @abc.abstractmethod
     def apply(self, manipulator):
         pass
@@ -77,6 +80,10 @@ class CreatePartPlan(MakeBootDevicePlan):
     spec: dict = attr.ib(factory=dict)
     args: dict = attr.ib(factory=dict)
 
+    # TODO add @typing.override decorator when we switch to core24.
+    def new_partition_count(self) -> int:
+        return 1
+
     def apply(self, manipulator):
         manipulator.create_partition(self.gap.device, self.gap, self.spec, **self.args)
 
@@ -156,6 +163,10 @@ def apply(self, manipulator):
         for plan in self.plans:
             plan.apply(manipulator)
 
+    # TODO add @typing.override decorator when we switch to core24.
+    def new_partition_count(self) -> int:
+        return sum([plan.new_partition_count() for plan in self.plans])
+
 
 def get_boot_device_plan_bios(device) -> Optional[MakeBootDevicePlan]:
     attr_plan = SetAttrPlan(device, "grub_device", True)
diff --git a/subiquity/common/filesystem/tests/test_boot.py b/subiquity/common/filesystem/tests/test_boot.py
index b261f149c..2e604925f 100644
--- a/subiquity/common/filesystem/tests/test_boot.py
+++ b/subiquity/common/filesystem/tests/test_boot.py
@@ -14,9 +14,20 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import unittest
-from unittest import mock
+from unittest.mock import Mock, patch
+
+import attr
 
-from subiquity.common.filesystem.boot import _can_be_boot_device_disk
+from subiquity.common.filesystem.boot import (
+    CreatePartPlan,
+    MountBootEfiPlan,
+    MultiStepPlan,
+    NoOpBootPlan,
+    ResizePlan,
+    SetAttrPlan,
+    SlidePlan,
+    _can_be_boot_device_disk,
+)
 from subiquity.models.tests.test_filesystem import make_model_and_disk
 from subiquitycore.tests.parameterized import parameterized
 
@@ -40,10 +51,10 @@ def test__can_be_boot_device_disk(
 
         model.opt_supports_nvme_tcp_booting = supports_nvme_tcp_boot
 
-        p_on_remote_storage = mock.patch.object(
+        p_on_remote_storage = patch.object(
             disk, "on_remote_storage", return_value=on_remote_storage
         )
-        p_get_boot_device_plan = mock.patch(
+        p_get_boot_device_plan = patch(
             "subiquity.common.filesystem.boot.get_boot_device_plan", return_value=True
         )
 
@@ -54,3 +65,45 @@ def test__can_be_boot_device_disk(
             m_gbdp.assert_called_once_with(disk, resize_partition=None)
         else:
             m_gbdp.assert_not_called()
+
+
+class TestMakeBootDevicePlan(unittest.TestCase):
+    @unittest.skipUnless(
+        hasattr(attr.validators, "disabled"),
+        "this test requires attr.validators.disabled context manager",
+    )
+    def test_new_partition_count__single(self):
+        self.assertEqual(1, CreatePartPlan(Mock()).new_partition_count())
+        with attr.validators.disabled():
+            self.assertEqual(0, ResizePlan(Mock()).new_partition_count())
+        with attr.validators.disabled():
+            self.assertEqual(0, SlidePlan(Mock()).new_partition_count())
+        self.assertEqual(0, SetAttrPlan(Mock(), Mock(), Mock()).new_partition_count())
+        self.assertEqual(0, MountBootEfiPlan(Mock()).new_partition_count())
+        self.assertEqual(0, NoOpBootPlan().new_partition_count())
+
+    def test_new_partition_count__multi_step(self):
+        self.assertEqual(0, MultiStepPlan([]).new_partition_count())
+
+        self.assertEqual(
+            3,
+            MultiStepPlan(
+                [
+                    CreatePartPlan(Mock()),
+                    CreatePartPlan(Mock()),
+                    CreatePartPlan(Mock()),
+                ]
+            ).new_partition_count(),
+        )
+
+        self.assertEqual(
+            2,
+            MultiStepPlan(
+                [
+                    CreatePartPlan(Mock()),
+                    CreatePartPlan(Mock()),
+                    MountBootEfiPlan(Mock()),
+                    NoOpBootPlan(),
+                ]
+            ).new_partition_count(),
+        )

From 068c6929909e7ccc76fa80b65c4b64053fdc6bfa Mon Sep 17 00:00:00 2001
From: Olivier Gayot
Date: Wed, 17 Jul 2024 12:20:07 +0200
Subject: [PATCH 3/4] storage: move available_use_gap_scenarios to a function

Signed-off-by: Olivier Gayot
---
 subiquity/server/controllers/filesystem.py | 68 ++++++++++++----------
 1 file changed, 38 insertions(+), 30 deletions(-)

diff --git a/subiquity/server/controllers/filesystem.py b/subiquity/server/controllers/filesystem.py
index 2c5799218..1417eceb9 100644
--- a/subiquity/server/controllers/filesystem.py
+++ b/subiquity/server/controllers/filesystem.py
@@ -1146,6 +1146,43 @@ def get_classic_capabilities(self):
                 classic_capabilities.update(info.capability_info.allowed)
         return sorted(classic_capabilities)
 
+    def available_use_gap_scenarios(
+        self, install_min
+    ) -> list[tuple[int, GuidedStorageTargetUseGap]]:
+        scenarios: list[tuple[int, GuidedStorageTargetUseGap]] = []
+        for disk in self.potential_boot_disks(with_reformatting=False):
+            parts = [
+                p
+                for p in disk.partitions()
+                if p.flag != "bios_grub" and not p._is_in_use
+            ]
+            if len(parts) < 1:
+                # On an (essentially) empty disk, don't bother to offer it
+                # with UseGap, as it's basically the same as the Reformat
+                # case.
+                continue
+            gap = gaps.largest_gap(disk)
+            if gap is None:
+                # Do we return a reason here?
+                continue
+
+            capability_info = CapabilityInfo()
+            for variation in self._variation_info.values():
+                if variation.is_core_boot_classic():
+                    continue
+                capability_info.combine(
+                    variation.capability_info_for_gap(gap, install_min)
+                )
+            api_gap = labels.for_client(gap)
+            use_gap = GuidedStorageTargetUseGap(
+                disk_id=disk.id,
+                gap=api_gap,
+                allowed=capability_info.allowed,
+                disallowed=capability_info.disallowed,
+            )
+            scenarios.append((gap.size, use_gap))
+        return scenarios
+
     async def v2_guided_GET(self, wait: bool = False) -> GuidedStorageResponseV2:
         """Acquire a list of possible guided storage configuration scenarios.
         Results are sorted by the size of the space potentially available to
@@ -1181,36 +1218,7 @@ async def v2_guided_GET(self, wait: bool = False) -> GuidedStorageResponseV2:
             )
             scenarios.append((disk.size, reformat))
 
-        for disk in self.potential_boot_disks(with_reformatting=False):
-            parts = [
-                p
-                for p in disk.partitions()
-                if p.flag != "bios_grub" and not p._is_in_use
-            ]
-            if len(parts) < 1:
-                # On an (essentially) empty disk, don't bother to offer it
-                # with UseGap, as it's basically the same as the Reformat
-                # case.
-                continue
-            gap = gaps.largest_gap(disk)
-            if gap is None:
-                # Do we return a reason here?
-                continue
-            capability_info = CapabilityInfo()
-            for variation in self._variation_info.values():
-                if variation.is_core_boot_classic():
-                    continue
-                capability_info.combine(
-                    variation.capability_info_for_gap(gap, install_min)
-                )
-            api_gap = labels.for_client(gap)
-            use_gap = GuidedStorageTargetUseGap(
-                disk_id=disk.id,
-                gap=api_gap,
-                allowed=capability_info.allowed,
-                disallowed=capability_info.disallowed,
-            )
-            scenarios.append((gap.size, use_gap))
+        scenarios.extend(self.available_use_gap_scenarios(install_min))
 
         for disk in self.potential_boot_disks(check_boot=False):
             part_align = disk.alignment_data().part_align

From 4f012e02008eb033a8d4d20e948c34588ac41c23 Mon Sep 17 00:00:00 2001
From: Olivier Gayot
Date: Wed, 17 Jul 2024 12:20:38 +0200
Subject: [PATCH 4/4] storage: don't suggest use-gap if we can't fit more partitions

If a disk has already reached its limit of (primary) partitions,
suggesting a use-gap scenario can only lead to failures. In the same
vein, if a disk only has room for one more partition but we also need
to add an ESP, the scenario would also fail to apply.

We now try to prevent Subiquity from suggesting such bad use-gap
scenarios, by guessing the number of primary partitions needed and
comparing it with the limit. Although it isn't a perfect solution (see
below), this change should hopefully address the most common problems.

What makes it difficult to properly suggest scenarios is that we do not
have all the cards in hand with the current API. For instance, the user
can choose to add a recovery partition, which of course influences the
number of partitions required. Sadly, we don't have a way with the
current API to allow a scenario without a recovery partition while
rejecting the same scenario with a recovery partition. Capabilities can
presumably also influence the number of required partitions.

LP: #2073378
Signed-off-by: Olivier Gayot
---
 subiquity/server/controllers/filesystem.py        | 17 ++++
 .../controllers/tests/test_filesystem.py          | 60 +++++++++++++++++--
 2 files changed, 71 insertions(+), 6 deletions(-)

diff --git a/subiquity/server/controllers/filesystem.py b/subiquity/server/controllers/filesystem.py
index 1417eceb9..ed7923ee8 100644
--- a/subiquity/server/controllers/filesystem.py
+++ b/subiquity/server/controllers/filesystem.py
@@ -1166,6 +1166,23 @@ def available_use_gap_scenarios(
                 # Do we return a reason here?
                 continue
 
+            # Now we check if we have enough room for all the primary
+            # partitions. This isn't foolproof but should limit the number of
+            # UseGap scenarios that are suggested but can't be applied because
+            # we don't have enough room for partitions.
+            new_primary_parts = 0
+            if not gap.in_extended:
+                new_primary_parts += 1  # For the rootfs to create.
+            new_primary_parts += boot.get_boot_device_plan(disk).new_partition_count()
+            # In theory, there could be a recovery partition as well. Not sure
+            # how to account for it since we don't know yet if one will be
+            # requested.
+            if new_primary_parts > gaps.remaining_primary_partitions(
+                disk, disk.alignment_data()
+            ):
+                log.error("skipping UseGap: not enough room for primary partitions")
+                continue
+
             capability_info = CapabilityInfo()
             for variation in self._variation_info.values():
                 if variation.is_core_boot_classic():
diff --git a/subiquity/server/controllers/tests/test_filesystem.py b/subiquity/server/controllers/tests/test_filesystem.py
index 15654f30f..4ca8e816a 100644
--- a/subiquity/server/controllers/tests/test_filesystem.py
+++ b/subiquity/server/controllers/tests/test_filesystem.py
@@ -24,7 +24,7 @@
 from curtin.commands.extract import TrivialSourceHandler
 from jsonschema.validators import validator_for
 
-from subiquity.common.filesystem import gaps, labels
+from subiquity.common.filesystem import boot, gaps, labels
 from subiquity.common.filesystem.actions import DeviceAction
 from subiquity.common.types.storage import (
     AddPartitionV2,
@@ -1419,17 +1419,65 @@ async def test_in_use_reformat_and_gap(self, bootloader, ptable):
             GuidedStorageTargetReformat(
                 disk_id=self.disk.id, allowed=default_capabilities
             ),
-            GuidedStorageTargetUseGap(
-                disk_id=self.disk.id,
-                allowed=default_capabilities,
-                gap=labels.for_client(gaps.largest_gap(self.disk)),
-            ),
             GuidedStorageTargetManual(),
         ]
+        # VTOC does not have enough room for an ESP + 3 partitions, presumably.
+        if ptable != "vtoc" or bootloader == Bootloader.NONE:
+            expected.insert(
+                1,
+                GuidedStorageTargetUseGap(
+                    disk_id=self.disk.id,
+                    allowed=default_capabilities,
+                    gap=labels.for_client(gaps.largest_gap(self.disk)),
+                ),
+            )
         resp = await self.fsc.v2_guided_GET()
         self.assertEqual(expected, resp.targets)
         self.assertEqual(ProbeStatus.DONE, resp.status)
 
+    @parameterized.expand(
+        (
+            (1, 4, True, True),
+            (1, 4, False, True),
+            (4, 4, True, False),
+            (4, 4, False, False),
+            (3, 4, False, True),
+            (3, 4, True, False),
+            (127, 128, True, False),
+            (127, 128, False, True),
+        )
+    )
+    async def test_available_use_gap_scenarios(
+        self,
+        existing_primaries: int,
+        max_primaries: int,
+        create_boot_part: bool,
+        expected_scenario: bool,
+    ):
+        await self._setup(Bootloader.NONE, "gpt", fix_bios=True)
+        install_min = self.fsc.calculate_suggested_install_min()
+
+        for _ in range(existing_primaries):
+            make_partition(self.model, self.disk, preserve=True, size=4 << 20)
+
+        p_max_primaries = mock.patch.object(
+            self.fsc.model._partition_alignment_data["gpt"],
+            "primary_part_limit",
+            max_primaries,
+        )
+        if create_boot_part:
+            boot_plan = boot.CreatePartPlan(mock.Mock(), mock.Mock(), mock.Mock())
+        else:
+            boot_plan = boot.NoOpBootPlan()
+        p_boot_plan = mock.patch(
+            "subiquity.server.controllers.filesystem.boot.get_boot_device_plan",
+            return_value=boot_plan,
+        )
+        with p_max_primaries, p_boot_plan:
+            scenarios = self.fsc.available_use_gap_scenarios(install_min)
+
+        self.assertEqual(expected_scenario, scenarios != [])
+
 
 class TestManualBoot(IsolatedAsyncioTestCase):
     def _setup(self, bootloader, ptable, **kw):
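
For reference, the partition-count check added in PATCH 4/4 can be summarized with the
standalone sketch below. It is illustrative only: use_gap_fits() and its parameters are
hypothetical stand-ins for the model, gap and boot-plan objects that the real code in
available_use_gap_scenarios() consults.

# Illustrative sketch only -- hypothetical names, not the subiquity API.
def use_gap_fits(
    gap_in_extended: bool,    # is the candidate gap inside an extended partition?
    boot_plan_parts: int,     # partitions the boot plan would create (e.g. an ESP)
    existing_primaries: int,  # primary partitions already present on the disk
    primary_part_limit: int,  # limit imposed by the partition table
) -> bool:
    """Return True if a use-gap scenario is worth suggesting."""
    new_primary_parts = 0
    if not gap_in_extended:
        new_primary_parts += 1  # the rootfs partition to be created
    new_primary_parts += boot_plan_parts
    return new_primary_parts <= primary_part_limit - existing_primaries

# Mirrors two rows of the parameterized test: 3 of 4 primaries already used.
assert use_gap_fits(False, 0, 3, 4) is True   # the rootfs still fits
assert use_gap_fits(False, 1, 3, 4) is False  # rootfs + boot partition no longer fit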