From 909459e4caa1a34b85d11b7c2f6015868f4ae5ad Mon Sep 17 00:00:00 2001
From: Meni Yakove <441263+myakove@users.noreply.github.com>
Date: Sat, 3 Aug 2024 17:49:04 +0300
Subject: [PATCH] Update resource using generator script 2 (#1999)

* Update kubevirt, machine resources. Fix class generator parsing
* Update kubevirt, machine resources. Fix class generator parsing
* Fix parser
* Fix parser
* Add TODO
* Address comments
* Support creating a resource without spec or fields; add a test for it
* Move to its own directory under the root of the project
* When run with --add-tests, run the tests
* When run with --add-tests, run the tests
* Address comments
* Address comments
---
 .pre-commit-config.yaml                       |   3 +-
 .../resource => class_generator}/README.md    |  14 +-
 .../resource => class_generator}/__init__.py  |   0
 .../class_generator.py                        | 167 +++----
 .../resource => class_generator}/debug/.keep  |   0
 .../manifests/class_generator_template.j2     |   6 +
 .../api_server/api_server_debug.json          |   0
 .../manifests/api_server/api_server_res.py    |   0
 .../cluster_operator_debug.json               |   4 +
 .../cluster_operator/cluster_operator_res.py  |  22 +
 .../config_map/config_map_debug.json          |   0
 .../manifests/config_map/config_map_res.py    |   0
 .../deployment/deployment_debug.json          |   0
 .../manifests/deployment/deployment_res.py    |   0
 .../tests/manifests/pod/pod_debug.json        |   0
 .../tests/manifests/pod/pod_res.py            |   0
 .../tests/manifests/secret/secret_debug.json  |   0
 .../tests/manifests/secret/secret_res.py      |   0
 .../tests/manifests/test_parse_explain.j2     |   2 +-
 .../tests/test_camelcase_to_snake.py          |   2 +-
 .../tests/test_class_generator.py             |  17 +-
 ocp_resources/kubevirt.py                     | 379 +++++++++++++++++-
 ocp_resources/machine.py                      | 179 ++++++++-
 pyproject.toml                                |  11 +-
 tests/pytest.ini                              |   4 -
 25 files changed, 662 insertions(+), 148 deletions(-)
 rename {scripts/resource => class_generator}/README.md (76%)
 rename {scripts/resource => class_generator}/__init__.py (100%)
 rename {scripts/resource => class_generator}/class_generator.py (82%)
 rename {scripts/resource => class_generator}/debug/.keep (100%)
 rename {scripts/resource => class_generator}/manifests/class_generator_template.j2 (94%)
 rename {scripts/resource => class_generator}/tests/manifests/api_server/api_server_debug.json (100%)
 rename {scripts/resource => class_generator}/tests/manifests/api_server/api_server_res.py (100%)
 create mode 100644 class_generator/tests/manifests/cluster_operator/cluster_operator_debug.json
 create mode 100644 class_generator/tests/manifests/cluster_operator/cluster_operator_res.py
 rename {scripts/resource => class_generator}/tests/manifests/config_map/config_map_debug.json (100%)
 rename {scripts/resource => class_generator}/tests/manifests/config_map/config_map_res.py (100%)
 rename {scripts/resource => class_generator}/tests/manifests/deployment/deployment_debug.json (100%)
 rename {scripts/resource => class_generator}/tests/manifests/deployment/deployment_res.py (100%)
 rename {scripts/resource => class_generator}/tests/manifests/pod/pod_debug.json (100%)
 rename {scripts/resource => class_generator}/tests/manifests/pod/pod_res.py (100%)
 rename {scripts/resource => class_generator}/tests/manifests/secret/secret_debug.json (100%)
 rename {scripts/resource => class_generator}/tests/manifests/secret/secret_res.py (100%)
 rename {scripts/resource => class_generator}/tests/manifests/test_parse_explain.j2 (91%)
 rename {scripts/resource => class_generator}/tests/test_camelcase_to_snake.py (96%)
 rename {scripts/resource => class_generator}/tests/test_class_generator.py (82%)
 delete mode 100644 tests/pytest.ini
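The move to a top-level `class_generator` package comes with a Poetry console script (see the pyproject.toml hunk at the end of this patch), so the tool is now invoked as `class-generator` rather than `poetry run python scripts/resource/class_generator.py`. The "--add-tests, run the tests" bullets above boil down to a single call added at the end of `main()` in the class_generator.py diff below; a minimal sketch of that behaviour, using an illustrative wrapper name that is not part of the patch:

```python
# Sketch only: mirrors the `pytest.main(["-k", "test_class_generator"])` call added in main().
# `run_generator_tests` is an illustrative helper name, not part of the patch.
import pytest


def run_generator_tests() -> int:
    # Select only the class-generator tests by keyword and return pytest's exit code.
    return int(pytest.main(["-k", "test_class_generator"]))
```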
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f20f9a3ec8..2753d95fde 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -30,7 +30,8 @@ repos:
     rev: v1.5.0
     hooks:
       - id: detect-secrets
-        args: [--exclude-files=scripts/resource/tests/manifests/pod/pod_debug.json]
+        args:
+          [--exclude-files=class_generator/tests/manifests/pod/pod_debug.json]
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.5.5
diff --git a/scripts/resource/README.md b/class_generator/README.md
similarity index 76%
rename from scripts/resource/README.md
rename to class_generator/README.md
index b2a79e32a0..7a05ea1c8a 100644
--- a/scripts/resource/README.md
+++ b/class_generator/README.md
@@ -14,12 +14,18 @@ poetry install
 ```
 
+For shell completion, add this to ~/.bashrc or ~/.zshrc:
+
+```bash
+if type class-generator > /dev/null; then eval "$(_CLASS_GENERATOR_COMPLETE=zsh_source class-generator)"; fi
+```
+
 ###### Call the script
 
 - Running in normal mode with `--kind` flags:
 
 ```bash
-poetry run python scripts/resource/class_generator.py --kind
+class-generator --kind
 ```
 
@@ -30,7 +36,7 @@ poetry run python scripts/resource/class_generator.py --kind
 Run in interactive mode:
 
 ```bash
-poetry run python scripts/resource/class_generator.py --interactive
+class-generator --interactive
 ```
 
 #### Adding tests
@@ -39,7 +45,7 @@ poetry run python scripts/resource/class_generator.py --interactive
 - Replace `Pod` with the kind you want to add to the tests
 
 ```bash
-poetry run python scripts/resource/class_generator.py --kind Pod --add-tests
+class-generator --kind Pod --add-tests
 ```
 
 ## Reporting an issue
@@ -47,7 +53,7 @@ poetry run python scripts/resource/class_generator.py --kind Pod --add-tests
 - Running with debug mode and `--debug` flag:
 
 ```bash
-poetry run python scripts/resource/class_generator.py --kind --debug
+class-generator --kind --debug
 ```
 
 `-debug.json` will be located under `scripts/resource/debug`
diff --git a/scripts/resource/__init__.py b/class_generator/__init__.py
similarity index 100%
rename from scripts/resource/__init__.py
rename to class_generator/__init__.py
diff --git a/scripts/resource/class_generator.py b/class_generator/class_generator.py
similarity index 82%
rename from scripts/resource/class_generator.py
rename to class_generator/class_generator.py
index ef83bc208b..8ef0c8d0ff 100644
--- a/scripts/resource/class_generator.py
+++ b/class_generator/class_generator.py
@@ -5,13 +5,14 @@
 import sys
 from pathlib import Path
 
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional
 
 import click
 import re
 import cloup
 from cloup.constraints import If, accept_none, mutually_exclusive, require_any
 from pyhelper_utils.shell import run_command
+import pytest
 from rich.console import Console
 from rich.prompt import Prompt
 
@@ -34,7 +35,7 @@
     "": "bool",
 }
 LOGGER = get_logger(name="class_generator")
-TESTS_MANIFESTS_DIR = "scripts/resource/tests/manifests"
+TESTS_MANIFESTS_DIR = "class_generator/tests/manifests"
 
 
 def get_oc_or_kubectl() -> str:
@@ -343,7 +344,7 @@ def generate_resource_file_from_dict(
 ) -> str:
     rendered = render_jinja_template(
         template_dict=resource_dict,
-        template_dir="scripts/resource/manifests",
+        template_dir="class_generator/manifests",
         template_name="class_generator_template.j2",
     )
 
@@ -392,69 +393,58 @@ def parse_explain(
     debug_content: Optional[Dict[str, str]] = None,
     add_tests: bool = False,
 ) -> Dict[str, Any]:
-    section_data: str = ""
     sections: List[str] = []
     resource_dict: Dict[str, Any]
= { "BASE_CLASS": "NamespacedResource" if namespaced else "Resource", } - new_sections_words: Tuple[str, str, str] = ("KIND:", "VERSION:", "GROUP:") - for line in output.splitlines(): - # If line is empty section is done - if not line.strip(): - if section_data: - sections.append(section_data) - section_data = "" + raw_resource_dict: Dict[str, str] = {} - continue + # Get all sections from output, section is [A-Z]: for example `KIND:` + sections = re.findall(r"([A-Z]+):.*", output) - section_data += f"{line}\n" - if line.startswith(new_sections_words): - if section_data: - sections.append(section_data) - section_data = "" - continue + # Get all sections indexes to be able to get needed test from output by indexes later + sections_indexes = [output.index(section) for section in sections] - # Last section data from last iteration - if section_data: - sections.append(section_data) + for idx, section_idx in enumerate(sections_indexes): + _section_name = sections[idx].strip(":") - start_fields_section: str = "" + # Get the end index of the section name, add +1 since we strip the `:` + _end_of_section_name_idx = section_idx + len(_section_name) + 1 - for section in sections: - if section.startswith(f"{FIELDS_STR}:"): - start_fields_section = section - continue + try: + # If we have next section we get the string from output till the next section + raw_resource_dict[_section_name] = output[_end_of_section_name_idx : output.index(sections[idx + 1])] + except IndexError: + # If this is the last section get the rest of output + raw_resource_dict[_section_name] = output[_end_of_section_name_idx:] - key, val = section.split(":", 1) - resource_dict[key.strip()] = val.strip() + resource_dict["KIND"] = raw_resource_dict["KIND"].strip() + resource_dict["DESCRIPTION"] = raw_resource_dict["DESCRIPTION"].strip() + resource_dict["GROUP"] = raw_resource_dict.get("GROUP", "").strip() + resource_dict["VERSION"] = raw_resource_dict.get("VERSION", "").strip() kind = resource_dict["KIND"] - keys_to_ignore = ["metadata", "kind", "apiVersion", "status"] + keys_to_ignore = ["metadata", "kind", "apiVersion", "status", SPEC_STR.lower()] resource_dict[SPEC_STR] = [] resource_dict[FIELDS_STR] = [] - first_field_indent: int = 0 - first_field_indent_str: str = "" - top_spec_indent: int = 0 - top_spec_indent_str: str = "" - first_field_spec_found: bool = False - field_spec_found: bool = False - - for field in start_fields_section.splitlines(): - if field.startswith(f"{FIELDS_STR}:"): - continue - start_spec_field = field.startswith(f"{first_field_indent_str}{SPEC_STR.lower()}") - ignored_field = field.split()[0] in keys_to_ignore - # Find first indent of spec, Needed in order to now when spec is done. 
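(The removed state-machine code continues below.) The replacement added above slices the `oc explain` output by its top-level section headers instead of tracking section state line by line. A standalone sketch of that slicing idea — simplified, with a hypothetical helper name, not the exact code from this patch:

```python
import re
from typing import Dict


def split_explain_sections(output: str) -> Dict[str, str]:
    # Top-level headers in `oc explain` output are unindented, e.g. `KIND:`, `VERSION:`, `FIELDS:`.
    names = re.findall(r"^([A-Z]+):", output, re.MULTILINE)
    starts = [output.index(f"{name}:") for name in names]
    sections: Dict[str, str] = {}
    for idx, start in enumerate(starts):
        body_start = start + len(names[idx]) + 1  # skip the header name and the trailing `:`
        end = starts[idx + 1] if idx + 1 < len(starts) else len(output)
        sections[names[idx]] = output[body_start:end].strip()
    return sections
```

With the sections in hand, `KIND`, `VERSION`, `GROUP` and `DESCRIPTION` can be read directly, and only the `FIELDS` block needs further parsing.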
- if not first_field_indent: - first_field_indent = len(re.findall(r" +", field)[0]) - first_field_indent_str = f"{' ' * first_field_indent}" - if not ignored_field and not start_spec_field: - resource_dict[FIELDS_STR].append( + # Get all spec fields till spec indent is done, section indent is 2 empty spaces + # ``` + # spec + # allocateLoadBalancerNodePorts + # type + # status + # ``` + if _spec_fields := re.findall(rf" {SPEC_STR.lower()}.*(?=\n [a-z])", raw_resource_dict[FIELDS_STR], re.DOTALL): + for field in [_field for _field in _spec_fields[0].splitlines() if _field]: + # If line is indented 4 spaces we know that this is a field under spec + if len(re.findall(r" +", field)[0]) == 4: + resource_dict[SPEC_STR].append( get_arg_params( - field=field, + field=field.strip(), kind=kind, + field_under_spec=True, debug=debug, debug_content=debug_content, output_debug_file_path=output_debug_file_path, @@ -462,69 +452,23 @@ def parse_explain( ) ) - continue - else: - if len(re.findall(r" +", field)[0]) == len(first_field_indent_str): - if not ignored_field and not start_spec_field: - resource_dict[FIELDS_STR].append( - get_arg_params( - field=field, - kind=kind, - debug=debug, - debug_content=debug_content, - output_debug_file_path=output_debug_file_path, - add_tests=add_tests, - ) - ) - - if start_spec_field: - first_field_spec_found = True - field_spec_found = True - continue + if _fields := re.findall(r" .*", raw_resource_dict[FIELDS_STR], re.DOTALL): + for line in [_line for _line in _fields[0].splitlines() if _line]: + if line.split()[0] in keys_to_ignore: + continue - if field_spec_found: - if not re.findall(rf"^{first_field_indent_str}\w", field): - if first_field_spec_found: - resource_dict[SPEC_STR].append( - get_arg_params( - field=field, - kind=kind, - field_under_spec=True, - debug=debug, - debug_content=debug_content, - output_debug_file_path=output_debug_file_path, - add_tests=add_tests, - ) + # Process only top level fields with 2 spaces indent + if len(re.findall(r" +", line)[0]) == 2: + resource_dict[FIELDS_STR].append( + get_arg_params( + field=line, + kind=kind, + debug=debug, + debug_content=debug_content, + output_debug_file_path=output_debug_file_path, + add_tests=add_tests, ) - - # Get top level keys inside spec indent, need to match only once. 
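The removed lines above and below tracked spec indentation manually across iterations; the new parser added earlier in this hunk instead relies on a fixed indentation convention in the recursive `FIELDS` output: two spaces for a top-level field, four spaces for a field directly under `spec`. A simplified sketch of that convention, again with a hypothetical helper name rather than the patch's exact regex-based code:

```python
from typing import List, Tuple


def split_fields_by_indent(fields_section: str) -> Tuple[List[str], List[str]]:
    # Returns (top_level_fields, fields_directly_under_spec), both as stripped lines.
    top_level: List[str] = []
    under_spec: List[str] = []
    in_spec = False
    for line in fields_section.splitlines():
        if not line.strip():
            continue
        indent = len(line) - len(line.lstrip(" "))
        if indent == 2:  # top-level field line, e.g. "  spec <Object>"
            in_spec = line.split()[0] == "spec"
            if not in_spec:
                top_level.append(line.strip())
        elif indent == 4 and in_spec:  # direct child of spec
            under_spec.append(line.strip())
    return top_level, under_spec
```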
- top_spec_indent = len(re.findall(r" +", field)[0]) - top_spec_indent_str = f"{' ' * top_spec_indent}" - first_field_spec_found = False - continue - - if top_spec_indent_str: - # Get only top level keys from inside spec - if re.findall(rf"^{top_spec_indent_str}\w", field): - resource_dict[SPEC_STR].append( - get_arg_params( - field=field, - kind=kind, - field_under_spec=True, - debug=debug, - debug_content=debug_content, - output_debug_file_path=output_debug_file_path, - add_tests=add_tests, - ) - ) - continue - - else: - break - - if not resource_dict[SPEC_STR] and not resource_dict[FIELDS_STR]: - LOGGER.error(f"Unable to parse {kind} resource.") - return {} + ) api_group_real_name = resource_dict.get("GROUP") # If API Group is not present in resource, try to get it from VERSION @@ -673,8 +617,12 @@ def write_and_format_rendered(filepath: str, data: str) -> None: def generate_class_generator_tests() -> None: tests_info: Dict[str, List[Dict[str, str]]] = {"template": []} + dirs_to_ignore: List[str] = ["__pycache__"] for _dir in os.listdir(TESTS_MANIFESTS_DIR): + if _dir in dirs_to_ignore: + continue + dir_path = os.path.join(TESTS_MANIFESTS_DIR, _dir) if os.path.isdir(dir_path): test_data = {"kind": _dir} @@ -769,6 +717,7 @@ def main( if add_tests: generate_class_generator_tests() + pytest.main(["-k", "test_class_generator"]) if __name__ == "__main__": diff --git a/scripts/resource/debug/.keep b/class_generator/debug/.keep similarity index 100% rename from scripts/resource/debug/.keep rename to class_generator/debug/.keep diff --git a/scripts/resource/manifests/class_generator_template.j2 b/class_generator/manifests/class_generator_template.j2 similarity index 94% rename from scripts/resource/manifests/class_generator_template.j2 rename to class_generator/manifests/class_generator_template.j2 index 1450690664..0649872d0b 100644 --- a/scripts/resource/manifests/class_generator_template.j2 +++ b/class_generator/manifests/class_generator_template.j2 @@ -25,20 +25,25 @@ class {{ KIND }}({{ BASE_CLASS }}): def __init__( self, + {% if all_types_for_class_args %} {{ all_types_for_class_args|join(",\n ") }}, + {% endif %} **kwargs: Any, ) -> None: + {% if all_types_for_class_args %} """ Args: {% for value in all_names_types_for_docstring %} {{ value }}{% endfor %} """ + {% endif %} super().__init__(**kwargs) {% for arg in FIELDS + SPEC %} self.{{ arg["name-for-class-arg"] }} = {{ arg["name-for-class-arg"] }} {% endfor %} + {% if FIELDS or SPEC %} def to_dict(self) -> None: super().to_dict() @@ -74,3 +79,4 @@ class {{ KIND }}({{ BASE_CLASS }}): {% endif %} {% endfor %} + {% endif %} diff --git a/scripts/resource/tests/manifests/api_server/api_server_debug.json b/class_generator/tests/manifests/api_server/api_server_debug.json similarity index 100% rename from scripts/resource/tests/manifests/api_server/api_server_debug.json rename to class_generator/tests/manifests/api_server/api_server_debug.json diff --git a/scripts/resource/tests/manifests/api_server/api_server_res.py b/class_generator/tests/manifests/api_server/api_server_res.py similarity index 100% rename from scripts/resource/tests/manifests/api_server/api_server_res.py rename to class_generator/tests/manifests/api_server/api_server_res.py diff --git a/class_generator/tests/manifests/cluster_operator/cluster_operator_debug.json b/class_generator/tests/manifests/cluster_operator/cluster_operator_debug.json new file mode 100644 index 0000000000..b5014d1f51 --- /dev/null +++ 
b/class_generator/tests/manifests/cluster_operator/cluster_operator_debug.json @@ -0,0 +1,4 @@ +{ + "explain": "GROUP: config.openshift.io\nKIND: ClusterOperator\nVERSION: v1\n\nDESCRIPTION:\n ClusterOperator is the Custom Resource object which holds the current state\n of an operator. This object is used by operators to convey their state to\n the rest of the cluster. \n Compatibility level 1: Stable within a major release for a minimum of 12\n months or 3 minor releases (whichever is longer).\n \nFIELDS:\n apiVersion\t\n kind\t\n metadata\t\n annotations\t\n creationTimestamp\t\n deletionGracePeriodSeconds\t\n deletionTimestamp\t\n finalizers\t<[]string>\n generateName\t\n generation\t\n labels\t\n managedFields\t<[]ManagedFieldsEntry>\n apiVersion\t\n fieldsType\t\n fieldsV1\t\n manager\t\n operation\t\n subresource\t\n time\t\n name\t\n namespace\t\n ownerReferences\t<[]OwnerReference>\n apiVersion\t -required-\n blockOwnerDeletion\t\n controller\t\n kind\t -required-\n name\t -required-\n uid\t -required-\n resourceVersion\t\n selfLink\t\n uid\t\n spec\t -required-\n status\t\n conditions\t<[]Object>\n lastTransitionTime\t -required-\n message\t\n reason\t\n status\t -required-\n type\t -required-\n extension\t\n relatedObjects\t<[]Object>\n group\t -required-\n name\t -required-\n namespace\t\n resource\t -required-\n versions\t<[]Object>\n name\t -required-\n version\t -required-\n\n", + "namespace": "0\n" +} diff --git a/class_generator/tests/manifests/cluster_operator/cluster_operator_res.py b/class_generator/tests/manifests/cluster_operator/cluster_operator_res.py new file mode 100644 index 0000000000..10111f69cd --- /dev/null +++ b/class_generator/tests/manifests/cluster_operator/cluster_operator_res.py @@ -0,0 +1,22 @@ +# Generated using https://github.com/RedHatQE/openshift-python-wrapper/blob/main/scripts/resource/README.md + +from typing import Any +from ocp_resources.resource import Resource + + +class ClusterOperator(Resource): + """ + ClusterOperator is the Custom Resource object which holds the current state + of an operator. This object is used by operators to convey their state to + the rest of the cluster. + Compatibility level 1: Stable within a major release for a minimum of 12 + months or 3 minor releases (whichever is longer). 
+ """ + + api_group: str = Resource.ApiGroup.CONFIG_OPENSHIFT_IO + + def __init__( + self, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) diff --git a/scripts/resource/tests/manifests/config_map/config_map_debug.json b/class_generator/tests/manifests/config_map/config_map_debug.json similarity index 100% rename from scripts/resource/tests/manifests/config_map/config_map_debug.json rename to class_generator/tests/manifests/config_map/config_map_debug.json diff --git a/scripts/resource/tests/manifests/config_map/config_map_res.py b/class_generator/tests/manifests/config_map/config_map_res.py similarity index 100% rename from scripts/resource/tests/manifests/config_map/config_map_res.py rename to class_generator/tests/manifests/config_map/config_map_res.py diff --git a/scripts/resource/tests/manifests/deployment/deployment_debug.json b/class_generator/tests/manifests/deployment/deployment_debug.json similarity index 100% rename from scripts/resource/tests/manifests/deployment/deployment_debug.json rename to class_generator/tests/manifests/deployment/deployment_debug.json diff --git a/scripts/resource/tests/manifests/deployment/deployment_res.py b/class_generator/tests/manifests/deployment/deployment_res.py similarity index 100% rename from scripts/resource/tests/manifests/deployment/deployment_res.py rename to class_generator/tests/manifests/deployment/deployment_res.py diff --git a/scripts/resource/tests/manifests/pod/pod_debug.json b/class_generator/tests/manifests/pod/pod_debug.json similarity index 100% rename from scripts/resource/tests/manifests/pod/pod_debug.json rename to class_generator/tests/manifests/pod/pod_debug.json diff --git a/scripts/resource/tests/manifests/pod/pod_res.py b/class_generator/tests/manifests/pod/pod_res.py similarity index 100% rename from scripts/resource/tests/manifests/pod/pod_res.py rename to class_generator/tests/manifests/pod/pod_res.py diff --git a/scripts/resource/tests/manifests/secret/secret_debug.json b/class_generator/tests/manifests/secret/secret_debug.json similarity index 100% rename from scripts/resource/tests/manifests/secret/secret_debug.json rename to class_generator/tests/manifests/secret/secret_debug.json diff --git a/scripts/resource/tests/manifests/secret/secret_res.py b/class_generator/tests/manifests/secret/secret_res.py similarity index 100% rename from scripts/resource/tests/manifests/secret/secret_res.py rename to class_generator/tests/manifests/secret/secret_res.py diff --git a/scripts/resource/tests/manifests/test_parse_explain.j2 b/class_generator/tests/manifests/test_parse_explain.j2 similarity index 91% rename from scripts/resource/tests/manifests/test_parse_explain.j2 rename to class_generator/tests/manifests/test_parse_explain.j2 index 0bbb52fdce..82d344466c 100644 --- a/scripts/resource/tests/manifests/test_parse_explain.j2 +++ b/class_generator/tests/manifests/test_parse_explain.j2 @@ -5,7 +5,7 @@ import filecmp import pytest -from scripts.resource.class_generator import TESTS_MANIFESTS_DIR, class_generator +from class_generator.class_generator import TESTS_MANIFESTS_DIR, class_generator @pytest.mark.parametrize( diff --git a/scripts/resource/tests/test_camelcase_to_snake.py b/class_generator/tests/test_camelcase_to_snake.py similarity index 96% rename from scripts/resource/tests/test_camelcase_to_snake.py rename to class_generator/tests/test_camelcase_to_snake.py index ebd195536a..0b2802d845 100644 --- a/scripts/resource/tests/test_camelcase_to_snake.py +++ b/class_generator/tests/test_camelcase_to_snake.py @@ 
-1,6 +1,6 @@ import pytest -from scripts.resource.class_generator import ( +from class_generator.class_generator import ( convert_camel_case_to_snake_case, ) diff --git a/scripts/resource/tests/test_class_generator.py b/class_generator/tests/test_class_generator.py similarity index 82% rename from scripts/resource/tests/test_class_generator.py rename to class_generator/tests/test_class_generator.py index fe199bd103..2662e0e995 100644 --- a/scripts/resource/tests/test_class_generator.py +++ b/class_generator/tests/test_class_generator.py @@ -5,17 +5,12 @@ import pytest -from scripts.resource.class_generator import TESTS_MANIFESTS_DIR, class_generator +from class_generator.class_generator import TESTS_MANIFESTS_DIR, class_generator @pytest.mark.parametrize( "kind, debug_file, result_file", ( - ( - "secret", - os.path.join(TESTS_MANIFESTS_DIR, "secret", "secret_debug.json"), - os.path.join(TESTS_MANIFESTS_DIR, "secret", "secret_res.py"), - ), ( "api_server", os.path.join(TESTS_MANIFESTS_DIR, "api_server", "api_server_debug.json"), @@ -36,6 +31,16 @@ os.path.join(TESTS_MANIFESTS_DIR, "pod", "pod_debug.json"), os.path.join(TESTS_MANIFESTS_DIR, "pod", "pod_res.py"), ), + ( + "secret", + os.path.join(TESTS_MANIFESTS_DIR, "secret", "secret_debug.json"), + os.path.join(TESTS_MANIFESTS_DIR, "secret", "secret_res.py"), + ), + ( + "cluster_operator", + os.path.join(TESTS_MANIFESTS_DIR, "cluster_operator", "cluster_operator_debug.json"), + os.path.join(TESTS_MANIFESTS_DIR, "cluster_operator", "cluster_operator_res.py"), + ), ), ) def test_parse_explain(tmpdir_factory, kind, debug_file, result_file): diff --git a/ocp_resources/kubevirt.py b/ocp_resources/kubevirt.py index c56934b446..a331fbea42 100644 --- a/ocp_resources/kubevirt.py +++ b/ocp_resources/kubevirt.py @@ -1,5 +1,382 @@ +# Generated using https://github.com/RedHatQE/openshift-python-wrapper/blob/main/scripts/resource/README.md + +from typing import Any, Dict, List, Optional from ocp_resources.resource import NamespacedResource class KubeVirt(NamespacedResource): - api_group = NamespacedResource.ApiGroup.KUBEVIRT_IO + """ + KubeVirt represents the object deploying all KubeVirt resources + """ + + api_group: str = NamespacedResource.ApiGroup.KUBEVIRT_IO + + def __init__( + self, + certificate_rotate_strategy: Optional[Dict[str, Any]] = None, + configuration: Optional[Dict[str, Any]] = None, + customize_components: Optional[Dict[str, Any]] = None, + image_pull_policy: Optional[str] = "", + image_pull_secrets: Optional[List[Any]] = None, + image_registry: Optional[str] = "", + image_tag: Optional[str] = "", + infra: Optional[Dict[str, Any]] = None, + monitor_account: Optional[str] = "", + monitor_namespace: Optional[str] = "", + product_component: Optional[str] = "", + product_name: Optional[str] = "", + product_version: Optional[str] = "", + service_monitor_namespace: Optional[str] = "", + uninstall_strategy: Optional[str] = "", + workload_update_strategy: Optional[Dict[str, Any]] = None, + workloads: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """ + Args: + certificate_rotate_strategy(Dict[Any, Any]): + FIELDS: + selfSigned + + + configuration(Dict[Any, Any]): holds kubevirt configurations. + same as the virt-configMap + + FIELDS: + additionalGuestMemoryOverheadRatio + AdditionalGuestMemoryOverheadRatio can be used to increase the + virtualization infrastructure + overhead. This is useful, since the calculation of this overhead is not + accurate and cannot + be entirely known in advance. 
The ratio that is being set determines by + which factor to increase + the overhead calculated by Kubevirt. A higher ratio means that the VMs would + be less compromised + by node pressures, but would mean that fewer VMs could be scheduled to a + node. + If not set, the default is 1. + + apiConfiguration + ReloadableComponentConfiguration holds all generic k8s configuration options + which can + be reloaded by components without requiring a restart. + + architectureConfiguration + + + autoCPULimitNamespaceLabelSelector + When set, AutoCPULimitNamespaceLabelSelector will set a CPU limit on + virt-launcher for VMIs running inside + namespaces that match the label selector. + The CPU limit will equal the number of requested vCPUs. + This setting does not apply to VMIs with dedicated CPUs. + + controllerConfiguration + ReloadableComponentConfiguration holds all generic k8s configuration options + which can + be reloaded by components without requiring a restart. + + cpuModel + + + cpuRequest + + + defaultRuntimeClass + + + developerConfiguration + DeveloperConfiguration holds developer options + + emulatedMachines <[]string> + Deprecated. Use architectureConfiguration instead. + + evictionStrategy + EvictionStrategy defines at the cluster level if the VirtualMachineInstance + should be + migrated instead of shut-off in case of a node drain. If the + VirtualMachineInstance specific + field is set it overrides the cluster level one. + + handlerConfiguration + ReloadableComponentConfiguration holds all generic k8s configuration options + which can + be reloaded by components without requiring a restart. + + imagePullPolicy + PullPolicy describes a policy for if/when to pull a container image + + ksmConfiguration + KSMConfiguration holds the information regarding the enabling the KSM in the + nodes (if available). + + liveUpdateConfiguration + LiveUpdateConfiguration holds defaults for live update features + + machineType + Deprecated. Use architectureConfiguration instead. + + mediatedDevicesConfiguration + MediatedDevicesConfiguration holds information about MDEV types to be + defined, if available + + memBalloonStatsPeriod + + + migrations + MigrationConfiguration holds migration options. + Can be overridden for specific groups of VMs though migration policies. + Visit https://kubevirt.io/user-guide/operations/migration_policies/ for more + information. + + minCPUModel + + + network + NetworkConfiguration holds network options + + obsoleteCPUModels + + + ovmfPath + Deprecated. Use architectureConfiguration instead. + + permittedHostDevices + PermittedHostDevices holds information about devices allowed for passthrough + + seccompConfiguration + SeccompConfiguration holds Seccomp configuration for Kubevirt components + + selinuxLauncherType + + + smbios + + + supportContainerResources <[]Object> + SupportContainerResources specifies the resource requirements for various + types of supporting containers such as container disks/virtiofs/sidecars and + hotplug attachment pods. If omitted a sensible default will be supplied. + + supportedGuestAgentVersions <[]string> + deprecated + + tlsConfiguration + TLSConfiguration holds TLS options + + virtualMachineInstancesPerNode + + + virtualMachineOptions + VirtualMachineOptions holds the cluster level information regarding the + virtual machine. 
+ + vmRolloutStrategy + VMRolloutStrategy defines how changes to a VM object propagate to its VMI + + vmStateStorageClass + VMStateStorageClass is the name of the storage class to use for the PVCs + created to preserve VM state, like TPM. + The storage class must support RWX in filesystem mode. + + webhookConfiguration + ReloadableComponentConfiguration holds all generic k8s configuration options + which can + be reloaded by components without requiring a restart. + + customize_components(Dict[Any, Any]): + FIELDS: + flags + Configure the value used for deployment and daemonset resources + + patches <[]Object> + + + image_pull_policy(str): The ImagePullPolicy to use. + + image_pull_secrets(List[Any]): The imagePullSecrets to pull the container images from + Defaults to none + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + + FIELDS: + name + Name of the referent. + More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + + image_registry(str): The image registry to pull the container images from + Defaults to the same registry the operator's container image is pulled from. + + image_tag(str): The image tag to use for the continer images installed. + Defaults to the same tag as the operator's container image. + + infra(Dict[Any, Any]): selectors and tolerations that should apply to KubeVirt infrastructure + components + + FIELDS: + nodePlacement + nodePlacement describes scheduling configuration for specific + KubeVirt components + + replicas + replicas indicates how many replicas should be created for each KubeVirt + infrastructure + component (like virt-api or virt-controller). Defaults to 2. + WARNING: this is an advanced feature that prevents auto-scaling for core + kubevirt components. Please use with caution! + + monitor_account(str): The name of the Prometheus service account that needs read-access to + KubeVirt endpoints + Defaults to prometheus-k8s + + monitor_namespace(str): The namespace Prometheus is deployed in + Defaults to openshift-monitor + + product_component(str): Designate the apps.kubevirt.io/component label for KubeVirt components. + Useful if KubeVirt is included as part of a product. + If ProductComponent is not specified, the component label default value is + kubevirt. + + product_name(str): Designate the apps.kubevirt.io/part-of label for KubeVirt components. + Useful if KubeVirt is included as part of a product. + If ProductName is not specified, the part-of label will be omitted. + + product_version(str): Designate the apps.kubevirt.io/version label for KubeVirt components. + Useful if KubeVirt is included as part of a product. + If ProductVersion is not specified, KubeVirt's version will be used. + + service_monitor_namespace(str): The namespace the service monitor will be deployed + When ServiceMonitorNamespace is set, then we'll install the service monitor + object in that namespace + otherwise we will use the monitoring namespace. + + uninstall_strategy(str): Specifies if kubevirt can be deleted if workloads are still present. 
+ This is mainly a precaution to avoid accidental data loss + + workload_update_strategy(Dict[Any, Any]): WorkloadUpdateStrategy defines at the cluster level how to handle + automated workload updates + + FIELDS: + batchEvictionInterval + BatchEvictionInterval Represents the interval to wait before issuing the + next + batch of shutdowns + + + Defaults to 1 minute + + batchEvictionSize + BatchEvictionSize Represents the number of VMIs that can be forced updated + per + the BatchShutdownInteral interval + + + Defaults to 10 + + workloadUpdateMethods <[]string> + WorkloadUpdateMethods defines the methods that can be used to disrupt + workloads + during automated workload updates. + When multiple methods are present, the least disruptive method takes + precedence over more disruptive methods. For example if both LiveMigrate and + Shutdown + methods are listed, only VMs which are not live migratable will be + restarted/shutdown + + + An empty list defaults to no automated workload updating + + workloads(Dict[Any, Any]): selectors and tolerations that should apply to KubeVirt workloads + + FIELDS: + nodePlacement + nodePlacement describes scheduling configuration for specific + KubeVirt components + + replicas + replicas indicates how many replicas should be created for each KubeVirt + infrastructure + component (like virt-api or virt-controller). Defaults to 2. + WARNING: this is an advanced feature that prevents auto-scaling for core + kubevirt components. Please use with caution! + + """ + super().__init__(**kwargs) + + self.certificate_rotate_strategy = certificate_rotate_strategy + self.configuration = configuration + self.customize_components = customize_components + self.image_pull_policy = image_pull_policy + self.image_pull_secrets = image_pull_secrets + self.image_registry = image_registry + self.image_tag = image_tag + self.infra = infra + self.monitor_account = monitor_account + self.monitor_namespace = monitor_namespace + self.product_component = product_component + self.product_name = product_name + self.product_version = product_version + self.service_monitor_namespace = service_monitor_namespace + self.uninstall_strategy = uninstall_strategy + self.workload_update_strategy = workload_update_strategy + self.workloads = workloads + + def to_dict(self) -> None: + super().to_dict() + + if not self.yaml_file: + self.res["spec"] = {} + _spec = self.res["spec"] + + if self.certificate_rotate_strategy: + _spec["certificateRotateStrategy"] = self.certificate_rotate_strategy + + if self.configuration: + _spec["configuration"] = self.configuration + + if self.customize_components: + _spec["customizeComponents"] = self.customize_components + + if self.image_pull_policy: + _spec["imagePullPolicy"] = self.image_pull_policy + + if self.image_pull_secrets: + _spec["imagePullSecrets"] = self.image_pull_secrets + + if self.image_registry: + _spec["imageRegistry"] = self.image_registry + + if self.image_tag: + _spec["imageTag"] = self.image_tag + + if self.infra: + _spec["infra"] = self.infra + + if self.monitor_account: + _spec["monitorAccount"] = self.monitor_account + + if self.monitor_namespace: + _spec["monitorNamespace"] = self.monitor_namespace + + if self.product_component: + _spec["productComponent"] = self.product_component + + if self.product_name: + _spec["productName"] = self.product_name + + if self.product_version: + _spec["productVersion"] = self.product_version + + if self.service_monitor_namespace: + _spec["serviceMonitorNamespace"] = self.service_monitor_namespace + + if 
self.uninstall_strategy: + _spec["uninstallStrategy"] = self.uninstall_strategy + + if self.workload_update_strategy: + _spec["workloadUpdateStrategy"] = self.workload_update_strategy + + if self.workloads: + _spec["workloads"] = self.workloads diff --git a/ocp_resources/machine.py b/ocp_resources/machine.py index d37c504ec8..9b2b97d306 100644 --- a/ocp_resources/machine.py +++ b/ocp_resources/machine.py @@ -1,33 +1,172 @@ -from ocp_resources.constants import TIMEOUT_4MINUTES +# Generated using https://github.com/RedHatQE/openshift-python-wrapper/blob/main/scripts/resource/README.md + +from typing import Any, Dict, List, Optional from ocp_resources.resource import NamespacedResource class Machine(NamespacedResource): """ - Machine object. + Machine is the Schema for the machines API Compatibility level 2: Stable + within a major release for a minimum of 9 months or 3 minor releases + (whichever is longer). """ - api_group = NamespacedResource.ApiGroup.MACHINE_OPENSHIFT_IO + api_group: str = NamespacedResource.ApiGroup.MACHINE_OPENSHIFT_IO def __init__( self, - name=None, - namespace=None, - teardown=True, - client=None, - yaml_file=None, - delete_timeout=TIMEOUT_4MINUTES, - **kwargs, - ): - super().__init__( - name=name, - namespace=namespace, - client=client, - teardown=teardown, - yaml_file=yaml_file, - delete_timeout=delete_timeout, - **kwargs, - ) + lifecycle_hooks: Optional[Dict[str, Any]] = None, + metadata: Optional[Dict[str, Any]] = None, + provider_id: Optional[str] = "", + provider_spec: Optional[Dict[str, Any]] = None, + taints: Optional[List[Any]] = None, + **kwargs: Any, + ) -> None: + """ + Args: + lifecycle_hooks(Dict[Any, Any]): LifecycleHooks allow users to pause operations on the machine at certain + predefined points within the machine lifecycle. + + FIELDS: + preDrain <[]Object> + PreDrain hooks prevent the machine from being drained. This also blocks + further lifecycle events, such as termination. + + preTerminate <[]Object> + PreTerminate hooks prevent the machine from being terminated. PreTerminate + hooks be actioned after the Machine has been drained. + + metadata(Dict[Any, Any]): ObjectMeta will autopopulate the Node created. Use this to indicate what + labels, annotations, name prefix, etc., should be used when creating the + Node. + + FIELDS: + annotations + Annotations is an unstructured key value map stored with a resource that may + be set by external tools to store and retrieve arbitrary metadata. They are + not queryable and should be preserved when modifying objects. More info: + http://kubernetes.io/docs/user-guide/annotations + + generateName + GenerateName is an optional prefix, used by the server, to generate a unique + name ONLY IF the Name field has not been provided. If this field is used, + the name returned to the client will be different than the name passed. This + value will also be combined with a unique suffix. The provided value has the + same validation rules as the Name field, and may be truncated by the length + of the suffix required to make the value unique on the server. + If this field is specified and the generated name exists, the server will + NOT return a 409 - instead, it will either return 201 Created or 500 with + Reason ServerTimeout indicating a unique name could not be found in the time + allotted, and the client should retry (optionally after the time indicated + in the Retry-After header). + Applied only if Name is not specified. 
More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + + labels + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. More info: http://kubernetes.io/docs/user-guide/labels + + name + Name must be unique within a namespace. Is required when creating resources, + although some resources may allow a client to request the generation of an + appropriate name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/identifiers#names + + namespace + Namespace defines the space within each name must be unique. An empty + namespace is equivalent to the "default" namespace, but "default" is the + canonical representation. Not all objects are required to be scoped to a + namespace - the value of this field for those objects will be empty. + Must be a DNS_LABEL. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/namespaces + + ownerReferences <[]Object> + List of objects depended by this object. If ALL objects in the list have + been deleted, this object will be garbage collected. If this object is + managed by a controller, then an entry in this list will point to this + controller, with the controller field set to true. There cannot be more than + one managing controller. + + provider_id(str): ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object + corresponding to this machine. This field is required by higher level + consumers of cluster-api. Example use case is cluster autoscaler with + cluster-api as provider. Clean-up logic in the autoscaler compares machines + to nodes to find out machines at provider which could not get registered as + Kubernetes nodes. With cluster-api as a generic out-of-tree provider for + autoscaler, this field is required by autoscaler to be able to have a + provider view of the list of machines. Another list of nodes is queried from + the k8s apiserver and then a comparison is done to find out unregistered + machines and are marked for delete. This field will be set by the actuators + and consumed by higher level entities like autoscaler that will be + interfacing with cluster-api as generic provider. + + provider_spec(Dict[Any, Any]): ProviderSpec details Provider-specific configuration to use during node + creation. + + FIELDS: + value + Value is an inlined, serialized representation of the resource + configuration. It is recommended that providers maintain their own versioned + API types that should be serialized/deserialized from this field, akin to + component config. + + taints(List[Any]): The list of the taints to be applied to the corresponding Node in additive + manner. This list will not overwrite any other taints added to the Node on + an ongoing basis by other entities. These taints should be actively + reconciled e.g. if you ask the machine controller to apply a taint and then + manually remove the taint the machine controller will put it back) but not + have the machine controller remove any taints + The node this Taint is attached to has the "effect" on any pod that does not + tolerate the Taint. + + FIELDS: + effect -required- + Required. The effect of the taint on pods that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. 
+ + key -required- + Required. The taint key to be applied to a node. + + timeAdded + TimeAdded represents the time at which the taint was added. It is only + written for NoExecute taints. + + value + The taint value corresponding to the taint key. + + """ + super().__init__(**kwargs) + + self.lifecycle_hooks = lifecycle_hooks + self.metadata = metadata + self.provider_id = provider_id + self.provider_spec = provider_spec + self.taints = taints + + def to_dict(self) -> None: + super().to_dict() + + if not self.yaml_file: + self.res["spec"] = {} + _spec = self.res["spec"] + + if self.lifecycle_hooks: + _spec["lifecycleHooks"] = self.lifecycle_hooks + + if self.metadata: + _spec["metadata"] = self.metadata + + if self.provider_id: + _spec["providerID"] = self.provider_id + + if self.provider_spec: + _spec["providerSpec"] = self.provider_spec + + if self.taints: + _spec["taints"] = self.taints @property def cluster_name(self): diff --git a/pyproject.toml b/pyproject.toml index 5903b34c1d..3cd86b2580 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,7 @@ description = "Wrapper around https://github.com/kubernetes-client/python" authors = ["Meni Yakove ", "Ruth Netser "] readme = "README.md" license = "Apache-2.0" -packages = [{ include = "ocp_resources" }] +packages = [{ include = "ocp_resources" }, { include = "class_generator" }] homepage = "https://github.com/RedHatQE/openshift-python-wrapper" documentation = "https://openshift-python-wrapper.readthedocs.io/en/latest/" keywords = ["Openshift", "Kubevirt", "Openshift Virtualization"] @@ -33,6 +33,9 @@ classifiers = [ "Operating System :: OS Independent", ] +[tool.poetry.scripts] +class-generator = "class_generator.class_generator:main" + [tool.poetry.urls] Download = "https://pypi.org/project/openshift-python-wrapper/" "Bug Tracker" = "https://github.com/RedHatQE/openshift-python-wrapper/issues" @@ -72,3 +75,9 @@ pyhelper-utils = "^0.0.31" ruff = "^0.5.3" rich = "^13.7.1" cloup = "^3.0.5" + +[tool.pytest.ini_options] +markers = [ + "incremental: Mark tests as incremental", + "kubevirt: Mark tests as kubevirt tests", +] diff --git a/tests/pytest.ini b/tests/pytest.ini deleted file mode 100644 index 8fb1c3e51a..0000000000 --- a/tests/pytest.ini +++ /dev/null @@ -1,4 +0,0 @@ -[pytest] -markers = - incremental: Mark tests as incremental - kubevirt: Mark tests as kubevirt tests
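To show what the regenerated wrappers look like from the caller's side, here is a minimal usage sketch for the new `KubeVirt` class. The resource name, namespace and spec values are illustrative only, and `deploy()` assumes a configured client and a cluster with the KubeVirt CRD installed:

```python
from ocp_resources.kubevirt import KubeVirt

# Illustrative values only; any argument left falsy is simply omitted from the rendered spec.
kv = KubeVirt(
    name="kubevirt",
    namespace="kubevirt",
    infra={"replicas": 2},
    workload_update_strategy={"workloadUpdateMethods": ["LiveMigrate"]},
)
kv.deploy()
```

Kinds with no configurable spec or fields — the new `ClusterOperator` test manifest is one — now generate a class that accepts only `**kwargs`, which is what the `{% if FIELDS or SPEC %}` and `{% if all_types_for_class_args %}` guards added to the Jinja template above make possible.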