diff --git a/.dockerignore b/.dockerignore index 658149c1..62f42bb6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,4 @@ examples/ .*/ -tests/ tmp/ Dockerfile* diff --git a/.github/workflows/prepare_releaser_configuration.py b/.github/workflows/prepare_releaser_configuration.py new file mode 100644 index 00000000..c892daab --- /dev/null +++ b/.github/workflows/prepare_releaser_configuration.py @@ -0,0 +1,46 @@ +""" +This script generates the releaser configuration file for the following `Run chart-releaser` step. +Releases are separated into two categories: beta and stable. +Beta releases are created from branches with name pattern -beta +Stable releases are created from branches with a valid version number (e.g. `1.0.0`). +""" +import os +import re +import sys +from pathlib import Path +import fileinput + +ROOT = Path.cwd() +BRANCH = os.environ["GITHUB_REF_NAME"] +SHA = os.environ["GITHUB_SHA"][:7] +VERSION = ROOT.joinpath("version.txt").read_text().strip().lstrip("v") +CHARTS = [ + ROOT / "charts" / "vastcsi" / "Chart.yaml", + ROOT / "charts" / "vastcosi" / "Chart.yaml", +] + +if __name__ == '__main__': + if not re.search('[0-9]+\.[0-9]+\.?[0-9]*', BRANCH): + sys.stderr.write( + f"Branch name must contain a valid version number. " + f"Got: {BRANCH}. 
Skipping release...\n" + ) + sys.exit(0) + is_beta = "beta" in BRANCH + + release_name_template = "helm-{{ .Name }}-{{ .Version }}" + pages_branch = "gh-pages-beta" if is_beta else "gh-pages" + version = f"{VERSION}-beta.{SHA}" if is_beta else VERSION + + # Create unique release name based on version and commit sha + for chart in CHARTS: + for line in fileinput.input(chart, inplace=True): + if line.startswith("version:"): + line = line.replace(line, f"version: {version}\n") + sys.stdout.write(line) + + ROOT.joinpath("releaser-config.yml").open("w").write( + f""" + pages-branch: {pages_branch} + release-name-template: {release_name_template} + """) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..35f440bb --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,33 @@ +name: Release Charts + +on: [push] + +permissions: + contents: write + +jobs: + release: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Prepare releaser configuration + run: | + python .github/workflows/prepare_releaser_configuration.py + + - name: Configure Git + if: ${{ hashFiles('releaser-config.yml') != '' }} + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Run chart-releaser + if: ${{ hashFiles('releaser-config.yml') != '' }} + uses: helm/chart-releaser-action@v1.6.0 + with: + config: releaser-config.yml + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.gitignore b/.gitignore index 1c0e1779..6d17dd3a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,171 @@ -.merge-chain +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg 
+MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +.idea/ + +# VsCode +.history/ +.lh/ +.DS_Store /tests/* .idea/* .pytest_cache -__pycache__/* \ No newline at end of file +__pycache__/* +/bundle/ +/artifacts/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ffa5e36e..e83cdefd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,71 +1,138 @@ -image: docker:latest +stages: + - build_base + - build + - test + - deploy + - redhat + +# source: packaging/ci.Dockerfile +image: ${AWS_ACCOUNT_ID}.dkr.ecr.eu-west-1.amazonaws.com/dev/vast-csi-base:CI-2024-09-18 variables: + PLATFORMS: "linux/amd64,linux/arm64" # comma-separated list of platforms to build multi-arch images + RH_REGISTRY: registry.connect.redhat.com/vastdataorg DOCKER_REGISTRY: ${AWS_ACCOUNT_ID}.dkr.ecr.eu-west-1.amazonaws.com IMAGE_NAME: ${AWS_ACCOUNT_ID}.dkr.ecr.eu-west-1.amazonaws.com/dev/vast-csi:${CI_PIPELINE_ID} + # source: packaging/base.Dockerfile + BASE_IMAGE_NAME: ${AWS_ACCOUNT_ID}.dkr.ecr.eu-west-1.amazonaws.com/dev/vast-csi-base:2024-09-21 + ORION_BRANCH: comet/master + +build_csi_base: + stage: build_base + script: | + export 
IMAGE_TAG=${DOCKER_REGISTRY}/dev/vast-csi-base:$(date +"%Y-%m-%d") + export DOCKERFILE=base.Dockerfile + make build_image + echo "Image $IMAGE_TAG has been built and pushed to the registry. You can now specify it as base image in .gitlab-ci.yml" + when: manual + tags: + - vast-dev-builder + build_csi: stage: build script: | set -x VERSION=$(cat version.txt) + TAGGED=$(grep 'the version of the Vast CSI driver' charts/vastcsi/values.yaml | awk '{print $2}') + if [[ "$TAGGED" != "$VERSION" ]]; then + echo "version.txt has $VERSION, while our helm chart has $TAGGED (check charts/vastcsi/values.yaml)" + exit 5 + fi LATEST=${DOCKER_REGISTRY}/dev/vast-csi:latest if (docker pull $LATEST) ; then docker tag $LATEST vast-csi:latest # the cache-source for our subsequent build fi - /bin/sh -x packaging/build.sh # --> vast-csi:dev - docker tag vast-csi:dev ${IMAGE_NAME} - docker tag vast-csi:dev ${LATEST} - docker push ${IMAGE_NAME} - docker push ${LATEST} - echo "pushed ${IMAGE_NAME}" + export PLATFORMS="" # reset global variable (we don't need multi-arch for internal ERC repository) + export IMAGE_TAG=$IMAGE_NAME + export DOCKERFILE=Dockerfile + export CACHE_FROM=vast-csi:latest + make build_image + make run_csi_sanity + + docker run $IMAGE_TAG test + docker run $IMAGE_TAG system_info + docker tag $IMAGE_TAG $LATEST + docker push $IMAGE_TAG + docker push $LATEST + echo "pushed ${IMAGE_TAG}" tags: - vast-dev-builder -.test_csi: &test_csi +build_csi_operator: + stage: build + script: | + set -x + docker login -u ${RH_DOCKER_USER} -p ${RH_DOCKER_PASSWORD} registry.redhat.io + export PIPE=${CI_PIPELINE_ID} + make operator-build + make operator-push + make operator-bundle-build + make operator-bundle-push + tags: + - vast-dev-builder + +.common_vars: &common_vars + INSTALL_IMAGE: "prev_version" + VAST_upgrade_to: $INSTALL_IMAGE + VAST_COMET__KWARG: csi_plugin_version=${CI_PIPELINE_ID} + TRIGGER_SOURCE: "vastcsi" + + +test_csi [latest]: &test_csi stage: test - when: manual - 
variables: - VAST_COMET_KWARG: csi_plugin_version=${CI_PIPELINE_ID} + when: always + except: + - /v[\d]\.[\d]+/ trigger: project: dev/orion - branch: comet/master + branch: $ORION_BRANCH strategy: depend allow_failure: true + variables: *common_vars + + +test_csi_multicluster [latest]: + <<: *test_csi + when: manual + variables: + <<: *common_vars + NUM_SYSTEMS: 2 test_csi: <<: *test_csi + when: manual + allow_failure: true + except: null parallel: matrix: - - INSTALL_IMAGE: - - latest-3.6.0 - - latest-4.0.0 - - latest-4.2.0 - - latest-4.3.0 + - INSTALL_IMAGE: "4.5" + - INSTALL_IMAGE: "4.6" + - INSTALL_IMAGE: "4.7" + - INSTALL_IMAGE: "5.0" + - INSTALL_IMAGE: "5.1" + - INSTALL_IMAGE: "prev_version" + - INSTALL_IMAGE: "latest" update_dockerhub [prod]: &update_dockerhub stage: deploy environment: prod + only: + - /v[\d]\.[\d]+/ script: | set -x - if [[ "$CI_ENVIRONMENT_NAME" == "prod" ]]; then - VERSION=$(cat version.txt) - else - VERSION=$CI_ENVIRONMENT_NAME-$CI_PIPELINE_ID + VERSION=$(cat version.txt) + if [[ "$CI_ENVIRONMENT_NAME" != "prod" ]]; then + VERSION=$VERSION-$CI_ENVIRONMENT_NAME-$CI_PIPELINE_ID fi - RELEASE_NAME=docker.io/vastdataorg/csi:${VERSION} - - docker pull ${IMAGE_NAME} - docker tag ${IMAGE_NAME} ${RELEASE_NAME} - - echo "Pushing: ${IMAGE_NAME} --> ${RELEASE_NAME}" docker login -u ${DOCKER_USER} -p ${DOCKER_PASSWORD} $DOCKER_HUB - docker push ${RELEASE_NAME} - echo "pushed ${RELEASE_NAME}" - + export IMAGE_TAG=docker.io/vastdataorg/csi:${VERSION} + export DOCKERFILE=Dockerfile + export PUSH_ON_SUCCESS=true + make build_image after_script: - docker logout $DOCKER_HUB when: manual @@ -78,43 +145,46 @@ update_dockerhub [beta]: environment: beta -update_github: - needs: [] +update_github [prod]: &update_github image: name: alpine/git entrypoint: [""] stage: deploy - script: | - set -x - VERSION=$(cat version.txt) - VERSION_BRANCH=$CI_COMMIT_REF_NAME - + only: + - /^v[\d]\.[\d]+/ + before_script: &setup_github_access | mkdir ~/.ssh/ cp $GITHUB_KEY 
~/.ssh/id_rsa chmod 0600 ~/.ssh/id_rsa ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts - git config --global user.email "ofer.koren@vastdata.com" - git config --global user.name "Ofer Koren" - + git config --global user.email "$GITLAB_USER_EMAIL" + git config --global user.name "$GITLAB_USER_NAME" + script: | + set -x + VERSION=$(cat version.txt) git remote -v git remote add github git@github.com:vast-data/vast-csi.git || true - git fetch github $VERSION_BRANCH - git checkout -B $VERSION_BRANCH FETCH_HEAD + git fetch github $CI_COMMIT_REF_NAME + git checkout -B $CI_COMMIT_REF_NAME FETCH_HEAD git checkout $CI_COMMIT_SHA -- . git commit -am "VAST Data CSI Plugin - $VERSION (from $CI_COMMIT_SHA)" git tag -f $VERSION - git push -f --tags github HEAD:$VERSION_BRANCH + git push -f --tags github HEAD:$CI_COMMIT_REF_NAME - only: - - /^v\d.*/ when: manual tags: - vast-dev-builder +update_github [beta]: + <<: *update_github + only: + - /v[\d]\.[\d]+-beta.*/ + + mark_stable: image: name: amazon/aws-cli @@ -124,3 +194,79 @@ mark_stable: tags: - vast-dev-builder when: manual + +# Publish CSI Driver image and CSI Operator image for RedHat certification +# Note: Images are published to catalog automatically only if "Auto-publish" is enabled in the catalog listing. +# Otherwise, you need to confirm the publication manually after images scanning. 
+publish_image_redhat: + stage: redhat + script: | + set -x + docker login -u ${RH_DOCKER_USER} -p ${RH_DOCKER_PASSWORD} registry.redhat.io + export VERSION=$(cat version.txt) + + # Publish CSI Driver image + export PROJECT_ID=5f7595a16fd1fbdbe36c0b50 + echo "${CSI_DRIVER_REGISTRY_KEY}" | docker login -u "redhat-isv-containers+${PROJECT_ID}-robot" --password-stdin quay.io + export IMAGE_TAG="quay.io/redhat-isv-containers/${PROJECT_ID}:${VERSION}" + export DOCKERFILE=Dockerfile + make build_image + make run_preflight + + # Publish CSI Operator image + export PROJECT_ID=66e6d0dd49f52e86c9d56b1c + echo "${CSI_OPERATOR_REGISTRY_KEY}" | docker login -u "redhat-isv-containers+${PROJECT_ID}-robot" --password-stdin quay.io + export IMAGE_TAG="quay.io/redhat-isv-containers/${PROJECT_ID}:${VERSION}" + export DOCKERFILE=operator.Dockerfile + make build_image + make run_preflight + when: manual + tags: + - vast-dev-builder + + +update_certified_operators: + stage: redhat + before_script: *setup_github_access + script: | + set -x + VERSION=$(cat version.txt) + + export IMG_PULL_SECRET=null + export PIPE=null + export IMG=${RH_REGISTRY}/csi-operator@sha256 + export OPERATOR_TAG=$(./scripts/img_to_digest.sh ${IMG}:${VERSION}) + + export CSI_PLUGIN_IMG=${RH_REGISTRY}/csi@sha256 + export CSI_TAG=$(./scripts/img_to_digest.sh ${CSI_PLUGIN_IMG}:${VERSION}) + if [ -z "$OPERATOR_TAG" ]; then + echo "Error: OPERATOR_TAG is empty." + exit 1 + fi + if [ -z "$CSI_TAG" ]; then + echo "Error: CSI_TAG is empty." + exit 1 + fi + make operator-bundle-gen + + git clone git@github.com:vast-data/certified-operators.git + DEST=certified-operators/operators/vast-csi-operator/${VERSION} + # Check if DEST directory exists + if [ -d "$DEST" ]; then + FORCE_FLAG="--force" + else + FORCE_FLAG="" + fi + rm -rf $DEST && mkdir -p $DEST + cp -r bundle/manifests $DEST + cp -r bundle/metadata $DEST + + cd certified-operators + git add . 
+ git commit -am "VAST CSI Operator - $VERSION + + (from $CI_COMMIT_SHA)" + git push origin main $FORCE_FLAG + when: manual + tags: + - vast-dev-builder diff --git a/.merge-chain b/.merge-chain new file mode 100644 index 00000000..f31f3cd7 --- /dev/null +++ b/.merge-chain @@ -0,0 +1 @@ +vast-csi diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..48a41a05 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,83 @@ +# CHANGELOG + +## Version 2.5.0 +* CSI driver operator (VCSI-173) +* Allow using VIPPool DNS name instead of the CSI choosing IPs (VCSI-167) +* Expose existing data via Static PV (VCSI-150) + +## Version 2.4.2 +* Support for ARM architecture (VCSI-191) +* Bug Fix - do not expect VMS credentials in a non-ephemeral mounting flow (VCSI-196) + +## Version 2.4.1 +* Support for multiple Vast Clusters via using StorageClass secrets (VCSI-140) +* Set a timeout on requests to VMS, to prevent worker threads hanging (VCSI-183) +* Improve mounting performance by support the use of VIPPool DNS, skipping an API call to the VMS (VCSI-167) +* Bug fix - allow using "tenant-less" VIP pools when running in client-based tenancy (VCSI-188) + +## Version 2.4.0 +* added Container Object Storage Interface (COSI) support (VCSI-159) +* added formal support for multitenancy via StorageClasses (VCSI-147) +* added support for mounting using fixed-ips instead of VIP pool (VCSI-170) +* added support for host mount options propagation via /etc/nfsmount.conf.d (VCSI-169) +* changed Controller pod to use 'Deployment' instead of 'Statefulset' (VCSI-166) + +## Version 2.3.1 +* added volume stats metrics on Node (VCSI-125) + +## Version 2.3.0 +* added CLONE_VOLUME support (VCSI-83) +* clone volumes from snapshots in READ_WRITE mode (VCSI-103) + +## Version 2.2.6 +* added `sslCertsSecretName` parameter, which points to a user-defined secret for the CSI driver to utilize for custom CA bundles. 
(VCSI-120) +* removed kubernetes version check (VCSI-130) +* advanced resources usage and pod allocation for csi node/controller (VCSI-131) +* when using Trash API for deletions, disallow removal of volume if it has snapshots, as a workaround for a Vast Storage temporary limitation (VCSI-128) + +## Version 2.2.5 +* added adjustable timeout and number of workers (VCSI-100) +* added k8s error events and more informative error logging (VCSI-97) +* added multitenancy awareness (VCSI-114) +* removed password and username fields from values.yaml. Created new required field `secretName` (VCSI-115) +* added QoS policy support (VCSI-113) +* Misc + * added `CHANGELOG.md` (VCSI-95) + +## Version 2.2.1 (05/16/23) +* added NFS4 support (inferred from mount options) (VCSI-78) +* created `create_views.py` script which creates missing views for PVCs provisioned by version 2.1 of CSI driver. (VCSI-86) +* Misc + * updated helm release action version (VCSI-78) + * renamed env variable `X_CSI_DISABLE_VMS_SSL_VERIFICATION` -> `X_CSI_ENABLE_VMS_SSL_VERIFICATION` (VCSI-81) + * "volume_name", "view_policy" and "protocol" included in volume context for using on Node side (if needed) (VCSI-87) + +## Version 2.2.0 (03/09/23) +* docker based csi template generator is replaced with helm chart. (VCSI-39) +* implemented view per volume feature (VCSI-38) +* added ssl certificates support. (VCSI-42) +* added `deletion_vip_pool` and `deletion_view_policy` parameters specifically for the purpose of performing a volume cleanse. +* Misc + * added unit tests (VCSI-38) + * exceptions were moved to `exception.py` (VCSI-38) + * added intermediate base csi image. 
(VCSI-50) + +## Version 2.1.2 (01/28/23) +* added NFS4 support (VCSI-68) + +## Version 2.1.1 (12/29/22) +* trim the names to 64 characters (VCSI-68) +* Fix quota create volume when quota exists (VCSI-66) + +## Version 2.1.0 (12/29/22) +* added `CREATE_DELETE_SNAPSHOT` and `LIST_SNAPSHOTS` Controller capabilities support (VCSI-15) +* added Ephemeral volumes support (VCSI-37) +* added `mount options` support (VCSI-56) +* added multiple StorageClass support. (VCSI-65) +* Misc + * updated sidecar containers tags (VCSI-15) + * all methods and classes related to communication with VMS moved to `vms_session.py` (VCSI-15) + * all methods related to provisioning new volume/snapshot moved to `volume_builder.py` (VCSI-15) + * Config class moved to `configuration.py` (VCSI-15) + * created `migrate-pv.py` script to enhance PVCs provisioned by version 2.0 of the driver by adding necessary volume attributes (VCSI-44) + diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..2b9069df --- /dev/null +++ b/Makefile @@ -0,0 +1,166 @@ +.EXPORT_ALL_VARIABLES: +SHELL := /usr/bin/env bash +CURRENT_TARGET := $(firstword $(MAKECMDGOALS)) + +OPERATOR_VERSION := $(shell awk '/^version:/ { print $$2; exit }' $(CURDIR)/charts/vastcsi-operator/Chart.yaml | sed 's/"//g') +# Include content of .env file as environment for all make commands (if such file exists) +# For better usability you can create .env file and specify necessary variable here. For instance +# PIPE=12345 +# .... 
+MAKEENV=/tmp/.csimakeenv +$(shell echo '' > ${MAKEENV} && chmod 777 ${MAKEENV}) +IGNORE := $(shell [ -f .env ] && env `(cat .env | xargs)` | sed 's/=/:=/' | sed 's/^/export /' > ${MAKEENV}) +include ${MAKEENV} + +ifndef IMG + IMG := $(DOCKER_REGISTRY)/dev/vast-csi +endif +ifndef CSI_PLUGIN_IMG + CSI_PLUGIN_IMG := $(DOCKER_REGISTRY)/dev/vast-csi +endif +ifndef CHANNEL +CHANNEL := "alpha" +endif +ifndef NAMESPACE +NAMESPACE := "vast-csi" +endif +ifndef IMG_PULL_SECRET +IMG_PULL_SECRET := "regcred" +endif +# Set default values for tags +# in simplest case, only PIPE is required eg. export PIPE=xxxxxx. Other tags will be built upon this one: +# CSI_TAG=xxxxxx +# OPERATOR_TAG=xxxxxx-operator +# OPERATOR_BUNDLE_TAG=xxxxxx-operator-bundle +# In more complex scenarios, you can specify all tags +# separately eg export CSI_TAG=vvvvvv OPERATOR_TAG=yyyyy-operator etc. +CSI_TAG := $(if $(CSI_TAG),$(CSI_TAG),$(PIPE)) +OPERATOR_TAG := $(if $(OPERATOR_TAG),$(OPERATOR_TAG),$(if $(PIPE),$(PIPE)-operator)) +OPERATOR_BUNDLE_TAG := $(if $(OPERATOR_BUNDLE_TAG),$(OPERATOR_BUNDLE_TAG),$(if $(PIPE),$(PIPE)-operator-bundle)) +# Define the script for checking required environment variables +define check_required_env = + printf "\033[32m[%s]\033[0m\n" $$CURRENT_TARGET + missing_vars=0; \ + for var in $(strip $1); do \ + if [ -z "$${!var}" ]; then \ + printf "\033[31m!\033[36m%-30s\033[0m \033[31m\033[0m\n" $$var; \ + missing_vars=1; \ + else \ + printf "\033[31m!\033[36m%-30s\033[0m %s\n" $$var "$${!var}"; \ + fi; \ + done; \ + if [ $$missing_vars -ne 0 ]; then \ + echo "Please ensure all required environment variables are set and not empty."; \ + exit 1; \ + fi; +endef + +# Define the script for checking non-required environment variables (for informational purposes) +define check_non_required_env = + for var in $(strip $1); do \ + if [ ! 
-z "$${!var}" ]; then \ + printf " \033[36m%-30s\033[0m %s\n" $$var "$${!var}"; \ + fi; \ + done +endef + +.PHONY: check_required_env check_non_required_env + + +###################### +# CSI OPERATOR +###################### +operator-build: ## Build operator docker image + @$(call check_required_env,IMG OPERATOR_TAG OPERATOR_VERSION) + docker build --build-arg VERSION=$(OPERATOR_VERSION) -t "${IMG}:${OPERATOR_TAG}" -f $(CURDIR)/packaging/operator.Dockerfile . + docker tag "${IMG}:${OPERATOR_TAG}" "${IMG}:latest-csi-operator" + +operator-push: ## Push operator docker image to docker repository (specified in defaults) + @$(call check_required_env,IMG OPERATOR_TAG) + docker push "${IMG}:${OPERATOR_TAG}" + docker push "${IMG}:latest-csi-operator" + +###################### +# CSI OPERATOR BUNDLE +###################### +operator-bundle-gen: ## Generate bundle manifests and metadata, then validate generated files (NOTE: for prod builds IMG_PULL_SECRET and PIPE should be null). + @$(call check_required_env,IMG CSI_PLUGIN_IMG OPERATOR_TAG CSI_TAG CHANNEL) + @$(call check_non_required_env,IMG_PULL_SECRET PIPE) + @$(CURDIR)/packaging/gen-operator-bundle.sh $(CURDIR) $(CHANNEL) \ + --set olmBuild=true \ + --set installSnapshotCRDS=false \ + --set maturity=$(CHANNEL) \ + --set managerImage="${IMG}:${OPERATOR_TAG}" \ + --set overrides.csiVastPlugin.repository=$(CSI_PLUGIN_IMG):$(CSI_TAG) \ + --set imagePullSecret=$(IMG_PULL_SECRET) \ + --set ciPipe=$(PIPE) + @operator-sdk bundle validate $(CURDIR)/bundle + +operator-bundle-build: ## Generate manifests, metadata etc and build docker bundle image + @$(MAKE) operator-bundle-gen + @$(call check_required_env,IMG OPERATOR_BUNDLE_TAG OPERATOR_VERSION CHANNEL) + docker build --build-arg CHANNEL=${CHANNEL} -t "${IMG}:${OPERATOR_BUNDLE_TAG}" -f $(CURDIR)/packaging/operator_bundle.Dockerfile . 
+ docker tag "${IMG}:${OPERATOR_BUNDLE_TAG}" "${IMG}:latest-csi-operator-bundle" + +operator-bundle-push: ## Push bundle image to docker repository (specified in defaults) + @$(call check_required_env,IMG OPERATOR_BUNDLE_TAG) + docker push "${IMG}:${OPERATOR_BUNDLE_TAG}" + docker push "${IMG}:latest-csi-operator-bundle" + +###################### +# OPENSHIFT HELPERS +###################### +create-secret: ## Create secret for pulling images from the configured Docker registry + @$(call check_required_env,NAMESPACE IMG_PULL_SECRET) + @if ! oc get namespace $(NAMESPACE) > /dev/null 2>&1; then \ + echo "Namespace $(NAMESPACE) does not exist. Creating it..."; \ + oc create namespace $(NAMESPACE); \ + fi + oc create secret docker-registry --dry-run=client $(IMG_PULL_SECRET) \ + --docker-server=$(DOCKER_REGISTRY) \ + --docker-username=AWS \ + --docker-password=$$(aws ecr get-login-password) \ + --namespace=${NAMESPACE} -o yaml | oc apply -f -; + +operator-bundle-run: ## Deploy bundle against the configured Kubernetes cluster in ~/.kube/config + @$(call check_required_env,IMG OPERATOR_BUNDLE_TAG NAMESPACE IMG_PULL_SECRET) + @if ! oc get secret "${IMG_PULL_SECRET}" -n "${NAMESPACE}" > /dev/null 2>&1; then \ + echo "${IMG_PULL_SECRET} secret does not exist in namespace ${NAMESPACE}. Run 'make create-secret' target first."; \ + exit 1; \ + fi + operator-sdk run bundle "${IMG}:${OPERATOR_BUNDLE_TAG}" --timeout 10m --namespace ${NAMESPACE} --install-mode OwnNamespace --pull-secret-name ${IMG_PULL_SECRET} + +operator-bundle-upgrade-run: ## Upgrade an Operator previously installed in the bundle format with OLM + @$(call check_required_env,IMG OPERATOR_BUNDLE_TAG NAMESPACE IMG_PULL_SECRET) + @if ! oc get secret "${IMG_PULL_SECRET}" -n "${NAMESPACE}" > /dev/null 2>&1; then \ + echo "${IMG_PULL_SECRET} secret does not exist in namespace ${NAMESPACE}. 
Run 'make create-secret' target first."; \ + exit 1; \ + fi + operator-sdk run bundle-upgrade "${IMG}:${OPERATOR_BUNDLE_TAG}" --timeout 10m --namespace ${NAMESPACE} --pull-secret-name ${IMG_PULL_SECRET} + +operator-bundle-clean: ## Cleanup bundle from the configured Kubernetes cluster in ~/.kube/config + @$(call check_required_env,NAMESPACE) + operator-sdk cleanup vast-csi-operator --namespace ${NAMESPACE} + +###################### +# MISC +###################### +docker-login-ecr: ## Login to AWS ECR + aws ecr get-login-password --region eu-west-1 | docker login --username AWS --password-stdin $(DOCKER_REGISTRY) + +build_image: ## Build (and optionally push) Docker image to the configured Docker registry + @$(call check_required_env,IMAGE_TAG DOCKERFILE) + @$(call check_non_required_env,BASE_IMAGE_NAME PLATFORMS CACHE_FROM PUSH_ON_SUCCESS) + @$(CURDIR)/packaging/build_image.sh + +run_preflight: ## Run preflight checks for the operator Red Hat certification + @$(call check_required_env,IMAGE_TAG PROJECT_ID) + @$(CURDIR)/packaging/run_preflight.sh + +run_csi_sanity: ## Run CSI sanity tests + @$(call check_required_env,IMAGE_TAG) + @$(CURDIR)/packaging/sanity.sh $(IMAGE_TAG) + +help: ## Show help + @echo "Please specify a build target. The choices are:" + @awk -F ': ## ' '/^[a-zA-Z0-9_-]+:.* ## .*/ {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) diff --git a/README.md b/README.md index 60ad6d4a..97225660 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ # VAST Data CSI Driver -The source-code in this repository is for informational purposes only. It is not meant to be compiled or used directly. -If you wish to use our driver with your VAST storage system, please refer to [installation guide](https://support.vastdata.com/hc/en-us/articles/360010932159-VAST-with-Kubernetes). \ No newline at end of file +The source-code in this repository is for informational purposes only. It is not meant to be used directly. 
+If you wish to use our driver with your VAST storage system, please refer to our [official documentation](https://support.vastdata.com/s/topic/0TOV40000000TwTOAU/vast-csi-driver-23-administrators-guide). + +Avoid opening issues within this project. +If you need support, please use VAST's Customer Support channels - https://support.vastdata.com diff --git a/charts/vastcosi/.helmignore b/charts/vastcosi/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/charts/vastcosi/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/vastcosi/Chart.yaml b/charts/vastcosi/Chart.yaml new file mode 100644 index 00000000..348bf2b1 --- /dev/null +++ b/charts/vastcosi/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +name: vastcosi +description: Helm chart for Deployment of VAST Container Object Storage Interface (COSI) +type: application +version: "0.1.0" # will be updated by the release ci +keywords: + - vast + - csi + - driver + - vastdata + - cosi + - cosi-driver +icon: "https://uploads.vastdata.com/2020/04/vast-white-1.svg" +home: "https://support.vastdata.com/s/topic/0TOV400000018tBOAQ/vast-csi-driver" \ No newline at end of file diff --git a/charts/vastcosi/README.md b/charts/vastcosi/README.md new file mode 100644 index 00000000..4ccb1184 --- /dev/null +++ b/charts/vastcosi/README.md @@ -0,0 +1,49 @@ +# Install COSI driver with Helm 3 + +## Prerequisites + - [install Helm](https://helm.sh/docs/intro/quickstart/#install-helm) + + +### install production version of the driver: +```console +helm repo add vast https://vast-data.github.io/vastcsi +helm install cosi-driver vast/vastcosi -f 
values.yaml -n vast-cosi --create-namespace +``` + +### install beta version of the driver: +```console +helm repo add vast https://raw.githubusercontent.com/vast-data/vast-csi/gh-pages-beta +helm install cosi-driver vast/vastcosi -f values.yaml -n vast-cosi --create-namespace +``` + +> **NOTE:** Optionally modify values.yaml or set overrides via Helm command line + + +### install a specific version +```console +helm install cosi-driver vast/vastcosi -f values.yaml -n vast-cosi --create-namespace --version 2.4.0 +``` + +### Upgrade driver +```console +helm upgrade cosi-driver vast/vastcosi -f values.yaml -n vast-cosi +``` + +### Upgrade helm repository +```console +helm repo update vast +``` + +### Uninstall driver +```console +helm uninstall cosi-driver -n vast-cosi +``` + +### search for all available chart versions +```console +helm search repo -l vast +``` + +### troubleshooting + - Add `--wait -v=5 --debug` in `helm install` command to get detailed error + - Use `kubectl describe` to acquire more info diff --git a/charts/vastcosi/templates/NOTES.txt b/charts/vastcosi/templates/NOTES.txt new file mode 100644 index 00000000..c05f66d1 --- /dev/null +++ b/charts/vastcosi/templates/NOTES.txt @@ -0,0 +1,9 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. +The release is installed in namespace {{ .Release.Namespace }} + +To learn more about the release, try: + + $ helm status -n {{ .Release.Namespace}} {{ .Release.Name }} + $ helm get all -n {{ .Release.Namespace}} {{ .Release.Name }} diff --git a/charts/vastcosi/templates/bucket-class.yaml b/charts/vastcosi/templates/bucket-class.yaml new file mode 100644 index 00000000..5f799a56 --- /dev/null +++ b/charts/vastcosi/templates/bucket-class.yaml @@ -0,0 +1,44 @@ +{{/* Generate one or more bucket classes from 'bucketClasses' section. 
*/}} + +{{/* Iterate over BucketClasses from manifest */}} +{{- range $name, $options := .Values.bucketClasses }} + +{{- $deletion_policy := pluck "deletionPolicy" $options $.Values.bucketClassDefaults | first | quote -}} +{{- $view_policy := pluck "viewPolicy" $options $.Values.bucketClassDefaults | first | quote -}} +{{- $vip_pool_name := pluck "vipPool" $options $.Values.bucketClassDefaults | first | quote -}} +{{- if eq $vip_pool_name ( quote "" ) -}} + {{- fail "vipPool is required value. Please specify valid vip pool" -}} +{{- end }} +{{- $scheme := pluck "scheme" $options $.Values.bucketClassDefaults | first | quote -}} +{{- if not ( $scheme | mustRegexMatch "http|https" ) -}} + {{- fail "scheme should be either 'http' or 'https'" -}} +{{- end }} +{{- $storage_path := pluck "storagePath" $options $.Values.bucketClassDefaults | first | quote -}} +{{- if eq $storage_path ( quote "" ) -}} + {{- fail "storagePath is required value. Please specify valid storage path" -}} +{{- end }} + +kind: BucketClass +apiVersion: objectstorage.k8s.io/v1alpha1 +metadata: + name: {{ required "A BucketClass name must be not empty" $name }} + namespace: {{ include "vastcosinamespace" $ }} + labels: +{{- include "vastcosilabels" $ | nindent 4 }} +driverName: csi.vastdata.com +deletionPolicy: {{ $deletion_policy }} +parameters: + root_export: {{ $storage_path }} + vip_pool_name: {{ $vip_pool_name }} + scheme: {{ $scheme }} +{{- if ne $view_policy ( quote "" ) }} + view_policy: {{ $view_policy }} +{{- end }} +{{- with omit $options "deletionPolicy" "viewPolicy" "vipPool" "scheme" "storagePath" }} + {{- range $key, $value := . 
}} + {{ $key }}: {{ $value | quote }} +{{- end }} +{{- end }} + +--- +{{- end }} diff --git a/charts/vastcosi/templates/cosi-provisioner.yaml b/charts/vastcosi/templates/cosi-provisioner.yaml new file mode 100644 index 00000000..880302ca --- /dev/null +++ b/charts/vastcosi/templates/cosi-provisioner.yaml @@ -0,0 +1,65 @@ +{{- $csi_images := .Values.image -}} +{{- $ca_bundle := empty .Values.sslCert | ternary .Values.sslCertsSecretName "csi-vast-ca-bundle" -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cosi-vast-provisioner + labels: + app.kubernetes.io/csi-role: "cosi-provisioner" +{{- include "vastcosilabels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + app: cosi-provisioner +{{- include "vastcosiselectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: cosi-provisioner +{{- include "vastcosilabels" . | nindent 8 }} + spec: +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.cosiplugin.affinity | indent 8 }} + serviceAccountName: {{ .Release.Name }}-objectstorage-provisioner-sa + containers: + - name: cosi-vast-plugin + image: {{ printf "%s:%s" $csi_images.csiVastPlugin.repository $csi_images.csiVastPlugin.tag }} + args: + - "serve" + imagePullPolicy: {{ $csi_images.csiVastPlugin.imagePullPolicy | default "IfNotPresent" }} + env: +{{- include "vastcosicommonEnv" . | indent 10 }} + - name: CSI_ENDPOINT + value: unix:///var/lib/cosi/cosi.sock + - name: X_CSI_MODE + value: cosi_plugin + volumeMounts: +{{- include "vastcosivmsAuthVolumeMount" (merge (dict "ca_bundle" $ca_bundle) .) 
| indent 10 }} + - mountPath: /var/lib/cosi + name: socket-dir + resources: {{- toYaml .Values.cosiplugin.resources.cosiVastPlugin | nindent 10 }} + - name: objectstorage-provisioner + image: {{ printf "%s:%s" $csi_images.objectstorageProvisioner.repository $csi_images.objectstorageProvisioner.tag }} + imagePullPolicy: {{ $csi_images.objectstorageProvisioner.imagePullPolicy | default "IfNotPresent" }} + args: + - "--v=5" + volumeMounts: + - mountPath: /var/lib/cosi + name: socket-dir + resources: {{- toYaml .Values.cosiplugin.resources.objectstorageProvisioner | nindent 10 }} + dnsPolicy: {{ .Values.cosiplugin.dnsPolicy }} + nodeSelector: +{{ toYaml .Values.cosiplugin.nodeSelector | indent 8 }} + priorityClassName: {{ .Values.cosiplugin.priorityClassName }} + tolerations: +{{ toYaml .Values.cosiplugin.tolerations | indent 8 }} + volumes: + - name: socket-dir + emptyDir: {} +{{- include "vastcosivmsAuthVolume" (merge (dict "ca_bundle" $ca_bundle) .) | indent 8 }} diff --git a/charts/vastcosi/templates/cosi-rbac.yaml b/charts/vastcosi/templates/cosi-rbac.yaml new file mode 100644 index 00000000..8cf10bce --- /dev/null +++ b/charts/vastcosi/templates/cosi-rbac.yaml @@ -0,0 +1,48 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-objectstorage-provisioner-role + namespace: {{ include "vastcosinamespace" . }} + labels: + {{- include "vastcosilabels" . 
| nindent 4 }} +rules: +- apiGroups: ["objectstorage.k8s.io"] + resources: ["buckets", "bucketaccesses", "bucketclaims", "bucketaccessclasses", "buckets/status", "bucketaccesses/status", "bucketclaims/status", "bucketaccessclasses/status"] + verbs: ["get", "list", "watch", "update", "create", "delete"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +- apiGroups: [""] + resources: ["secrets", "events"] + verbs: ["get", "delete", "update", "create"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-objectstorage-provisioner-role-binding + namespace: {{ include "vastcosinamespace" . }} + labels: + {{- include "vastcosilabels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-objectstorage-provisioner-sa + namespace: {{ include "vastcosinamespace" . }} +roleRef: + kind: ClusterRole + name: {{ .Release.Name }}-objectstorage-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-objectstorage-provisioner-sa + namespace: {{ include "vastcosinamespace" . }} + labels: + {{- include "vastcosilabels" . | nindent 4 }} + +--- diff --git a/charts/vastcosi/templates/secret.yaml b/charts/vastcosi/templates/secret.yaml new file mode 100644 index 00000000..11acde75 --- /dev/null +++ b/charts/vastcosi/templates/secret.yaml @@ -0,0 +1,17 @@ +{{/* Optional ssl certificate for comminication with Vast Cluster host */}} + +{{- if .Values.sslCert }} +apiVersion: v1 +kind: Secret +metadata: + name: csi-vast-ca-bundle + namespace: {{ include "vastcosinamespace" . }} + labels: +{{- include "vastcosilabels" . 
| nindent 4 }} + annotations: + checksum/vast-vms-authority-secret: {{ .Values.sslCert | sha256sum | trim }} +type: Opaque +data: + ca-bundle.crt: |- + {{ .Values.sslCert | b64enc }} +{{- end -}} diff --git a/charts/vastcosi/templates/shared/_chart_name.tpl b/charts/vastcosi/templates/shared/_chart_name.tpl new file mode 100644 index 00000000..7f611f7e --- /dev/null +++ b/charts/vastcosi/templates/shared/_chart_name.tpl @@ -0,0 +1,5 @@ +{{/*Create chart name and version as used by the chart label.*/}} + +{{- define "vastcosichart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} diff --git a/charts/vastcosi/templates/shared/_common_env.tpl b/charts/vastcosi/templates/shared/_common_env.tpl new file mode 100644 index 00000000..31babeec --- /dev/null +++ b/charts/vastcosi/templates/shared/_common_env.tpl @@ -0,0 +1,24 @@ +{{- /* +# IMPORTANT: cosi and csi helm charts share similar templates. +# If you make changes to a template in one chart, make sure to replicate those +# changes in the corresponding template in the other chart. +*/}} + +{{- define "vastcosicommonEnv" -}} + +{{- if (urlParse (required "endpoint is required" $.Values.endpoint )).scheme }} + {{- fail "endpoint requires only host to be provided. Please exclude 'http//|https//' from url." 
-}} +{{- end }} +- name: X_CSI_PLUGIN_NAME + value: "csi.vastdata.com" +- name: X_CSI_VMS_HOST + value: {{ $.Values.endpoint | quote }} +- name: X_CSI_ENABLE_VMS_SSL_VERIFICATION + value: {{ $.Values.verifySsl | quote }} +- name: X_CSI_WORKER_THREADS + value: {{ $.Values.numWorkers | quote }} +{{ if $.Values.truncateVolumeName -}} +- name: X_CSI_TRUNCATE_VOLUME_NAME + value: {{ $.Values.truncateVolumeName | quote }} +{{- end }} +{{- end }} diff --git a/charts/vastcosi/templates/shared/_common_namespace.tpl b/charts/vastcosi/templates/shared/_common_namespace.tpl new file mode 100644 index 00000000..929c1256 --- /dev/null +++ b/charts/vastcosi/templates/shared/_common_namespace.tpl @@ -0,0 +1,3 @@ +{{- define "vastcosinamespace" -}} +{{- coalesce $.Release.Namespace "vast-csi" | quote -}} +{{- end }} diff --git a/charts/vastcosi/templates/shared/_common_selectors_and_labels.tpl b/charts/vastcosi/templates/shared/_common_selectors_and_labels.tpl new file mode 100644 index 00000000..1790deff --- /dev/null +++ b/charts/vastcosi/templates/shared/_common_selectors_and_labels.tpl @@ -0,0 +1,23 @@ +{{/* Common labels and selectors */}} + +{{- define "vastcosiname" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + + +{{/* Common labels */}} +{{- define "vastcosilabels" -}} +helm.sh/chart: {{ include "vastcosichart" . }} +{{ include "vastcosiselectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + + +{{/* Common selectors */}} +{{- define "vastcosiselectorLabels" -}} +app.kubernetes.io/name: {{ include "vastcosiname" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/vastcosi/templates/shared/_vms_auth.tpl b/charts/vastcosi/templates/shared/_vms_auth.tpl new file mode 100644 index 00000000..257c4882 --- /dev/null +++ b/charts/vastcosi/templates/shared/_vms_auth.tpl @@ -0,0 +1,43 @@ +{{/*Set of templates for working with vms credentials and vms session certificates*/}} + +{{/* Volume declarations for vms credentials and vms session certificates */}} +{{- define "vastcosivmsAuthVolume" -}} +{{- if and .Values.sslCert .Values.sslCertsSecretName -}} +{{- + fail (printf "Ambiguous origin of the 'sslCert'. The certificate is found in both the '%s' secret and the command line --from-file argument." .Values.secretName) +-}} +{{- end -}} +{{- if and .ca_bundle (not .Values.verifySsl) -}} + {{- fail "When sslCert is provided `verifySsl` must be set to true." -}} +{{- end }} + +- name: vms-auth + secret: + secretName: {{ required "secretName field must be specified" .Values.secretName | quote }} + items: + - key: username + path: username + - key: password + path: password +{{- if $.ca_bundle }} +- name: vms-ca-bundle + secret: + secretName: {{ $.ca_bundle }} + items: + - key: ca-bundle.crt + path: ca-certificates.crt +{{- end }} +{{- end }} + + +{{/* Volume bindings for vms credentials and vms session certificates */}} +{{ define "vastcosivmsAuthVolumeMount" }} +- name: vms-auth + mountPath: /opt/vms-auth + readOnly: true +{{- if $.ca_bundle }} +- name: vms-ca-bundle + mountPath: /etc/ssl/certs + readOnly: true +{{- end }} +{{- end }} diff --git a/charts/vastcosi/values.yaml b/charts/vastcosi/values.yaml new file mode 100644 index 00000000..fd9f100a --- /dev/null +++ b/charts/vastcosi/values.yaml @@ -0,0 +1,116 @@ +#################### +# VAST REST SESSION ATTRIBUTES +#################### +# Secret name, which corresponds to a secret containing credentials to login - must be provided by user +# Secret must contain username and password fields +# Example: 
kubectl create secret generic vast-mgmt --from-literal=username='< VAST username >' --from-literal=password='< VAST password >' +secretName: "" + +# API endpoint of VAST appliance - must be provided by user +endpoint: "" + +# Set true to enable certificate validity test +verifySsl: false + +# Path (absolute or relative) to SSL certificate for verifying the VAST REST API. +# Must be set using `set-file` option eg `--set-file sslCert=< path to sslCert.crt >` +# sslCertsSecretName secret and sslCert option in values.yaml are mutually exclusive. Make sure to use only one of them. +sslCert: "" +# Secret name, which corresponds to a secret containing an SSL certificate for verifying the VAST REST API +# Example: kubectl create secret generic vast-tls --from-file=ca-bundle.crt=< path to sslCert.crt > +# sslCertsSecretName secret and sslCert option in values.yaml are mutually exclusive. Make sure to use only one of them. +sslCertsSecretName: "" + +#################### +# VAST COSI BUCKET CLASS OPTIONS +#################### + +bucketClassDefaults: + # Where buckets will be located on VAST - must be provided by user + storagePath: "" + # Name of VAST VIP pool to use - must be provided by user + vipPool: "" + # Name of S3 policy to create bucket - must be provided by user + viewPolicy: "" + # On bucket delete behavior. By default, Vast Cluster bucket will be removed. + deletionPolicy: "Delete" + # Scheme utilized for creating bucket endpoint. Must be one of: http, https + scheme: "http" + +bucketClasses: {} +# bucketClasses: +# BucketClass name. This field must be unique across all bucket classes. +# vastdata-bucket: +# vipPool: vippool-1 +# viewPolicy: s3_policy + +#################### +# COSI PLUGIN RUNTIME PARAMETERS +#################### + +# The number of worker threads the COSI plugin use to serve requests simultaneously. +numWorkers: 10 + +# Truncate VAST bucket name if name length is greater than this number. +# set `truncateVolumeName: null` to disable truncation. 
+truncateVolumeName: 64 + +#################### +# VAST COSI PLUGIN OPTIONS +#################### + +image: + csiVastPlugin: + repository: vastdataorg/csi + tag: v2.5.0 # the version of the Vast COSI driver + imagePullPolicy: IfNotPresent + objectstorageProvisioner: + repository: gcr.io/k8s-staging-sig-storage/objectstorage-sidecar/objectstorage-sidecar + tag: v20230130-v0.1.0-24-gc0cf995 + imagePullPolicy: IfNotPresent + +#################### +# VAST COSI PLUGIN BEHAVIOR +# +# WARNING - these parameters are for advanced users. +# Setting these incorrectly may prevent the VAST COSI Driver from running correctly. +# We recommend to consult with VAST Support before changing any of the following parameters +#################### + +cosiplugin: + # determine how DNS (Domain Name System) resolution should be handled within Pod. + # available values: Default, ClusterFirstWithHostNet, ClusterFirst + dnsPolicy: Default + # nodeSelector is the way to restrict pod to be assigned on certain node/nodes. + # Specify node selector if you want node and controller containers to be assigned only to specific node/nodes of + # your cluster. + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector. + nodeSelector: {} + # If specified, the pod's tolerations + # https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # If specified, the pod's scheduling constraints + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + affinity: {} + # Resources describes the compute resource requirements. + resources: + objectstorageProvisioner: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + cosiVastPlugin: + limits: + memory: 400Mi + requests: + cpu: 100m + memory: 50Mi + # priorityClassName is the name of priority class to be used for the pod. 
+ priorityClassName: system-cluster-critical + +# Reference to one or more secrets to be used when pulling images +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +# - name: "image-pull-secret" +imagePullSecrets: [] + diff --git a/charts/vastcsi-operator/.helmignore b/charts/vastcsi-operator/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/charts/vastcsi-operator/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/vastcsi-operator/Chart.yaml b/charts/vastcsi-operator/Chart.yaml new file mode 100644 index 00000000..04c00dfe --- /dev/null +++ b/charts/vastcsi-operator/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v2 +name: vastcsi-operator +description: Helm chart for Vast CSI Operator deployment +type: application +version: "2.5.0" +keywords: + - vast + - csi + - driver + - vastdata + - csi-driver + - operator diff --git a/charts/vastcsi-operator/README.md b/charts/vastcsi-operator/README.md new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/charts/vastcsi-operator/README.md @@ -0,0 +1 @@ + diff --git a/charts/vastcsi-operator/crd-charts/vastcluster/.helmignore b/charts/vastcsi-operator/crd-charts/vastcluster/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcluster/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/vastcsi-operator/crd-charts/vastcluster/Chart.yaml b/charts/vastcsi-operator/crd-charts/vastcluster/Chart.yaml new file mode 100644 index 00000000..9fe1d837 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcluster/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +description: Helm chart for Deployment of VAST Container Storage Interface (CSI) +keywords: + - vast + - csi + - driver + - vastdata + - csi-driver +name: vastcluster +type: application +version: 0.1.0 diff --git a/charts/vastcsi-operator/crd-charts/vastcluster/templates/_helpers.tpl b/charts/vastcsi-operator/crd-charts/vastcluster/templates/_helpers.tpl new file mode 100644 index 00000000..7e2aabab --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcluster/templates/_helpers.tpl @@ -0,0 +1,29 @@ +{{/* Create chart name and version as used by the chart label. */}} +{{- define "vastcsi.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{- define "vastcsi.namespace" -}} +{{- coalesce $.Release.Namespace "vast-csi" | quote -}} +{{- end }} + +{{/* Common labels and selectors */}} +{{- define "vastcsi.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* Common labels */}} +{{- define "vastcsi.labels" -}} +helm.sh/chart: {{ include "vastcsi.chart" . }} +{{ include "vastcsi.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* Common selectors */}} +{{- define "vastcsi.selectorLabels" -}} +app.kubernetes.io/name: {{ include "vastcsi.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vastcluster/templates/secret.yaml b/charts/vastcsi-operator/crd-charts/vastcluster/templates/secret.yaml new file mode 100644 index 00000000..99091216 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcluster/templates/secret.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }} + namespace: {{ include "vastcsi.namespace" . }} + labels: + used-by: vast-csi-driver-operator + {{- include "vastcsi.labels" $ | nindent 4 }} +type: Opaque +data: + endpoint: {{ .Values.endpoint | required "Endpoint is required value" | b64enc | quote }} + username: {{ .Values.username | required "Username is required value" | b64enc | quote }} + password: {{ .Values.password | required "Password is required value" | b64enc | quote }} + {{- if .Values.sslCert }} + ssl_cert: {{ .Values.sslCert | b64enc | quote }} + {{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vastcluster/values.schema.yaml b/charts/vastcsi-operator/crd-charts/vastcluster/values.schema.yaml new file mode 100644 index 00000000..d8e8210d --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcluster/values.schema.yaml @@ -0,0 +1,47 @@ +description: > + VastCluster encapsulates the connection details required for accessing a VAST cluster. This information is ultimately stored in a Kubernetes Secret. + +spec: + description: Spec defines the desired state of VastCluster. + properties: + endpoint: + description: Endpoint or URL of the VAST mgmt host. + type: string + username: + description: Username used for authentication. + type: string + password: + description: Password used for authentication. + type: string + sslCert: + description: Optional SSL certificate for secure connections. + type: string + nullable: true + required: + - endpoint + - username + - password + type: object + +specDescriptors: + - description: Password used for authentication. 
+ displayName: Password + path: password + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:password + - description: Optional SSL certificate for secure connections. + displayName: SSL Certificate + path: sslCert + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:password + +example: + apiVersion: storage.vastdata.com/v1 + kind: VastCluster + metadata: + name: vastcluster-sample + spec: + endpoint: {{ .Values.endpoint }} + username: {{ .Values.username }} + password: {{ .Values.password }} + sslCert: {{ .Values.sslCert }} diff --git a/charts/vastcsi-operator/crd-charts/vastcluster/values.yaml b/charts/vastcsi-operator/crd-charts/vastcluster/values.yaml new file mode 100644 index 00000000..eca358ff --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcluster/values.yaml @@ -0,0 +1,12 @@ +#################### +# COMMON +#################### + +# Default value, replace with actual endpoint +endpoint: "" +# Default value, replace with actual username +username: "" +# Default value, replace with actual password +password: "" +# Default value, replace with actual SSL cert +sslCert: "" diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/.helmignore b/charts/vastcsi-operator/crd-charts/vastcsidriver/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/Chart.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/Chart.yaml new file mode 100644 index 00000000..e388fde0 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +description: Helm chart for Deployment of VAST Container Storage Interface (CSI) +keywords: + - vast + - csi + - driver + - vastdata + - csi-driver +name: vastcsidriver +type: application +version: 0.1.0 diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/clusterrole.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/clusterrole.yaml new file mode 100644 index 00000000..92f9ba40 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/clusterrole.yaml @@ -0,0 +1,91 @@ +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-vast-provisioner-role + labels: + {{- include "vastcsi.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-vast-attacher-role + labels: + {{- include "vastcsi.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-vast-resizer-role + labels: + {{- include "vastcsi.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/clusterrolebinding.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..b7ef208c --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/clusterrolebinding.yaml @@ -0,0 +1,27 @@ +{{/* ClusterRoleBindings for Vast CSI */}} + +{{- $namespace := include "vastcsi.namespace" . }} +{{- $labels := include "vastcsi.labels" . | nindent 4 }} +{{- $releaseName := .Release.Name }} + +{{- $bindings := (list "provisioner" "attacher" "resizer") }} + +{{- range $index, $role := $bindings }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ $releaseName }}-vast-{{ $role }}-binding + labels: + {{- $labels }} +subjects: + - kind: ServiceAccount + name: {{ $releaseName }}-vast-controller-sa + namespace: {{ $namespace }} +roleRef: + kind: ClusterRole + name: {{ $releaseName }}-vast-{{ $role }}-role + apiGroup: rbac.authorization.k8s.io + +--- +{{ end }} + diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/controller.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/controller.yaml new file mode 100644 index 00000000..d2417bd1 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/controller.yaml @@ -0,0 +1,165 @@ +{{/* Vast CSI Controller */}} + +{{- $csi_images := .Values.image -}} +{{- $plugin_proxy_sock := 
"/var/lib/csi/sockets/pluginproxy/csi.sock" -}} +{{- $plugin_proxy_sock_path := "/var/lib/csi/sockets/pluginproxy/" -}} +{{- $ca_bundle := empty .Values.sslCert | ternary .Values.sslCertsSecretName "csi-vast-ca-bundle" -}} + + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: csi-vast-controller + namespace: {{ include "vastcsi.namespace" . }} + labels: + {{- include "vastcsi.labels" . | nindent 4 }} + app.kubernetes.io/csi-role: "controller" +spec: + replicas: 1 + selector: + matchLabels: + app: csi-vast-controller + {{- include "vastcsi.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + role: csi-vast + app: csi-vast-controller + {{- include "vastcsi.labels" . | nindent 8 }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + affinity: + podAffinity: + {{- toYaml .Values.controller.podAffinity | nindent 10 }} + podAntiAffinity: + {{- toYaml .Values.controller.podAntiAffinity | nindent 10 }} + nodeAffinity: + {{- toYaml .Values.controller.nodeAffinity | nindent 10 }} + containers: + - name: csi-provisioner + image: {{ $csi_images.csiProvisioner.repository | default $csi_images.csiProvisioner.defaultRepository }} + args: + {{- include "vastcsi.commonArgs" . | nindent 12 }} + - "--extra-create-metadata" + - "--timeout={{ .Values.operationTimeout }}s" + - "--worker-threads={{ .Values.numWorkers }}" + - "--retry-interval-start={{ .Values.operationRetryIntervalStart }}s" + - "--retry-interval-max={{ .Values.operationRetryIntervalMax }}s" + {{- range .Values.controller.extraArgs.csiProvisioner }} + - "--{{ . 
}}" + {{- end }} + env: + - name: ADDRESS + value: {{ $plugin_proxy_sock }} + imagePullPolicy: {{ $csi_images.csiProvisioner.imagePullPolicy | default "IfNotPresent" }} + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + + - name: csi-attacher + image: {{ $csi_images.csiAttacher.repository | default $csi_images.csiAttacher.defaultRepository }} + args: + {{- include "vastcsi.commonArgs" . | nindent 12 }} + - "--timeout={{ .Values.operationTimeout }}s" + - "--worker-threads={{ .Values.numWorkers }}" + - "--retry-interval-start={{ .Values.operationRetryIntervalStart }}s" + - "--retry-interval-max={{ .Values.operationRetryIntervalMax }}s" + {{- range .Values.controller.extraArgs.csiAttacher }} + - "--{{ . }}" + {{- end }} + env: + - name: ADDRESS + value: {{ $plugin_proxy_sock }} + imagePullPolicy: {{ $csi_images.csiAttacher.imagePullPolicy | default "IfNotPresent" }} + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + + - name: csi-snapshotter + image: {{ $csi_images.csiSnapshotter.repository | default $csi_images.csiSnapshotter.defaultRepository }} + args: + {{- include "vastcsi.commonArgs" . | nindent 12 }} + - "--extra-create-metadata" + - "--leader-election=false" + - "--timeout={{ .Values.operationTimeout }}s" + - "--worker-threads={{ .Values.numWorkers }}" + - "--retry-interval-start={{ .Values.operationRetryIntervalStart }}s" + - "--retry-interval-max={{ .Values.operationRetryIntervalMax }}s" + {{- range .Values.controller.extraArgs.csiSnapshotter }} + - "--{{ . 
}}" + {{- end }} + env: + - name: ADDRESS + value: {{ $plugin_proxy_sock }} + imagePullPolicy: {{ $csi_images.csiSnapshotter.imagePullPolicy | default "IfNotPresent" }} + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + + - name: csi-resizer + image: {{ $csi_images.csiResizer.repository | default $csi_images.csiResizer.defaultRepository }} + args: + {{- include "vastcsi.commonArgs" . | nindent 12 }} + - "--timeout={{ .Values.operationTimeout }}s" + - "--workers={{ .Values.numWorkers }}" + - "--retry-interval-start={{ .Values.operationRetryIntervalStart }}s" + - "--retry-interval-max={{ .Values.operationRetryIntervalMax }}s" + - "--handle-volume-inuse-error=false" + {{- range .Values.controller.extraArgs.csiResizer }} + - "--{{ . }}" + {{- end }} + env: + - name: ADDRESS + value: {{ $plugin_proxy_sock }} + imagePullPolicy: {{ $csi_images.csiResizer.imagePullPolicy | default "IfNotPresent" }} + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: csi-vast-plugin + image: {{ $csi_images.csiVastPlugin.repository | default $csi_images.csiVastPlugin.defaultRepository }} + args: + - "serve" + imagePullPolicy: {{ $csi_images.csiVastPlugin.imagePullPolicy | default "IfNotPresent" }} + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + env: + {{- include "vastcsi.commonEnv" . | indent 12 }} + - name: CSI_ENDPOINT + value: unix://{{ $plugin_proxy_sock }} + - name: X_CSI_MODE + value: controller + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} + {{- include "vastcsi.vmsAuthVolumeMount" (merge (dict "ca_bundle" $ca_bundle) .) 
| indent 12 }} + resources: {{- toYaml .Values.controller.resources.csiVastPlugin | nindent 12 }} + hostNetwork: true + dnsPolicy: {{ .Values.controller.dnsPolicy }} + nodeSelector: + {{- if .Values.controller.runOnMaster }} + node-role.kubernetes.io/master: "" + {{- end }} + {{- if .Values.controller.runOnControlPlane }} + node-role.kubernetes.io/control-plane: "" + {{- end }} + {{- if .Values.controller.nodeSelector }} + {{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + priorityClassName: {{ .Values.controller.priorityClassName }} + serviceAccountName: {{ .Release.Name }}-vast-controller-sa + tolerations: + {{ toYaml .Values.controller.tolerations | indent 8 }} + volumes: + - name: socket-dir + emptyDir: {} + {{- include "vastcsi.vmsAuthVolume" (merge (dict "ca_bundle" $ca_bundle) .) | indent 8 }} diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/csi-driver.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/csi-driver.yaml new file mode 100644 index 00000000..a4360a17 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/csi-driver.yaml @@ -0,0 +1,20 @@ +{{/* Vast CSI Driver that defines behavior rules for all downstream PVS during attachment */}} + +{{- if .Release.IsInstall -}} +{{- if (lookup "storage.k8s.io/v1" "CSIDriver" "" "csi.vastdata.com") -}} +{{- fail "CSIDriver csi.vastdata.com already exists. Ensure you are using the correct Helm release, or delete the existing release associated with this storage class before proceeding." -}} +{{- end -}} +{{- end -}} + +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: csi.vastdata.com + labels: + {{- include "vastcsi.labels" . 
| nindent 4 }} +spec: + attachRequired: {{ .Values.attachRequired }} + podInfoOnMount: true + volumeLifecycleModes: + - Persistent + - Ephemeral diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/csi-scc.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/csi-scc.yaml new file mode 100644 index 00000000..cd5a5a87 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/csi-scc.yaml @@ -0,0 +1,33 @@ +{{- if eq (toString .Values.applySecurityContextConstraints) "true" }} + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-vast-controller-scc-sa-binding + namespace: {{ .Release.Namespace }} +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-vast-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: system:openshift:scc:privileged + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-vast-node-scc-sa-binding + namespace: {{ .Release.Namespace }} +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-vast-node-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: system:openshift:scc:privileged + apiGroup: rbac.authorization.k8s.io + +{{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/node.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/node.yaml new file mode 100644 index 00000000..84018049 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/node.yaml @@ -0,0 +1,130 @@ +{{/* Vast CSI Node */}} + +{{- $csi_images := .Values.image -}} +{{- $kubelet_path := .Values.kubeletPath | default "/var/lib/kubelet" | trimSuffix "/" }} +{{- $ca_bundle := empty .Values.sslCert | ternary .Values.sslCertsSecretName "csi-vast-ca-bundle" -}} + + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: csi-vast-node + namespace: {{ 
include "vastcsi.namespace" . }} + labels: +{{ include "vastcsi.labels" . | indent 4 }} + app.kubernetes.io/csi-role: "node" +spec: + selector: + matchLabels: + app: "csi-vast-node" +{{- include "vastcsi.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: csi-vast-node + role: csi-vast +{{- include "vastcsi.labels" . | nindent 8 }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + affinity: + podAffinity: + {{- toYaml .Values.node.podAffinity | nindent 10 }} + podAntiAffinity: + {{- toYaml .Values.node.podAntiAffinity | nindent 10 }} + nodeAffinity: + {{- toYaml .Values.node.nodeAffinity | nindent 10 }} + containers: + - name: csi-node-driver-registrar + image: {{ $csi_images.csiNodeDriverRegistrar.repository | default $csi_images.csiNodeDriverRegistrar.defaultRepository }} + args: +{{- include "vastcsi.commonArgs" . | nindent 12 }} + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + imagePullPolicy: {{ $csi_images.csiNodeDriverRegistrar.imagePullPolicy | default "IfNotPresent" }} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/csi.vastdata.com /registration/csi.vastdata.com-reg.sock"] + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ $kubelet_path }}/plugins/csi.vastdata.com/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /csi/ + - name: registration-dir + mountPath: /registration/ + resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} + - name: csi-vast-plugin + image: {{ $csi_images.csiVastPlugin.repository | default $csi_images.csiVastPlugin.defaultRepository }} + args: + - "serve" + imagePullPolicy: {{ $csi_images.csiVastPlugin.imagePullPolicy | default "IfNotPresent" }} + env: +{{- include "vastcsi.commonEnv" . 
| indent 12 }} + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_NODE_ID + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: {{ $kubelet_path }} + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + {{- if .Values.node.propagateHostMountOptions }} + - name: host-nfsmount-conf-d + mountPath: /etc/nfsmount.conf.d + {{- end }} + {{- include "vastcsi.vmsAuthVolumeMount" (merge (dict "ca_bundle" $ca_bundle) .) | indent 12 }} + resources: {{- toYaml .Values.node.resources.csiVastPlugin | nindent 12 }} + hostNetwork: true + dnsPolicy: {{ .Values.node.dnsPolicy }} + nodeSelector: +{{ toYaml .Values.node.nodeSelector | indent 8 }} + priorityClassName: {{ .Values.node.priorityClassName }} + serviceAccountName: {{ .Release.Name }}-vast-node-sa + tolerations: +{{ toYaml .Values.node.tolerations | indent 8 }} + volumes: + - name: registration-dir + hostPath: + path: {{ $kubelet_path }}/plugins_registry/ + type: DirectoryOrCreate + - name: plugin-dir + hostPath: + path: {{ $kubelet_path }}/plugins/csi.vastdata.com + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: {{ $kubelet_path }} + type: Directory + - name: device-dir + hostPath: + path: /dev + {{- include "vastcsi.vmsAuthVolume" (merge (dict "ca_bundle" $ca_bundle) .) 
| indent 8 }} + {{- if .Values.node.propagateHostMountOptions }} + - name: host-nfsmount-conf-d + hostPath: + path: /etc/nfsmount.conf.d + type: DirectoryOrCreate + {{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/serviceaccount.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/serviceaccount.yaml new file mode 100644 index 00000000..4f2995f0 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-vast-controller-sa + namespace: {{ include "vastcsi.namespace" . }} + labels: +{{- include "vastcsi.labels" . | nindent 4 }} + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-vast-node-sa + namespace: {{ include "vastcsi.namespace" . }} + labels: +{{- include "vastcsi.labels" . | nindent 4 }} diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_chart_name.tpl b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_chart_name.tpl new file mode 100644 index 00000000..c9d892c4 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_chart_name.tpl @@ -0,0 +1,5 @@ +{{/*Create chart name and version as used by the chart label.*/}} + +{{- define "vastcsi.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_args.tpl b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_args.tpl new file mode 100644 index 00000000..bd6f6562 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_args.tpl @@ -0,0 +1,4 @@ +{{- define "vastcsi.commonArgs" -}} +- "--csi-address=$(ADDRESS)" +- "--v={{ .Values.logLevel | default 5 }}" +{{- end }} diff --git 
a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_env.tpl b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_env.tpl new file mode 100644 index 00000000..15be9dac --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_env.tpl @@ -0,0 +1,32 @@ +{{- /* +# IMPORTANT: cosi and csi helm charts share similar templates. +# If you make changes to a template in one chart, make sure to replicate those +# changes in the corresponding template in the other chart. +*/}} + +{{- define "vastcsi.commonEnv" }} + +- name: X_CSI_PLUGIN_NAME + value: "csi.vastdata.com" +- name: X_CSI_VMS_HOST + value: {{ $.Values.endpoint | default "" | quote }} +- name: X_CSI_ENABLE_VMS_SSL_VERIFICATION + value: {{ $.Values.verifySsl | quote }} +- name: X_CSI_DELETION_VIP_POOL_NAME + value: {{ $.Values.deletionVipPool | quote }} +- name: X_CSI_DELETION_VIEW_POLICY + value: {{ $.Values.deletionViewPolicy | quote }} +- name: X_CSI_WORKER_THREADS + value: {{ $.Values.numWorkers | quote }} +- name: X_CSI_DONT_USE_TRASH_API + value: {{ $.Values.dontUseTrashApi | quote }} +- name: X_CSI_USE_LOCALIP_FOR_MOUNT + value: {{ $.Values.useLocalIpForMount | quote }} +- name: X_CSI_ATTACH_REQUIRED + value: {{ $.Values.attachRequired | quote }} +{{- if $.Values.truncateVolumeName }} +- name: X_CSI_TRUNCATE_VOLUME_NAME + value: {{ $.Values.truncateVolumeName | quote }} +{{- end }} + +{{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_namespace.tpl b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_namespace.tpl new file mode 100644 index 00000000..4bfb78c8 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_namespace.tpl @@ -0,0 +1,3 @@ +{{- define "vastcsi.namespace" -}} +{{- quote (coalesce $.Release.Namespace "vast-csi") -}} +{{- end }} diff --git 
a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_selectors_and_labels.tpl b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_selectors_and_labels.tpl new file mode 100644 index 00000000..66c69ee1 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_common_selectors_and_labels.tpl @@ -0,0 +1,21 @@ +{{/* Common labels and selectors */}} + +{{- define "vastcsi.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* Common labels */}} +{{- define "vastcsi.labels" -}} +helm.sh/chart: {{ include "vastcsi.chart" . }} +{{ include "vastcsi.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* Common selectors */}} +{{- define "vastcsi.selectorLabels" -}} +app.kubernetes.io/name: {{ include "vastcsi.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_vms_auth.tpl b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_vms_auth.tpl new file mode 100644 index 00000000..4fad2960 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/templates/shared/_vms_auth.tpl @@ -0,0 +1,47 @@ +{{/*Set of templates for working with vms credentials and vms session certificates*/}} + +{{/* Volume declarations for vms credentials and vms session certificates */}} +{{- define "vastcsi.vmsAuthVolume" -}} +{{- if and .Values.sslCert .Values.sslCertsSecretName -}} +{{- + fail (printf "Ambiguous origin of the 'sslCert'. The certificate is found in both the '%s' secret and the command line --from-file argument." .Values.sslCertsSecretName) +-}} +{{- end -}} +{{- if and .ca_bundle (not .Values.verifySsl) -}} + {{- fail "When sslCert is provided `verifySsl` must be set to true." 
-}} +{{- end }} + +{{- if $.Values.secretName }} +- name: vms-auth + secret: + secretName: {{ $.Values.secretName | quote }} + items: + - key: username + path: username + - key: password + path: password +{{- end }} +{{- if $.ca_bundle }} +- name: vms-ca-bundle + secret: + secretName: {{ $.ca_bundle }} + items: + - key: ca-bundle.crt + path: ca-certificates.crt +{{- end }} +{{- end }} + + +{{/* Volume bindings for vms credentials and vms session certificates */}} +{{ define "vastcsi.vmsAuthVolumeMount" }} +{{- if $.Values.secretName }} +- name: vms-auth + mountPath: /opt/vms-auth + readOnly: true +{{- end }} +{{- if $.ca_bundle }} +- name: vms-ca-bundle + mountPath: /etc/ssl/certs + readOnly: true +{{- end }} +{{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/values.schema.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/values.schema.yaml new file mode 100644 index 00000000..8da006dd --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/values.schema.yaml @@ -0,0 +1,1999 @@ +description: VastCSIDriver is a common specification for VAST CSI Controller and VAST CSI Node plugins, typically intended for creation in a single instance. + + +spec: + description: Spec defines the desired state of VastCSIDriver + type: object + properties: + # -- Controller -- start -> + controller: + type: object + description: Controller configuration + properties: + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. 
for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". 
+ The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. 
If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. 
When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. 
+ items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + dnsPolicy: + type: string + default: {{ .Values.controller.dnsPolicy }} + description: DNSPolicy defines how a pod's DNS will be configured. + enum: + - Default + - ClusterFirstWithHostNet + - ClusterFirst + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a map of key-value pairs. For + the pod to be eligible to run on a node, the node must have each + of the indicated key-value pairs as labels. + type: object + resources: + type: object + description: Resource limits and requests for controller components + properties: + csiAttacher: + type: object + properties: + limits: &limits + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: &requests + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + csiProvisioner: + type: object + properties: + limits: + <<: *limits + requests: + <<: *requests + csiResizer: + type: object + properties: + limits: + <<: *limits + requests: + <<: *requests + csiSnapshotter: + type: object + properties: + limits: + <<: *limits + requests: + <<: *requests + csiVastPlugin: + type: object + properties: + limits: + <<: *limits + requests: + <<: *requests + extraArgs: + type: object + properties: + csiAttacher: + type: array + items: + type: string + description: | + Additional arguments for csiAttacher. + For further options, check + https://github.com/kubernetes-csi/external-attacher#command-line-options + csiProvisioner: + type: array + items: + type: string + description: | + Additional arguments for csiProvisioner. + For further options, check + https://github.com/kubernetes-csi/external-provisioner#command-line-options + csiResizer: + type: array + items: + type: string + description: | + Additional arguments for csiResizer. + For further options, check + https://github.com/kubernetes-csi/external-resizer#recommended-optional-arguments + csiSnapshotter: + type: array + items: + type: string + description: | + Additional arguments for csiSnapshotter. 
+ For further options, check
+ https://github.com/kubernetes-csi/external-snapshotter#csi-external-snapshotter-sidecar-command-line-options
+ runOnControlPlane:
+ type: boolean
+ description: Whether to run on the control plane
+ default: {{ .Values.controller.runOnControlPlane }}
+ runOnMaster:
+ type: boolean
+ description: Whether to run on the master node
+ default: {{ .Values.controller.runOnMaster }}
+ tolerations:
+ type: array
+ default: {{ .Values.controller.tolerations }}
+ description: Tolerations for all the pods deployed by the VASTData CSI Controller.
+ The pod with this toleration attached will tolerate any taint that matches the
+ triple <key,value,effect> using the matching operator <operator>.
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ description: Effect indicates the taint effect to match. Empty means match
+ all taint effects. When specified, allowed values are NoSchedule,
+ PreferNoSchedule and NoExecute.
+ key:
+ type: string
+ description: Key is the taint key that the toleration applies to. Empty means
+ match all taint keys. If the key is empty, operator must be Exists; this
+ combination means to match all values and all keys.
+ operator:
+ type: string
+ description: Operator represents a key's relationship to the value. Valid
+ operators are Exists and Equal. Defaults to Equal. Exists is equivalent to
+ wildcard for value, so that a pod can tolerate all taints of a particular category.
+ value:
+ type: string
+ description: Value is the taint value the toleration matches to. If the operator
+ is Exists, the value should be empty, otherwise just a regular string.
+ tolerationSeconds:
+ type: integer
+ description: TolerationSeconds represents the period of time the toleration
+ (which must be of effect NoExecute, otherwise this field is ignored) tolerates
+ the taint. By default, it is not set, which means tolerate the taint forever
+ (do not evict). Zero and negative values will be treated as 0 (evict
+ immediately) by the system.
+ # -- Controller -- end -> + # -- Node -- start -> + node: + type: object + properties: + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. 
If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. 
When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. 
If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. 
for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + dnsPolicy: + default: {{ .Values.node.dnsPolicy }} + description: DNSPolicy defines how a pod's DNS will be configured. 
+ type: string
+ enum:
+ - Default
+ - ClusterFirstWithHostNet
+ - ClusterFirst
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector specifies a map of key-value pairs. For
+ the pod to be eligible to run on a node, the node must have each
+ of the indicated key-value pairs as labels.
+ type: object
+ tolerations:
+ type: array
+ default: {{ .Values.node.tolerations }}
+ description: Tolerations for all the pods deployed by the VASTData CSI Node.
+ The pod with this toleration attached will tolerate any taint that matches the
+ triple <key,value,effect> using the matching operator <operator>.
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ description: Effect indicates the taint effect to match. Empty means match
+ all taint effects. When specified, allowed values are NoSchedule,
+ PreferNoSchedule and NoExecute.
+ key:
+ type: string
+ description: Key is the taint key that the toleration applies to. Empty means
+ match all taint keys. If the key is empty, operator must be Exists; this
+ combination means to match all values and all keys.
+ operator:
+ type: string
+ description: Operator represents a key's relationship to the value. Valid
+ operators are Exists and Equal. Defaults to Equal. Exists is equivalent to
+ wildcard for value, so that a pod can tolerate all taints of a particular category.
+ value:
+ type: string
+ description: Value is the taint value the toleration matches to. If the operator
+ is Exists, the value should be empty, otherwise just a regular string.
+ tolerationSeconds:
+ type: integer
+ description: TolerationSeconds represents the period of time the toleration
+ (which must be of effect NoExecute, otherwise this field is ignored) tolerates
+ the taint. By default, it is not set, which means tolerate the taint forever
+ (do not evict). Zero and negative values will be treated as 0 (evict
+ immediately) by the system.
+ propagateHostMountOptions:
+ description: >-
+ Defines whether to use the default host /etc/nfsmount.d mount configuration directory as source for mount options
+ https://man7.org/linux/man-pages/man5/nfsmount.conf.5.html
+ default: {{ .Values.node.propagateHostMountOptions }}
+ type: boolean
+ resources:
+ type: object
+ description: Resource limits and requests for node components
+ properties:
+ csiVastPlugin:
+ type: object
+ properties:
+ limits:
+ <<: *limits
+ requests:
+ <<: *requests
+ nodeDriverRegistrar:
+ type: object
+ properties:
+ limits:
+ <<: *limits
+ requests:
+ <<: *requests
+ # -- Node -- end ->
+ # -- images -- start ->
+ image:
+ description: Images used for components
+ type: object
+ properties:
+ csiAttacher:
+ type: object
+ properties:
+ imagePullPolicy:
+ default: {{ .Values.image.csiAttacher.imagePullPolicy }}
+ type: string
+ enum:
+ - Always
+ - IfNotPresent
+ - Never
+ repository:
+ default: {{ .GlobalValues.overrides.csiAttacher.repository }}
+ type: string
+ csiNodeDriverRegistrar:
+ type: object
+ properties:
+ imagePullPolicy:
+ default: {{ .Values.image.csiNodeDriverRegistrar.imagePullPolicy }}
+ type: string
+ enum:
+ - Always
+ - IfNotPresent
+ - Never
+ repository:
+ default: {{ .GlobalValues.overrides.csiNodeDriverRegistrar.repository }}
+ type: string
+ csiProvisioner:
+ type: object
+ properties:
+ imagePullPolicy:
+ default: {{ .Values.image.csiProvisioner.imagePullPolicy }}
+ type: string
+ enum:
+ - Always
+ - IfNotPresent
+ - Never
+ repository:
+ default: {{ .GlobalValues.overrides.csiProvisioner.repository }}
+ type: string
+ csiResizer:
+ type: object
+ properties:
+ imagePullPolicy:
+ default: {{ .Values.image.csiResizer.imagePullPolicy }}
+ type: string
+ enum:
+ - Always
+ - IfNotPresent
+ - Never
+ repository:
+ default: {{ .GlobalValues.overrides.csiResizer.repository }}
+ type: string
+ csiSnapshotter:
+ type: object
+ properties:
+ imagePullPolicy:
+ default: {{ .Values.image.csiSnapshotter.imagePullPolicy }}
+ type: string
+ 
enum: + - Always + - IfNotPresent + - Never + repository: + default: {{ .GlobalValues.overrides.csiSnapshotter.repository }} + type: string + csiVastPlugin: + type: object + properties: + imagePullPolicy: + default: {{ .Values.image.csiVastPlugin.imagePullPolicy }} + type: string + enum: + - Always + - IfNotPresent + - Never + repository: + default: {{ .GlobalValues.overrides.csiVastPlugin.repository }} + type: string + # -- images -- end -> + deletionViewPolicy: + default: {{ .Values.deletionViewPolicy }} + description: Dedicated view policy to delete volumes. Driver-wide option. Preferably, utilize the Trash API for volume deletion. If using multiple clusters, ensure a view policy with the same name is created for each cluster. + type: string + deletionVipPool: + default: {{ .Values.deletionVipPool }} + description: Dedicated VIP pool to delete volumes. Driver-wide option. Preferably, utilize the Trash API for volume deletion. If using multiple clusters, ensure a VIP pool with the same name is created for each cluster. + type: string + dontUseTrashApi: + default: {{ .Values.dontUseTrashApi }} + description: Whether to use Trash API + type: boolean + attachRequired: + type: boolean + default: {{ .Values.attachRequired }} + description: |- + Indicates whether this CSI driver requires an attach operation, implementing ControllerPublishVolume. + If set to false, the driver will perform ControllerPublishVolume as part of NodePublishVolume operation, + potentially speeding up volume attachment where HTTP/HTTPS ports are not open. + useLocalIpForMount: + description: Use this local IP address for mounting, when the StorageClass does not define a vipPool. This is useful for DPU-based deployments. + type: string + default: "" + secretName: + description: |- + Name of the global secret that holds VAST credentials. + For improved granularity per StorageClass, it's advisable to define the secret as a VastStorage CRD. 
+ type: string
+ default: {{ .Values.secretName }}
+ deprecated: true
+ endpoint:
+ description: API endpoint of VAST appliance - should be provided by user if "global" secretName is provided.
+ type: string
+ default: {{ .Values.endpoint }}
+ deprecated: true
+ sslCertsSecretName:
+ description: |-
+ Name of the secret that holds the SSL certificates for the VAST appliance.
+ For improved granularity per StorageClass, it's advisable to define the SSL certificate as part of the VastStorage CRD.
+ type: string
+ default: {{ .Values.sslCertsSecretName }}
+ deprecated: true
+ sslCert:
+ description: Path (absolute or relative) to SSL certificate for verifying the VAST REST API.
+ type: string
+ default: {{ .Values.sslCert }}
+ deprecated: true
+ verifySsl:
+ type: boolean
+ default: {{ .Values.verifySsl }}
+ numWorkers:
+ description: The number of worker threads the CSI plugin uses to serve requests simultaneously.
+ type: integer
+ format: int32
+ default: {{ .Values.numWorkers }}
+ operationTimeout:
+ description: Timeout for all Remote Procedure Call (RPC) requests to the CSI driver.
+ type: integer
+ format: int32
+ default: {{ .Values.operationTimeout }}
+ operationRetryIntervalStart:
+ description: |-
+ Each time a failure occurs, sidecar containers initiate retries
+ but only after waiting for 'operationRetryIntervalStart' seconds
+ which then doubles with each subsequent failure until it reaches `operationRetryIntervalMax`
+ type: integer
+ format: int32
+ default: {{ .Values.operationRetryIntervalStart }}
+ operationRetryIntervalMax:
+ description: Maximum interval between attempts.
+ type: integer
+ format: int32
+ default: {{ .Values.operationRetryIntervalMax }}
+ truncateVolumeName:
+ description: Truncate VAST quota name if name length is greater than this number. Set truncateVolumeName to null to disable truncation.
+ type: integer
+ format: int32
+ default: {{ .Values.truncateVolumeName }}
+ applySecurityContextConstraints:
+ description: >-
+ Flag specifies whether to enforce the defined security context constraints for pods
+ using specified service accounts in OpenShift, essential for ensuring proper permissions for mounting volumes.
+ type: boolean
+ default: {{ .Values.applySecurityContextConstraints }}
+ imagePullSecrets:
+ description: Image pull secrets for the CSI driver container image.
+ {{- if .GlobalValues.imagePullSecret }}
+ default:
+ - name: {{ .GlobalValues.imagePullSecret }}
+ {{- end }}
+ type: array
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ x-kubernetes-list-type: atomic
+ kubeletPath:
+ default: {{ .Values.kubeletPath }}
+ type: string
+ description: Path to kubelet directory
+ logLevel:
+ default: {{ .Values.logLevel }}
+ type: integer
+ minimum: 0
+ maximum: 5
+ description: |-
+ The logging level of deployed containers expressed as an integer
+ from 0 (low detail) to 5 (high detail). 0 only logs errors. 3 logs most
+ RPC requests/responses and some detail about driver actions.
+
+specDescriptors:
+ # Controller
+ - description: Affinity, resource constraints, DNS policy, and other settings for Controller components.
+ displayName: Controller runtime configuration settings
+ path: controller
+ - description: NodeAffinity specifies the nodes where a pod can be scheduled based on node labels.
+ displayName: nodeAffinity
+ path: controller.nodeAffinity
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity'
+ - description: PodAffinity defines rules for scheduling pods based on the labels of other pods already running on nodes within the Kubernetes cluster.
+ displayName: podAffinity + path: controller.podAffinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:podAffinity' + - description: PodAntiAffinity defines rules for scheduling pods to avoid placing them on nodes where other pods with certain labels are already running within the Kubernetes cluster. + displayName: podAntiAffinity + path: controller.podAntiAffinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:podAntiAffinity' + - description: Controller tolerations + displayName: Tolerations + path: controller.tolerations + - description: NodeSelector specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels. + displayName: nodeSelector + path: controller.nodeSelector + - description: Controller dnsPolicy + displayName: dnsPolicy + path: controller.dnsPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:select:Default' + - 'urn:alm:descriptor:com.tectonic.ui:select:ClusterFirstWithHostNet' + - 'urn:alm:descriptor:com.tectonic.ui:select:ClusterFirst' + - description: 'Adds node-role.kubernetes.io/master: "" taint to nodeSelector list' + displayName: runOnMaster + path: controller.runOnMaster + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - description: 'Adds node-role.kubernetes.io/control-plane: "" taint to nodeSelector list' + displayName: runOnControlPlane + path: controller.runOnControlPlane + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - description: resources for VAST CSI driver container image. + displayName: resources for VAST CSI driver container image + path: controller.resources.csiVastPlugin + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements' + - description: resources for CSI Attacher container image. 
+ displayName: resources for CSI Attacher container image + path: controller.resources.csiAttacher + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements' + - description: resources for CSI Provisioner container image. + displayName: resources for CSI Provisioner container image + path: controller.resources.csiProvisioner + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements' + - description: resources for CSI Resizer container image. + displayName: resources for CSI Resizer container image + path: controller.resources.csiResizer + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements' + - description: resources for CSI Snapshotter container image. + displayName: resources for CSI Snapshotter container image + path: controller.resources.csiSnapshotter + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements' + # Node + - description: Affinity, resource constraints, DNS policy, and other setting for Node components. + displayName: Node runtime configuration settings + path: node + - description: NodeAffinity specifies the nodes where a pod can be scheduled based on node labels. + displayName: nodeAffinity + path: node.nodeAffinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: PodAffinity defines rules for scheduling pods based on the labels of other pods already running on nodes within the Kubernetes cluster. + displayName: podAffinity + path: node.podAffinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:podAffinity' + - description: PodAntiAffinity defines rules for scheduling pods to avoid placing them on nodes where other pods with certain labels are already running within the Kubernetes cluster. 
+ displayName: podAntiAffinity + path: node.podAntiAffinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:podAntiAffinity' + - description: Node tolerations + displayName: Tolerations + path: node.tolerations + - description: NodeSelector specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels. + displayName: nodeSelector + path: node.nodeSelector + - description: Node dnsPolicy + displayName: dnsPolicy + path: node.dnsPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:select:Default' + - 'urn:alm:descriptor:com.tectonic.ui:select:ClusterFirstWithHostNet' + - 'urn:alm:descriptor:com.tectonic.ui:select:ClusterFirst' + - description: resources for VAST CSI driver container image. + displayName: resources for VAST CSI driver container image + path: node.resources.csiVastPlugin + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements' + - description: resources for CSI nodeDriverRegistrar container image. + displayName: resources for CSI nodeDriverRegistrar container image + path: node.resources.nodeDriverRegistrar + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements' + - description: Enable the default host `/etc/nfsmount.d` mount configuration directory as source for mount options. + displayName: propagateHostMountOptions + path: node.propagateHostMountOptions + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + # Images + - description: Docker images used for CSI configuration and associated image pull policies. + displayName: CSI images + path: image + - description: Vast CSI driver container image to use. + displayName: Vast CSI driver container image + path: image.csiVastPlugin.repository + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - description: Image pull policy for Vast CSI driver container image. 
+ displayName: Image pull policy for Vast CSI driver container image + path: image.csiVastPlugin.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: CSI Attacher container image to use. + displayName: CSI Attacher container image + path: image.csiAttacher.repository + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - description: Image pull policy for CSI Attacher container image. + displayName: Image pull policy for CSI Attacher container image + path: image.csiAttacher.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: CSI NodeDriverRegistrar container image to use. + displayName: CSI NodeDriverRegistrar container image + path: image.csiNodeDriverRegistrar.repository + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - description: Image pull policy for CSI NodeDriverRegistrar container image. + displayName: Image pull policy for CSI NodeDriverRegistrar container image + path: image.csiNodeDriverRegistrar.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: CSI Provisioner container image to use. + displayName: CSI Provisioner container image + path: image.csiProvisioner.repository + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - description: Image pull policy for CSI Provisioner container image. + displayName: Image pull policy for CSI Provisioner container image + path: image.csiProvisioner.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: CSI Resizer container image to use. + displayName: CSI Resizer container image + path: image.csiResizer.repository + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - description: Image pull policy for CSI Resizer container image. 
+ displayName: Image pull policy for CSI Resizer container image + path: image.csiResizer.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: CSI Snapshotter container image to use. + displayName: CSI Snapshotter container image + path: image.csiSnapshotter.repository + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - description: Image pull policy for CSI Snapshotter container image. + displayName: Image pull policy for CSI Snapshotter container image + path: image.csiSnapshotter.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + # Top Level + - description: Whether to verify SSL. This option applies to all sessions across all clusters. + displayName: verifySsl + path: verifySsl + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - description: Use this local IP address for mounting, when the StorageClass does not define a vipPool. This is useful for DPU-based deployments. + displayName: useLocalIpForMount + path: useLocalIpForMount + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - description: >- + When the flag is set to 'true', the controller will opt to using a local mount for deleting data from discarded volumes, + as opposed to sending the request to the VMS over REST. + Please contact VAST Support before modifying this setting. + displayName: dontUseTrashApi + path: dontUseTrashApi + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - description: >- + Flag specifies whether to enforce the defined security context constraints for pods + using specified service accounts in OpenShift, essential for ensuring proper permissions for mounting volumes. + displayName: applySecurityContextConstraints + path: applySecurityContextConstraints + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + # Hidden fields. 
The field(s) will be hidden on the UI but can still be accessed through the YAML editor. + # Delete "hidden" descriptor if you think field should be visible. + - description: |- + Name of the global secret that holds VAST credentials. + For improved granularity per StorageClass, it's advisable to define the secret as a VastStorage CRD. + displayName: secretName + path: secretName + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: API endpoint of VAST appliance - should be provided by user if "global" secretName is provided. + displayName: endpoint + path: endpoint + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: |- + Name of the secret that holds the SSL certificates for the VAST appliance. + For improved granularity per StorageClass, it's advisable to define the SSL certificate as a part VastStorage CRD. + displayName: sslCertsSecretName + path: sslCertsSecretName + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: Path (absolute or relative) to SSL certificate for verifying the VAST REST API. + displayName: sslCert + path: sslCert + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: |- + Indicates this CSI driver requires an attachment operation as it implements the ControllerPublishVolume + if set to false, the driver will perform ControllerPublishVolume as a part of NodePublishVolume op + which might speed up the volume attach operation significantly but not appropriate for workload nodes where + http/https ports are not open. 
+ displayName: attachRequired + path: attachRequired + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: Global deletionViewPolicy + displayName: deletionViewPolicy + path: deletionViewPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: Global deletionVipPool + displayName: deletionVipPool + path: deletionVipPool + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: >- + The logging level of deployed containers expressed as an integer + from 0 (low detail) to 5 (high detail). 0 only logs errors. 3 logs most + RPC requests/responses and some detail about driver actions. + displayName: logLevel + path: logLevel + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: |- + Each time a failure occurs, sidecar containers initiate retries + but only after waiting for 'operationRetryIntervalStart' seconds + which then doubles with each subsequent failure until it reaches `operationRetryIntervalMax`. + displayName: operationRetryIntervalStart + path: operationRetryIntervalStart + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: Maximum interval between attempts. + displayName: operationRetryIntervalMax + path: operationRetryIntervalMax + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + - description: Truncate VAST quota name if name length is greater than this number. set truncateVolumeName as null to disable truncation. 
+ displayName: truncateVolumeName + path: truncateVolumeName + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:hidden' + + +example: + apiVersion: storage.vastdata.com/v1 + kind: VastCSIDriver + metadata: + name: vastcsidriver + spec: + controller: + podAffinity: {} + podAntiAffinity: {} + nodeAffinity: {} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + nodeSelector: {} + tolerations: {{ .Values.controller.tolerations }} + runOnControlPlane: {{ .Values.controller.runOnControlPlane }} + runOnMaster: {{ .Values.controller.runOnMaster }} + resources: + csiAttacher: + limits: + memory: {{ .Values.controller.resources.csiAttacher.limits.memory }} + requests: + cpu: {{ .Values.controller.resources.csiAttacher.requests.cpu }} + memory: {{ .Values.controller.resources.csiAttacher.requests.memory }} + csiProvisioner: + limits: + memory: {{ .Values.controller.resources.csiProvisioner.limits.memory }} + requests: + cpu: {{ .Values.controller.resources.csiProvisioner.requests.cpu }} + memory: {{ .Values.controller.resources.csiProvisioner.requests.memory }} + csiResizer: + limits: + memory: {{ .Values.controller.resources.csiResizer.limits.memory }} + requests: + cpu: {{ .Values.controller.resources.csiResizer.requests.cpu }} + memory: {{ .Values.controller.resources.csiResizer.requests.memory }} + csiSnapshotter: + limits: + memory: {{ .Values.controller.resources.csiSnapshotter.limits.memory }} + requests: + cpu: {{ .Values.controller.resources.csiSnapshotter.requests.cpu }} + memory: {{ .Values.controller.resources.csiSnapshotter.requests.memory }} + csiVastPlugin: + limits: + memory: {{ .Values.controller.resources.csiVastPlugin.limits.memory }} + requests: + cpu: {{ .Values.controller.resources.csiVastPlugin.requests.cpu }} + memory: {{ .Values.controller.resources.csiVastPlugin.requests.memory }} + extraArgs: + csiProvisioner: {{ .Values.controller.extraArgs.csiProvisioner }} + csiSnapshotter: {{ .Values.controller.extraArgs.csiSnapshotter }} + csiAttacher: {{ 
.Values.controller.extraArgs.csiAttacher }} + csiResizer: {{ .Values.controller.extraArgs.csiResizer }} + node: + podAffinity: {} + podAntiAffinity: {} + nodeAffinity: {} + dnsPolicy: {{ .Values.node.dnsPolicy }} + nodeSelector: {} + tolerations: {{ .Values.node.tolerations }} + propagateHostMountOptions: {{ .Values.node.propagateHostMountOptions }} + resources: + csiVastPlugin: + limits: + memory: {{ .Values.node.resources.csiVastPlugin.limits.memory }} + requests: + cpu: {{ .Values.node.resources.csiVastPlugin.requests.cpu }} + memory: {{ .Values.node.resources.csiVastPlugin.requests.memory }} + nodeDriverRegistrar: + limits: + memory: {{ .Values.node.resources.nodeDriverRegistrar.limits.memory }} + requests: + cpu: {{ .Values.node.resources.nodeDriverRegistrar.requests.cpu }} + memory: {{ .Values.node.resources.nodeDriverRegistrar.requests.memory }} + image: + csiAttacher: + imagePullPolicy: {{ .Values.image.csiAttacher.imagePullPolicy }} + repository: {{ .GlobalValues.overrides.csiAttacher.repository }} + csiNodeDriverRegistrar: + imagePullPolicy: {{ .Values.image.csiNodeDriverRegistrar.imagePullPolicy }} + repository: {{ .GlobalValues.overrides.csiNodeDriverRegistrar.repository }} + csiProvisioner: + imagePullPolicy: {{ .Values.image.csiProvisioner.imagePullPolicy }} + repository: {{ .GlobalValues.overrides.csiProvisioner.repository }} + csiResizer: + imagePullPolicy: {{ .Values.image.csiResizer.imagePullPolicy }} + repository: {{ .GlobalValues.overrides.csiResizer.repository }} + csiSnapshotter: + imagePullPolicy: {{ .Values.image.csiSnapshotter.imagePullPolicy }} + repository: {{ .GlobalValues.overrides.csiSnapshotter.repository }} + csiVastPlugin: + imagePullPolicy: {{ .Values.image.csiVastPlugin.imagePullPolicy }} + repository: {{ .GlobalValues.overrides.csiVastPlugin.repository }} + deletionViewPolicy: {{ .Values.deletionViewPolicy }} + deletionVipPool: {{ .Values.deletionVipPool }} + dontUseTrashApi: {{ .Values.dontUseTrashApi }} + imagePullSecrets: {{ 
empty .GlobalValues.imagePullSecret | ternary "[]" (printf "[{\"name\": \"%s\"}]" .GlobalValues.imagePullSecret) }} + kubeletPath: {{ .Values.kubeletPath }} + logLevel: {{ .Values.logLevel }} + numWorkers: {{ .Values.numWorkers }} + operationRetryIntervalMax: {{ .Values.operationRetryIntervalMax }} + operationRetryIntervalStart: {{ .Values.operationRetryIntervalStart }} + operationTimeout: {{ .Values.operationTimeout }} + truncateVolumeName: {{ .Values.truncateVolumeName }} + useLocalIpForMount: {{ .Values.useLocalIpForMount }} + verifySsl: {{ .Values.verifySsl }} + secretName: {{ .Values.secretName }} + endpoint: {{ .Values.endpoint }} + sslCertsSecretName: {{ .Values.sslCertsSecretName }} + attachRequired: {{ .Values.attachRequired }} + applySecurityContextConstraints: {{ .Values.applySecurityContextConstraints }} diff --git a/charts/vastcsi-operator/crd-charts/vastcsidriver/values.yaml b/charts/vastcsi-operator/crd-charts/vastcsidriver/values.yaml new file mode 100644 index 00000000..7938f39f --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vastcsidriver/values.yaml @@ -0,0 +1,236 @@ +#################### +# VAST REST SESSION ATTRIBUTES +#################### + +# Secret name, which corresponds to a secret containing credentials to login - should be provided by user if secretName is not provided in StorageClass attributes +# Secret must contain username and password fields +# Example: kubectl create secret generic vast-mgmt --from-literal=username='< VAST username >' --from-literal=password='< VAST password >' +# NOTE: This field is backwards compatibility purposes only, and should not be used otherwise +secretName: "" +# API endpoint of VAST appliance - should be provided by user if secretName is not provided in StorageClass attributes +# NOTE: This field is backwards compatibility purposes only, and should not be used otherwise +endpoint: "" +# Path (absolute or relative) to SSL certificate for verifying the VAST REST API. 
+# Must be set using `set-file` option eg `--set-file sslCert=< path to sslCert.crt >`
+# sslCertsSecretName secret and sslCert option in values.yaml are mutually exclusive. Make sure to use only one of them.
+# NOTE: This field is for backwards-compatibility purposes only, and should not be used otherwise
+sslCert: ""
+# Secret name, which corresponds to a secret containing an SSL certificate for verifying the VAST REST API
+# Example: kubectl create secret generic vast-tls --from-file=ca-bundle.crt=< path to sslCert.crt >
+# sslCertsSecretName secret and sslCert option in values.yaml are mutually exclusive. Make sure to use only one of them.
+# NOTE: This field is for backwards-compatibility purposes only, and should not be used otherwise
+sslCertsSecretName: ""
+# Set true to enable certificate validity test
+verifySsl: false
+
+####################
+# DELETE VOLUMES
+####################
+# Dedicated vip pool to delete volumes. Can have the same value as regular StorageClass option `vipPool`
+# - value is not required when VAST cluster version is 4.6.0 onwards and `DontUseTrashApi` flag is set to false
+deletionVipPool: ""
+# Dedicated view policy to delete volumes. Can have the same value as regular StorageClass option `viewPolicy`
+# - value is not required when VAST cluster version is 4.6.0 onwards and `DontUseTrashApi` flag is set to false
+deletionViewPolicy: ""
+# When the flag is set to 'true', the controller will opt to use a local mount for deleting data from discarded volumes,
+# as opposed to sending the request to the VMS over REST.
+# Please contact VAST Support before modifying this setting.
+dontUseTrashApi: false
+# Use this local IP address for mounting, when the StorageClass does not define a vipPool.
+# This is useful for DPU-based deployments.
+useLocalIpForMount: ""
+
+####################
+# VAST PROVISIONER RUNTIME PARAMETERS
+####################
+
+# The number of worker threads the CSI plugin uses to serve requests simultaneously.
+numWorkers: 10 +# Timeout of all calls to CSI driver. +operationTimeout: 15 +# Each time a failure occurs, sidecar containers initiate retries +# but only after waiting for 'operationRetryIntervalStart' seconds +# which then doubles with each subsequent failure until it reaches `operationRetryIntervalMax` +operationRetryIntervalStart: 10 + +# Maximum interval between attempts. +operationRetryIntervalMax: 60 + +# Truncate VAST quota name if name length is greater than this number. +# set `truncateVolumeName: null` to disable truncation. +truncateVolumeName: 64 + +# indicates this CSI driver requires an attachment operation as it implements the `ControllerPublishVolume` +# if set to false, the driver will perform `ControllerPublishVolume` as a part of NodePublishVolume op +# which might speed up the volume attach operation significantly but not appropriate for workload nodes where +# http/https ports are not open. +attachRequired: true + + +#################### +# VAST CONTROLLER AND NODE IMAGE SPECIFICATION +#################### + +image: + csiVastPlugin: + repository: null # User-defined repository; if not provided, falls back to defaultRepository + defaultRepository: null # Overrides repository globally from charts/vastcsi-operator/values.yaml (overrides.csiVastPlugin.repository) + imagePullPolicy: IfNotPresent + csiAttacher: + repository: null # User-defined repository; if not provided, falls back to defaultRepository + defaultRepository: null # Overrides repository globally from charts/vastcsi-operator/values.yaml (overrides.csiAttacher.repository) + imagePullPolicy: IfNotPresent + csiNodeDriverRegistrar: + repository: null # User-defined repository; if not provided, falls back to defaultRepository + defaultRepository: null # Overrides repository globally from charts/vastcsi-operator/values.yaml (overrides.csiNodeDriverRegistrar.repository) + imagePullPolicy: IfNotPresent + csiProvisioner: + repository: null # User-defined repository; if not provided, falls 
back to defaultRepository
+    defaultRepository: null # Overrides repository globally from charts/vastcsi-operator/values.yaml (overrides.csiProvisioner.repository)
+    imagePullPolicy: IfNotPresent
+  csiResizer:
+    repository: null # User-defined repository; if not provided, falls back to defaultRepository
+    defaultRepository: null # Overrides repository globally from charts/vastcsi-operator/values.yaml (overrides.csiResizer.repository)
+    imagePullPolicy: IfNotPresent
+  csiSnapshotter:
+    repository: null # User-defined repository; if not provided, falls back to defaultRepository
+    defaultRepository: null # Overrides repository globally from charts/vastcsi-operator/values.yaml (overrides.csiSnapshotter.repository)
+    imagePullPolicy: IfNotPresent
+
+####################
+# VAST CONTROLLER AND NODE BEHAVIOR
+#
+# WARNING - these parameters are for advanced users.
+# Setting these incorrectly may prevent the VAST CSI Driver from running correctly.
+# We recommend to consult with VAST Support before changing any of the following parameters
+####################
+
+controller:
+  # runOnMaster flag indicates whether CSI Controller should be run on master.
+  runOnMaster: false
+  # runOnControlPlane flag indicates whether CSI Controller should be run on control plane node.
+  runOnControlPlane: false
+  # determine how DNS (Domain Name System) resolution should be handled within Pod.
+  # available values: Default, ClusterFirstWithHostNet, ClusterFirst
+  dnsPolicy: Default
+  # nodeSelector is the way to restrict pod to be assigned on certain node/nodes.
+  # Specify node selector if you want node and controller containers to be assigned only to specific node/nodes of
+  # your cluster.
+  # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector.
+ nodeSelector: {} + # If specified, the pod's tolerations + # https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # Allows to specify which nodes your pod is eligible to be scheduled based on labels on pods that are already running on the node. + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + podAffinity: {} + # Allows to specify conditions for preventing pods from being scheduled on nodes where certain labels are already present on other pods. + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + podAntiAffinity: {} + # Allows to specify which nodes your pod is eligible to be scheduled on based on labels on the node + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + nodeAffinity: {} + # Resources describes the compute resource requirements. + resources: + csiProvisioner: + limits: + memory: 400Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiVastPlugin: + limits: + memory: 400Mi + requests: + cpu: 100m + memory: 50Mi + extraArgs: + # For further options, check + # https://github.com/kubernetes-csi/external-provisioner#command-line-options + # Example: + # controller: + # extraArgs: + # csiProvisioner: + # - kube-api-qps=500 + # - kube-api-burst=1000 + csiProvisioner: [] + # For further options, check + # https://github.com/kubernetes-csi/external-snapshotter#csi-external-snapshotter-sidecar-command-line-options + csiSnapshotter: [] + # For further options, check + # https://github.com/kubernetes-csi/external-attacher#command-line-options + csiAttacher: [] + # For further options, check + # 
https://github.com/kubernetes-csi/external-resizer#recommended-optional-arguments
+    csiResizer: []
+  # priorityClassName is the name of priority class to be used for the pod.
+  priorityClassName: system-cluster-critical
+
+node:
+  # See controller.dnsPolicy section for details
+  dnsPolicy: Default
+  # See controller.nodeSelector section for details
+  nodeSelector: {}
+  # See controller.tolerations section for details
+  tolerations: []
+  # See controller.podAffinity section for details
+  podAffinity: {}
+  # See controller.podAntiAffinity section for details
+  podAntiAffinity: {}
+  # See controller.nodeAffinity section for details
+  nodeAffinity: {}
+  # Enable the default host `/etc/nfsmount.d` mount configuration directory as source for mount options
+  # https://man7.org/linux/man-pages/man5/nfsmount.conf.5.html
+  propagateHostMountOptions: true
+  # see controller.resources section for details
+  resources:
+    nodeDriverRegistrar:
+      limits:
+        memory: 100Mi
+      requests:
+        cpu: 10m
+        memory: 20Mi
+    csiVastPlugin:
+      limits:
+        memory: 400Mi
+      requests:
+        cpu: 100m
+        memory: 50Mi
+  # See controller.priorityClassName section for details
+  priorityClassName: system-cluster-critical
+
+# The path to the kubelet root dir. Must be provided when Kubernetes is not installed in its default directory.
+kubeletPath: "/var/lib/kubelet"
+# Reference to one or more secrets to be used when pulling images
+# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+# - name: "image-pull-secret"
+imagePullSecrets: []
+# Log level of CSI plugin
+logLevel: 5
+
+####################
+# OPENSHIFT SETTINGS
+####################
+
+# Flag specifies whether to enforce the defined security context constraints for pods using specified
+# service accounts in OpenShift, essential for ensuring proper permissions for mounting volumes.
+applySecurityContextConstraints: true diff --git a/charts/vastcsi-operator/crd-charts/vaststorage/.helmignore b/charts/vastcsi-operator/crd-charts/vaststorage/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vaststorage/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/vastcsi-operator/crd-charts/vaststorage/Chart.yaml b/charts/vastcsi-operator/crd-charts/vaststorage/Chart.yaml new file mode 100644 index 00000000..b9b87860 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vaststorage/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +description: Helm chart for Deployment of VAST Container Storage Interface (CSI) +keywords: + - vast + - csi + - driver + - vastdata + - csi-driver +name: vaststorage +type: application +version: 0.1.0 diff --git a/charts/vastcsi-operator/crd-charts/vaststorage/templates/_helpers.tpl b/charts/vastcsi-operator/crd-charts/vaststorage/templates/_helpers.tpl new file mode 100644 index 00000000..7e2aabab --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vaststorage/templates/_helpers.tpl @@ -0,0 +1,29 @@ +{{/* Create chart name and version as used by the chart label. 
*/}} +{{- define "vastcsi.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{- define "vastcsi.namespace" -}} +{{- coalesce $.Release.Namespace "vast-csi" | quote -}} +{{- end }} + +{{/* Common labels and selectors */}} +{{- define "vastcsi.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* Common labels */}} +{{- define "vastcsi.labels" -}} +helm.sh/chart: {{ include "vastcsi.chart" . }} +{{ include "vastcsi.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* Common selectors */}} +{{- define "vastcsi.selectorLabels" -}} +app.kubernetes.io/name: {{ include "vastcsi.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vaststorage/templates/snapshot-class.yaml b/charts/vastcsi-operator/crd-charts/vaststorage/templates/snapshot-class.yaml new file mode 100644 index 00000000..ef7a44a6 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vaststorage/templates/snapshot-class.yaml @@ -0,0 +1,38 @@ +{{- if eq (toString .Values.createSnapshotClass) "true" }} + +{{- $is_default_class := .Values.snapshotClass.setDefaultSnapshotClass | default true | quote -}} +{{- $snapshot_name_fmt := .Values.snapshotClass.snapshotNameFormat -}} +{{- $deletion_policy := .Values.snapshotClass.deletionPolicy -}} + +{{/* Check secret presence in deployment namespaces and find out */}} +{{- $secret := .Values.clusterName -}} +{{- $secret_namespace := .Release.Namespace -}} + +{{- if not $secret -}} +{{- fail "clusterName is required value. 
Please specify valid clusterName" -}} +{{- end }} + +{{- if .Release.IsInstall -}} +{{- if not (lookup "v1" "Secret" $secret_namespace $secret) -}} +{{- fail (printf "cluster '%s' doesn't exist in namespace '%s' or doesn't have underlying secret." .Values.clusterName .Release.Namespace) -}} +{{- end -}} +{{- end -}} + +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: {{ .Release.Name }} + namespace: {{ include "vastcsi.namespace" $ }} + annotations: + snapshot.storage.kubernetes.io/is-default-class: {{ $is_default_class }} + labels: + {{- include "vastcsi.labels" $ | nindent 4 }} +driver: csi.vastdata.com +deletionPolicy: {{ $deletion_policy }} +parameters: + snapshot_name_fmt: {{ $snapshot_name_fmt }} + csi.storage.k8s.io/snapshotter-secret-name: {{ $secret }} + csi.storage.k8s.io/snapshotter-secret-namespace: {{ $secret_namespace }} +--- + +{{- end }} diff --git a/charts/vastcsi-operator/crd-charts/vaststorage/templates/storage-class.yaml b/charts/vastcsi-operator/crd-charts/vaststorage/templates/storage-class.yaml new file mode 100644 index 00000000..bee2ec17 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vaststorage/templates/storage-class.yaml @@ -0,0 +1,79 @@ +{{/* Validate storagePath parameter. Parameter should not be an empty string. */}} +{{- $is_default_class := .Values.setDefaultStorageClass | default false | quote -}} +{{- $storage_path := .Values.storagePath -}} +{{- if not $storage_path -}} + {{- fail "storagePath is required value. Please specify valid root export path" -}} +{{- end }} + +{{/* Validate viewPolicy parameter. Parameter should not be an empty string. */}} +{{- $view_policy := .Values.viewPolicy -}} +{{- if not $view_policy -}} + {{- fail "viewPolicy is required value. Please specify valid policy name" -}} +{{- end }} + +{{/* Validate vipPool and vipPoolFQDN parameters. Only one of them should be provided. 
*/}}
+{{- $vip_pool_name := .Values.vipPool -}}
+{{- $vip_pool_fqdn := .Values.vipPoolFQDN -}}
+{{- if (and $vip_pool_name $vip_pool_fqdn) -}}
+    {{- fail (printf "vipPool and vipPoolFQDN are mutually exclusive in the StorageClass '%s' parameters. Do not set a default value from storageDefaults for either field; choose only one to specify." .Release.Name) -}}
+{{- end }}
+
+{{- $volume_name_fmt := .Values.volumeNameFormat -}}
+{{- $eph_volume_name_fmt := .Values.ephemeralVolumeNameFormat -}}
+{{- $qos_policy := .Values.qosPolicy -}}
+{{- $mount_options := .Values.mountOptions -}}
+{{- $reclaim_policy := .Values.reclaimPolicy -}}
+{{- $allow_volume_expansion := .Values.allowVolumeExpansion | quote | mustRegexMatch "true" | ternary true false -}}
+
+{{/* Check secret presence in deployment namespaces and find out */}}
+{{- $secret := .Values.clusterName -}}
+{{- $secret_namespace := .Release.Namespace -}}
+
+{{- if not $secret -}}
+    {{- fail "clusterName is required value. Please specify valid clusterName" -}}
+{{- end }}
+
+{{- if .Release.IsInstall -}}
+{{- if not (lookup "v1" "Secret" $secret_namespace $secret) -}}
+    {{- fail (printf "cluster '%s' doesn't exist in namespace '%s' or doesn't have underlying secret."
.Values.clusterName .Release.Namespace) -}} +{{- end -}} +{{- end -}} + +kind: StorageClass +apiVersion: storage.k8s.io/v1 +provisioner: csi.vastdata.com +metadata: + name: {{ .Release.Name }} + namespace: {{ include "vastcsi.namespace" $ }} + annotations: + storageclass.kubernetes.io/is-default-class: {{ $is_default_class }} + labels: + {{- include "vastcsi.labels" $ | nindent 4 }} +reclaimPolicy: {{ $reclaim_policy }} +parameters: + root_export: {{ $storage_path }} + view_policy: {{ $view_policy }} + volume_name_fmt: {{ $volume_name_fmt }} + eph_volume_name_fmt: {{ $eph_volume_name_fmt }} +{{- range $key, $value := dict "vip_pool_name" $vip_pool_name "vip_pool_fqdn" $vip_pool_fqdn "qos_policy" $qos_policy }} + {{- if and $value (ne $value "") }} + {{ $key }}: {{ if (kindIs "int" $value) }}{{ $value | quote }}{{ else }}{{ $value }}{{ end }} + {{- end }} +{{- end }} + csi.storage.k8s.io/provisioner-secret-name: {{ $secret }} + csi.storage.k8s.io/provisioner-secret-namespace: {{ $secret_namespace }} + csi.storage.k8s.io/controller-publish-secret-name: {{ $secret }} + csi.storage.k8s.io/controller-publish-secret-namespace: {{ $secret_namespace }} + csi.storage.k8s.io/node-publish-secret-name: {{ $secret }} + csi.storage.k8s.io/node-publish-secret-namespace: {{ $secret_namespace }} + csi.storage.k8s.io/controller-expand-secret-name: {{ $secret }} + csi.storage.k8s.io/controller-expand-secret-namespace: {{ $secret_namespace }} +allowVolumeExpansion: {{ $allow_volume_expansion }} +{{- if kindIs "string" $mount_options -}} +{{/* Keep option to specify mountOptions as string for backward compatibility */}} +mountOptions: + - {{ $mount_options | quote }} +{{- else }} +mountOptions: {{ toYaml $mount_options | nindent 2 }} +{{- end }} +--- diff --git a/charts/vastcsi-operator/crd-charts/vaststorage/values.schema.yaml b/charts/vastcsi-operator/crd-charts/vaststorage/values.schema.yaml new file mode 100644 index 00000000..66556a20 --- /dev/null +++ 
b/charts/vastcsi-operator/crd-charts/vaststorage/values.schema.yaml @@ -0,0 +1,154 @@ +description: > + Represents VAST storage. Each VastStorage instance generates a corresponding + storage class that can be used in PVCs to create workloads. + +spec: + description: Spec defines the desired state of VastStorage. + properties: + clusterName: + description: The name of VastCluster reference. + type: string + storagePath: + description: Base path where volumes will be located on VAST. + type: string + viewPolicy: + description: VAST policy name to create views. + type: string + vipPool: + description: Name of VAST VIP pool to use. + type: string + nullable: true + vipPoolFQDN: + description: |- + The FQDN of the VIP pool to use. Must specify either vipPool or vipPoolFQDN. + Using a DNS skips an API call to the VMS for obtaining a random VIP from the vipPool, + leading to faster volume mounting. + NOTE: The driver will prepend the FQDN with a random prefix, which forces the NFS client + to resolve into a different VIP, thereby distributing the load across the entire range + of the VIP pool. + nullable: true + type: string + qosPolicy: + description: Name of QoS policy associated with the view. + type: string + nullable: true + allowVolumeExpansion: + description: Allows resizing existing volumes. + default: true + type: boolean + mountOptions: + description: Add any extra mount NFS options here. + default: [] + type: array + items: + type: string + reclaimPolicy: + description: >- + Reclaim policy defines what happens to the volume when the corresponding + PersistentVolumeClaim (PVC) is deleted. + See https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy + for more details. + default: Delete + enum: + - Delete + - Retain + type: string + setDefaultStorageClass: + description: >- + When enabled, Vast CSI will be set as the default storage class for the entire + Kubernetes cluster. 
This means Vast CSI will automatically be chosen as the + storage class for persistent volume claims (PVCs) that do not specify a + storage class. + See https://kubernetes.io/docs/concepts/storage/storage-classes/#default-storageclass + for more details. + type: boolean + default: false + volumeNameFormat: + description: String template for CSI-provisioned volume names, within VAST. + default: csi:{namespace}:{name}:{id} + type: string + ephemeralVolumeNameFormat: + description: String template for CSI-provisioned ephemeral volumes, within VAST. + default: csi:{namespace}:{name}:{id} + type: string + createSnapshotClass: + description: Create SnapshotClass for VastStorage. + type: boolean + default: true + snapshotClass: + description: VolumeSnapshotClass definition. + properties: + snapshotNameFormat: + type: string + default: csi:{namespace}:{name}:{id} + deletionPolicy: + description: >- + Specifies the deletion policy for snapshots associated with this snapshot class. + See https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/#deletionpolicy + for more details. + default: Delete + enum: + - Delete + - Retain + type: string + setDefaultSnapshotClass: + description: >- + When enabled, this sets the VAST CSI Snapshot Class as the default for creating + volume snapshots. This means VAST CSI Snapshot Class will be used automatically + when volume snapshots are created without specifying a snapshot class. + See https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/#the-volumesnapshotclass-resource + for more details. + type: boolean + default: false + type: object + required: + - clusterName + - storagePath + - viewPolicy + type: object + +specDescriptors: + - description: The name of VastCluster reference. + displayName: clusterName + path: clusterName + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:storage.vastdata.com:v1:VastCluster' + - description: Allows resizing existing volumes. 
+ displayName: allowVolumeExpansion + path: allowVolumeExpansion + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - description: Create SnapshotClass for VastStorage. + displayName: Create SnapshotClass + path: createSnapshotClass + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - displayName: SnapshotClass configuration + description: SnapshotClass options. + path: snapshotClass + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:fieldDependency:createSnapshotClass:true' + +example: + apiVersion: storage.vastdata.com/v1 + kind: VastStorage + metadata: + name: vastdata-filesystem + spec: + clusterName: "" + storagePath: "" + viewPolicy: "" + vipPool: "" + qosPolicy: "" + allowVolumeExpansion: {{ .Values.allowVolumeExpansion }} + ephemeralVolumeNameFormat: {{ .Values.ephemeralVolumeNameFormat }} + vipPoolFQDN: {{ .Values.vipPoolFQDN }} + mountOptions: {{ .Values.mountOptions }} + volumeNameFormat: {{ .Values.volumeNameFormat }} + reclaimPolicy: {{ .Values.reclaimPolicy }} + setDefaultStorageClass: {{ .Values.setDefaultStorageClass }} + createSnapshotClass: {{ .Values.createSnapshotClass }} + snapshotClass: + snapshotNameFormat: {{ .Values.snapshotClass.snapshotNameFormat }} + deletionPolicy: {{ .Values.snapshotClass.deletionPolicy }} + setDefaultSnapshotClass: {{ .Values.snapshotClass.setDefaultSnapshotClass }} diff --git a/charts/vastcsi-operator/crd-charts/vaststorage/values.yaml b/charts/vastcsi-operator/crd-charts/vaststorage/values.yaml new file mode 100644 index 00000000..a4973803 --- /dev/null +++ b/charts/vastcsi-operator/crd-charts/vaststorage/values.yaml @@ -0,0 +1,51 @@ +#################### +# COMMON +#################### + +# Reference to installed VastCluster CRD +clusterName: "" + +#################### +# VAST CSI STORAGE CLASS OPTIONS +#################### + +# Where volumes will be located on VAST - must be provided by user +storagePath: "" +# Name of VAST VIP pool to use - must be provided by 
user
+vipPool: ""
+# The FQDN of the VIP pool to use. Must specify either vipPool or vipPoolFQDN.
+# Using a DNS skips an API call to the VMS for obtaining a random VIP from the vipPool, leading to faster volume mounting.
+# NOTE: The driver will prepend the FQDN with a random prefix, which forces the NFS client to resolve into a different VIP,
+# thereby distributing the load across the entire range of the VIP pool.
+vipPoolFQDN: ""
+# VAST policy name to create views - must be provided by user
+viewPolicy: ""
+# Allows resizing existing volumes
+allowVolumeExpansion: true
+# If true, sets Vast CSI as the cluster-wide storage class default
+setDefaultStorageClass: false
+# String template for CSI-provisioned volume names, within VAST
+volumeNameFormat: "csi:{namespace}:{name}:{id}"
+# String template for CSI-provisioned ephemeral volumes, within VAST
+ephemeralVolumeNameFormat: "csi:{namespace}:{name}:{id}"
+# Add any extra NFS options desired here
+mountOptions: []
+# Name of QoS policy associated with the view.
+qosPolicy: ""
+# Reclaim policy to use with the storage class.
+reclaimPolicy: "Delete"
+
+
+####################
+# VAST CSI SNAPSHOT CLASS OPTIONS
+####################
+
+createSnapshotClass: true
+
+snapshotClass:
+  # If true, sets SnapshotClass as the cluster-wide snapshot class default
+  setDefaultSnapshotClass: false
+  # String template for CSI-provisioned snapshot names, within VAST
+  snapshotNameFormat: "csi:{namespace}:{name}:{id}"
+  # On snapshot delete behavior. By default, Vast Cluster snapshot will be removed as well.
+ deletionPolicy: "Delete" diff --git a/charts/vastcsi-operator/scorecard_config.yaml b/charts/vastcsi-operator/scorecard_config.yaml new file mode 100755 index 00000000..11a06ffe --- /dev/null +++ b/charts/vastcsi-operator/scorecard_config.yaml @@ -0,0 +1,49 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.3.0 + labels: + suite: basic + test: basic-check-spec-test + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.3.0 + labels: + suite: olm + test: olm-bundle-validation-test + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.3.0 + labels: + suite: olm + test: olm-crds-have-validation-test + - entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.3.0 + labels: + suite: olm + test: olm-crds-have-resources-test + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.3.0 + labels: + suite: olm + test: olm-spec-descriptors-test + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.3.0 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/charts/vastcsi-operator/templates/NOTES.txt b/charts/vastcsi-operator/templates/NOTES.txt new file mode 100644 index 00000000..20b9fc16 --- /dev/null +++ b/charts/vastcsi-operator/templates/NOTES.txt @@ -0,0 +1,7 @@ +The Vast CSI Driver Operator version {{ .Chart.Version }} has been successfully deployed in the {{ .Release.Namespace }} namespace. +{{ if .Values.installSnapshotCRDS -}} +Snapshot support is now enabled. +{{- else -}} +Snapshot support is now disabled. 
+{{- end }} +You may proceed to create VastCSIDriver, VastStorage and VastCluster resources to complete the CSI deployment. diff --git a/charts/vastcsi-operator/templates/_helpers.tpl b/charts/vastcsi-operator/templates/_helpers.tpl new file mode 100644 index 00000000..d62cd0e9 --- /dev/null +++ b/charts/vastcsi-operator/templates/_helpers.tpl @@ -0,0 +1,309 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "csi-operator.rbac.proxy" -}} +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +{{- end }} + +{{- define "csi-operator.rbac.manager" -}} +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - '*' +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + - hostmount-anyuid + resources: + - securitycontextconstraints + verbs: + - '*' +- apiGroups: + - storage.vastdata.com + resources: + - vastcsidrivers + - vastcsidrivers/status + - vastcsidrivers/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - storage.vastdata.com + resources: + - vaststorages + - vaststorages/status + - vaststorages/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - storage.vastdata.com + resources: + - vastclusters + - vastclusters/status + - vastclusters/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + verbs: + - '*' +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - '*' +- apiGroups: + - apps + resources: + - daemonsets + - deployments + verbs: + - '*' +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - '*' +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - delete +{{- end }} + +{{- define "csi-operator.rbac.leader-election" -}} +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- end }} + +{{- define "csi-operator.manager-deployment.spec" -}} +replicas: 1 +selector: + matchLabels: + control-plane: controller-manager +template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - name: kube-rbac-proxy + image: {{ .Values.proxyImage }} + args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + ports: + - containerPort: 8443 + name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + - name: csi-vast-operator + 
image: {{ .Values.managerImage | required "Manager image is required" }} + imagePullPolicy: Always + args: + - --metrics-addr=127.0.0.1:8080 + - --enable-leader-election + - --leader-election-id=vast-csi-operator + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: RELATED_IMAGE_CSI_DRIVER + value: {{ .Values.overrides.csiVastPlugin.repository }} + - name: RELATED_IMAGE_CSI_ATTACHER + value: {{ .Values.overrides.csiAttacher.repository }} + - name: RELATED_IMAGE_CSI_NODE_DRIVER_REGISTRAR + value: {{ .Values.overrides.csiNodeDriverRegistrar.repository }} + - name: RELATED_IMAGE_CSI_PROVISIONER + value: {{ .Values.overrides.csiProvisioner.repository }} + - name: RELATED_IMAGE_CSI_RESIZER + value: {{ .Values.overrides.csiResizer.repository }} + - name: RELATED_IMAGE_CSI_SNAPSHOTTER + value: {{ .Values.overrides.csiSnapshotter.repository }} + {{- if .Values.imagePullSecret }} + imagePullSecrets: + - name: {{ .Values.imagePullSecret }} + {{- end }} + securityContext: + runAsNonRoot: true + serviceAccountName: vast-csi-driver-operator-controller-manager + priorityClassName: system-cluster-critical + terminationGracePeriodSeconds: 10 +{{- end }} diff --git a/charts/vastcsi-operator/templates/crd-csi-snapshot.yaml b/charts/vastcsi-operator/templates/crd-csi-snapshot.yaml new file mode 100644 index 00000000..2ffa9682 --- /dev/null +++ b/charts/vastcsi-operator/templates/crd-csi-snapshot.yaml @@ -0,0 +1,845 @@ +{{- if eq (toString .Values.installSnapshotCRDS) "true" }} + +--- 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. 
"Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. 
A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. 
+ type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. 
+ jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. 
+ enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. + type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. 
For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. 
+ jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. 
This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' 
+ type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. 
Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. 
This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. 
If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. 
In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. 
+ jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' + properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. 
VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. 
For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +{{- end -}} \ No newline at end of file diff --git a/charts/vastcsi-operator/templates/csi-snapshot-controller.yaml b/charts/vastcsi-operator/templates/csi-snapshot-controller.yaml new file mode 100644 index 00000000..4a99082b --- /dev/null +++ b/charts/vastcsi-operator/templates/csi-snapshot-controller.yaml @@ -0,0 +1,43 @@ +{{- if eq (toString .Values.installSnapshotCRDS) "true" }} +# This YAML file shows how to deploy the snapshot controller + +# The snapshot controller implements the control loop for CSI snapshot functionality. +# It should be installed as part of the base Kubernetes distribution in an appropriate +# namespace for components implementing base system functionality. For installing with +# Vanilla Kubernetes, kube-system makes sense for the namespace. 
+ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: snapshot-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: snapshot-controller + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: snapshot-controller + spec: + serviceAccountName: snapshot-controller + containers: + - name: snapshot-controller + image: gcr.io/k8s-staging-sig-storage/snapshot-controller:v5.0.1 + args: + - "--v=5" + - "--leader-election=true" + imagePullPolicy: IfNotPresent + +{{- end -}} \ No newline at end of file diff --git a/charts/vastcsi-operator/templates/namespace.yaml b/charts/vastcsi-operator/templates/namespace.yaml new file mode 100644 index 00000000..3b7d871e --- /dev/null +++ b/charts/vastcsi-operator/templates/namespace.yaml @@ -0,0 +1,15 @@ +{{- if and (ne (toString .Values.olmBuild) "true") .Values.namespace }} + +{{- $namespace := .Values.namespace -}} +{{- $existingNamespace := (lookup "v1" "Namespace" "" $namespace) -}} + +{{- if not $existingNamespace -}} +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: {{ $namespace }} +{{- end -}} + +{{- end -}} diff --git a/charts/vastcsi-operator/templates/operator_manager.yaml b/charts/vastcsi-operator/templates/operator_manager.yaml new file mode 100644 index 00000000..a3e54bdd --- /dev/null +++ b/charts/vastcsi-operator/templates/operator_manager.yaml @@ -0,0 +1,13 @@ +{{- if ne (toString .Values.olmBuild) "true" }} + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + control-plane: controller-manager + name: vast-csi-operator-controller-manager + namespace: {{ .Values.namespace | default 
.Release.Namespace | quote }} +spec: +{{- include "csi-operator.manager-deployment.spec" . | nindent 2 }} + +{{- end -}} diff --git a/charts/vastcsi-operator/templates/rbac-snapshot-controller.yaml b/charts/vastcsi-operator/templates/rbac-snapshot-controller.yaml new file mode 100644 index 00000000..b660c9be --- /dev/null +++ b/charts/vastcsi-operator/templates/rbac-snapshot-controller.yaml @@ -0,0 +1,89 @@ +{{- if eq (toString .Values.installSnapshotCRDS) "true" }} +# RBAC file for the snapshot controller. +# +# The snapshot controller implements the control loop for CSI snapshot functionality. +# It should be installed as part of the base Kubernetes distribution in an appropriate +# namespace for components implementing base system functionality. For installing with +# Vanilla Kubernetes, kube-system makes sense for the namespace. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: snapshot-controller + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + # Enable 
this RBAC rule only when using distributed snapshotting, i.e. when the enable-distributed-snapshotting flag is set to true + # - apiGroups: [""] + # resources: ["nodes"] + # verbs: ["get", "list", "watch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-role +subjects: + - kind: ServiceAccount + name: snapshot-controller + namespace: kube-system +roleRef: + kind: ClusterRole + name: snapshot-controller-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-leaderelection + namespace: kube-system +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-leaderelection + namespace: kube-system +subjects: + - kind: ServiceAccount + name: snapshot-controller + namespace: kube-system +roleRef: + kind: Role + name: snapshot-controller-leaderelection + apiGroup: rbac.authorization.k8s.io + +{{- end -}} diff --git a/charts/vastcsi-operator/templates/rbac_operator_manager.yaml b/charts/vastcsi-operator/templates/rbac_operator_manager.yaml new file mode 100644 index 00000000..e5eb5426 --- /dev/null +++ b/charts/vastcsi-operator/templates/rbac_operator_manager.yaml @@ -0,0 +1,88 @@ +{{- if ne (toString .Values.olmBuild) "true" }} + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vast-csi-driver-operator-controller-manager + namespace: {{ .Values.namespace | default .Release.Namespace | quote }} + +--- + +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: vast-csi-operator-leader-election-role + namespace: {{ .Values.namespace | default .Release.Namespace | quote }} +rules: + {{ include "csi-operator.rbac.leader-election" . 
}} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: vast-csi-operator-manager-role +rules: + {{ include "csi-operator.rbac.manager" . }} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: vast-csi-operator-proxy-role +rules: + {{ include "csi-operator.rbac.proxy" . }} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: vast-csi-operator-leader-election-rolebinding + namespace: {{ .Values.namespace | default .Release.Namespace | quote }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: vast-csi-operator-leader-election-role +subjects: + - kind: ServiceAccount + name: vast-csi-driver-operator-controller-manager + namespace: {{ .Values.namespace | default .Release.Namespace | quote }} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: vast-csi-operator-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vast-csi-operator-manager-role +subjects: + - kind: ServiceAccount + name: vast-csi-driver-operator-controller-manager + namespace: {{ .Values.namespace | default .Release.Namespace | quote }} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: vast-csi-operator-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vast-csi-operator-proxy-role +subjects: + - kind: ServiceAccount + name: vast-csi-driver-operator-controller-manager + namespace: {{ .Values.namespace | default .Release.Namespace | quote }} + +--- + +{{- end -}} diff --git a/charts/vastcsi-operator/templates/storage.vastdata.com_vastclusters.yaml b/charts/vastcsi-operator/templates/storage.vastdata.com_vastclusters.yaml new file mode 100644 index 00000000..c17266ea --- /dev/null +++ b/charts/vastcsi-operator/templates/storage.vastdata.com_vastclusters.yaml @@ -0,0 +1,40 @@ +{{/* Load and parse the 
values.schema.yaml file */}} +{{- $cluster_values := dict "Values" (.Files.Get "crd-charts/vastcluster/values.yaml" | trim | fromYaml) "GlobalValues" .Values -}} +{{- $cluster_spec := tpl (.Files.Get "crd-charts/vastcluster/values.schema.yaml") $cluster_values | trim | fromYaml -}} + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vastclusters.storage.vastdata.com +spec: + group: storage.vastdata.com + names: + kind: VastCluster + listKind: VastClusterList + plural: vastclusters + singular: vastcluster + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: VastCluster is the Schema for the vastclusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: {{- $cluster_spec.spec | toYamlPretty | nindent 14 }} + status: + description: Status defines the observed state of VastCluster + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/vastcsi-operator/templates/storage.vastdata.com_vastcsidriver.yaml b/charts/vastcsi-operator/templates/storage.vastdata.com_vastcsidriver.yaml new file mode 100644 index 00000000..9cd03831 --- /dev/null +++ b/charts/vastcsi-operator/templates/storage.vastdata.com_vastcsidriver.yaml @@ -0,0 +1,53 @@ +{{/* Resources description, API spec etc. */}} +{{- $csidriver_values := dict "Values" (.Files.Get "crd-charts/vastcsidriver/values.yaml" | trim | fromYaml) "GlobalValues" .Values -}} +{{- $csidriver_spec := tpl (.Files.Get "crd-charts/vastcsidriver/values.schema.yaml") $csidriver_values | trim | fromYaml -}} + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vastcsidrivers.storage.vastdata.com +spec: + group: storage.vastdata.com + names: + kind: VastCSIDriver + listKind: VastCSIDriverList + plural: vastcsidrivers + singular: vastcsidriver + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: CreationTime + type: date + - description: CSI Plugin Image + jsonPath: .spec.image.csiVastPlugin.image + name: CSIPluginImage + type: string + name: v1 + schema: + openAPIV3Schema: + description: VastCSIDriver is the Schema for the vastcsidrivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + properties: + name: + description: Only one driver may exist on a cluster at a time. + type: string + type: object + spec: {{- $csidriver_spec.spec | toYamlPretty | nindent 14 }} + x-kubernetes-preserve-unknown-fields: true + status: + description: Status defines the observed state of VastCSIDriver + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/vastcsi-operator/templates/storage.vastdata.com_vaststorages.yaml b/charts/vastcsi-operator/templates/storage.vastdata.com_vaststorages.yaml new file mode 100644 index 00000000..ec10f0ac --- /dev/null +++ b/charts/vastcsi-operator/templates/storage.vastdata.com_vaststorages.yaml @@ -0,0 +1,53 @@ +{{/* Resources description, API spec etc. 
*/}} +{{- $storage_values := dict "Values" (.Files.Get "crd-charts/vaststorage/values.yaml" | trim | fromYaml) "GlobalValues" .Values -}} +{{- $storage_spec := tpl (.Files.Get "crd-charts/vaststorage/values.schema.yaml") $storage_values | trim | fromYaml -}} + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vaststorages.storage.vastdata.com +spec: + group: storage.vastdata.com + names: + kind: VastStorage + listKind: VastStorageList + plural: vaststorages + singular: vaststorage + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Secret + jsonPath: .spec.secretName + name: Secret + type: string + - description: Storage Class + jsonPath: .spec.storageClass.name + name: StorageClass + type: string + - description: Snapshot Class + jsonPath: .spec.snapshotClass.name + name: SnapshotClass + type: string + name: v1 + schema: + openAPIV3Schema: + description: VastStorage is the Schema for the vaststorages API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: {{- $storage_spec.spec | toYamlPretty | nindent 14 }} + status: + description: Status defines the observed state of VastStorage + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/vastcsi-operator/templates/vast-csi-operator-controller-manager-metrics-service_v1_service.yaml b/charts/vastcsi-operator/templates/vast-csi-operator-controller-manager-metrics-service_v1_service.yaml new file mode 100644 index 00000000..6c40ca4f --- /dev/null +++ b/charts/vastcsi-operator/templates/vast-csi-operator-controller-manager-metrics-service_v1_service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: vast-csi-operator + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/name: service + app.kubernetes.io/part-of: vast-csi-operator + control-plane: controller-manager + name: vast-csi-operator-controller-manager-metrics-service +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + control-plane: controller-manager diff --git a/charts/vastcsi-operator/templates/vast-csi-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/charts/vastcsi-operator/templates/vast-csi-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 00000000..5346c9f9 --- /dev/null +++ b/charts/vastcsi-operator/templates/vast-csi-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: vast-csi-operator-metrics-reader +rules: + - nonResourceURLs: + - /metrics + verbs: + - get diff --git 
a/charts/vastcsi-operator/templates/vast-csi-operator.clusterserviceversion.yaml b/charts/vastcsi-operator/templates/vast-csi-operator.clusterserviceversion.yaml new file mode 100644 index 00000000..25831466 --- /dev/null +++ b/charts/vastcsi-operator/templates/vast-csi-operator.clusterserviceversion.yaml @@ -0,0 +1,158 @@ +{{- if eq (toString .Values.olmBuild) "true" }} + +{{- $operator_values := dict "GlobalValues" .Values -}} +{{- $operator_spec := tpl (.Files.Get "values.schema.yaml") $operator_values | trim | fromYaml }} + +{{- $csidriver_values := dict "Values" (.Files.Get "crd-charts/vastcsidriver/values.yaml" | trim | fromYaml) "GlobalValues" .Values -}} +{{- $csidriver_spec := tpl (.Files.Get "crd-charts/vastcsidriver/values.schema.yaml") $csidriver_values | trim | fromYaml -}} + +{{- $storage_values := dict "Values" (.Files.Get "crd-charts/vaststorage/values.yaml" | trim | fromYaml) "GlobalValues" .Values -}} +{{- $storage_spec := tpl (.Files.Get "crd-charts/vaststorage/values.schema.yaml") $storage_values | trim | fromYaml -}} + +{{- $cluster_values := dict "Values" (.Files.Get "crd-charts/vastcluster/values.yaml" | trim | fromYaml) "GlobalValues" .Values -}} +{{- $cluster_spec := tpl (.Files.Get "crd-charts/vastcluster/values.schema.yaml") $cluster_values | trim | fromYaml -}} + +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + {{ list $csidriver_spec.example $storage_spec.example $cluster_spec.example | toJson }} + capabilities: Seamless Upgrades + categories: Storage + description: |- + {{- $operator_spec.description | nindent 6 }} + containerImage: {{ .Values.managerImage }} + createdAt: "2024-07-01T11:59:59Z" + support: VastData Infra Team + repository: https://github.com/vast-data/vast-csi + operators.operatorframework.io/builder: operator-sdk-v1.3.0-ocp + operators.operatorframework.io/project_layout: helm.sdk.operatorframework.io/v1 + operatorframework.io/suggested-namespace: {{ 
.Values.suggestedNamespace }} + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "false" + features.operators.openshift.io/tls-profiles: "false" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" + name: vast-csi-operator.v{{ .Chart.Version }}{{- if .Values.ciPipe }}-{{ .Values.ciPipe }}{{- end }} + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - kind: VastCSIDriver + name: vastcsidrivers.storage.vastdata.com + displayName: VastCSIDriver + description: {{ $csidriver_spec.description | trim | quote }} + version: v1 + resources: + - kind: Deployment + name: '' + version: v1 + - kind: DaemonSet + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + specDescriptors: {{- $csidriver_spec.specDescriptors | toYamlPretty | nindent 10 }} + - kind: VastCluster + name: vastclusters.storage.vastdata.com + displayName: VastCluster + description: {{ $cluster_spec.description | trim | quote }} + version: v1 + resources: + - kind: Secret + name: '' + version: v1 + specDescriptors: {{- $cluster_spec.specDescriptors | toYamlPretty | nindent 10 }} + - kind: VastStorage + name: vaststorages.storage.vastdata.com + displayName: VastStorage + description: {{ $storage_spec.description | trim | quote }} + version: v1 + resources: + - kind: StorageClass + name: '' + version: v1 + - kind: VolumeSnapshotClass + name: '' + version: v1 + specDescriptors: {{- $storage_spec.specDescriptors | toYamlPretty | nindent 10 }} + description: |- + {{- $operator_spec.description | nindent 4 }} + displayName: VAST CSI driver operator + icon: + - base64data: 
"iVBORw0KGgoAAAANSUhEUgAAAD4AAAA+CAMAAABEH1h2AAAAeFBMVEX///8AAAAAtP/i8/rx8fEet//n5+d8zvnY2NjMzMy3t7cTExPh4eFOTk6Ojo6JiYnExMReXl6en59sbGxUVFQhISF+fn69vb0/Pz9ISEhycnJ4eHirq6vw+vw0ufgAr/zO6/qo2/ed2fhTwPm/5fiR1PcwMDAoKCgtkLcJAAABsklEQVR4nOyV75aaMBDF72WBEoIIiICittvt6vu/YU/+SOL20BPox3I/GczvZjLJTLBp0yYgzrTUzzenayCd0mgP3CKn7z/C8MziI66Jp+g9cPmWfHTngwB+frP6SJJbIK2jj18/3ZLoVyiOPdm+fPhMko9gGjiSuTe8RknytgCvSPrj9+hzAQ3U+ty89RfRiEkWk1e+DAbQkDv7syT7xfyd0hxe4QcSrKyzR78jy8W0ljLovxxCsNKBGSDX7Bwme6c/L2C4jmRJMl2JV7puV+bNJH1l3rRU5VbrcZSs/4EGll+3TQtU3E1jqrziGAb3f1rr6yf1oLSPiLuNuW3rA8fnp95rtfET0JMOykiSPEz2D21VedUxSHl37nehlBkzIXKyF5V7SC76Zp+coVAL9BP+eC269OsjRDbqZcye45bnjoMX/DDmf8EbHlG7viDIIiXFM7WD2vlpHo8l9266cTpMnRpI81HyMourniSn2YV28jcDoGM3j8eUrrLPJtDWpjJv67quJZt5HCMfzso4CTtrZ059mN87EI8uUmFfx9LEkzVKfre+XLBp0/+p3wEAAP//kCAQXEWcSkUAAAAASUVORK5CYII=" + mediatype: "image/png" + install: + spec: + clusterPermissions: + - rules: + {{- include "csi-operator.rbac.manager" . | nindent 12 }} + {{- include "csi-operator.rbac.proxy" . | nindent 12 }} + serviceAccountName: vast-csi-driver-operator-controller-manager + deployments: + - label: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: vast-csi-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: vast-csi-operator + control-plane: controller-manager + name: vast-csi-operator-controller-manager + spec: + {{- include "csi-operator.manager-deployment.spec" . | nindent 12 }} + permissions: + - rules: + {{- include "csi-operator.rbac.leader-election" . 
| nindent 12 }} + serviceAccountName: vast-csi-driver-operator-controller-manager + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: false + type: AllNamespaces + keywords: + - VAST + - VASTData + - CSI + - Multi Cluster + - AI + - High-Performance Computing + links: + - name: VAST CSI Plugin + url: https://github.com/vast-data/vast-csi + maintainers: + - email: volodymyr.boiko@vastdata.com + name: VAST Infrastructure Team + maturity: {{ .Values.maturity | quote }} + provider: + name: VASTData + url: https://www.vastdata.com + version: {{ .Chart.Version }}{{- if .Values.ciPipe }}-{{ .Values.ciPipe }}{{- end }} + minKubeVersion: 1.18.0 + relatedImages: + - image: {{ .Values.overrides.csiVastPlugin.repository }} + name: csi-vast-plugin + - image: {{ .Values.overrides.csiResizer.repository }} + name: csi-resizer + - image: {{ .Values.overrides.csiSnapshotter.repository }} + name: csi-snapshotter + - image: {{ .Values.overrides.csiAttacher.repository }} + name: csi-attacher + - image: {{ .Values.overrides.csiProvisioner.repository }} + name: csi-provisioner + - image: {{ .Values.overrides.csiNodeDriverRegistrar.repository }} + name: csi-node-driver-registrar + - image: {{ .Values.managerImage | required "Operator image is required." }} + name: csi-vast-operator + - image: {{ .Values.proxyImage }} + name: kube-rbac-proxy +{{- end }} diff --git a/charts/vastcsi-operator/values.schema.yaml b/charts/vastcsi-operator/values.schema.yaml new file mode 100644 index 00000000..4122d941 --- /dev/null +++ b/charts/vastcsi-operator/values.schema.yaml @@ -0,0 +1,4 @@ +description: |- + The VAST Data CSI Driver enables users to provision, manage, and scale volumes directly from VAST’s high-performance storage system. The VAST Data CSI Operator manages the driver's lifecycle, including installation, upgrade, and configuration. 
+ ### Installation
+ Refer to the VASTData Operator for Kubernetes [official documentation]({{- .GlobalValues.operatorDocs -}}). diff --git a/charts/vastcsi-operator/values.yaml b/charts/vastcsi-operator/values.yaml new file mode 100644 index 00000000..dc7c0a16 --- /dev/null +++ b/charts/vastcsi-operator/values.yaml @@ -0,0 +1,50 @@ +# Namespace for csi driver operator installation. +# If a namespace is not specified during the installation of the CSI driver operator, +# it will default to the namespace of the Helm release. +namespace: null + +# CI pipe for testing builds +ciPipe: null + +# imagePullSecret for testing builds +imagePullSecret: null + +# Operator manager image +managerImage: null +# Operator proxy image +# Note: community version of kube-rbac-proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0 +proxyImage: registry.redhat.io/openshift4/ose-kube-rbac-proxy@sha256:77df668a9591bbaae675d0553f8dca5423c0f257317bc08fe821d965f44ed019 + +# Used in OpenShift UI for driver installation. 
+suggestedNamespace: "vast-csi" + +# Operator documentation link +operatorDocs: "https://support.vastdata.com/s/topic/0TOV400000018tBOAQ/vast-csi-driver" + +overrides: + csiVastPlugin: + repository: vastdataorg/csi@sha256:a18cf994383111b76b56a60aa2d6b4d40d963e6b02ef5746e806fe8c91e4a0c1 + csiAttacher: + repository: registry.k8s.io/sig-storage/csi-attacher@sha256:b4d611100ece2f9bc980d1cb19c2285b8868da261e3b1ee8f45448ab5512ab94 + csiNodeDriverRegistrar: + repository: registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:f25af73ee708ff9c82595ae99493cdef9295bd96953366cddf36305f82555dac + csiProvisioner: + repository: registry.k8s.io/sig-storage/csi-provisioner@sha256:405a14e1aa702f7ea133cea459e8395fe40a6125c088c55569e696d48e1bd385 + csiResizer: + repository: registry.k8s.io/sig-storage/csi-resizer@sha256:a541e6cc2d8b011bb21b1d4ffec6b090e85270cce6276ee302d86153eec0af43 + csiSnapshotter: + repository: registry.k8s.io/sig-storage/csi-snapshotter@sha256:2e04046334baf9be425bb0fa1d04c2d1720d770825eedbdbcdb10d430da4ad8c + +# Disable if you don't have intention to use snapshots (Enabled by default) +installSnapshotCRDS: true + +#################### +# BUNDLE SETTINGS +#################### +# Build based on the OLM bundle: https://olm.operatorframework.io/ +# Do not change this unless you fully understand all implications. +# Enabling this option (true) will generate a different template. It skips all roles, role bindings, and the operator manager deployment. +# Instead, these templates become part of the installation section of the ClusterServiceVersion. +# This option is intended for clusters with OLM installed, where the intention is to install and manage the operator via OLM. 
+olmBuild: false +maturity: "alpha" diff --git a/charts/vastcsi-operator/watches.yaml b/charts/vastcsi-operator/watches.yaml new file mode 100644 index 00000000..6b0c84c1 --- /dev/null +++ b/charts/vastcsi-operator/watches.yaml @@ -0,0 +1,21 @@ +# Use the 'create api' subcommand to add watches to this file. +- group: storage.vastdata.com + version: v1 + kind: VastCSIDriver + chart: helm-charts/vastcsidriver + overrideValues: + image.csiVastPlugin.defaultRepository: $RELATED_IMAGE_CSI_DRIVER + image.csiAttacher.defaultRepository: $RELATED_IMAGE_CSI_ATTACHER + image.csiNodeDriverRegistrar.defaultRepository: $RELATED_IMAGE_CSI_NODE_DRIVER_REGISTRAR + image.csiProvisioner.defaultRepository: $RELATED_IMAGE_CSI_PROVISIONER + image.csiResizer.defaultRepository: $RELATED_IMAGE_CSI_RESIZER + image.csiSnapshotter.defaultRepository: $RELATED_IMAGE_CSI_SNAPSHOTTER +- group: storage.vastdata.com + version: v1 + kind: VastStorage + chart: helm-charts/vaststorage +- group: storage.vastdata.com + version: v1 + kind: VastCluster + chart: helm-charts/vastcluster +# +kubebuilder:scaffold:watch diff --git a/charts/vastcsi/.helmignore b/charts/vastcsi/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/charts/vastcsi/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/vastcsi/Chart.yaml b/charts/vastcsi/Chart.yaml new file mode 100644 index 00000000..5b56ab5f --- /dev/null +++ b/charts/vastcsi/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: vastcsi +description: Helm chart for Deployment of VAST Container Storage Interface (CSI) +type: application +version: "0.1.0" # will be updated by the release ci +keywords: + - vast + - csi + - driver + - vastdata + - csi-driver +icon: "https://uploads.vastdata.com/2020/04/vast-white-1.svg" +home: "https://support.vastdata.com/s/topic/0TOV400000018tBOAQ/vast-csi-driver" diff --git a/charts/vastcsi/README.md b/charts/vastcsi/README.md new file mode 100644 index 00000000..dfaeda4a --- /dev/null +++ b/charts/vastcsi/README.md @@ -0,0 +1,49 @@ +# Install CSI driver with Helm 3 + +## Prerequisites + - [install Helm](https://helm.sh/docs/intro/quickstart/#install-helm) + + +### install production version of the driver: +```console +helm repo add vast https://vast-data.github.io/vast-csi +helm install csi-driver vast/vastcsi -f values.yaml -n vast-csi --create-namespace +``` + +### install beta version of the driver: +```console +helm repo add vast https://raw.githubusercontent.com/vast-data/vast-csi/gh-pages-beta +helm install csi-driver vast/vastcsi -f values.yaml -n vast-csi --create-namespace +``` + +> **NOTE:** Optionally modify values.yaml or set overrides via Helm command line + + +### install a specific version +```console +helm install csi-driver vast/vastcsi -f values.yaml -n vast-csi --create-namespace --version 2.3.0 +``` + +### Upgrade driver +```console +helm upgrade csi-driver vast/vastcsi -f values.yaml -n vast-csi +``` + +### Upgrade helm repository +```console +helm repo update vast +``` + +### Uninstall driver +```console +helm uninstall csi-driver -n 
vast-csi +``` + +### search for all available chart versions +```console +helm search repo -l vast +``` + +### troubleshooting + - Add `--wait -v=5 --debug` in `helm install` command to get detailed error + - Use `kubectl describe` to acquire more info diff --git a/charts/vastcsi/templates/NOTES.txt b/charts/vastcsi/templates/NOTES.txt new file mode 100644 index 00000000..b5feb284 --- /dev/null +++ b/charts/vastcsi/templates/NOTES.txt @@ -0,0 +1,12 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. +The release is installed in namespace {{ .Release.Namespace }} + +To learn more about the release, try: + + $ helm status -n {{ .Release.Namespace}} {{ .Release.Name }} + $ helm get all -n {{ .Release.Namespace}} {{ .Release.Name }} + +Examples on how to configure a storage class and start using the driver are here: +{{ .Chart.Home }} diff --git a/charts/vastcsi/templates/clusterrole.yaml b/charts/vastcsi/templates/clusterrole.yaml new file mode 100644 index 00000000..e1c586d1 --- /dev/null +++ b/charts/vastcsi/templates/clusterrole.yaml @@ -0,0 +1,87 @@ +{{- if .Values.rbac }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-vast-provisioner-role + labels: + {{- include "vastcsi.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-vast-attacher-role + labels: + {{- include "vastcsi.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-external-resizer-runner + labels: + {{- include "vastcsi.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +--- +{{- end -}} diff --git a/charts/vastcsi/templates/clusterrolebinding.yaml b/charts/vastcsi/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..590b9437 --- /dev/null +++ b/charts/vastcsi/templates/clusterrolebinding.yaml @@ -0,0 +1,48 @@ +{{- if .Values.rbac }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-vast-provisioner-binding + labels: + {{- include "vastcsi.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-vast-controller-sa + namespace: {{ include "vastcsi.namespace" . }} +roleRef: + kind: ClusterRole + name: {{ .Release.Name }}-vast-provisioner-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-vast-attacher-binding + labels: + {{- include "vastcsi.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-vast-controller-sa + namespace: {{ include "vastcsi.namespace" . }} +roleRef: + kind: ClusterRole + name: {{ .Release.Name }}-vast-attacher-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-resizer-role + labels: + {{- include "vastcsi.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-vast-controller-sa + namespace: {{ include "vastcsi.namespace" . 
}} +roleRef: + kind: ClusterRole + name: {{ .Release.Name }}-external-resizer-runner + apiGroup: rbac.authorization.k8s.io +--- +{{- end -}} diff --git a/charts/vastcsi/templates/controller.yaml b/charts/vastcsi/templates/controller.yaml new file mode 100644 index 00000000..5ec32cbd --- /dev/null +++ b/charts/vastcsi/templates/controller.yaml @@ -0,0 +1,163 @@ +{{/* Vast CSI Controller */}} + +{{- $csi_images := .Values.image -}} +{{- $plugin_proxy_sock := "/var/lib/csi/sockets/pluginproxy/csi.sock" -}} +{{- $plugin_proxy_sock_path := "/var/lib/csi/sockets/pluginproxy/" -}} +{{- $ca_bundle := empty .Values.sslCert | ternary .Values.sslCertsSecretName "csi-vast-ca-bundle" -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: csi-vast-controller + namespace: {{ include "vastcsi.namespace" . }} + labels: +{{- include "vastcsi.labels" . | nindent 4 }} + app.kubernetes.io/csi-role: "controller" +spec: + replicas: 1 + selector: + matchLabels: + app: csi-vast-controller +{{- include "vastcsi.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + labels: + role: csi-vast + app: csi-vast-controller +{{- include "vastcsi.labels" . | nindent 8 }} + spec: +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + affinity: + podAffinity: + {{- toYaml .Values.controller.podAffinity | nindent 10 }} + podAntiAffinity: + {{- toYaml .Values.controller.podAntiAffinity | nindent 10 }} + nodeAffinity: + {{- toYaml .Values.controller.nodeAffinity | nindent 10 }} + containers: + - name: csi-provisioner + image: {{ printf "%s:%s" $csi_images.csiProvisioner.repository (toString $csi_images.csiProvisioner.tag) }} + args: +{{- include "vastcsi.commonArgs" . 
| nindent 12 }} + - "--extra-create-metadata" + - "--timeout={{ .Values.operationTimeout }}s" + - "--worker-threads={{ .Values.numWorkers }}" + - "--retry-interval-start={{ .Values.operationRetryIntervalStart }}s" + - "--retry-interval-max={{ .Values.operationRetryIntervalMax }}s" + {{- range .Values.controller.extraArgs.csiProvisioner }} + - "--{{ . }}" + {{- end }} + env: + - name: ADDRESS + value: {{ $plugin_proxy_sock }} + imagePullPolicy: {{ $csi_images.csiProvisioner.imagePullPolicy | default "IfNotPresent" }} + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: csi-attacher + image: {{ printf "%s:%s" $csi_images.csiAttacher.repository (toString $csi_images.csiAttacher.tag) }} + args: +{{- include "vastcsi.commonArgs" . | nindent 12 }} + - "--timeout={{ .Values.operationTimeout }}s" + - "--worker-threads={{ .Values.numWorkers }}" + - "--retry-interval-start={{ .Values.operationRetryIntervalStart }}s" + - "--retry-interval-max={{ .Values.operationRetryIntervalMax }}s" + {{- range .Values.controller.extraArgs.csiAttacher }} + - "--{{ . }}" + {{- end }} + env: + - name: ADDRESS + value: {{ $plugin_proxy_sock }} + imagePullPolicy: {{ $csi_images.csiAttacher.imagePullPolicy | default "IfNotPresent" }} + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} + resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }} + - name: csi-snapshotter + image: {{ printf "%s:%s" $csi_images.csiSnapshotter.repository (toString $csi_images.csiSnapshotter.tag) }} + args: +{{- include "vastcsi.commonArgs" . 
| nindent 12 }} + - "--extra-create-metadata" + - "--leader-election=false" + - "--timeout={{ .Values.operationTimeout }}s" + - "--worker-threads={{ .Values.numWorkers }}" + - "--retry-interval-start={{ .Values.operationRetryIntervalStart }}s" + - "--retry-interval-max={{ .Values.operationRetryIntervalMax }}s" + {{- range .Values.controller.extraArgs.csiSnapshotter }} + - "--{{ . }}" + {{- end }} + env: + - name: ADDRESS + value: {{ $plugin_proxy_sock }} + imagePullPolicy: {{ $csi_images.csiSnapshotter.imagePullPolicy | default "IfNotPresent" }} + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} + resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }} + - name: csi-resizer + image: {{ printf "%s:%s" $csi_images.csiResizer.repository (toString $csi_images.csiResizer.tag) }} + args: +{{- include "vastcsi.commonArgs" . | nindent 12 }} + - "--timeout={{ .Values.operationTimeout }}s" + - "--workers={{ .Values.numWorkers }}" + - "--retry-interval-start={{ .Values.operationRetryIntervalStart }}s" + - "--retry-interval-max={{ .Values.operationRetryIntervalMax }}s" + - "--handle-volume-inuse-error=false" + {{- range .Values.controller.extraArgs.csiResizer }} + - "--{{ . }}" + {{- end }} + env: + - name: ADDRESS + value: {{ $plugin_proxy_sock }} + imagePullPolicy: {{ $csi_images.csiResizer.imagePullPolicy | default "IfNotPresent" }} + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + - name: csi-vast-plugin + image: {{ printf "%s:%s" $csi_images.csiVastPlugin.repository (toString $csi_images.csiVastPlugin.tag) }} + args: + - "serve" + imagePullPolicy: {{ $csi_images.csiVastPlugin.imagePullPolicy | default "IfNotPresent" }} + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + env: +{{- include "vastcsi.commonEnv" . 
| indent 12 }} + - name: CSI_ENDPOINT + value: unix://{{ $plugin_proxy_sock }} + - name: X_CSI_MODE + value: controller + volumeMounts: + - name: socket-dir + mountPath: {{ $plugin_proxy_sock_path }} +{{- include "vastcsi.vmsAuthVolumeMount" (merge (dict "ca_bundle" $ca_bundle) .) | indent 12 }} + resources: {{- toYaml .Values.controller.resources.csiVastPlugin | nindent 12 }} + hostNetwork: true + dnsPolicy: {{ .Values.controller.dnsPolicy }} + nodeSelector: +{{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end }} +{{- if .Values.controller.runOnControlPlane }} + node-role.kubernetes.io/control-plane: "" + {{- end }} +{{- if .Values.controller.nodeSelector }} +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + priorityClassName: {{ .Values.controller.priorityClassName }} + serviceAccountName: {{ .Release.Name }}-vast-controller-sa + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + volumes: + - name: socket-dir + emptyDir: {} +{{- include "vastcsi.vmsAuthVolume" (merge (dict "ca_bundle" $ca_bundle) .) | indent 8 }} diff --git a/charts/vastcsi/templates/csi-driver.yaml b/charts/vastcsi/templates/csi-driver.yaml new file mode 100644 index 00000000..279d339b --- /dev/null +++ b/charts/vastcsi/templates/csi-driver.yaml @@ -0,0 +1,14 @@ +{{/* Vast csi driver which defines the behaviour rules for all downstream PVS while attachement */}} + +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: csi.vastdata.com + labels: + {{- include "vastcsi.labels" . 
| nindent 4 }} +spec: + attachRequired: {{ .Values.attachRequired }} + podInfoOnMount: true + volumeLifecycleModes: + - Persistent + - Ephemeral diff --git a/charts/vastcsi/templates/node.yaml b/charts/vastcsi/templates/node.yaml new file mode 100644 index 00000000..e0fcf171 --- /dev/null +++ b/charts/vastcsi/templates/node.yaml @@ -0,0 +1,131 @@ +{{/* Vast CSI Node */}} + +{{- $csi_images := .Values.image -}} +{{- $kubelet_path := .Values.kubeletPath | default "/var/lib/kubelet" | trimSuffix "/" }} +{{- $ca_bundle := empty .Values.sslCert | ternary .Values.sslCertsSecretName "csi-vast-ca-bundle" -}} + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: csi-vast-node + namespace: {{ include "vastcsi.namespace" . }} + labels: +{{- include "vastcsi.labels" . | nindent 4 }} + app.kubernetes.io/csi-role: "node" +spec: + selector: + matchLabels: + app: "csi-vast-node" +{{- include "vastcsi.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: csi-vast-node + role: csi-vast +{{- include "vastcsi.labels" . | nindent 8 }} + annotations: + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + spec: +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + affinity: + podAffinity: + {{- toYaml .Values.node.podAffinity | nindent 10 }} + podAntiAffinity: + {{- toYaml .Values.node.podAntiAffinity | nindent 10 }} + nodeAffinity: + {{- toYaml .Values.node.nodeAffinity | nindent 10 }} + containers: + - name: csi-node-driver-registrar + image: {{ printf "%s:%s" $csi_images.csiNodeDriverRegistrar.repository (toString $csi_images.csiNodeDriverRegistrar.tag) }} + args: +{{- include "vastcsi.commonArgs" . 
| nindent 12 }} + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + imagePullPolicy: {{ $csi_images.csiNodeDriverRegistrar.imagePullPolicy | default "IfNotPresent" }} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/csi.vastdata.com /registration/csi.vastdata.com-reg.sock"] + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ $kubelet_path }}/plugins/csi.vastdata.com/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /csi/ + - name: registration-dir + mountPath: /registration/ + resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} + - name: csi-vast-plugin + image: {{ printf "%s:%s" $csi_images.csiVastPlugin.repository (toString $csi_images.csiVastPlugin.tag) }} + args: + - "serve" + imagePullPolicy: {{ $csi_images.csiVastPlugin.imagePullPolicy | default "IfNotPresent" }} + env: +{{- include "vastcsi.commonEnv" . | indent 12 }} + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_NODE_ID + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: {{ $kubelet_path }} + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + {{- if .Values.node.propagateHostMountOptions }} + - name: host-nfsmount-conf-d + mountPath: /etc/nfsmount.conf.d + {{- end }} +{{- include "vastcsi.vmsAuthVolumeMount" (merge (dict "ca_bundle" $ca_bundle) .) 
| indent 12 }} + resources: {{- toYaml .Values.node.resources.csiVastPlugin | nindent 12 }} + hostNetwork: true + dnsPolicy: {{ .Values.node.dnsPolicy }} + nodeSelector: +{{ toYaml .Values.node.nodeSelector | indent 8 }} + priorityClassName: {{ .Values.node.priorityClassName }} + serviceAccountName: {{ .Release.Name }}-vast-node-sa + tolerations: +{{ toYaml .Values.node.tolerations | indent 8 }} + volumes: + - name: registration-dir + hostPath: + path: {{ $kubelet_path }}/plugins_registry/ + type: DirectoryOrCreate + - name: plugin-dir + hostPath: + path: {{ $kubelet_path }}/plugins/csi.vastdata.com + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: {{ $kubelet_path }} + type: Directory + - name: device-dir + hostPath: + path: /dev +{{- include "vastcsi.vmsAuthVolume" (merge (dict "ca_bundle" $ca_bundle) .) | indent 8 }} + {{- if .Values.node.propagateHostMountOptions }} + - hostPath: + path: /etc/nfsmount.conf.d + type: DirectoryOrCreate + name: host-nfsmount-conf-d + {{- end }} diff --git a/charts/vastcsi/templates/secret.yaml b/charts/vastcsi/templates/secret.yaml new file mode 100644 index 00000000..b352dec3 --- /dev/null +++ b/charts/vastcsi/templates/secret.yaml @@ -0,0 +1,17 @@ +{{/* Optional ssl certificate for comminication with Vast Cluster host */}} + +{{- if .Values.sslCert }} +apiVersion: v1 +kind: Secret +metadata: + name: csi-vast-ca-bundle + namespace: {{ include "vastcsi.namespace" . }} + labels: +{{- include "vastcsi.labels" . 
| nindent 4 }} + annotations: + checksum/vast-vms-authority-secret: {{ .Values.sslCert | sha256sum | trim }} +type: Opaque +data: + ca-bundle.crt: |- + {{ .Values.sslCert | b64enc }} +{{- end -}} diff --git a/charts/vastcsi/templates/serviceaccount.yaml b/charts/vastcsi/templates/serviceaccount.yaml new file mode 100644 index 00000000..006db04c --- /dev/null +++ b/charts/vastcsi/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-vast-controller-sa + namespace: {{ include "vastcsi.namespace" . }} + labels: +{{- include "vastcsi.labels" . | nindent 4 }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-vast-node-sa + namespace: {{ include "vastcsi.namespace" . }} + labels: +{{- include "vastcsi.labels" . | nindent 4 }} diff --git a/charts/vastcsi/templates/shared/_chart_name.tpl b/charts/vastcsi/templates/shared/_chart_name.tpl new file mode 100644 index 00000000..c9d892c4 --- /dev/null +++ b/charts/vastcsi/templates/shared/_chart_name.tpl @@ -0,0 +1,5 @@ +{{/*Create chart name and version as used by the chart label.*/}} + +{{- define "vastcsi.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} diff --git a/charts/vastcsi/templates/shared/_common_args.tpl b/charts/vastcsi/templates/shared/_common_args.tpl new file mode 100644 index 00000000..bd6f6562 --- /dev/null +++ b/charts/vastcsi/templates/shared/_common_args.tpl @@ -0,0 +1,4 @@ +{{- define "vastcsi.commonArgs" -}} +- "--csi-address=$(ADDRESS)" +- "--v={{ .Values.logLevel | default 5 }}" +{{- end }} diff --git a/charts/vastcsi/templates/shared/_common_env.tpl b/charts/vastcsi/templates/shared/_common_env.tpl new file mode 100644 index 00000000..4557ac7d --- /dev/null +++ b/charts/vastcsi/templates/shared/_common_env.tpl @@ -0,0 +1,33 @@ +{{- /* +# IMPORTANT: cosi and csi helm charts share similar templates. 
+# If you make changes to a template in one chart, make sure to replicate those +# changes in the corresponding template in the other chart. +*/}} + +{{- define "vastcsi.commonEnv" }} + +- name: X_CSI_PLUGIN_NAME + value: "csi.vastdata.com" +- name: X_CSI_VMS_HOST + value: {{ $.Values.endpoint | default "" | quote }} +- name: X_CSI_ENABLE_VMS_SSL_VERIFICATION + value: {{ $.Values.verifySsl | quote }} +- name: X_CSI_DELETION_VIP_POOL_NAME + value: {{ $.Values.deletionVipPool | quote }} +- name: X_CSI_DELETION_VIEW_POLICY + value: {{ $.Values.deletionViewPolicy | quote }} +- name: X_CSI_WORKER_THREADS + value: {{ $.Values.numWorkers | quote }} +- name: X_CSI_DONT_USE_TRASH_API + value: {{ $.Values.dontUseTrashApi | quote }} +- name: X_CSI_USE_LOCALIP_FOR_MOUNT + value: {{ $.Values.useLocalIpForMount | quote }} +- name: X_CSI_ATTACH_REQUIRED + value: {{ $.Values.attachRequired | quote }} +- name: X_CSI_VMS_TIMEOUT + value: {{ $.Values.operationTimeout | quote }} +{{ if $.Values.truncateVolumeName -}} +- name: X_CSI_TRUNCATE_VOLUME_NAME + value: {{ $.Values.truncateVolumeName | quote }} +{{- end }} +{{- end }} diff --git a/charts/vastcsi/templates/shared/_common_namespace.tpl b/charts/vastcsi/templates/shared/_common_namespace.tpl new file mode 100644 index 00000000..2d9191fe --- /dev/null +++ b/charts/vastcsi/templates/shared/_common_namespace.tpl @@ -0,0 +1,3 @@ +{{- define "vastcsi.namespace" -}} +{{- coalesce $.Release.Namespace "vast-csi" | quote -}} +{{- end }} diff --git a/charts/vastcsi/templates/shared/_common_selectors_and_labels.tpl b/charts/vastcsi/templates/shared/_common_selectors_and_labels.tpl new file mode 100644 index 00000000..eddc9ff1 --- /dev/null +++ b/charts/vastcsi/templates/shared/_common_selectors_and_labels.tpl @@ -0,0 +1,23 @@ +{{/* Common labels and selectors */}} + +{{- define "vastcsi.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + + +{{/* Common labels */}} +{{- define "vastcsi.labels" 
-}} +helm.sh/chart: {{ include "vastcsi.chart" . }} +{{ include "vastcsi.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + + +{{/* Common selectors */}} +{{- define "vastcsi.selectorLabels" -}} +app.kubernetes.io/name: {{ include "vastcsi.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/vastcsi/templates/shared/_vms_auth.tpl b/charts/vastcsi/templates/shared/_vms_auth.tpl new file mode 100644 index 00000000..4fad2960 --- /dev/null +++ b/charts/vastcsi/templates/shared/_vms_auth.tpl @@ -0,0 +1,47 @@ +{{/*Set of templates for working with vms credentials and vms session certificates*/}} + +{{/* Volume declarations for vms credentials and vms session certificates */}} +{{- define "vastcsi.vmsAuthVolume" -}} +{{- if and .Values.sslCert .Values.sslCertsSecretName -}} +{{- + fail (printf "Ambiguous origin of the 'sslCert'. The certificate is found in both the '%s' secret and the command line --from-file argument." .Values.sslCertsSecretName) +-}} +{{- end -}} +{{- if and .ca_bundle (not .Values.verifySsl) -}} + {{- fail "When sslCert is provided `verifySsl` must be set to true." 
-}} +{{- end }} + +{{- if $.Values.secretName }} +- name: vms-auth + secret: + secretName: {{ $.Values.secretName | quote }} + items: + - key: username + path: username + - key: password + path: password +{{- end }} +{{- if $.ca_bundle }} +- name: vms-ca-bundle + secret: + secretName: {{ $.ca_bundle }} + items: + - key: ca-bundle.crt + path: ca-certificates.crt +{{- end }} +{{- end }} + + +{{/* Volume bindings for vms credentials and vms session certificates */}} +{{ define "vastcsi.vmsAuthVolumeMount" }} +{{- if $.Values.secretName }} +- name: vms-auth + mountPath: /opt/vms-auth + readOnly: true +{{- end }} +{{- if $.ca_bundle }} +- name: vms-ca-bundle + mountPath: /etc/ssl/certs + readOnly: true +{{- end }} +{{- end }} diff --git a/charts/vastcsi/templates/snapshot-class.yaml b/charts/vastcsi/templates/snapshot-class.yaml new file mode 100644 index 00000000..fb573fda --- /dev/null +++ b/charts/vastcsi/templates/snapshot-class.yaml @@ -0,0 +1,48 @@ +{{/* Generate one or more snapshot classes from 'snapshotClasses' section. */}} + +{{/* Check if .Values.secretName is not empty */}} +{{- if not (empty .Values.secretName) }} + +{{/* If .Values.snapshotClasses is empty, set a default value */}} +{{- if empty .Values.snapshotClasses }} +{{- $_ := set .Values "snapshotClasses" (dict "vastdata-snapshot" (dict)) }} +{{- end -}} +{{- end -}} + +{{/* Iterate over SnapshotClasses from manifest */}} +{{- range $name, $options := .Values.snapshotClasses }} + +{{/* Validate setDefaultSnapshotClass option. 
Options should be either true or false */}} +{{- + $is_default_class := pluck "setDefaultSnapshotClass" $options $.Values.snapshotClassDefaults | first | quote +-}} +{{- if not (or (kindIs "bool" $is_default_class ) ( $is_default_class | mustRegexMatch "true|false" )) -}} + {{- fail "setDefaultSnapshotClass should be either 'true' or 'false'" -}} +{{- end }} + +{{- $snapshot_name_fmt := pluck "snapshotNameFormat" $options $.Values.snapshotClassDefaults | first | quote -}} +{{- $deletion_policy := pluck "deletionPolicy" $options $.Values.snapshotClassDefaults | first | quote -}} + +{{- $snapshot_class_secret := pluck "secretName" $options $.Values.snapshotClassDefaults | first | quote -}} +{{/* Get secretNamespace parameter. If not provided .Release.Namespace is used. */}} +{{- $snapshot_class_secret_namespace := pluck "secretNamespace" $options $.Values.snapshotClassDefaults | first | default $.Release.Namespace | quote -}} + +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: {{ required "snapshotClassName must be non empty string" $name }} + namespace: {{ include "vastcsi.namespace" $ }} + annotations: + snapshot.storage.kubernetes.io/is-default-class: {{ $is_default_class }} + labels: + {{- include "vastcsi.labels" $ | nindent 4 }} +driver: csi.vastdata.com +deletionPolicy: {{ $deletion_policy }} +parameters: + snapshot_name_fmt: {{ $snapshot_name_fmt }} +{{- if ne $snapshot_class_secret ( quote "" ) }} + csi.storage.k8s.io/snapshotter-secret-name: {{ $snapshot_class_secret }} + csi.storage.k8s.io/snapshotter-secret-namespace: {{ $snapshot_class_secret_namespace }} +{{- end }} +--- +{{- end }} diff --git a/charts/vastcsi/templates/storage-class.yaml b/charts/vastcsi/templates/storage-class.yaml new file mode 100644 index 00000000..c7818cf2 --- /dev/null +++ b/charts/vastcsi/templates/storage-class.yaml @@ -0,0 +1,92 @@ +{{/* Generate one or more storage classes from 'storageClasses' section. 
*/}} + +{{- if not .Values.storageClasses -}} + {{- fail "`storageClasses` cannot be empty section. Specify at least one StorageClass with required parameters (vipPolicy, storagePath etc)" -}} +{{- end -}} + +{{/* Iterate over StorageClasses from manifest */}} +{{- range $name, $options := .Values.storageClasses }} + +{{/* Validate setDefaultStorageClass option. Options should be either true or false */}} +{{- + $is_default_class := pluck "setDefaultStorageClass" $options $.Values.storageClassDefaults | first | quote +-}} +{{- if not (or (kindIs "bool" $is_default_class ) ( $is_default_class | mustRegexMatch "true|false" )) -}} + {{- fail "setDefaultStorageClass should be either 'true' or 'false'" -}} +{{- end }} + +{{/* Validate storagePath parameter. Parameter should be not empty string. */}} +{{- $storage_path := pluck "storagePath" $options $.Values.storageClassDefaults | first | quote -}} +{{- if eq $storage_path ( quote "" ) -}} + {{- fail "storagePath is required value. Please specify valid root export path" -}} +{{- end }} + +{{/* Validate viewPolicy parameter. Parameter should be not empty string. */}} +{{- $view_policy := pluck "viewPolicy" $options $.Values.storageClassDefaults | first | quote -}} +{{- if eq $view_policy ( quote "" ) -}} +{{- fail "viewPolicy is required value. Please specify valid policy name" -}} +{{- end }} + +{{- $vip_pool_name := pluck "vipPool" $options $.Values.storageClassDefaults | first | quote -}} +{{- $vip_pool_fqdn := pluck "vipPoolFQDN" $options $.Values.storageClassDefaults | first | quote -}} + +{{- if and (ne $vip_pool_name ( quote "" )) (ne $vip_pool_fqdn ( quote "" )) -}} +{{- fail (printf "vipPool and vipPoolFQDN are mutually exclusive in the StorageClass '%s' parameters. Do not set a default value from storageDefaults for either field; choose only one to specify." 
$name) -}} +{{- end }} + +{{- $volume_name_fmt := pluck "volumeNameFormat" $options $.Values.storageClassDefaults | first | quote -}} +{{- $eph_volume_name_fmt := pluck "ephemeralVolumeNameFormat" $options $.Values.storageClassDefaults | first | quote -}} +{{- $qos_policy := pluck "qosPolicy" $options $.Values.storageClassDefaults | first | quote -}} +{{- $mount_options := pluck "mountOptions" $options $.Values.storageClassDefaults | first -}} +{{- $reclaim_policy := pluck "reclaimPolicy" $options $.Values.storageClassDefaults | first | quote -}} +{{- + $allow_volume_expansion := pluck "allowVolumeExpansion" $options $.Values.storageClassDefaults | + first | quote | mustRegexMatch "true" | ternary true false +-}} + +{{- $storage_class_secret := pluck "secretName" $options $.Values.storageClassDefaults | first | quote -}} +{{/* Get secretNamespace parameter. If not provided .Release.Namespace is used. */}} +{{- $storage_class_secret_namespace := pluck "secretNamespace" $options $.Values.storageClassDefaults | first | default $.Release.Namespace | quote -}} + +kind: StorageClass +apiVersion: storage.k8s.io/v1 +provisioner: csi.vastdata.com +metadata: + name: {{ required "A StorageClass name must be not empty" $name }} + namespace: {{ include "vastcsi.namespace" $ }} + annotations: + storageclass.kubernetes.io/is-default-class: {{ $is_default_class }} + labels: + {{- include "vastcsi.labels" $ | nindent 4 }} +reclaimPolicy: {{ $reclaim_policy }} +parameters: + root_export: {{ $storage_path }} + view_policy: {{ $view_policy }} + lb_strategy: "roundrobin" # deprecated; this is here for backwards compatibility, so users don't have to delete their helm deployment and reinstall (since StorageClass is immutable) + volume_name_fmt: {{ $volume_name_fmt }} + eph_volume_name_fmt: {{ $eph_volume_name_fmt }} +{{- range $key, $value := dict "vip_pool_name" $vip_pool_name "vip_pool_fqdn" $vip_pool_fqdn "qos_policy" $qos_policy }} + {{- if and $value (ne $value ( quote "" )) }} + {{ 
$key }}: {{ if (kindIs "int" $value) }}{{ $value | quote }}{{ else }}{{ $value }}{{ end }} + {{- end }} +{{- end }} +{{- if ne $storage_class_secret ( quote "" ) }} + csi.storage.k8s.io/provisioner-secret-name: {{ $storage_class_secret }} + csi.storage.k8s.io/provisioner-secret-namespace: {{ $storage_class_secret_namespace }} + csi.storage.k8s.io/controller-publish-secret-name: {{ $storage_class_secret }} + csi.storage.k8s.io/controller-publish-secret-namespace: {{ $storage_class_secret_namespace }} + csi.storage.k8s.io/node-publish-secret-name: {{ $storage_class_secret }} + csi.storage.k8s.io/node-publish-secret-namespace: {{ $storage_class_secret_namespace }} + csi.storage.k8s.io/controller-expand-secret-name: {{ $storage_class_secret }} + csi.storage.k8s.io/controller-expand-secret-namespace: {{ $storage_class_secret_namespace }} +{{- end }} +allowVolumeExpansion: {{ $allow_volume_expansion }} +{{- if kindIs "string" $mount_options -}} +{{/* Keep option to specify mountOptions as string for backward compatibility */}} +mountOptions: + - {{ $mount_options | quote }} +{{- else }} +mountOptions: {{ toYaml $mount_options | nindent 2 }} +{{- end }} +--- +{{- end }} diff --git a/charts/vastcsi/values.yaml b/charts/vastcsi/values.yaml new file mode 100644 index 00000000..c2d486d7 --- /dev/null +++ b/charts/vastcsi/values.yaml @@ -0,0 +1,337 @@ +#################### +# VAST REST SESSION ATTRIBUTES +#################### +# Secret name, which corresponds to a secret containing credentials to login - should be provided by user if secretName is not provided in StorageClass attributes +# Secret must contain username and password fields +# Example: kubectl create secret generic vast-mgmt --from-literal=username='< VAST username >' --from-literal=password='< VAST password >' +secretName: "" + +# API endpoint of VAST appliance - should be provided by user if secretName is not provided in StorageClass attributes +endpoint: "" + +# Set true to enable certificate validity test 
+verifySsl: false + +# Path (absolute or relative) to SSL certificate for verifying the VAST REST API. +# Must be set using `set-file` option eg `--set-file sslCert=< path to sslCert.crt >` +# sslCertsSecretName secret and sslCert option in values.yaml are mutually exclusive. Make sure to use only one of them. +sslCert: "" +# Secret name, which corresponds to a secret containing an SSL certificate for verifying the VAST REST API +# Example: kubectl create secret generic vast-tls --from-file=ca-bundle.crt=< path to sslCert.crt > +# sslCertsSecretName secret and sslCert option in values.yaml are mutually exclusive. Make sure to use only one of them. +sslCertsSecretName: "" + +#################### +# DELETE VOLUMES +#################### +# Dedicated vip pool to delete volumes. Can have the same value as regular StorageClass option `vipPool` +# - value is not required when VAST cluster version is 4.6.0 onwards and `DontUseTrashApi` flag is set to false +deletionVipPool: "" +# Dedicated view policy to delete volumes. Can have the same value as regular StorageClass option `viewPolicy` +# - value is not required when VAST cluster version is 4.6.0 onwards and `DontUseTrashApi` flag is set to false +deletionViewPolicy: "" +# When the flag is set to 'true', the controller will opt to using a local mount for deleting data from discarded volumes, +# as opposed to sending the request to the VMS over REST. +# Please contact VAST Support before modifying this setting. +dontUseTrashApi: false +# Use this local IP address for mounting, when the StorageClass does not define a vipPool. +# This is useful for DPU-based deployments. 
+useLocalIpForMount: "" + +#################### +# VAST CSI STORAGE CLASS OPTIONS +#################### +# storageClassDefaults is set of options that will be using by default if option is not provided +# for particular storageClass in 'storageClasses' section +storageClassDefaults: + # Any of the following options can be specified within the StorageClasses section on a per-storage class basis, + # or can be set here as default values for all storage classes. + + # Secret name, which corresponds to a secret containing credentials to login - must be provided by user + # Secret must contain username, password and endpoint fields. Other fields are ignored. + # Example: kubectl create secret generic vast-mgmt --from-literal=username='< VAST username >' --from-literal=password='< VAST password >' --from-literal=endpoint='< VAST endpoint >' + # Optionally you can include CA ssl certificate. Along with verifySsl option enabled it will establish trusted connection per StorageClass + # if you have different certificates per cluster (Otherwise use `sslCertsSecretName` to specify global secret with ssl certificate to be used across all storage classes): + # Example: + # kubectl create secret generic vast-mgmt \ + # --from-literal=username='' \ + # --from-literal=password='' \ + # --from-literal=endpoint='' \ + # --from-file=ssl_cert='' + secretName: "" + # Secret namespace. If not specified then secret will be searched in the same namespace as StorageClass is created. + secretNamespace: "" + # Where volumes will be located on VAST - must be provided by user + storagePath: "" + # Name of VAST VIP pool to use. Must specify either vipPool or vipPoolFQDN. + vipPool: "" + # The FQDN of the VIP pool to use. Must specify either vipPool or vipPoolFQDN. + # Using a DNS skips an API call to the VMS for obtaining a random VIP from the vipPool, leading to faster volume mounting. 
+ # NOTE: The driver will prepend the FQDN with a random prefix, which forces the NFS client to resolve into a different VIP, + # thereby distributing the load across the entire range of the VIP pool. + vipPoolFQDN: "" + # VAST policy name to create views - must be provided by user + viewPolicy: "" + # Allows resizing existing volumes + allowVolumeExpansion: true + # If true, sets Vast CSI as the cluster-wide storage class default + setDefaultStorageClass: false + # String template for CSI-provisioned volume names, within VAST + volumeNameFormat: "csi:{namespace}:{name}:{id}" + # String template for CSI-provisioned ephemeral volumes, within VAST + ephemeralVolumeNameFormat: "csi:{namespace}:{name}:{id}" + # Add any extra mount NFS options here + mountOptions: [] + # Name of QoS policy associates with the view. + qosPolicy: "" + # Reclaim policy to use with the storage class. + reclaimPolicy: "Delete" + +# Default storage class to use with CSI DRIVER. +# The only required value is 'vipPool' name where user should provide name of existing vip pool on +# VAST cluster. +# User can extend this section using more storage classes with different storage class parameters and options. +storageClasses: {} + # StorageClass name. This field must be unique across all storage classes. + # vastdata-filesystem: + # Name of VAST VIP pool to use - must be provided by user + # vipPool: "" + # ... Any other options from 'storageClassDefaults' section. If not provided then default value + # will be used. +# User can add more storage classes to this section eg: +# vastdata-filesystem2: +# vipPool: "vippool-2" +# secretName: "secret2" +# .... other options +# +# vastdata-filesystem3: +# vipPool: "vippool-3" +# secretName: "secret3" +# .... other options + +#################### +# VAST PROVISIONER RUNTIME PARAMETERS +#################### + +# The number of worker threads the CSI plugin use to serve requests simultaneously. +numWorkers: 10 +# Timeout of all calls to CSI driver. 
+operationTimeout: 15 +# Each time a failure occurs, sidecar containers initiate retries +# but only after waiting for 'operationRetryIntervalStart' seconds +# which then doubles with each subsequent failure until it reaches `operationRetryIntervalMax` +operationRetryIntervalStart: 10 + +# Maximum interval between attempts. +operationRetryIntervalMax: 60 + +# Truncate VAST quota name if name length is greater than this number. +# set `truncateVolumeName: null` to disable truncation. +truncateVolumeName: 64 + +# indicates this CSI driver requires an attach operation as it implements the `ControllerPublishVolume` +# if set to false, the driver will perform `ControllerPublishVolume` as a part of NodePublishVolume op +# which might speed up the volume attach operation significantly but not appropriate for workload nodes where +# http/https ports are not open. +attachRequired: true + +#################### +# VAST CSI SNAPSHOTS CLASS OPTIONS +#################### +# snapshotClassDefaults is set of options that will be using by default if option is not provided +# for particular snapshotClass in 'snapshotClasses' section +snapshotClassDefaults: + # Any of the following options can be specified within the snapshotClasses section on a per-snapshot class basis, + # or can be set here as default values for all snapshot classes. + + # Secret name, which corresponds to a secret containing credentials to login - must be provided by user + # Secret must contain username, password and endpoint fields. Other fields are ignored. + # Example: kubectl create secret generic vast-mgmt --from-literal=username='< VAST username >' --from-literal=password='< VAST password >' --from-literal=endpoint='< VAST endpoint >' + # Optionally you can include CA ssl certificate. 
Along with verifySsl option enabled it will establish trusted connection per SnapshotClass + # if you have different certificates per cluster (Otherwise use `sslCertsSecretName` to specify global secret with ssl certificate to be used across all snapshot classes): + # Example: + # kubectl create secret generic vast-mgmt \ + # --from-literal=username='' \ + # --from-literal=password='' \ + # --from-literal=endpoint='' \ + # --from-file=ssl_cert='' + secretName: "" + # Secret namespace. If not specified then secret will be searched in the same namespace as SnapshotClass is created. + secretNamespace: "" + # If true, sets SnapshotClass as the cluster-wide snapshot class default + setDefaultSnapshotClass: true + # String template for CSI-provisioned snapshot names, within VAST + snapshotNameFormat: "csi:{namespace}:{name}:{id}" + # On snapshot delete behavior. By default, Vast Cluster snapshot will be removed as well. + deletionPolicy: "Delete" + +snapshotClasses: {} +# vastdata-snapshot: +# secretName: "secret" +# deletionPolicy: "Delete" +# setDefaultSnapshotClass: false +# snapshotNameFormat: "snapshot:{name}:{id}" + + +#################### +# VAST CONTROLLER AND NODE IMAGE SPECIFICATION +#################### + +image: + csiVastPlugin: + repository: vastdataorg/csi + tag: v2.5.0 # the version of the Vast CSI driver + imagePullPolicy: IfNotPresent + csiAttacher: + repository: registry.k8s.io/sig-storage/csi-attacher + tag: v4.5.0 + imagePullPolicy: IfNotPresent + csiNodeDriverRegistrar: + repository: registry.k8s.io/sig-storage/csi-node-driver-registrar + tag: v2.10.0 + imagePullPolicy: IfNotPresent + csiProvisioner: + repository: registry.k8s.io/sig-storage/csi-provisioner + tag: v4.0.0 + imagePullPolicy: IfNotPresent + csiResizer: + repository: registry.k8s.io/sig-storage/csi-resizer + tag: v1.10.0 + imagePullPolicy: IfNotPresent + csiSnapshotter: + repository: registry.k8s.io/sig-storage/csi-snapshotter + tag: v7.0.1 + imagePullPolicy: IfNotPresent + 
+#################### +# VAST CONTROLLER AND NODE BEHAVIOR +# +# WARNING - these parameters are for advanced users. +# Setting these incorrectly may prevent the VAST CSI Driver from running correctly. +# We recommend to consult with VAST Support before changing any of the following parameters +#################### + +controller: + # runOnMaster flag indicated if CSI Controller should be run on master. + runOnMaster: false + # runOnControlPlane flag indicated if CSI Controller should be run on control plane node. + runOnControlPlane: false + # determine how DNS (Domain Name System) resolution should be handled within Pod. + # available values: Default, ClusterFirstWithHostNet, ClusterFirst + dnsPolicy: Default + # nodeSelector is the way to restrict pod to be assigned on certain node/nodes. + # Specify node selector if you want node and controller containers to be assigned only to specific node/nodes of + # your cluster. + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector. + nodeSelector: {} + # If specified, the pod's tolerations + # https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # Allows to specify which nodes your pod is eligible to be scheduled based on labels on pods that are already running on the node. + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + podAffinity: {} + # Allows to specify conditions for preventing pods from being scheduled on nodes where certain labels are already present on other pods. + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + podAntiAffinity: {} + # Allows to specify which nodes your pod is eligible to be scheduled on based on labels on the node + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + nodeAffinity: {} + # Resources describes the compute resource requirements. 
+ resources: + csiProvisioner: + limits: + memory: 400Mi + requests: + cpu: 10m + memory: 20Mi + csiSnapshotter: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiAttacher: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiVastPlugin: + limits: + memory: 400Mi + requests: + cpu: 100m + memory: 50Mi + extraArgs: + # For further options, check + # https://github.com/kubernetes-csi/external-provisioner#command-line-options + # Example: + # controller: + # extraArgs: + # csiProvisioner: + # - kube-api-qps=500 + # - kube-api-burst=1000 + csiProvisioner: [] + # For further options, check + # https://github.com/kubernetes-csi/external-snapshotter#csi-external-snapshotter-sidecar-command-line-options + csiSnapshotter: [] + # For further options, check + # https://github.com/kubernetes-csi/external-attacher#command-line-options + csiAttacher: [] + # For further options, check + # https://github.com/kubernetes-csi/external-resizer#recommended-optional-arguments + csiResizer: [] + # priorityClassName is the name of priority class to be used for the pod. 
+ priorityClassName: system-cluster-critical + +node: + # See controller.dnsPolicy section for details + dnsPolicy: Default + # See controller.nodeSelector section for details + nodeSelector: {} + # See controller.tolerations section for details + tolerations: [] + # See controller.podAffinity section for details + podAffinity: {} + # See controller.podAntiAffinity section for details + podAntiAffinity: {} + # See controller.nodeAffinity section for details + nodeAffinity: {} + # the default host `/etc/nfsmount.d` mount configuration directory as source for mount options + # https://man7.org/linux/man-pages/man5/nfsmount.conf.5.html + propagateHostMountOptions: true + # see controller.resources section for details + resources: + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + csiVastPlugin: + limits: + memory: 400Mi + requests: + cpu: 100m + memory: 50Mi + # See controller.priorityClassName section for details + priorityClassName: system-cluster-critical + +# The path to the kubelet root dir. must be provided when Kubernetes is not installed in its default directory. +kubeletPath: "/var/lib/kubelet" +# rbac flag indicated it Role-based access control from this chart should be used to create apropriate +# permissions for CSI Controller and CSI Node. Use default value unless you understand necessary permissions +# and can provide it other way. 
+rbac: true +# Reference to one or more secrets to be used when pulling images +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +# - name: "image-pull-secret" +imagePullSecrets: [] +# Log level of CSI plugin +logLevel: 5 diff --git a/examples/cosi/cosi-all-in-one.yaml b/examples/cosi/cosi-all-in-one.yaml new file mode 100644 index 00000000..446acdde --- /dev/null +++ b/examples/cosi/cosi-all-in-one.yaml @@ -0,0 +1,41 @@ +kind: BucketClass +apiVersion: objectstorage.k8s.io/v1alpha1 +driverName: csi.vastdata.com +metadata: + name: vastdata-bucket +parameters: + view_policy: s3-policy + vip_pool: vippool-1 + +--- + +kind: BucketClaim +apiVersion: objectstorage.k8s.io/v1alpha1 +metadata: + name: sample-bucket +spec: + bucketClassName: vastdata-bucket + protocols: + - s3 + +--- + +kind: BucketAccessClass +apiVersion: objectstorage.k8s.io/v1alpha1 +metadata: + name: sample-bac +driverName: csi.vastdata.com +authenticationType: KEY + +--- + +kind: BucketAccess +apiVersion: objectstorage.k8s.io/v1alpha1 +metadata: + name: sample-access +spec: + bucketClaimName: sample-bucket + bucketAccessClassName: sample-bac + credentialsSecretName: sample-access-secret + +--- diff --git a/examples/csi-operator/cluster.yaml b/examples/csi-operator/cluster.yaml new file mode 100644 index 00000000..891413dc --- /dev/null +++ b/examples/csi-operator/cluster.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.vastdata.com/v1 +kind: VastCluster +metadata: + name: cluster + namespace: vast-csi +spec: + endpoint: 10.27.113.27 + username: admin + password: "123456" diff --git a/examples/csi-operator/csidriver.yaml b/examples/csi-operator/csidriver.yaml new file mode 100644 index 00000000..83f116f8 --- /dev/null +++ b/examples/csi-operator/csidriver.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.vastdata.com/v1 +kind: VastCSIDriver +metadata: + name: csidriver + namespace: vast-csi +spec: + image: + csiVastPlugin: + image: vastdataorg/csi:v2.5.0 diff --git 
a/examples/csi-operator/deploy/catalog.yaml b/examples/csi-operator/deploy/catalog.yaml new file mode 100644 index 00000000..64c77663 --- /dev/null +++ b/examples/csi-operator/deploy/catalog.yaml @@ -0,0 +1,13 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: CatalogSource +metadata: + annotations: + operators.operatorframework.io/index-image: quay.io/operator-framework/opm:latest + operators.operatorframework.io/injected-bundles: '[{"imageTag":"110450271409.dkr.ecr.eu-west-1.amazonaws.com/dev/vast-csi:1467916-operator-bundle","mode":"semver"},{"imageTag":"110450271409.dkr.ecr.eu-west-1.amazonaws.com/dev/vast-csi:1467917-operator-bundle","mode":"semver"}]' + name: vast-csi-operator-catalog + namespace: vast-csi +spec: + image: quay.io/operatorhubio/catalog:latest + secrets: + - regcred + sourceType: grpc diff --git a/examples/csi-operator/deploy/operatorgroup.yaml b/examples/csi-operator/deploy/operatorgroup.yaml new file mode 100644 index 00000000..12b623e6 --- /dev/null +++ b/examples/csi-operator/deploy/operatorgroup.yaml @@ -0,0 +1,9 @@ +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: vast-csi-operatorgroup + namespace: vast-csi +spec: + targetNamespaces: + - vast-csi + diff --git a/examples/csi-operator/deploy/subscription.yaml b/examples/csi-operator/deploy/subscription.yaml new file mode 100644 index 00000000..de4f25ba --- /dev/null +++ b/examples/csi-operator/deploy/subscription.yaml @@ -0,0 +1,11 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: vast-csi-operator-0-0-1-sub + namespace: vast-csi +spec: + channel: alpha + installPlanApproval: Manual + name: vast-csi-operator + source: vast-csi-operator-catalog + sourceNamespace: vast-csi diff --git a/examples/csi-operator/storage.yaml b/examples/csi-operator/storage.yaml new file mode 100644 index 00000000..f97261a1 --- /dev/null +++ b/examples/csi-operator/storage.yaml @@ -0,0 +1,12 @@ +apiVersion: storage.vastdata.com/v1 +kind: VastStorage 
+metadata: + name: vastdata-filesystem + namespace: vast-csi +spec: + clusterName: cluster + storagePath: "/k8s" + viewPolicy: "default" + vipPool: "vippool-1" + allowVolumeExpansion: true + createSnapshotClass: true diff --git a/examples/csi/csi-app.yaml b/examples/csi/csi-app.yaml new file mode 100644 index 00000000..7c4da10e --- /dev/null +++ b/examples/csi/csi-app.yaml @@ -0,0 +1,16 @@ +kind: Pod +apiVersion: v1 +metadata: + name: my-csi-app +spec: + containers: + - name: my-frontend + image: busybox + volumeMounts: + - mountPath: "/data" + name: my-csi-volume + command: [ "sleep", "1000000" ] + volumes: + - name: my-csi-volume + persistentVolumeClaim: + claimName: csi-pvc # defined in csi-pvc.yaml diff --git a/examples/csi/csi-clone.yaml b/examples/csi/csi-clone.yaml new file mode 100644 index 00000000..374b3a51 --- /dev/null +++ b/examples/csi/csi-clone.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: csi-pvc-clone +spec: + accessModes: + - ReadWriteOnce + storageClassName: vastdata-filesystem + resources: + requests: + storage: 2Gi + dataSource: + kind: PersistentVolumeClaim + name: csi-pvc diff --git a/examples/csi/csi-pod-raw.yaml b/examples/csi/csi-pod-raw.yaml new file mode 100644 index 00000000..4b7149ae --- /dev/null +++ b/examples/csi/csi-pod-raw.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: pod-raw + labels: + name: busybox-test +spec: + restartPolicy: Always + containers: + - image: gcr.io/google_containers/busybox + command: ["/bin/sh", "-c"] + args: [ "tail -f /dev/null" ] + name: busybox + volumeDevices: + - name: vol + devicePath: /dev/loop3 # This device path needs to be replaced with the site specific + volumes: + - name: vol + persistentVolumeClaim: + claimName: pvc-raw diff --git a/examples/csi/csi-pvc-from-snapshot.yaml b/examples/csi/csi-pvc-from-snapshot.yaml new file mode 100644 index 00000000..32275990 --- /dev/null +++ b/examples/csi/csi-pvc-from-snapshot.yaml @@ -0,0 +1,14 @@ 
+apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-restore +spec: + dataSource: + name: new-snapshot-demo + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/examples/csi/csi-pvc.yaml b/examples/csi/csi-pvc.yaml new file mode 100644 index 00000000..076b8ffa --- /dev/null +++ b/examples/csi/csi-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: csi-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: csi-vast-sc \ No newline at end of file diff --git a/examples/csi/csi-raw-pv-snapshot.yaml b/examples/csi/csi-raw-pv-snapshot.yaml new file mode 100644 index 00000000..70146526 --- /dev/null +++ b/examples/csi/csi-raw-pv-snapshot.yaml @@ -0,0 +1,9 @@ +apiVersion: snapshot.storage.k8s.io/v1alpha1 +kind: VolumeSnapshot +metadata: + name: raw-pv-snapshot +spec: + snapshotClassName: csi-vast-snapclass + source: + name: pvc-raw + kind: PersistentVolumeClaim diff --git a/examples/csi/csi-restore.yaml b/examples/csi/csi-restore.yaml new file mode 100644 index 00000000..04126867 --- /dev/null +++ b/examples/csi/csi-restore.yaml @@ -0,0 +1,35 @@ +# ReadOnly mode. Volume is pointed directly to snapshot. +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hpvc-restore-1 +spec: + storageClassName: csi-vast-sc + dataSource: + name: new-snapshot-demo + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadOnlyMany + resources: + requests: + storage: 1Gi + +--- + +# ReadWrite mode. Snapshot data is fully replicated to VAST view folder via intermediate GlobalSnapshotStream. 
+apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hpvc-restore-2 +spec: + storageClassName: csi-vast-sc + dataSource: + name: new-snapshot-demo + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/examples/csi/csi-snapshot.yaml b/examples/csi/csi-snapshot.yaml new file mode 100644 index 00000000..d97569f4 --- /dev/null +++ b/examples/csi/csi-snapshot.yaml @@ -0,0 +1,9 @@ +apiVersion: snapshot.storage.k8s.io/v1alpha1 +kind: VolumeSnapshot +metadata: + name: new-snapshot-demo +spec: + snapshotClassName: csi-vast-snapclass + source: + name: csi-pvc + kind: PersistentVolumeClaim diff --git a/examples/csi/csi-storageclass.yaml b/examples/csi/csi-storageclass.yaml new file mode 100644 index 00000000..d0007d8c --- /dev/null +++ b/examples/csi/csi-storageclass.yaml @@ -0,0 +1,16 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-vast-sc +provisioner: csi.vastdata.com +parameters: + vip_pool_name: vippool-1 + root_export: /k8s + view_policy: default + volume_name_fmt: csi:{namespace}:{name}:{id} +reclaimPolicy: Delete +volumeBindingMode: Immediate +allowVolumeExpansion: true +mountOptions: + - nolock + - vers=4 diff --git a/examples/csi/pod-with-ephemeral-volume.yaml b/examples/csi/pod-with-ephemeral-volume.yaml new file mode 100644 index 00000000..fd75ea92 --- /dev/null +++ b/examples/csi/pod-with-ephemeral-volume.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: some-pod +spec: + containers: + - command: + - sh + - -c + - while true; do date -Iseconds >> /shared/$HOSTNAME; sleep 1; done + image: busybox + name: my-frontend + volumeMounts: + - mountPath: /shared + name: my-eph-vol + volumes: + - csi: + driver: csi.vastdata.com + volumeAttributes: + root_export: /k8s + size: 1G + view_policy: default + vip_pool_name: vippool-1 + nodePublishSecretRef: + name: vast-mgmt + name: my-eph-vol diff --git 
a/examples/csi/pod-with-static-volume.yaml b/examples/csi/pod-with-static-volume.yaml new file mode 100644 index 00000000..f4f35e00 --- /dev/null +++ b/examples/csi/pod-with-static-volume.yaml @@ -0,0 +1,16 @@ +kind: Pod +apiVersion: v1 +metadata: + name: my-csi-app +spec: + containers: + - name: my-frontend + image: busybox + volumeMounts: + - mountPath: "/data" + name: my-csi-volume + command: [ "sleep", "1000000" ] + volumes: + - name: my-csi-volume + persistentVolumeClaim: + claimName: csi-pvc-static diff --git a/examples/csi/static-volume.yaml b/examples/csi/static-volume.yaml new file mode 100644 index 00000000..89eefcff --- /dev/null +++ b/examples/csi/static-volume.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: csi-pv-static +spec: + storageClassName: vastdata-filesystem + capacity: + storage: 1Gi + accessModes: + - ReadWriteOnce + mountOptions: + - nfsvers=3 + csi: + driver: csi.vastdata.com + volumeAttributes: + vip_pool_name: vippool-1 # or vip_pool_fqdn + view_policy: default # needed only when `static_pv_create_views` is set to "yes/true" + size: 1G + static_pv_create_views: "yes" + static_pv_create_quotas: "yes" + controllerPublishSecretRef: + name: vast-mgmt + namespace: default + volumeHandle: /full/path/to/view + +--- + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: csi-pvc-static +spec: + storageClassName: vastdata-filesystem + volumeName: csi-pv-static + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/examples/csi/vast_credentials.yaml b/examples/csi/vast_credentials.yaml new file mode 100644 index 00000000..28b273c0 --- /dev/null +++ b/examples/csi/vast_credentials.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: vast-mgmt +type: Opaque +data: + username: user1 + password: 654321 diff --git a/k8s_supported.txt b/k8s_supported.txt index 991591ef..43af072e 100644 --- a/k8s_supported.txt +++ b/k8s_supported.txt @@ -1,5 +1,4 @@ -1.19.0 -1.20.0 
-1.21.0 -1.21.2 -1.23.3 +1.22.15 +1.23.14 +1.24.8 +1.25.4 diff --git a/packaging/Dockerfile b/packaging/Dockerfile index 5ab83a0a..2ec80122 100644 --- a/packaging/Dockerfile +++ b/packaging/Dockerfile @@ -1,29 +1,23 @@ -FROM python:3.9-alpine - -WORKDIR /root - -COPY packaging/files . -# RUN apt-get update && apt-get install -y nfs-common && \ -RUN apk add --no-cache \ - # used by driver to mount - nfs-utils \ - # used to compile grpcio - linux-headers build-base && \ - pip install --no-cache-dir -r requirements.txt && \ - cd /usr/libexec/gcc/x86_64-alpine-linux-musl/*/ && rm -fv cc1 cc1obj cc1plus lto1 - -COPY vast_csi vast_csi -COPY deployment . -COPY k8s_supported.txt . +ARG BASE_IMAGE_NAME +FROM $BASE_IMAGE_NAME ARG NAME=csi.vastdata.com ARG VERSION ARG GIT_COMMIT ARG CI_PIPELINE_ID -RUN echo "$NAME $VERSION $GIT_COMMIT" > version.info +RUN echo "$NAME $VERSION $GIT_COMMIT $CI_PIPELINE_ID" > version.info + +COPY vast_csi vast_csi +COPY charts charts +COPY tests tests +COPY k8s_supported.txt . -LABEL name=$NAME -LABEL version=$VERSION.$GIT_COMMIT.$CI_PIPELINE_ID -LABEL vendor=vastdata +LABEL \ + name="VASTData CSI Driver" \ + vendor="VASTData" \ + version=$VERSION \ + release="1" \ + summary="VASTData CSI Driver" \ + description="This container image deploys the VASTData Container Storage Interface (CSI) driver. The CSI driver provides storage management and provisioning capabilities for containerized applications, allowing seamless integration with VASTData's storage solutions. It supports dynamic volume provisioning, snapshot management, and volume attachment operations for Kubernetes clusters." 
-ENTRYPOINT ["python", "-m", "vast_csi"] +ENTRYPOINT ["poetry", "run", "python", "-m", "vast_csi"] diff --git a/packaging/base.Dockerfile b/packaging/base.Dockerfile new file mode 100644 index 00000000..269cd2b3 --- /dev/null +++ b/packaging/base.Dockerfile @@ -0,0 +1,43 @@ +FROM registry.access.redhat.com/ubi9/ubi-minimal + +WORKDIR /root + +# Install basic tools and dependencies +RUN microdnf upgrade -y \ + && microdnf install -y python3 python3-devel python3-pip gcc g++ make findutils which \ + && microdnf clean all + +# Add CentOS Stream 9 repository for nfs-utils installation +RUN echo "[centos-stream]" > /etc/yum.repos.d/centos-stream.repo \ + && echo "name=CentOS Stream 9 - BaseOS" >> /etc/yum.repos.d/centos-stream.repo \ + && echo "baseurl=https://mirror.stream.centos.org/9-stream/BaseOS/\$basearch/os/" >> /etc/yum.repos.d/centos-stream.repo \ + && echo "enabled=1" >> /etc/yum.repos.d/centos-stream.repo \ + && echo "gpgcheck=0" >> /etc/yum.repos.d/centos-stream.repo \ + && microdnf install -y nfs-utils \ + && microdnf clean all + +COPY pyproject.toml poetry.lock* ./ +# Required Licenses +COPY LICENSE /licenses/LICENSE + +# Install Poetry and python dependencies +RUN curl -sSL https://install.python-poetry.org | python3 - --version 1.8.3 \ + && mv /root/.local/bin/poetry /usr/local/bin/poetry \ + && poetry config virtualenvs.create false \ + && poetry config virtualenvs.in-project true \ + && poetry config virtualenvs.options.no-pip true \ + && mkdir .venv \ + && poetry install --no-dev \ + && rm -f poetry.lock* + +# Dynamically find the GCC directory and remove GCC files +RUN set -ex; \ + gcc_dirs=$(find /usr/libexec/gcc -mindepth 1 -maxdepth 1 -type d); \ + if [ -n "$gcc_dirs" ]; then \ + for gcc_dir in $gcc_dirs; do \ + echo "Found GCC directory: $gcc_dir"; \ + cd "$gcc_dir" && rm -fv cc1 cc1obj cc1plus lto1 || true; \ + done; \ + else \ + echo "No suitable GCC directories found."; \ + fi diff --git a/packaging/build_image.sh b/packaging/build_image.sh 
new file mode 100755 index 00000000..0a8b49ed --- /dev/null +++ b/packaging/build_image.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +set -e + +log() { echo -e "\033[93m$(date) >> $@\033[0m" 1>&2; } + +# Required environment variables +: "${IMAGE_TAG:?ERROR: IMAGE_TAG is not set.}" +: "${DOCKERFILE:?ERROR: DOCKERFILE is not set.}" + +# Optional environment variables with defaults +BASE_IMAGE_NAME=${BASE_IMAGE_NAME:-""} +PLATFORMS=${PLATFORMS:-""} # Default to empty if not provided +CACHE_FROM=${CACHE_FROM:-""} # Default to empty if not provided +PUSH_ON_SUCCESS=${PUSH_ON_SUCCESS:-"false"} # Default to "false" if not provided +DOCKERFILE_PATH="$(cd "$(dirname "$0")" && pwd)/$DOCKERFILE" + +if [ ! -f version.txt ] || [ ! -s version.txt ]; then + log "ERROR" "version.txt does not exist or is empty." && exit 1 +fi + +VERSION=$(cat version.txt) + +if [ -z "$CI_COMMIT_SHA" ]; then + CI_COMMIT_SHA=$(git rev-parse HEAD || { log "ERROR" "Failed to get git commit SHA."; exit 1; }) +fi + +CACHE_FROM_ARG="" +if [ -n "$CACHE_FROM" ]; then + CACHE_FROM_ARG="--cache-from $CACHE_FROM" +fi + +if [ -n "$PLATFORMS" ]; then + # Use Buildx if platforms are specified + log "INFO" "Using Buildx for platforms: $PLATFORMS" + if ! docker buildx inspect builder > /dev/null 2>&1; then + log "INFO" "Creating a new Buildx builder instance." + docker buildx create --name builder --use + else + log "INFO" "Using existing Buildx builder instance." + fi + # Build and push the Docker image using Buildx + if ! docker buildx build \ + --platform "$PLATFORMS" \ + -t "$IMAGE_TAG" \ + $CACHE_FROM_ARG \ + --build-arg=GIT_COMMIT="$CI_COMMIT_SHA" \ + --build-arg=VERSION="$VERSION" \ + --build-arg=CI_PIPELINE_ID="${CI_PIPELINE_ID:-local}" \ + --build-arg=BASE_IMAGE_NAME="$BASE_IMAGE_NAME" \ + -f "$DOCKERFILE_PATH" \ + --push \ + .; then + log "ERROR" "Buildx build failed." && exit 1 + fi +else + # Use standard Docker build if no platforms are specified + if ! 
docker build \ + -t "$IMAGE_TAG" \ + $CACHE_FROM_ARG \ + --build-arg=GIT_COMMIT="$CI_COMMIT_SHA" \ + --build-arg=VERSION="$VERSION" \ + --build-arg=CI_PIPELINE_ID="${CI_PIPELINE_ID:-local}" \ + --build-arg=BASE_IMAGE_NAME="$BASE_IMAGE_NAME" \ + -f "$DOCKERFILE_PATH" \ + .; then + log "ERROR" "Docker build failed." && exit 1 + fi + if [ "$PUSH_ON_SUCCESS" == "true" ]; then + log "INFO" "Pushing image $IMAGE_TAG to specified registry." + docker push "$IMAGE_TAG" + fi +fi + +# Log build completion +log "INFO" "Build and push completed for image: $IMAGE_TAG" diff --git a/packaging/ci.Dockerfile b/packaging/ci.Dockerfile new file mode 100644 index 00000000..6482e2a7 --- /dev/null +++ b/packaging/ci.Dockerfile @@ -0,0 +1,24 @@ +FROM docker:latest + +RUN apk add --no-cache make curl bash git go + +# Install helm +# Need to install on-edge version to have toYamlPretty function. https://github.com/helm/helm/pull/12583 +RUN git clone https://github.com/helm/helm.git \ + && cd helm \ + && make install \ + && cd .. 
\ + && rm -rf helm + +# Install operator-sdk +RUN curl -LO https://github.com/operator-framework/operator-sdk/releases/download/v1.11.0/operator-sdk_linux_amd64 \ + && chmod +x operator-sdk_linux_amd64 \ + && mkdir -p /usr/local/bin/ \ + && mv operator-sdk_linux_amd64 /usr/local/bin/operator-sdk \ + && operator-sdk version + +# Install preflight +RUN curl -LO https://github.com/redhat-openshift-ecosystem/openshift-preflight/releases/download/1.10.0/preflight-linux-amd64 \ + && chmod +x preflight-linux-amd64 \ + && mv preflight-linux-amd64 /usr/local/bin/preflight \ + && preflight version diff --git a/packaging/gen-operator-bundle.sh b/packaging/gen-operator-bundle.sh new file mode 100755 index 00000000..420bb090 --- /dev/null +++ b/packaging/gen-operator-bundle.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env sh + +set -e + +ROOT_DIR=$1 +CHANNEL=$2 +OUT_DIR=$ROOT_DIR/bundle +shift 2 # Shift out the first three arguments (CHART_DIR, OUT_DIR and CHANNEL) + +# Store remaining arguments in a variable +HELM_ARGS="$@" + +echo "CHANNEL: $CHANNEL" +echo "OUT_DIR: $OUT_DIR" +echo "Helm args: $HELM_ARGS" + +CSI_OPERATOR_CHART_PATH=$ROOT_DIR/charts/vastcsi-operator + +# Run Helm template command with dynamic arguments +echo "Generate manifests" +helm template --dry-run --debug csi-operator $CSI_OPERATOR_CHART_PATH $HELM_ARGS \ +| awk -v out=$OUT_DIR/manifests -F"/" '$0~/^# Source: /{file=out"/"$NF; print "Creating "file; system ("mkdir -p $(dirname "file"); echo -n "" > "file)} $0!~/^#/ && $0!="---"{print $0 >> file}' + +echo "Generate metadata" +METADATA_DIR=$OUT_DIR/metadata +mkdir -p $METADATA_DIR +cat < $METADATA_DIR/annotations.yaml +annotations: + operators.operatorframework.io.bundle.channels.v1: ${CHANNEL} + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: vast-csi-operator + 
operators.operatorframework.io.metrics.builder: operator-sdk-v1.3.0-ocp + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: helm.sdk.operatorframework.io/v1 + + # Annotations for testing. + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ + + # Annotations to specify supported OCP versions. + com.redhat.openshift.versions: v4.14-v4.15 +EOF + +echo "Generate scorecard testing template" +TESTS_DIR=$OUT_DIR/tests/scorecard +mkdir -p $TESTS_DIR +cp $CSI_OPERATOR_CHART_PATH/scorecard_config.yaml $TESTS_DIR/config.yaml diff --git a/packaging/operator.Dockerfile b/packaging/operator.Dockerfile new file mode 100644 index 00000000..28e9be76 --- /dev/null +++ b/packaging/operator.Dockerfile @@ -0,0 +1,21 @@ +FROM registry.redhat.io/openshift4/ose-helm-operator@sha256:4882ede68eeb45fc62b3ac25f0a46ff9485f3f2ddf133b6d349560ef65f9012a + +ARG VERSION +# Required OpenShift Labels +LABEL name="VASTData CSI Driver Operator" +LABEL vendor="VASTData" +LABEL version="v${VERSION}" +LABEL release="1" +LABEL summary="VASTData CSI Driver Operator" +LABEL description="This operator will deploy VASTData CSI driver to the cluster." 
+ +# Required Licenses +COPY LICENSE /licenses/LICENSE + +ENV HOME=/opt/helm +COPY charts/vastcsi-operator/watches.yaml ${HOME}/watches.yaml +COPY charts/vastcsi-operator/crd-charts ${HOME}/helm-charts + +# Update chart versions +RUN find ${HOME}/helm-charts -name "Chart.yaml" -exec sed -i.bak "s/^version: .*/version: $VERSION/" {} \; +WORKDIR ${HOME} diff --git a/packaging/operator_bundle.Dockerfile b/packaging/operator_bundle.Dockerfile new file mode 100644 index 00000000..b75d8386 --- /dev/null +++ b/packaging/operator_bundle.Dockerfile @@ -0,0 +1,23 @@ +FROM scratch + +ARG CHANNEL + +LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1=vast-csi-operator +LABEL operators.operatorframework.io.bundle.channels.v1=alpha,beta,stable +LABEL operators.operatorframework.io.bundle.channel.default.v1=${CHANNEL} +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.3.0-ocp +LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 +LABEL operators.operatorframework.io.metrics.project_layout=helm.sdk.operatorframework.io/v1 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 + +LABEL com.redhat.openshift.versions="v4.14-v4.15" +LABEL com.redhat.delivery.operator.bundle=true +LABEL com.redhat.delivery.backport=true + +COPY bundle/manifests /manifests/ +COPY bundle/metadata /metadata/ +COPY bundle/tests/scorecard /tests/scorecard/ diff --git a/packaging/run_preflight.sh b/packaging/run_preflight.sh new file mode 100755 index 00000000..4d9d5a93 --- /dev/null +++ b/packaging/run_preflight.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Function to display usage information +usage() { + echo "Usage: $0" + echo + echo "This script publishes a Docker image to the Red Hat 
registry and runs a preflight check for certification." + echo + echo "Required environment variables:" + echo " PROJECT_ID - The project ID used for the Red Hat certification. (5f7595a16fd1fbdbe36c0b50 for CSI Driver and 66e6d0dd49f52e86c9d56b1c for Operator)" + echo " IMAGE_TAG - The full tag of the image to be published." + echo " PYXIS_API_TOKEN - The API token for Pyxis. (Can be found in the Red Hat certification project. (Product management -> Container API keys)" + echo + echo "Example:" + echo " export PROJECT_ID='your_project_id'" + echo " export IMAGE_TAG='quay.io/redhat-isv-containers/your_project_id:your_image_tag'" + echo " export PYXIS_API_TOKEN='your_pyxis_api_token'" + echo " ./run_preflight.sh" + exit 1 +} + +# Ensure the script exits on any error +set -e + +# Check for required environment variables +if [ -z "$PROJECT_ID" ] || [ -z "$IMAGE_TAG" ] || [ -z "$PYXIS_API_TOKEN" ]; then + echo "Error: Missing required environment variables." + usage +fi + +# Run preflight check +echo "Running preflight check..." +preflight check container \ + "$IMAGE_TAG" \ + --submit \ + --pyxis-api-token="$PYXIS_API_TOKEN" \ + --certification-project-id="$PROJECT_ID" \ + --docker-config="$HOME/.docker/config.json" + +echo "Image publication process completed successfully." 
diff --git a/packaging/sanity.sh b/packaging/sanity.sh index 2b025d16..f073a45e 100755 --- a/packaging/sanity.sh +++ b/packaging/sanity.sh @@ -4,7 +4,14 @@ set -e log() { echo -e "\033[92m$(date $DATE_PARAM) >> $@\033[0m" 1>&2; } -export VERSION=v2.3.0 +# Check if the vast-csi image is provided +if [ -z "$1" ]; then + log "ERROR" "Usage: $0 " + exit 1 +fi + +VAST_CSI_IMAGE=$1 +export VERSION=v4.3.0 docker build -t csi-sanity:$VERSION -< /dev/null || true docker rm test-subject 2> /dev/null || true +docker volume rm -f csi-tests +docker network rm $NETWORK 2> /dev/null || true docker network create $NETWORK 2> /dev/null || true -trap "(docker kill nfs test-subject; docker network rm $NETWORK) 1> /dev/null 2>&1 || true" exit +trap "(docker kill nfs test-subject; docker network rm $NETWORK; docker volume rm -f csi-tests) 1> /dev/null 2>&1 || true" exit docker run -d --name nfs --rm --privileged --network $NETWORK erezhorev/dockerized_nfs_server docker run \ --init \ - --name test-subject \ + --name test-subject \ --network $NETWORK \ --privileged \ + -v csi-tests:/tmp \ -e PYTHONFAULTHANDLER=yes \ -e CSI_ENDPOINT=0.0.0.0:50051 \ -e X_CSI_MOCK_VAST=yes \ -e X_CSI_SANITY_TEST=yes \ -e X_CSI_NFS_SERVER=nfs \ -e X_CSI_NFS_EXPORT=/exports \ - vast-csi:dev serve & + $VAST_CSI_IMAGE serve & + +# -h \ if docker run \ - --name csi-sanity \ - --network $NETWORK \ - --rm \ - csi-sanity:$VERSION \ - /csi-sanity \ - -csi.endpoint=test-subject:50051 \ - -ginkgo.failFast \ - -ginkgo.progress \ - -ginkgo.debug; then + --name csi-sanity \ + --network $NETWORK \ + -v csi-tests:/tmp \ + --rm \ + csi-sanity:$VERSION \ + /csi-sanity \ + --ginkgo.failFast \ + --csi.endpoint=test-subject:50051 \ + --ginkgo.progress \ + --ginkgo.v \ + --ginkgo.seed=1; then log "All Good Bananas" else log "Sanity test failed" diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..645219e3 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,871 @@ +# This file is automatically @generated by Poetry 
1.8.3 and should not be changed by hand. + +[[package]] +name = "appnope" +version = "0.1.4" +description = "Disable App Nap on macOS >= 10.9" +optional = false +python-versions = ">=3.6" +files = [ + {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, + {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, +] + +[[package]] +name = "attrs" +version = "24.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, +] + +[package.extras] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] + +[[package]] +name = "backcall" +version = "0.2.0" +description = "Specifications for callback functions passed in to an API" +optional = false +python-versions = "*" +files = [ + {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, + {file = "backcall-0.2.0.tar.gz", 
hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, +] + +[[package]] +name = "certifi" +version = "2024.8.30" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, 
+ {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + 
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = 
"cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = 
"cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = 
"cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "cryptography" +version = "43.0.0" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-43.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74"}, + {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895"}, + {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22"}, + {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47"}, + {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf"}, + {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55"}, + {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431"}, + {file = "cryptography-43.0.0-cp37-abi3-win32.whl", hash = "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc"}, + {file = "cryptography-43.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778"}, + {file = "cryptography-43.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66"}, + {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5"}, + {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e"}, + {file = 
"cryptography-43.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5"}, + {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f"}, + {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0"}, + {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b"}, + {file = "cryptography-43.0.0-cp39-abi3-win32.whl", hash = "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf"}, + {file = "cryptography-43.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709"}, + {file = "cryptography-43.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70"}, + {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66"}, + {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f"}, + {file = "cryptography-43.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f"}, + {file = "cryptography-43.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2"}, + {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947"}, + {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069"}, + {file = 
"cryptography-43.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1"}, + {file = "cryptography-43.0.0.tar.gz", hash = "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.0)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "grpcio" +version = "1.25.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = "*" +files = [ + {file = "grpcio-1.25.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = 
"sha256:7535b3e52f498270e7877dde1c8944d6b7720e93e2e66b89c82a11447b5818f5"}, + {file = "grpcio-1.25.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:4450352a87094fd58daf468b04c65a9fa19ad11a0ac8ac7b7ff17d46f873cbc1"}, + {file = "grpcio-1.25.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:24db2fa5438f3815a4edb7a189035051760ca6aa2b0b70a6a948b28bfc63c76b"}, + {file = "grpcio-1.25.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:d46c1f95672b73288e08cdca181e14e84c6229b5879561b7b8cfd48374e09287"}, + {file = "grpcio-1.25.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:bc0d41f4eb07da8b8d3ea85e50b62f6491ab313834db86ae2345be07536a4e5a"}, + {file = "grpcio-1.25.0-cp27-cp27m-win32.whl", hash = "sha256:ec759ece4786ae993a5b7dc3b3dead6e9375d89a6c65dfd6860076d2eb2abe7b"}, + {file = "grpcio-1.25.0-cp27-cp27m-win_amd64.whl", hash = "sha256:5de86c182667ec68cf84019aa0d8ceccf01d352cdca19bf9e373725204bdbf50"}, + {file = "grpcio-1.25.0-cp27-cp27mu-linux_armv7l.whl", hash = "sha256:7c4e495bcabc308198b8962e60ca12f53b27eb8f03a21ac1d2d711d6dd9ecfca"}, + {file = "grpcio-1.25.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:39671b7ff77a962bd745746d9d2292c8ed227c5748f16598d16d8631d17dd7e5"}, + {file = "grpcio-1.25.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:3247834d24964589f8c2b121b40cd61319b3c2e8d744a6a82008643ef8a378b1"}, + {file = "grpcio-1.25.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:aaeac2d552772b76d24eaff67a5d2325bc5205c74c0d4f9fbe71685d4a971db2"}, + {file = "grpcio-1.25.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:559b1a3a8be7395ded2943ea6c2135d096f8cc7039d6d12127110b6496f251fe"}, + {file = "grpcio-1.25.0-cp34-cp34m-linux_armv7l.whl", hash = "sha256:ebb211a85248dbc396b29320273c1ffde484b898852432613e8df0164c091006"}, + {file = "grpcio-1.25.0-cp34-cp34m-macosx_10_7_intel.whl", hash = "sha256:4413b11c2385180d7de03add6c8845dd66692b148d36e27ec8c9ef537b2553a1"}, + {file = "grpcio-1.25.0-cp34-cp34m-manylinux1_i686.whl", hash = 
"sha256:f8370ad332b36fbad117440faf0dd4b910e80b9c49db5648afd337abdde9a1b6"}, + {file = "grpcio-1.25.0-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:40a0b8b2e6f6dd630f8b267eede2f40a848963d0f3c40b1b1f453a4a870f679e"}, + {file = "grpcio-1.25.0-cp34-cp34m-manylinux2010_i686.whl", hash = "sha256:bf51051c129b847d1bb63a9b0826346b5f52fb821b15fe5e0d5ef86f268510f5"}, + {file = "grpcio-1.25.0-cp34-cp34m-manylinux2010_x86_64.whl", hash = "sha256:8eb5d54b87fb561dc2e00a5c5226c33ffe8dbc13f2e4033a412bafb7b37b194d"}, + {file = "grpcio-1.25.0-cp34-cp34m-win32.whl", hash = "sha256:49ffda04a6e44de028b3b786278ac9a70043e7905c3eea29eed88b6524d53a29"}, + {file = "grpcio-1.25.0-cp34-cp34m-win_amd64.whl", hash = "sha256:eb4bf58d381b1373bd21d50837a53953d625d1693f1b58fed12743c75d3dd321"}, + {file = "grpcio-1.25.0-cp35-cp35m-linux_armv7l.whl", hash = "sha256:423f76aa504c84cb94594fb88b8a24027c887f1c488cf58f2173f22f4fbd046c"}, + {file = "grpcio-1.25.0-cp35-cp35m-macosx_10_7_intel.whl", hash = "sha256:40f9a74c7aa210b3e76eb1c9d56aa8d08722b73426a77626967019df9bbac287"}, + {file = "grpcio-1.25.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:d5d58309b42064228b16b0311ff715d6c6e20230e81b35e8d0c8cfa1bbdecad8"}, + {file = "grpcio-1.25.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:dc6e2e91365a1dd6314d615d80291159c7981928b88a4c65654e3fefac83a836"}, + {file = "grpcio-1.25.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:8a8fc4a0220367cb8370cedac02272d574079ccc32bffbb34d53aaf9e38b5060"}, + {file = "grpcio-1.25.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:5fc069bb481fe3fad0ba24d3baaf69e22dfa6cc1b63290e6dfeaf4ac1e996fb7"}, + {file = "grpcio-1.25.0-cp35-cp35m-win32.whl", hash = "sha256:bb611e447559b3b5665e12a7da5160c0de6876097f62bf1d23ba66911564868e"}, + {file = "grpcio-1.25.0-cp35-cp35m-win_amd64.whl", hash = "sha256:2adb1cdb7d33e91069517b41249622710a94a1faece1fed31cd36904e4201cde"}, + {file = "grpcio-1.25.0-cp36-cp36m-linux_armv7l.whl", hash = 
"sha256:f55108397a8fa164268238c3e69cc134e945d1f693572a2f05a028b8d0d2b837"}, + {file = "grpcio-1.25.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:94cdef0c61bd014bb7af495e21a1c3a369dd0399c3cd1965b1502043f5c88d94"}, + {file = "grpcio-1.25.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:d435a01334157c3b126b4ee5141401d44bdc8440993b18b05e2f267a6647f92d"}, + {file = "grpcio-1.25.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:a5eaae8700b87144d7dfb475aa4675e500ff707292caba3deff41609ddc5b845"}, + {file = "grpcio-1.25.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:9d9f3be69c7a5e84c3549a8c4403fa9ac7672da456863d21e390b2bbf45ccad1"}, + {file = "grpcio-1.25.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:2cd51f35692b551aeb1fdeb7a256c7c558f6d78fcddff00640942d42f7aeba5f"}, + {file = "grpcio-1.25.0-cp36-cp36m-win32.whl", hash = "sha256:6a19d654da49516296515d6f65de4bbcbd734bc57913b21a610cfc45e6df3ff1"}, + {file = "grpcio-1.25.0-cp36-cp36m-win_amd64.whl", hash = "sha256:0419ae5a45f49c7c40d9ae77ae4de9442431b7822851dfbbe56ee0eacb5e5654"}, + {file = "grpcio-1.25.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4a38c4dde4c9120deef43aaabaa44f19186c98659ce554c29788c4071ab2f0a4"}, + {file = "grpcio-1.25.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:9fb6fb5975a448169756da2d124a1beb38c0924ff6c0306d883b6848a9980f38"}, + {file = "grpcio-1.25.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:f6c706866d424ff285b85a02de7bbe5ed0ace227766b2c42cbe12f3d9ea5a8aa"}, + {file = "grpcio-1.25.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:50b1febdfd21e2144b56a9aa226829e93a79c354ef22a4e5b013d9965e1ec0ed"}, + {file = "grpcio-1.25.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:8b008515e067232838daca020d1af628bf6520c8cc338bf383284efe6d8bd083"}, + {file = "grpcio-1.25.0-cp37-cp37m-win32.whl", hash = "sha256:1e8631eeee0fb0b4230aeb135e4890035f6ef9159c2a3555fa184468e325691a"}, + {file = "grpcio-1.25.0-cp37-cp37m-win_amd64.whl", hash = 
"sha256:e0dfb5f7a39029a6cbec23affa923b22a2c02207960fd66f109e01d6f632c1eb"}, + {file = "grpcio-1.25.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3433cb848b4209717722b62392e575a77a52a34d67c6730138102abc0a441685"}, + {file = "grpcio-1.25.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:43bd04cec72281a96eb361e1b0232f0f542b46da50bcfe72ef7e5a1b41d00cb3"}, + {file = "grpcio-1.25.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:43e38762635c09e24885d15e3a8e374b72d105d4178ee2cc9491855a8da9c380"}, + {file = "grpcio-1.25.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:8d1684258e1385e459418f3429e107eec5fb3d75e1f5a8c52e5946b3f329d6ea"}, + {file = "grpcio-1.25.0.tar.gz", hash = "sha256:c948c034d8997526011960db54f512756fb0b4be1b81140a15b4ef094c6594a4"}, +] + +[package.dependencies] +six = ">=1.5.2" + +[[package]] +name = "grpcio-tools" +version = "1.25.0" +description = "Protobuf code generator for gRPC" +optional = false +python-versions = "*" +files = [ + {file = "grpcio-tools-1.25.0.tar.gz", hash = "sha256:988014c714ca654b3b7ca9f4dabfe487b00e023bfdd9eaf1bb0fed82bf8c4255"}, + {file = "grpcio_tools-1.25.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:67d12ec4548dd2b1f15c9e3a953c8f48d8c3441c2d8bd143fc3af95a1c041c2b"}, + {file = "grpcio_tools-1.25.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:4cc95d5fddebb9348fafcc4c0147745882794ded7cfd5282b2aa158596c77a8a"}, + {file = "grpcio_tools-1.25.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:b013d93bc6dc5c7bf3642bf30e673daee46f9a4984fbd9588a9cda1071278414"}, + {file = "grpcio_tools-1.25.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:5a8d44add097e0a3a7c27e66a8ed0aa2fd561cda77381e818cf7862d4ad0f629"}, + {file = "grpcio_tools-1.25.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:902e13dbaca9733e4668928967b301526197ecffacb8c7a0acc0c7045de8836f"}, + {file = "grpcio_tools-1.25.0-cp27-cp27m-win32.whl", hash = "sha256:834564c2fba02c31179af081bd80aada8dfdcca52c80e241353f6063b6154bd2"}, + {file = 
"grpcio_tools-1.25.0-cp27-cp27m-win_amd64.whl", hash = "sha256:2f10226bfea4f947de355008b14fb4711c85fc1121570833a96f0e2cd8de580f"}, + {file = "grpcio_tools-1.25.0-cp27-cp27mu-linux_armv7l.whl", hash = "sha256:6e64214709f37b347875ac83cfed4e9cfd287f255dab2836521f591620412c40"}, + {file = "grpcio_tools-1.25.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:9a83d39e198cbed5d093f43790b92945ab74140357ec00e53ae13b421489ffb7"}, + {file = "grpcio_tools-1.25.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:cc950fb17c1172d0c0129e8c6e787206e7ef8c24a8e39005f8cc297e9faa4f9a"}, + {file = "grpcio_tools-1.25.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f550c94728b67a7eeddc35b03c99552f2d7aac09c52935ad4b0552d0843fd03c"}, + {file = "grpcio_tools-1.25.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:007c075eb9611379fa8f520a1865b9afd850469495b0e4a46e1349b2dc1744ce"}, + {file = "grpcio_tools-1.25.0-cp34-cp34m-linux_armv7l.whl", hash = "sha256:c5ad07adae3fe62761bc662c554c2734203f0f700616fc58138b852a7ef5e40e"}, + {file = "grpcio_tools-1.25.0-cp34-cp34m-macosx_10_9_intel.whl", hash = "sha256:dc17a8a8b39cb37380d927d4669882af4ccc7d3ee298a15a3004f4b18ecd2ac3"}, + {file = "grpcio_tools-1.25.0-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:4ce0261bd4426482a96467ed9ad8411417a6932c331a5bb35aa1907f618f34f6"}, + {file = "grpcio_tools-1.25.0-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:4a5c2b38078fc4b949e4e70f7e25cb80443d1ee9a648ce4223aa3c040a0d3b9b"}, + {file = "grpcio_tools-1.25.0-cp34-cp34m-manylinux2010_i686.whl", hash = "sha256:8b17347a90a14386641ffe57743bbb01a16a7149c95905364d3c8091ad377bd8"}, + {file = "grpcio_tools-1.25.0-cp34-cp34m-manylinux2010_x86_64.whl", hash = "sha256:818f2b8168760cf16e66fe85894a37afcff5378a64939549663a371216618498"}, + {file = "grpcio_tools-1.25.0-cp34-cp34m-win32.whl", hash = "sha256:6c029341132a0e64cbd2dba1dda9a125e06a798b9ec864569afdecce626dd5d5"}, + {file = "grpcio_tools-1.25.0-cp34-cp34m-win_amd64.whl", hash = 
"sha256:7d02755480cec3c0222f35397e810bfaf4cf9f2bf2e626f7f6efc1d40fffb7fa"}, + {file = "grpcio_tools-1.25.0-cp35-cp35m-linux_armv7l.whl", hash = "sha256:4b72b04cba6ecd1940d6eda07886f80fe71fb2e669f1095ebab58b1eb17a53fa"}, + {file = "grpcio_tools-1.25.0-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:314354c7321c84a6e176a99afe1945c933b8a38b4f837255c8decfef8d07f24e"}, + {file = "grpcio_tools-1.25.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:52aab4cbab10683f8830420c0b55ccdc6344702b4a0940913d71fe928dd731c9"}, + {file = "grpcio_tools-1.25.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:532a19419535a92a1b621222f70d6da7624151fe69afa4a1063be56e7a2b884a"}, + {file = "grpcio_tools-1.25.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:f7fc690a517c8f3765796ed005bb3273895a985a8593977291bad24568e018e3"}, + {file = "grpcio_tools-1.25.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:49e7682e505e6a1d35459dae1d8a616a08d5cfa6f05de00235aff2e15786af14"}, + {file = "grpcio_tools-1.25.0-cp35-cp35m-win32.whl", hash = "sha256:651b0441e8d8f302b44fb50397fe73dcd5e61b790533438e690055abdef3b234"}, + {file = "grpcio_tools-1.25.0-cp35-cp35m-win_amd64.whl", hash = "sha256:d3619b43009a5c82cb7ef11847518236140d7ffdcc6600e1a151b8b49350693a"}, + {file = "grpcio_tools-1.25.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:64f6027887e32a938f00b2344c337c6d4f7c4cf157ec2e84b1dd6b6fddad8e50"}, + {file = "grpcio_tools-1.25.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:5226371a2b569c62be0d0590ccff7bbb9566762f243933efbd4b695f9f108cd5"}, + {file = "grpcio_tools-1.25.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:6fb4739eb5eef051945b16b3c434d08653ea05f0313cf88495ced5d9db641745"}, + {file = "grpcio_tools-1.25.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:02ae9708bdd3f329b1abe1ee16b1d768b2dd7a036a8a57e342d08ee8ca054cec"}, + {file = "grpcio_tools-1.25.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:c40efc662fa037898488e31756242af68a8ab5729f939bc8c9ba259bc32e7d6a"}, + {file = 
"grpcio_tools-1.25.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:f258b32dffd27ef1eb5f5f01ebb115dfad07677b0510b41f786c511a62ded033"}, + {file = "grpcio_tools-1.25.0-cp36-cp36m-win32.whl", hash = "sha256:c871f5a89012ae44d9233305d74dfdd2059a78f0cb0303d38a4b6a562c6f9ba7"}, + {file = "grpcio_tools-1.25.0-cp36-cp36m-win_amd64.whl", hash = "sha256:4b40291d67a1fecb5170ed9ec32016e2ae07908a8fa143d2d37311b2bcbeb2c5"}, + {file = "grpcio_tools-1.25.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:406b530c283a2bb804a10ee97928290b0b60788cd114ddfce0faa681cccfe4b8"}, + {file = "grpcio_tools-1.25.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:79b5b1c172dafb0e76aa95bf572d4c7afc0bf97a1669b2228a0bc151071c4666"}, + {file = "grpcio_tools-1.25.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:bb8264ccf8ff904a1a396dc757ac1560b24f270b90e7dabb0ae3f637cb351bb3"}, + {file = "grpcio_tools-1.25.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:ac7649cff7354d2f04ebe2872f786a1d07547deded61f3d39036ebb569de91bc"}, + {file = "grpcio_tools-1.25.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:bbfb58f5c0aa27b599141bb5eacaf8116b55ad89bc5a2c3afd5e965d840ad341"}, + {file = "grpcio_tools-1.25.0-cp37-cp37m-win32.whl", hash = "sha256:b0ef0da2eec959def8ba508b2a763c492f1fb989446a422d1456ac17dc1b19f4"}, + {file = "grpcio_tools-1.25.0-cp37-cp37m-win_amd64.whl", hash = "sha256:eab3684ce9dec3a934a36ba79e8435210d07c50906425ab157eeb4b14503a925"}, + {file = "grpcio_tools-1.25.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:6f70fc9a82a0145296358720cf24f83a657a745e8b51ec9564f4c9e678c5b872"}, + {file = "grpcio_tools-1.25.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c765512cb5cb4afaf652837b8cc69229dee14c8e92f15a6ea0f4dfd646902dd2"}, + {file = "grpcio_tools-1.25.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:c1a482fdd8952a7f0098f78161a4deef8a500e54babef302548cd9f1e326d42c"}, + {file = "grpcio_tools-1.25.0-cp38-cp38-manylinux2010_x86_64.whl", hash = 
"sha256:b02701d40f1ccf16bc8c46f56bdbf89e03110bd8fd570c854e72299ce2920c35"}, +] + +[package.dependencies] +grpcio = ">=1.25.0" +protobuf = ">=3.5.0.post1" + +[[package]] +name = "idna" +version = "3.9" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.9-py3-none-any.whl", hash = "sha256:69297d5da0cc9281c77efffb4e730254dd45943f45bbfb461de5991713989b1e"}, + {file = "idna-3.9.tar.gz", hash = "sha256:e5c5dafde284f26e9e0f28f6ea2d6400abd5ca099864a67f576f3981c6476124"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "ipython" +version = "7.9.0" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.5" +files = [ + {file = "ipython-7.9.0-py3-none-any.whl", hash = "sha256:ed7ebe1cba899c1c3ccad6f7f1c2d2369464cc77dba8eebc65e2043e19cda995"}, + {file = "ipython-7.9.0.tar.gz", hash = "sha256:dfd303b270b7b5232b3d08bd30ec6fd685d8a58cabd54055e3d69d8f029f7280"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "sys_platform == \"darwin\""} +backcall = "*" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +jedi = ">=0.10" +pexpect = {version = "*", markers = "sys_platform != \"win32\""} +pickleshare = "*" +prompt-toolkit = ">=2.0.0,<2.1.0" +pygments = "*" +setuptools = ">=18.5" +traitlets = ">=4.2" + +[package.extras] +all = ["Sphinx (>=1.3)", "ipykernel", "ipyparallel", "ipywidgets", "nbconvert", 
"nbformat", "nose (>=0.10.1)", "notebook", "numpy", "pygments", "qtconsole", "requests", "testpath"] +doc = ["Sphinx (>=1.3)"] +kernel = ["ipykernel"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["ipykernel", "nbformat", "nose (>=0.10.1)", "numpy", "pygments", "requests", "testpath"] + +[[package]] +name = "ipython-genutils" +version = "0.2.0" +description = "Vestigial utilities from IPython" +optional = false +python-versions = "*" +files = [ + {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, + {file = "ipython_genutils-0.2.0.tar.gz", hash = "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"}, +] + +[[package]] +name = "jedi" +version = "0.15.1" +description = "An autocompletion tool for Python that can be used for text editors." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "jedi-0.15.1-py2.py3-none-any.whl", hash = "sha256:786b6c3d80e2f06fd77162a07fed81b8baa22dde5d62896a790a331d6ac21a27"}, + {file = "jedi-0.15.1.tar.gz", hash = "sha256:ba859c74fa3c966a22f2aeebe1b74ee27e2a462f56d3f5f7ca4a59af61bfe42e"}, +] + +[package.dependencies] +parso = ">=0.5.0" + +[package.extras] +testing = ["colorama", "docopt", "pytest (>=3.1.0,<5.0.0)"] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "parso" +version = "0.5.1" +description = "A Python Parser" +optional = false +python-versions = "*" +files = [ + {file = 
"parso-0.5.1-py2.py3-none-any.whl", hash = "sha256:63854233e1fadb5da97f2744b6b24346d2750b85965e7e399bec1620232797dc"}, + {file = "parso-0.5.1.tar.gz", hash = "sha256:666b0ee4a7a1220f65d367617f2cd3ffddff3e205f3f16a0284df30e774c2a9c"}, +] + +[package.extras] +testing = ["docopt", "pytest (>=3.0.7)"] + +[[package]] +name = "pexpect" +version = "4.7.0" +description = "Pexpect allows easy control of interactive console applications." +optional = false +python-versions = "*" +files = [ + {file = "pexpect-4.7.0-py2.py3-none-any.whl", hash = "sha256:2094eefdfcf37a1fdbfb9aa090862c1a4878e5c7e0e7e7088bdb511c558e5cd1"}, + {file = "pexpect-4.7.0.tar.gz", hash = "sha256:9e2c1fd0e6ee3a49b28f95d4b33bc389c89b20af6a1255906e90ff1262ce62eb"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pickleshare" +version = "0.7.5" +description = "Tiny 'shelve'-like database with concurrency support" +optional = false +python-versions = "*" +files = [ + {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, + {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "plumbum" +version = "1.8.0" +description = "Plumbum: shell combinators library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "plumbum-1.8.0-py3-none-any.whl", hash = 
"sha256:0f6b59c8a03bfcdddd1efc04a126062663348e892ce7ddef49ec60e47b9e2c09"}, + {file = "plumbum-1.8.0.tar.gz", hash = "sha256:f1da1f167a2afe731a85de3f56810f424926c0a1a8fd1999ceb2ef20b618246d"}, +] + +[package.dependencies] +pywin32 = {version = "*", markers = "platform_system == \"Windows\" and platform_python_implementation != \"PyPy\""} + +[package.extras] +dev = ["paramiko", "psutil", "pytest (>=6.0)", "pytest-cov", "pytest-mock", "pytest-timeout"] +docs = ["Sphinx (>=4.0.0)", "sphinx-rtd-theme (>=1.0.0)"] +ssh = ["paramiko"] + +[[package]] +name = "prompt-toolkit" +version = "2.0.9" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = "*" +files = [ + {file = "prompt_toolkit-2.0.9-py2-none-any.whl", hash = "sha256:977c6583ae813a37dc1c2e1b715892461fcbdaa57f6fc62f33a528c4886c8f55"}, + {file = "prompt_toolkit-2.0.9-py3-none-any.whl", hash = "sha256:11adf3389a996a6d45cc277580d0d53e8a5afd281d0c9ec71b28e6f121463780"}, + {file = "prompt_toolkit-2.0.9.tar.gz", hash = "sha256:2519ad1d8038fd5fc8e770362237ad0364d16a7650fb5724af6997ed5515e3c1"}, +] + +[package.dependencies] +six = ">=1.9.0" +wcwidth = "*" + +[[package]] +name = "protobuf" +version = "4.25.1" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-4.25.1-cp310-abi3-win32.whl", hash = "sha256:193f50a6ab78a970c9b4f148e7c750cfde64f59815e86f686c22e26b4fe01ce7"}, + {file = "protobuf-4.25.1-cp310-abi3-win_amd64.whl", hash = "sha256:3497c1af9f2526962f09329fd61a36566305e6c72da2590ae0d7d1322818843b"}, + {file = "protobuf-4.25.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:0bf384e75b92c42830c0a679b0cd4d6e2b36ae0cf3dbb1e1dfdda48a244f4bcd"}, + {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:0f881b589ff449bf0b931a711926e9ddaad3b35089cc039ce1af50b21a4ae8cb"}, + {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_x86_64.whl", hash = 
"sha256:ca37bf6a6d0046272c152eea90d2e4ef34593aaa32e8873fc14c16440f22d4b7"}, + {file = "protobuf-4.25.1-cp38-cp38-win32.whl", hash = "sha256:abc0525ae2689a8000837729eef7883b9391cd6aa7950249dcf5a4ede230d5dd"}, + {file = "protobuf-4.25.1-cp38-cp38-win_amd64.whl", hash = "sha256:1484f9e692091450e7edf418c939e15bfc8fc68856e36ce399aed6889dae8bb0"}, + {file = "protobuf-4.25.1-cp39-cp39-win32.whl", hash = "sha256:8bdbeaddaac52d15c6dce38c71b03038ef7772b977847eb6d374fc86636fa510"}, + {file = "protobuf-4.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:becc576b7e6b553d22cbdf418686ee4daa443d7217999125c045ad56322dda10"}, + {file = "protobuf-4.25.1-py3-none-any.whl", hash = "sha256:a19731d5e83ae4737bb2a089605e636077ac001d18781b3cf489b9546c7c80d6"}, + {file = "protobuf-4.25.1.tar.gz", hash = "sha256:57d65074b4f5baa4ab5da1605c02be90ac20c8b40fb137d6a8df9f416b0d0ce2"}, +] + +[[package]] +name = "psutil" +version = "5.6.3" +description = "Cross-platform lib for process and system monitoring in Python." +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "psutil-5.6.3-cp27-none-win32.whl", hash = "sha256:d5350cb66690915d60f8b233180f1e49938756fb2d501c93c44f8fb5b970cc63"}, + {file = "psutil-5.6.3-cp27-none-win_amd64.whl", hash = "sha256:b6e08f965a305cd84c2d07409bc16fbef4417d67b70c53b299116c5b895e3f45"}, + {file = "psutil-5.6.3-cp35-cp35m-win32.whl", hash = "sha256:cf49178021075d47c61c03c0229ac0c60d5e2830f8cab19e2d88e579b18cdb76"}, + {file = "psutil-5.6.3-cp35-cp35m-win_amd64.whl", hash = "sha256:bc96d437dfbb8865fc8828cf363450001cb04056bbdcdd6fc152c436c8a74c61"}, + {file = "psutil-5.6.3-cp36-cp36m-win32.whl", hash = "sha256:eba238cf1989dfff7d483c029acb0ac4fcbfc15de295d682901f0e2497e6781a"}, + {file = "psutil-5.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:954f782608bfef9ae9f78e660e065bd8ffcfaea780f9f2c8a133bb7cb9e826d7"}, + {file = "psutil-5.6.3-cp37-cp37m-win32.whl", hash = 
"sha256:503e4b20fa9d3342bcf58191bbc20a4a5ef79ca7df8972e6197cc14c5513e73d"}, + {file = "psutil-5.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:028a1ec3c6197eadd11e7b46e8cc2f0720dc18ac6d7aabdb8e8c0d6c9704f000"}, + {file = "psutil-5.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:12542c3642909f4cd1928a2fba59e16fa27e47cbeea60928ebb62a8cbd1ce123"}, + {file = "psutil-5.6.3.tar.gz", hash = "sha256:863a85c1c0a5103a12c05a35e59d336e1d665747e531256e061213e2e90f63f3"}, +] + +[package.extras] +enum = ["enum34"] + +[[package]] +name = "ptyprocess" +version = "0.6.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +files = [ + {file = "ptyprocess-0.6.0-py2.py3-none-any.whl", hash = "sha256:d7cc528d76e76342423ca640335bd3633420dc1366f258cb31d05e865ef5ca1f"}, + {file = "ptyprocess-0.6.0.tar.gz", hash = "sha256:923f299cc5ad920c68f2bc0bc98b75b9f838b93b599941a6b63ddbc2476394c0"}, +] + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pygments" +version = "2.4.2" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "Pygments-2.4.2-py2.py3-none-any.whl", hash = "sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127"}, + {file = "Pygments-2.4.2.tar.gz", hash = "sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297"}, +] + +[[package]] +name = "pytest" +version = "7.2.0" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.2.0-py3-none-any.whl", hash = "sha256:892f933d339f068883b6fd5a459f03d85bfcb355e4981e146d2c7616c21fef71"}, + {file = "pytest-7.2.0.tar.gz", hash = "sha256:c4014eb40e10f11f355ad4e3c2fb2c6c6d1919c73f3b5a433de4708202cade59"}, +] + +[package.dependencies] +attrs = ">=19.2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] + +[[package]] +name = "pywin32" +version = "306" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = 
"sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + +[[package]] +name = "real-easypy" +version = "0.4.3" +description = "easypy is a collection of python modules that makes developers happy" +optional = false +python-versions = "*" +files = [ + {file = "real-easypy-0.4.3.tar.gz", hash = "sha256:999aa9523bb881000115707bf32c8c596f507e3c6ec2e7796776adde2b18b4ef"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "setuptools" +version = "75.0.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-75.0.0-py3-none-any.whl", hash = "sha256:791ae94f04f78c880b5e614e560dd32d4b4af5d151bd9e7483e3377846caf90a"}, + {file = "setuptools-75.0.0.tar.gz", hash = "sha256:25af69c809d9334cd8e653d385277abeb5a102dca255954005a7092d282575ea"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", 
"pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] + +[[package]] +name = "six" +version = "1.13.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*" +files = [ + {file = "six-1.13.0-py2.py3-none-any.whl", hash = "sha256:1f1b7d42e254082a9db6279deae68afb421ceba6158efa6131de7b3003ee93fd"}, + {file = "six-1.13.0.tar.gz", hash = "sha256:30f610279e8b2578cab6db20741130331735c781b56053c59c4076da27f06b66"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "traitlets" +version = "4.3.3" +description = "Traitlets Python config system" +optional = false +python-versions = "*" +files = [ + {file = "traitlets-4.3.3-py2.py3-none-any.whl", hash = "sha256:70b4c6a1d9019d7b4f6846832288f86998aa3b9207c6821f3578a6a6a467fe44"}, + {file = "traitlets-4.3.3.tar.gz", hash = "sha256:d023ee369ddd2763310e4c3eae1ff649689440d4ae59d7485eb4cfbbe3e359f7"}, +] + +[package.dependencies] +decorator = "*" +ipython-genutils = "*" +six = "*" + +[package.extras] +test = ["mock", "pytest"] + +[[package]] +name = "urllib3" +version = "2.2.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "wcwidth" +version = "0.1.7" +description = "Measures number of Terminal column cells of wide-character codes" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.1.7-py2.py3-none-any.whl", hash = "sha256:f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c"}, + {file = "wcwidth-0.1.7.tar.gz", hash = "sha256:3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e"}, +] + +[metadata] +lock-version = "2.0" +python-versions = "^3.9" +content-hash = "d363536ae4caa148f5ad97954f335d0dfa6feea574215faca484be9ed9e8c8de" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..7dd05f31 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,40 @@ +[tool.poetry] +name = "vast-data-csi-driver" +version = "2.5.0" +description = "VAST CSI Driver allows container orchestration frameworks such as Kubernetes to dynamically provision storage volumes on a VAST cluster." 
+authors = ["Ofer Koren ", "Volodymyr Boiko "] +license = "Apache-2.0" +readme = "README.md" +package-mode = false + +[tool.poetry.dependencies] +python = "^3.9" +grpcio = "1.25.0" +grpcio-tools = "1.25.0" +ipython = "7.9.0" +ipython-genutils = "0.2.0" +prompt_toolkit = "2.0.9" +jedi = "0.15.1" +parso = "0.5.1" +pexpect = "4.7.0" +pickleshare = "0.7.5" +plumbum = "1.8.0" +protobuf = "4.25.1" +ptyprocess = "0.6.0" +Pygments = "2.4.2" +six = "1.13.0" +traitlets = "4.3.3" +wcwidth = "0.1.7" +real-easypy = "0.4.3" +psutil = "5.6.3" +requests = "2.32.3" +cryptography = "43.0.0" +pytest = "7.2.0" + +[build-system] +requires = ["poetry-core>=1.2.0"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +minversion = "6.0" +python_files = ["test_*.py", "*_test.py", "utest_*.py"] diff --git a/scripts/create-views.py b/scripts/create-views.py new file mode 100644 index 00000000..d1795fe0 --- /dev/null +++ b/scripts/create-views.py @@ -0,0 +1,306 @@ +""" +Script can be used to create missing views on existing PVs after migrating from CSI Driver 2.1. + +There are 2 prerequisites to execute script: + 1. Python version >= 3.6 is required + 2. kubectl utility should be installed and prepared to works with appropriate k8s cluster +""" +import re +import sys +import json +import base64 +import asyncio +from functools import partial +from typing import Optional, ClassVar +from argparse import ArgumentParser +import urllib.request +import urllib.parse +import ssl + +context = ssl._create_unverified_context() + +# Input and output markers. +# These markers are used to distinguish input commands and output text of these commands. 
+IN_ATTRS = 42, "in <<<" # color & label +OUT_ATTRS = 42, "out >>>" # color & label +INFO_ATTRS = 45, "info" # color & label ( used for any auxiliary information ) + + +def print_with_label(text: str, color: int, label: str, ): + spc = 8 - len(label) + label = label + ''.join(' ' for _ in range(spc)) if spc > 0 else label + print(f'\x1b[1;{color}m {label} \x1b[0m', text) + + +def is_ver_nfs4_present(mount_options: str) -> bool: + """Check if vers=4 or nfsvers=4 mount option is present in `mount_options` string""" + for opt in mount_options.split(","): + name, sep, value = opt.partition("=") + if name in ("vers", "nfsvers") and value.startswith("4"): + return True + return False + + +class UserError(Exception): + pass + + +class RestSession: + + def __init__(self, *args, auth, base_url, **kwargs): + super().__init__(*args, **kwargs) + self.base_url = base_url.rstrip("/") + auth = base64.b64encode(bytes('{}:{}'.format(*auth), "utf8")).decode() + self.headers = { + "content-type": "application/json", + "authorization": f"Basic {auth}" + } + + def _request(self, method, url, data={}): + if method == "get": + url += '?' 
+ urllib.parse.urlencode(data) + data = None + else: + data = json.dumps(data).encode() + req = urllib.request.Request(url, headers=self.headers, data=data) + with urllib.request.urlopen(req, context=context) as resp: + if resp.getcode() not in (200, 201): + raise UserError(f"Error occurred while requesting url {url}, reason: {resp.reason}") + return json.loads(resp.read().decode()) + + def get_view_policy(self, policy_name): + if not (res := self._request("get", f"{self.base_url}/viewpolicies", data=dict(name=policy_name))): + raise UserError(f"Provided view policy: {policy_name!r} doesn't exist") + return res[0]["id"] + + def get_quota_by_id(self, quota_id): + return self._request("get", f"{self.base_url}/quotas/{quota_id}") + + def get_view_by_path(self, path): + return self._request("get", f"{self.base_url}/views", dict(path=path)) + + def create_view(self, path, policy_id, protocol): + data = { + "path": path, + "create_dir": True, + "protocols": [protocol], + "policy_id": policy_id + } + self._request("post", f"{self.base_url}/views/", data) + + +class ExecutionFactory: + """Wrapper around SubprocessProtocol that allows to communicate with subprocess and store subprocess stdout.""" + + VERBOSE: ClassVar[bool] = True # Show full command output. + + def __init__(self, executor: "SubprocessProtocol"): + self.executor = executor + self.executor.factory = self + self.stdout = "" + + def __call__(self, base_command: Optional[str] = ""): + self.base_command = base_command + return self.executor + + async def exec(self, command: str, keep_output: Optional[bool] = False) -> str: + """ + Execute command. If 'base_command were provided during instantiation then final command is combination of + base_command + command. + Args: + command: command to execute + keep_output: flag indicates that command output must be suppressed (if True). Only stdout + will be suppressed in this case. + Returns: + Combined output (stdout + stderr) after process is terminated. 
+ """ + command = f"{self.base_command} {command}".strip() + + if self.VERBOSE: + color, label = IN_ATTRS + # Print input command + print_with_label(color=color, label=label, text=command) + + loop = asyncio.get_event_loop() + transport, prot = await loop.subprocess_shell(partial(self.executor, keep_output=keep_output), command) + # Wait process to complete. + await prot.wait() + transport.close() + return self.stdout.strip() + + +@ExecutionFactory +class SubprocessProtocol(asyncio.SubprocessProtocol): + + def __init__(self, keep_output: Optional[bool] = False): + """ + Args: + keep_output: Show command output only if this flag is False. + """ + super().__init__() + self.exit_sentinel = asyncio.Event() + self._factory_instance = self.factory + self._factory_instance.stdout = "" + self.keep_output = keep_output + + @classmethod + async def exec(cls, command: str, keep_output: Optional[bool] = False) -> str: + """ + Execute command in subprocess. + If you initialized executor with 'base_command' prefix make sure you provided only sub part of command. + Args: + command: command to execute + keep_output: flag indicates that command output must be suppressed (if True). Only stdout + will be suppressed in this case. + Returns: + Combined output (stdout + stderr) after process is terminated. + """ + return await cls.factory.exec(command=command, keep_output=keep_output) + + async def wait(self): + """Wait command is completed.""" + await self.exit_sentinel.wait() + + def pipe_data_received(self, fd: int, data: bytes): + """ + Called when the subprocess writes data into stdout/stderr pipe + Args: + fd: Integer file descriptor. 1 - stdout; 2 - stderr + data: Received byte data. + """ + verbose = self._factory_instance.VERBOSE + color, label = OUT_ATTRS + text = data.decode("utf-8") + self._factory_instance.stdout += text + + if int(fd) == 2 and self.keep_output: + # Use red color if file descriptor is stderr in order to highlight errors. 
+ text = f"\x1b[1;30;31m{text.strip()} \x1b[0m" + # Show full output in case of error. Do not suppress stderr output in order to have full visibility + # of error. + print_with_label(color=color, label=label, text=text) + + elif verbose: + if self.keep_output: + # Show command output + print_with_label(color=color, label=label, text=text) + + else: + # If flag 'keep_output' is True show '...' instead full stdout data. + print_with_label(color=color, label=label, text="...") + + def process_exited(self): + """Called when subprocess has exited.""" + self.exit_sentinel.set() + + +async def grab_required_params(): + """ + Interaction with user. Gathering required params values from command line arguments + """ + color, label = INFO_ATTRS + parser = ArgumentParser() + + parser.add_argument("--view-policy", default="default", + help="The name of the existing view policy that will be allocated to newly created views.") + parser.add_argument("--verbose", help="Show commands output.", default=False, action='store_true') + args = parser.parse_args() + print_with_label(color=color, label=label, text=f"The user has chosen following parameters: {vars(args)}") + return args + + +async def main() -> None: + """Main script entrypoint""" + color, label = INFO_ATTRS + _print = partial(print_with_label, color=color, label=label) + + # Grab user inputs (root_export and vip_pool_name) from command line arguments. + user_params = await grab_required_params() + + # Create base bash executor. + verbose = user_params.verbose + SubprocessProtocol.VERBOSE = verbose + bash_ex = SubprocessProtocol() + + # Get kubectl system path. + kubectl_path = await bash_ex.exec("which kubectl") + if not kubectl_path: + raise UserError("Unable to find 'kubectl' within system path. Make sure kubectl is installed.") + + # Prepare kubectl executor. 
+ kubectl_ex = SubprocessProtocol(base_command=kubectl_path) + + vers = await kubectl_ex.exec("version --client=true --output=yaml", verbose) + if "clientVersion" not in vers: + raise UserError("Something wrong with kubectl. Unable to get client version") + + namespaces = [ + ns["metadata"]["name"] for ns in json.loads(await kubectl_ex.exec(f"get ns -o json"))["items"] + ] + for namespace in namespaces: + try: + mgmt_secret = json.loads(await kubectl_ex.exec(f"get secret/csi-vast-mgmt -o json -n {namespace}", False)) + break + except json.JSONDecodeError: + pass + else: + _print(f"The CSI driver cannot be found in any of the available namespaces.") + sys.exit(1) + + all_pvs = json.loads(await kubectl_ex.exec("get pv -o json", False))["items"] + + username = base64.b64decode(mgmt_secret["data"]["username"]).decode('utf-8') + password = base64.b64decode(mgmt_secret["data"]["password"]).decode('utf-8') + + controller_info = json.loads( + await kubectl_ex.exec(f"get pod csi-vast-controller-0 -n {namespace} -o json", False)) + controller_env = { + env_pair["name"]: env_pair.get("value") + for container in controller_info["spec"]["containers"] + if container['name'] == 'csi-vast-plugin' + for env_pair in container["env"] + } + + session = RestSession(base_url=f'https://{controller_env["X_CSI_VMS_HOST"]}/api', auth=(username, password)) + policy_id = session.get_view_policy(user_params.view_policy) + _seen = set() + for pv in all_pvs: + pv_name = pv['metadata']['name'] + if pv["metadata"]["annotations"].get("pv.kubernetes.io/provisioned-by", "") != "csi.vastdata.com": + continue + quota_id = pv['spec']['csi']['volumeAttributes'].get("quota_id") + if not quota_id: + _print(f"PV {pv_name!r} is missing an expected 'quota_id' attribute; please consult with VAST support") + continue + if quota_id not in _seen: + _seen.add(quota_id) + quota_path = session.get_quota_by_id(quota_id)["path"] + if session.get_view_by_path(quota_path): + _print(f"View {quota_path} already exists") + 
else: + mount_options = pv["spec"].get("mountOptions", [""])[0] + mount_options = ",".join(re.sub(r"[\[\]]", "", mount_options).replace(",", " ").split()) + if is_ver_nfs4_present(mount_options): + protocol = "NFS4" + else: + protocol = "NFS" + session.create_view(quota_path, policy_id, protocol) + _print(f"View {quota_path} has been created") + + # Mark that pvc has been migrated from v2.1 to v2.2 of csi driver + await kubectl_ex.exec(f"annotate pv {pv_name} --overwrite=true csi.vastdata.com/migrated-from=2.1") + + +if __name__ == '__main__': + + if sys.version_info < (3, 6): + print("Make sure you're running script using version of python>=3.6") + sys.exit(1) + + loop = asyncio.get_event_loop() + + try: + loop.run_until_complete(main()) + except UserError as e: + print(e) + sys.exit(1) diff --git a/scripts/img_to_digest.sh b/scripts/img_to_digest.sh new file mode 100755 index 00000000..978e5951 --- /dev/null +++ b/scripts/img_to_digest.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Check if the image tag was provided +if [ "$#" -ne 1 ]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +IMAGE_TAG=$1 + +# Pull the image and suppress output +docker pull "$IMAGE_TAG" >/dev/null 2>&1 + +# Extract the image digest +DIGEST=$(docker inspect --format='{{index .RepoDigests 0}}' "$IMAGE_TAG" | awk -F '@' '{print $2}' 2>/dev/null) + +# Check if digest extraction was successful +if [ -z "$DIGEST" ]; then + echo "Failed to retrieve digest for image: $IMAGE_TAG" >&2 + exit 1 +fi + +echo "$DIGEST" \ No newline at end of file diff --git a/scripts/migrate-pv.py b/scripts/migrate-pv.py new file mode 100644 index 00000000..98686378 --- /dev/null +++ b/scripts/migrate-pv.py @@ -0,0 +1,379 @@ +""" +Migration process from csi driver==2.0 to csi driver==2.1 +This script check if K8s cluster contains any PVs with storageClassName" == "vastdata-filesystem" and if +These PVs have all necessary parameters to works with driver 2.1. + +There are 2 prerequisites to execute script: + 1. 
Python version >= 3.6 is required + 2. kubectl utility should be installed and prepared to works with appropriate k8s cluster + +Usage: + python migrate-pv.py --vast-csi-namespace < your input > --verbose < True|False > +""" +import os +import sys +import json +import asyncio +from pathlib import Path +from functools import partial +from tempfile import gettempdir +from argparse import ArgumentParser, Namespace +from typing import Optional, Dict, Any, List, ClassVar + +TMP = Path(gettempdir()) + +# Input and output markers. +# These markers are used to distinguish input commands and output text of these commands. +IN_ATTRS = 47, "in <<<" # color & label +OUT_ATTRS = 42, "out >>>" # color & label +INFO_ATTRS = 45, "info" # color & label ( used for any auxiliary information ) + +# Filter criteria by storage class name. +# We should skip PersistentVolumes where +# "pv.kubernetes.io/provisioned-by" annotation is not equal to csi.vastdata.com +VAST_PROVISIONER = "csi.vastdata.com" +VAST_DRIVER_VERSION = "2.0" + +# These fields are required for csi driver 2.1 to work properly. If at least one parameter is missing it means +# PV must be updated. +REQUIRED_PARAMETERS = {"root_export", "vip_pool_name"} + + +def print_with_label(color: int, label: str, text: str): + spc = 8 - len(label) + label = label + ''.join(' ' for _ in range(spc)) if spc > 0 else label + print(f'\x1b[1;{color}m {label} \x1b[0m', text) + + +class UserError(Exception): + pass + + +class ExecutionFactory: + """Wrapper around SubprocessProtocol that allows to communicate with subprocess and store subprocess stdout.""" + + VERBOSE: ClassVar[bool] = True # Show full command output. 
+ + def __init__(self, executor: "SubprocessProtocol"): + self.executor = executor + self.executor.factory = self + self.stdout = "" + + def __call__(self, base_command: Optional[str] = ""): + self.base_command = base_command + return self.executor + + async def exec(self, command: str, supress_output: Optional[bool] = False) -> str: + """ + Execute command. If 'base_command were provided during instantiation then final command is combination of + base_command + command. + Args: + command: command to execute + supress_output: flag indicates that command output must be suppressed (if True). Only stdout + will be suppressed in this case. + Returns: + Combined output (stdout + stderr) after process is terminated. + """ + command = f"{self.base_command} {command}".strip() + + if self.VERBOSE: + color, label = IN_ATTRS + # Print input command + print_with_label(color=color, label=label, text=command) + + loop = asyncio.get_event_loop() + transport, prot = await loop.subprocess_shell(partial(self.executor, supress_output=supress_output), command) + # Wait process to complete. + await prot.wait() + transport.close() + return self.stdout.strip() + + +@ExecutionFactory +class SubprocessProtocol(asyncio.SubprocessProtocol): + + def __init__(self, supress_output: Optional[bool] = False): + """ + Args: + supress_output: Show command output only if this flag is False. + """ + super().__init__() + self.exit_sentinel = asyncio.Event() + self._factory_instance = self.factory + self._factory_instance.stdout = "" + self.supress_output = supress_output + + @classmethod + async def exec(cls, command: str, supress_output: Optional[bool] = False) -> str: + """ + Execute command in subprocess. + If you initialized executor with 'base_command' prefix make sure you provided only sub part of command. + Args: + command: command to execute + supress_output: flag indicates that command output must be suppressed (if True). Only stdout + will be suppressed in this case. 
+ Returns: + Combined output (stdout + stderr) after process is terminated. + """ + return await cls.factory.exec(command=command, supress_output=supress_output) + + async def wait(self): + """Wait command is completed.""" + await self.exit_sentinel.wait() + + def pipe_data_received(self, fd: int, data: bytes): + """ + Called when the subprocess writes data into stdout/stderr pipe + Args: + fd: Integer file descriptor. 1 - stdout; 2 - stderr + data: Received byte data. + """ + verbose = self._factory_instance.VERBOSE + color, label = OUT_ATTRS + text = data.decode("utf-8") + self._factory_instance.stdout += text + + if int(fd) == 2: + # Use red color if file descriptor is stderr in order to highlight errors. + text = f"\x1b[1;30;31m{text.strip()} \x1b[0m" + # Show full output in case of error. Do not suppress stderr output in order to have full visibility + # of error. + print_with_label(color=color, label=label, text=text) + + elif verbose: + if not self.supress_output: + # Show command output + print_with_label(color=color, label=label, text=text) + + else: + # If flag 'supress_output' is True show '...' instead full stdout data. + print_with_label(color=color, label=label, text="...") + + def process_exited(self): + """Called when subprocess has exited.""" + self.exit_sentinel.set() + + +async def grab_required_params() -> Namespace: + """ + Interaction with user. 
Gathering required params values from command line arguments + """ + color, label = INFO_ATTRS + parser = ArgumentParser() + + parser.add_argument("--vast-csi-namespace", default="vast-csi", help="Namespace where csi driver was deployed.") + parser.add_argument("--verbose", help="Show commands output.", default=False) + parser.add_argument("--root_export", help="Base path where volumes will be located on VAST") + parser.add_argument("--vip_pool_name", help="Name of VAST VIP pool to use") + parser.add_argument("--mount_options", help="Custom NFS mount options, comma-separated (specify '' for no mount options).", default="") + parser.add_argument( + "--force", + help="Forced migration - refer to Vast Support documentation on when to use this flag", + action='store_true') + + args = parser.parse_args() + print_with_label(color=color, label=label, text=f"The user has chosen following parameters: {vars(args)}") + return args + + +async def patch_terminated_pv(pv_name: str, executor: SubprocessProtocol) -> None: + """Delete finalizers from provided PV where PV has 'Terminating' state.""" + while True: + await asyncio.sleep(.5) + if await executor.exec(f"get pv {pv_name} | grep Terminating"): + await executor.exec(f"patch pv {pv_name} " + "-p '{\"metadata\":{\"finalizers\":null}}'") + return + + +async def process_migrate( + candidates: List[Dict[str, Any]], + user_params: Namespace, + executor: SubprocessProtocol, + loop: asyncio.AbstractEventLoop) -> None: + """ + Main migration process. + 1. Update manifest of candidate PV with missing params + 2. Write manifest to temporary file + 3. Remove all finalizers from PV + 4. replace PV resource on kubernetes from temporary file. + 5. 
Add annotation csi.vastdata.com/migrated-from=2.0 + """ + color, label = INFO_ATTRS + _print = partial(print_with_label, color=color, label=label) + + if user_params.force: + root_export = user_params.root_export + vip_pool_name = user_params.vip_pool_name + mount_options = user_params.mount_options + + else: + csi_namespace = user_params.vast_csi_namespace + controller_info = await executor.exec(f"get pod csi-vast-controller-0 -n {csi_namespace} -o json", True) + try: + controller_info = json.loads(controller_info) + except json.decoder.JSONDecodeError: + # In case of 'Error from server (NotFound) ...' + raise UserError(f"Could not find our csi driver in namespace '{csi_namespace}'" + f" - please verify that the 'csi-vast-controller-0' is deployed in your cluster.\n" + f"If you've used a different namespace, specify it using the --vast-csi-namespace flag.") + + controller_env = { + env_pair["name"]: env_pair.get("value") + for container in controller_info["spec"]["containers"] + if container['name'] == 'csi-vast-plugin' + for env_pair in container["env"] + } + + nodes_info = await executor.exec(f"get pod -l app=csi-vast-node -n {csi_namespace} -o json", True) + try: + nodes_info = json.loads(nodes_info) + node_info = nodes_info['items'][0] + except (json.decoder.JSONDecodeError, KeyError, IndexError): + # In case of 'Error from server (NotFound) ...' 
+ raise UserError(f"Could not find our csi driver nodes in namespace '{csi_namespace}'.\n" + f"If you've used a different namespace, specify it using the --vast-csi-namespace flag.") + + node_env = { + env_pair["name"]: env_pair.get("value") + for container in node_info["spec"]["containers"] + if container['name'] == 'csi-vast-plugin' + for env_pair in container["env"] + } + + root_export = controller_env.get("X_CSI_NFS_EXPORT") + vip_pool_name = controller_env.get("X_CSI_VIP_POOL_NAME") + mount_options = node_env.get("X_CSI_MOUNT_OPTIONS") or "" + + if not root_export or not vip_pool_name: + raise UserError( + "It looks like you've already upgraded your Vast CSI Driver - " + "Please refer to Vast Support documentation on how to use this script in 'post-upgrade' mode.") + + patch_params = { + "root_export": root_export, + "vip_pool_name": vip_pool_name, + "mount_options": mount_options, + "schema": "2" + } + + _print(text=f"Parameters for migration: {patch_params}") + + for candidate in candidates: + pv_name = candidate['metadata']['name'] + pvc_name = candidate['spec'].get('claimRef', {}).get('name') + pv_manifest = TMP / f"{pv_name}.json" + + patch_params['export_path'] = os.path.join(root_export, pv_name) + candidate["spec"]["csi"]["volumeAttributes"].update(patch_params) + if mount_options: + candidate["spec"]["mountOptions"] = mount_options.split(",") + + with pv_manifest.open("w") as f: + json.dump(candidate, f) + + # Add custom finalizer "vastdata.com/pv-migration-protection" in order to protect PV from being deleted + # by csi driver at the moment of patching. + await executor.exec( + f"patch pv {pv_name} " + "-p '{\"metadata\":{\"finalizers\":[\"vastdata.com/pv-migration-protection\"]}}'") + + # Run task that remove all finalizers in the background. + loop.create_task(patch_terminated_pv(pv_name=pv_name, executor=executor)) + + # Replace original PV resource with patched version. 
+ await executor.exec(f"replace -f {pv_manifest} --force") + # Add new annotation to existing PV's annotations. Use --overwrite=true in case migration key already exist. + await executor.exec(f"annotate pv {pv_name} --overwrite=true csi.vastdata.com/migrated-from=2.0") + _print(text=f"PV {pv_name} updated.") + + if pvc_name: + # Remove PVC events about "Lost" status + await asyncio.sleep(5) + pvc_events = await executor.exec(f'get events --field-selector involvedObject.name={pvc_name} -o json') + pvc_events = json.loads(pvc_events)['items'] + + for event in pvc_events: + if 'Bound claim has lost its PersistentVolume' in event['message']: + event_name = event['metadata']['name'] + await executor.exec(f'delete event {event_name}') + + +async def main(loop: asyncio.AbstractEventLoop) -> None: + """Main script entrypoint""" + color, label = INFO_ATTRS + _print = partial(print_with_label, color=color, label=label) + + # Grab user inputs (root_export and vip_pool_name) from command line arguments. + user_params = await grab_required_params() + + force_migrate = user_params.force + + if force_migrate and not all([ + user_params.root_export, + user_params.vip_pool_name, + user_params.mount_options is not None]): + raise UserError( + "--vip_pool_name, --root_export and --mount_options must be provided if you're using --force flag") + + # Set output verbosity + SubprocessProtocol.VERBOSE = user_params.verbose + + # Create base bash executor. + bash_ex = SubprocessProtocol() + + # Get kubectl system path. + kubectl_path = await bash_ex.exec("which kubectl") + if not kubectl_path: + raise UserError("Unable to find 'kubectl' within system path. Make sure kubectl is installed.") + + # Prepare kubectl executor. + kubectl_ex = SubprocessProtocol(base_command=kubectl_path) + + vers = await kubectl_ex.exec("version --client=true --output=yaml") + if "clientVersion" not in vers: + raise UserError("Something wrong with kubectl. 
Unable to get client version") + + all_pvs = await kubectl_ex.exec("get pv -o json", True) + all_pvs = json.loads(all_pvs)["items"] + + candidates = [] + for pv in all_pvs: + pv_annotations = pv["metadata"].get("annotations", {}) + + if pv_annotations.get("pv.kubernetes.io/provisioned-by", "") != VAST_PROVISIONER: + continue + + pv_spec = pv["spec"] + volume_attributes = pv_spec["csi"]["volumeAttributes"] + missing_params = REQUIRED_PARAMETERS.difference(volume_attributes) + + if pv_annotations.get("csi.vastdata.com/migrated-from") == VAST_DRIVER_VERSION and force_migrate: + # Force migrate. Assumed previous parameters will be overwritten. + _print(text=f"PV {pv['metadata']['name']} will be patched (re-migrating)") + candidates.append(pv) + + elif missing_params: + # Regular migrate. Assumed only PVs with missing required parameters will be updated. + _print(text=f"PV {pv['metadata']['name']} will be patched {', '.join(missing_params)}") + candidates.append(pv) + + # Start migration process + if candidates: + await process_migrate(candidates=candidates, user_params=user_params, executor=kubectl_ex, loop=loop) + else: + _print(text="No outdated PVs found.") + + +if __name__ == '__main__': + + if sys.version_info < (3, 6): + print("Make sure you're running script using version of python>=3.6") + sys.exit(1) + + loop = asyncio.get_event_loop() + + try: + loop.run_until_complete(main(loop)) + except UserError as e: + print(e) + sys.exit(1) diff --git a/scripts/migrate_vast_cluster.py b/scripts/migrate_vast_cluster.py new file mode 100644 index 00000000..d64b0e14 --- /dev/null +++ b/scripts/migrate_vast_cluster.py @@ -0,0 +1,204 @@ +""" +Script for migrating data between two VAST storages. 
+ +Usage: + python migrate_vast_cluster.py --source-host --destination-host \ + --source-username --source-password --destination-username --destination-password --base-path=/k8s + +Example: + python migrate_vast_cluster.py --source-host 10.27.113.27 --destination-host 10.91.5.242 --source-username admin --destination-username admin --source-password 123456 --destination-password 123456 --base-path=/k8s [--dry-run] +""" +import sys +import requests +import urllib3 +import logging +import argparse + +# Set up logging +logging.basicConfig(level=logging.INFO) +urllib3.disable_warnings() + + +class NoResourceFound(Exception): + pass + + +class RestSession(requests.Session): + + def __init__(self, endpoint, username, password): + super().__init__() + self.verify = False + self.auth = (username, password) + self.base_url = f"https://{endpoint}/api/" + self.headers["Accept"] = "application/json" + self.headers["Content-Type"] = "application/json" + + def __getattr__(self, attr): + if attr.startswith("_"): + raise AttributeError(attr) + + def func(*args, **params): + res = self.request("get", f"{self.base_url}/{attr}", *args, timeout=10, params=params) + res.raise_for_status() + return res.json() + + func.__name__ = attr + func.__qualname__ = f"{self.__class__.__qualname__}.{attr}" + setattr(self, attr, func) + return func + + def get_tenant(self, tenant_name): + tenants = self.tenants(name=tenant_name) + if tenants: + return tenants[0] + raise NoResourceFound(f"No such tenant {tenant_name}") + + def get_view_policy(self, policy_name: str): + """Get view policy by name. 
Raise exception if not found.""" + viewpolicies = self.viewpolicies(name=policy_name) + if viewpolicies: + return viewpolicies[0] + raise NoResourceFound(f"No such view policy {policy_name}") + + def get_view(self, path): + views = self.views(path__contains=path) + if views: + return views[0] + raise NoResourceFound(f"No such view path {path}") + + def get_views_by_pref(self, pref): + return self.views(path__startswith=pref) + + def ensure_view(self, path, protocols, policy_id, tenant_id): + try: + self.get_view(path=path) + except NoResourceFound: + self.create_view( + path=path, protocols=protocols, policy_id=policy_id, tenant_id=tenant_id + ) + + def create_view(self, path, protocols, policy_id, tenant_id, create_dir=True): + data = { + "path": path, "create_dir": create_dir, + "protocols": protocols, "policy_id": policy_id, "tenant_id": tenant_id + } + res = self.post(f"{self.base_url}/views/", json=data) + res.raise_for_status() + + def get_vip_pool(self, vip_pool_name): + vip_pools = self.vippools(name=vip_pool_name) + if vip_pools: + return vip_pools[0] + raise NoResourceFound(f"No such vip pool {vip_pool_name}") + + def get_quota(self, path): + quotas = self.quotas(path__contains=path) + if quotas: + return quotas[0] + raise NoResourceFound(f"No such quota path {path}") + + def ensure_quota(self, path, name, tenant_id, hard_limit): + try: + self.get_quota(path=path) + except NoResourceFound: + self.create_quota(path=path, name=name, tenant_id=tenant_id, hard_limit=hard_limit) + + def create_quota(self, path, name, tenant_id, hard_limit): + data = { + "path": path, "name": name, + "tenant_id": tenant_id, "hard_limit": hard_limit + } + res = self.post(f"{self.base_url}/quotas/", json=data) + res.raise_for_status() + + def get_qos_policy(self, policy_name): + """Get QoS policy by name. 
Raise exception if not found.""" + qos_policies = self.qospolicies(name=policy_name) + if qos_policies: + return qos_policies[0] + raise NoResourceFound(f"No such QOS policy {policy_name}") + + +def parse_arguments(): + parser = argparse.ArgumentParser(description="VCSI Migration - Recreate Quotas and Views for PVCs on a target Vast Cluster") + parser.add_argument('--source-host', type=str, required=True, help='Source host IP or domain') + parser.add_argument('--source-username', type=str, required=True, help='Source username') + parser.add_argument('--source-password', type=str, required=True, help='Source password') + parser.add_argument('--destination-host', type=str, required=True, help='Destination host IP or domain') + parser.add_argument('--destination-username', type=str, required=True, help='Destination username') + parser.add_argument('--destination-password', type=str, required=True, help='Destination password') + parser.add_argument('--base-path', type=str, required=True, help='The root path for CSI related views (root_export)') + parser.add_argument('--dry-run', action="store_true", help='Run in dry-run mode') + args = parser.parse_args() + return args + + +if __name__ == "__main__": + args = parse_arguments() + source_host = args.source_host + source_username = args.source_username + source_password = args.source_password + destination_host = args.destination_host + destination_username = args.destination_username + destination_password = args.destination_password + base_path = args.base_path + dry_run = args.dry_run + + source_session = RestSession(endpoint=source_host, username=source_username, password=source_password) + destination_session = RestSession(endpoint=destination_host, username=destination_username, password=destination_password) + + not_found = False + vip_pools = source_session.vippools() + for vip_pool in vip_pools: + try: + destination_session.get_vip_pool(vip_pool["name"]) + except NoResourceFound: + not_found = True + 
logging.warning(f"No such vip pool {vip_pool['name']}") + + if not_found: + res = input("Missing vip pools detected. Do you want to proceed? [y/N]") + if res.lower() not in ("y", "yes"): + sys.exit(0) + + policies_mapping = {} + tenants_mapping = {} + + views = source_session.get_views_by_pref(base_path) + for view in views: + tenant = view["tenant_name"] + protocols = view["protocols"] + policy = view["policy"] + path = view["path"] + try: + quota = source_session.get_quota(path=path) + except NoResourceFound: + logging.warning(f"No such quota path {path}. Skipping sync for view.") + continue + + tenant_id = tenants_mapping.get(tenant) + if not tenant_id: + tenant_id = destination_session.get_tenant(tenant)["id"] + tenants_mapping[tenant] = tenant_id + + policy_id = policies_mapping.get(policy) + if not policy_id: + policy_id = destination_session.get_view_policy(policy)["id"] + + logging.info(f"Syncing view {path}") + if dry_run: + logging.info(f"Dry-run: (View: {path=}, {protocols=}, {policy_id=}, {tenant_id=})") + else: + destination_session.ensure_view(path=path, protocols=protocols, policy_id=policy_id, tenant_id=tenant_id) + + quota_name = quota["name"] + hard_limit = quota["hard_limit"] + + logging.info(f"Syncing quota {quota_name}") + if dry_run: + logging.info(f"Dry-run: (Quota: {path=}, {quota_name=}, {tenant_id=}, {hard_limit=})") + else: + destination_session.ensure_quota(path=path, name=quota_name, tenant_id=tenant_id, hard_limit=hard_limit) + + logging.info("Completed!") + logging.info(f"Total views: {len(views)}") diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..1267b335 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,166 @@ +import sys +import inspect +from pathlib import Path +from tempfile import gettempdir +from contextlib import contextmanager +from typing import List, Optional, Any +from unittest.mock import MagicMock, patch + +import pytest +from plumbum import local +from easypy.aliasing import aliases 
+from easypy.bunch import Bunch + +ROOT = Path(__file__).resolve().parents[1] +# Extend python import path to get vast_csi package from here +sys.path += [ROOT.as_posix()] + +with local.cwd(gettempdir()) as tempdir: + # Temporary change working directory and create version.info file in order to allow reading + # driver name, version and git commit by Config. + tempdir["version.info"].open("w").write("csi.vastdata.com v0.0.0 #### local") + from vast_csi.server import CsiController, CsiNode, CosiProvisioner, Config + import vast_csi.csi_types as types + +# Restore original methods on Controller and Node in order to get rid of Instrumented logging layer. +for cls in (CsiController, CsiNode, CosiProvisioner): + for name, _ in inspect.getmembers(cls.__base__, inspect.isfunction): + if name.startswith("_"): + continue + func = getattr(cls, name) + setattr(cls, name, func.__wrapped__) + # Simulate getting __wrapped__ context from function. This logic is used in csi driver so tests should also + # support this. 
+ setattr(func, "__wrapped__", func.__wrapped__) + +# Load configuration +import vast_csi.server + +vast_csi.server.CONF = Config() + + +# ---------------------------------------------------------------------------------------------------------------------- +# Helper classes and decorators +# ---------------------------------------------------------------------------------------------------------------------- + + +class FakeQuota: + """Simulate quota attributes""" + + def __init__(self, hard_limit: int, quota_id: int, tenant_id): + self._hard_limit = hard_limit + self._id = quota_id + self.tenant_id = tenant_id + self.tenant_name = "test" + + @property + def id(self): + return self._id + + @property + def hard_limit(self): + return self._hard_limit + + +@aliases("mock", static=False) +class FakeSessionMethod: + """ + Method of FakeSession that enhances all methods of decorated class with + MagicMock capabilities eg: 'assert_called', 'call_args', 'assert_called_with' etc. + """ + + def __init__(self, return_value: Optional = None, return_condition: Optional[bool] = True): + # Mock to stare all execution calls + self.mock = MagicMock() + self.return_value = return_value + self.return_condition = return_condition + + def __call__(self, *args, **kwargs) -> Any: + self.mock(*args, **kwargs) + if self.return_condition: + return self.return_value + + +class FakeSession: + """Simulate VAST session behavior""" + + def __init__(self, + view: Optional[Bunch] = Bunch(path="/test/view", id=1, tenant_id=1, tenant_name="default"), + quota_id: Optional[int] = 1, + quota_hard_limit: Optional[int] = 1000 + ): + """ + Args: + view: Returned view path by 'get_view_by_path' method. + Use this variable to control returned view. Specify None to simulate 'view doesn't exist` behavior + quota_id: Id of returned quota by 'get_quota' method + quota_hard_limit: Hard limit of quota returned by 'get_quota' method + Use this variable to control returned quota. 
Specify None to simulate 'quota doesn't exist` behavior + """ + self.view = view + self.quota_id = quota_id + self.quota_hard_limit = quota_hard_limit + + # Methods declaration + self.get_quota = FakeSessionMethod( + return_value=FakeQuota(self.quota_hard_limit, self.quota_id, tenant_id=1), + return_condition=self.quota_hard_limit is not None) + self.ensure_quota = FakeSessionMethod( + return_value=FakeQuota(self.quota_hard_limit, self.quota_id, tenant_id=1), + return_condition=self.quota_hard_limit is not None) + self.get_view_by_path = FakeSessionMethod(return_value=self.view) + self.ensure_view = FakeSessionMethod(return_value=self.view) + self.get_view = FakeSessionMethod(return_value=self.view) + self.get_vip = FakeSessionMethod(return_value="127.0.0.1") + + +# ---------------------------------------------------------------------------------------------------------------------- +# Fixtures +# ---------------------------------------------------------------------------------------------------------------------- + +@pytest.fixture +def vms_session(monkeypatch, tmpdir): + from vast_csi.server import get_vms_session + from vast_csi.configuration import Config + tmpdir.join("username").write("test") + tmpdir.join("password").write("test") + monkeypatch.setattr(Config, "vms_credentials_store", local.path(tmpdir)) + with patch("vast_csi.vms_session.VmsSession.refresh_auth_token", MagicMock()): + get_vms_session.cache_clear() + yield get_vms_session() + + +@pytest.fixture +def volume_capabilities(): + """Factory for building VolumeCapabilities""" + + def __wrapped( + fs_type: str, mount_flags: str, mode: types.AccessModeType + ) -> List[types.VolumeCapability]: + return [ + types.VolumeCapability( + mount=types.MountVolume(fs_type=fs_type, mount_flags=mount_flags), + access_mode=types.AccessMode(mode=mode), + ) + ] + + return __wrapped + + +@pytest.fixture +def fake_session(): + """ + FakeSession factory. 
+ Use this fixture as context manager to mock original vms session. + """ + + @contextmanager + def __wrapped( + quota_id: Optional[int] = 1, + quota_hard_limit: Optional[int] = 1000, + view: Optional[str] = Bunch(path="/test/view", id=1, tenant_id=1) + ): + session_mock = FakeSession(view=view, quota_id=quota_id, quota_hard_limit=quota_hard_limit) + yield session_mock + + yield __wrapped diff --git a/tests/test_controller.py b/tests/test_controller.py new file mode 100644 index 00000000..37052642 --- /dev/null +++ b/tests/test_controller.py @@ -0,0 +1,224 @@ +import re +import uuid +import pytest +from unittest.mock import patch, MagicMock +from vast_csi.server import CsiController, Abort, MissingParameter + +import grpc +import vast_csi.csi_types as types +from easypy.bunch import Bunch + + +class TestControllerSuite: + + @pytest.mark.parametrize("fs_type, mount_flags, mode, err_message", [ + ("abc", "abc", types.AccessModeType.SINGLE_NODE_WRITER, "Unsupported file system type: abc"), + ("ext4", "", types.AccessModeType.MULTI_NODE_SINGLE_WRITER, "Unsupported access mode: 4 (use [1, 2, 3, 5])"), + ]) + def test_create_volume_invalid_capability(self, volume_capabilities, fs_type, mount_flags, mode, err_message): + """Test invalid VolumeCapabilities must be validated""" + # Preparation + cont = CsiController() + capabilities = volume_capabilities(fs_type=fs_type, mount_flags=mount_flags, mode=mode) + + # Execution + with pytest.raises(Abort) as ex_context: + cont.CreateVolume(None,"test_volume", capabilities) + + # Assertion + err = ex_context.value + assert err.message == err_message + assert err.code == grpc.StatusCode.INVALID_ARGUMENT + + @pytest.mark.parametrize("parameters, err_message", [ + (dict(view_policy="default", vip_pool_name="vippool-1"), "Parameter 'root_export' cannot be empty"), + (dict(root_export="/k8s", vip_pool_name="vippool-1"), "Parameter 'view_policy' cannot be empty"), + ]) + def test_validate_parameters(self, volume_capabilities, 
parameters, err_message): + """Test all required parameters must be provided""" + # Preparation + cont = CsiController() + capabilities = volume_capabilities(fs_type="ext4", mount_flags="", mode=types.AccessModeType.SINGLE_NODE_WRITER) + + # Execution + with pytest.raises(MissingParameter) as ex_context: + cont.CreateVolume(None, name="test_volume", volume_capabilities=capabilities, parameters=parameters) + + # Assertion + err = ex_context.value + assert err_message in err.message + assert err.code == grpc.StatusCode.INVALID_ARGUMENT + + def test_local_ip_for_mount(self, volume_capabilities, vms_session, monkeypatch): + # Preparation + cont = CsiController() + monkeypatch.setattr(vms_session.config, "use_local_ip_for_mount", "test.com") + data = dict(root_export="/k8s", view_policy="default") + capabilities = volume_capabilities(fs_type="ext4", mount_flags="", mode=types.AccessModeType.SINGLE_NODE_WRITER) + + # Execution + with pytest.raises(Abort) as ex_context: + cont.CreateVolume(vms_session=vms_session, name="test_volume", volume_capabilities=capabilities, parameters=data) + + # Assertion + err = ex_context.value + assert "Local IP address: test.com is invalid" in err.message + assert err.code == grpc.StatusCode.INVALID_ARGUMENT + + # Execution + monkeypatch.setattr(vms_session.config, "use_local_ip_for_mount", "") + with pytest.raises(Abort) as ex_context: + cont.CreateVolume(vms_session=vms_session, name="test_volume", volume_capabilities=capabilities, parameters=data) + + # Assertion + err = ex_context.value + assert "either vip_pool_name, vip_pool_fqdn or use_local_ip_for_mount" in err.message + assert err.code == grpc.StatusCode.INVALID_ARGUMENT + + def test_quota_hard_limit_not_match(self, volume_capabilities, vms_session): + """Test quota exists but provided capacity doesnt match""" + # Preparation + cont = CsiController() + parameters = dict(root_export="/foo/bar", view_policy="default", vip_pool_name="vippool-1") + capabilities = 
volume_capabilities(fs_type="ext4", mount_flags="", mode=types.AccessModeType.SINGLE_NODE_WRITER) + vms_session.ensure_view = MagicMock() + vms_session.get_quota = MagicMock(return_value=Bunch(tenant_id=1, hard_limit=999)) + + + # Execution + with pytest.raises(Exception) as ex_context: + cont.CreateVolume( + vms_session=vms_session, name="test_volume", + volume_capabilities=capabilities, parameters=parameters, capacity_range=Bunch(required_bytes=1000) + ) + # Assertion + err = ex_context.value + assert str(err) == "Volume already exists with different capacity than requested (999)" + assert vms_session.ensure_view.call_count == 1 + assert vms_session.get_quota.call_count == 1 + assert vms_session.ensure_view.call_args.args == () + assert vms_session.get_quota.call_args.kwargs["path"] == "/foo/bar/test_volume" + + @pytest.mark.parametrize("raw_mount_options", [ + "[vers=4 , nolock, proto=tcp, nconnect=4]", + "[vers=4 nolock proto=tcp nconnect=4]", + "[vers=4,nolock,proto=tcp,nconnect=4]", + "vers=4 , nolock, proto=tcp, nconnect=4", + "vers=4 nolock proto=tcp nconnect=4", + "vers=4,nolock,proto=tcp,nconnect=4", + ]) + def test_parse_mount_options(self, raw_mount_options): + mount_options = ",".join(re.sub(r"[\[\]]", "", raw_mount_options).replace(",", " ").split()) + assert mount_options == "vers=4,nolock,proto=tcp,nconnect=4" + + @patch("vast_csi.vms_session.VmsSession.get_quota", MagicMock(return_value=Bunch(tenant_id=1))) + @patch("vast_csi.vms_session.VmsSession.get_vip", MagicMock(return_value="2.2.2.2")) + @pytest.mark.parametrize("local_ip", ["1.1.1.1", "::1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"]) + @pytest.mark.parametrize("vip_pool_name", ["", "test-vip"]) + def test_publish_volume_with_local_ip(self, vms_session, volume_capabilities, monkeypatch, local_ip, vip_pool_name): + """ + Test if use_local_ip_for_mount is set, it will use local IP for mount (even when vip_pool_name is provided) + """ + # Preparation + cont = CsiController() + conf = 
vms_session.config + node_id = "test-node" + volume_id = "test-volume" + monkeypatch.setattr(conf, "use_local_ip_for_mount", local_ip), + capabilities = volume_capabilities(fs_type="ext4", mount_flags="", mode=types.AccessModeType.SINGLE_NODE_WRITER) + volume_context = dict(root_export="/test", vip_pool_name=vip_pool_name) + + # Execution + resp = cont.ControllerPublishVolume( + vms_session=vms_session, node_id=node_id, volume_id=volume_id, volume_capability=capabilities[0], volume_context=volume_context + ) + publish_context = resp.publish_context + + # Assertion + assert publish_context["export_path"] == "/test/test-volume" + if vip_pool_name: + assert publish_context["nfs_server_ip"] == "2.2.2.2" + else: + assert publish_context["nfs_server_ip"] == local_ip + + def test_static_volume_no_vip_pool(self, vms_session, volume_capabilities): + # Prepare test data + volume_id = "/static/volume/path" + node_id = "node1" + capabilities = volume_capabilities(fs_type="ext4", mount_flags="", mode=types.AccessModeType.SINGLE_NODE_WRITER) + cont = CsiController() + + with pytest.raises(Abort) as ex_context: + cont.ControllerPublishVolume(vms_session, node_id, volume_id, capabilities[0], {}) + + err = ex_context.value + assert "either vip_pool_name, vip_pool_fqdn or use_local_ip_for_mount must be provided." 
in err.message + + def test_static_volume_no_vip_policy(self, vms_session, volume_capabilities): + # Prepare test data + volume_id = "/static/volume/path" + node_id = "node1" + volume_context = dict(vip_pool_name="vippool-1", static_pv_create_views="yes") + capabilities = volume_capabilities(fs_type="ext4", mount_flags="", mode=types.AccessModeType.SINGLE_NODE_WRITER) + cont = CsiController() + + with pytest.raises(Abort) as ex_context: + cont.ControllerPublishVolume(vms_session, node_id, volume_id, capabilities[0], volume_context) + + err = ex_context.value + assert "Parameter 'view_policy' cannot be empty string or None" in err.message + + @pytest.mark.parametrize("kwargs", [ + dict(static_pv_create_views="yes"), + dict(static_pv_create_quotas="yes"), + dict(static_pv_create_view="yes", static_pv_create_quotas="yes"), + ]) + def test_static_volume_create_create_view_and_quota(self, fake_session, volume_capabilities, kwargs): + # Prepare test data + volume_id = "/static/volume/path/" + node_id = "node1" + volume_context = dict(vip_pool_name="vippool-1", view_policy="default", **kwargs) + capabilities = volume_capabilities( + fs_type="ext4", mount_flags=["test"], mode=types.AccessModeType.SINGLE_NODE_WRITER + ) + cont = CsiController() + + with fake_session(view=Bunch(path=volume_id, id=1, tenant_id=1, tenant_name="default")) as session: + resp = cont.ControllerPublishVolume(session, node_id, volume_id, capabilities[0], volume_context) + + publish_context = dict(resp.publish_context) + assert publish_context["nfs_server_ip"] == "127.0.0.1" + assert publish_context["export_path"] == volume_id.rstrip("/") + assert publish_context["mount_options"] == "test" + + if kwargs.get("static_pv_create_views"): + session.ensure_view.mock.assert_called_once_with( + path=volume_id.rstrip("/"), protocols=['NFS'], view_policy='default', qos_policy=None + ) + else: + session.ensure_view.mock.assert_not_called() + if kwargs.get("static_pv_create_quotas"): + 
session.ensure_quota.mock.assert_called_once_with( + volume_id="csi-" + str(uuid.uuid5(uuid.NAMESPACE_DNS, volume_id.rstrip("/"))), + view_path=volume_id.rstrip("/"), tenant_id=1, requested_capacity=0, + ) + else: + session.ensure_quota.mock.assert_not_called() + + def test_static_volume_wrong_tenant(self, vms_session, volume_capabilities): + # Prepare test data + volume_id = "/static/volume/path/" + node_id = "node1" + volume_context = dict(vip_pool_name="vippool-1", view_policy="default", static_pv_create_quotas="yes") + capabilities = volume_capabilities( + fs_type="ext4", mount_flags=["test"], mode=types.AccessModeType.SINGLE_NODE_WRITER + ) + vms_session.get_view = MagicMock(return_value=Bunch(path=volume_id, id=1, tenant_id=5, tenant_name="default")) + vms_session.get_quota = MagicMock(return_value=Bunch(tenant_id=1, hard_limit=999, tenant_name="test")) + cont = CsiController() + + with pytest.raises(Exception) as ex_context: + cont.ControllerPublishVolume(vms_session, node_id, volume_id, capabilities[0], volume_context) + + err = ex_context.value + assert "Volume already exists with different tenancy ownership (test)" in str(err) diff --git a/tests/test_cosi_privisioner.py b/tests/test_cosi_privisioner.py new file mode 100644 index 00000000..63206263 --- /dev/null +++ b/tests/test_cosi_privisioner.py @@ -0,0 +1,134 @@ +import pytest +import grpc +from easypy.bunch import Bunch +from unittest.mock import patch, MagicMock +from vast_csi.server import CosiProvisioner, MissingParameter + + +COMMON_PARAMS = dict( + root_export="/buckets", + vip_pool_name="vippool-1", + view_policy="default", + qos_policy="default", + protocols="nfs, nfs4, smb", + scheme="http", + s3_locks_retention_mode="COMPILANCE", + s3_versioning="true", + s3_locks="true", + locking="true", + s3_locks_retention_period="1d", + default_retention_period="1d", + allow_s3_anonymous_access="true", +) + + +@patch("vast_csi.vms_session.VmsSession.get_vip", MagicMock(return_value="172.0.0.1")) 
+@patch("vast_csi.vms_session.VmsSession.get_view", MagicMock(return_value=None)) +@patch( + "vast_csi.vms_session.VmsSession.get_view_policy", + MagicMock(return_value=Bunch(id=1, tenant_id=1, tenant_name="default")), +) +@patch( + "vast_csi.vms_session.VmsSession.get_qos_policy", + MagicMock(return_value=Bunch(id=1, tenant_id=1)), +) +class TestCosiProvisionerSuite: + def _create_bucket(self, name, parameters, vms_session): + cosi = CosiProvisioner() + return cosi.DriverCreateBucket(name=name, parameters=parameters, vms_session=vms_session) + + @patch("vast_csi.vms_session.VmsSession.ensure_user") + @patch("vast_csi.vms_session.VmsSession.create_view") + def test_create_bucket(self, m_create_view, m_ensure_user, vms_session): + """Test successful bucket creation""" + # Preparation + cosi = CosiProvisioner() + bucket_name = "test-bucket" + m_create_view.return_value = Bunch(tenant_id=1) + + # Execution + params = COMMON_PARAMS.copy() + res = self._create_bucket(name=bucket_name, parameters=params, vms_session=vms_session) + + # Assertion + assert res.bucket_id == "test-bucket@1@http://172.0.0.1:80" + bucket_id, tenant_id, endpoint = res.bucket_id.split("@") + assert bucket_id == bucket_name + assert tenant_id == "1" + assert endpoint == "http://172.0.0.1:80" + + assert m_create_view.call_args.kwargs == { + "bucket": "test-bucket", + "bucket_owner": "test-bucket", + "path": "/buckets/test-bucket", + "protocols": ["NFS", "NFS4", "SMB", "S3"], + "policy_id": 1, + "tenant_id": 1, + "qos_policy": "default", + "s3_locks_retention_mode": "COMPILANCE", + "s3_versioning": True, + "s3_locks": True, + "locking": True, + "s3_locks_retention_period": "1d", + "default_retention_period": "1d", + "allow_s3_anonymous_access": True, + } + ensure_user_kwargs = m_ensure_user.call_args.kwargs + assert 50000 <= ensure_user_kwargs.pop("uid") <= 60000 + assert ensure_user_kwargs == { + "name": "test-bucket", + "allow_create_bucket": True, + } + + @pytest.mark.parametrize("root_export", 
["", "/"]) + @patch("vast_csi.vms_session.VmsSession.ensure_user", MagicMock()) + @patch("vast_csi.vms_session.VmsSession.create_view") + def test_create_bucket_with_root_storage_path(self, m_create_view, root_export, vms_session): + """Test successful bucket creation with root storage path""" + # Preparation + common_params = COMMON_PARAMS.copy() + common_params["root_export"] = root_export + bucket_name = "test-bucket" + + # Execution + res = self._create_bucket(name=bucket_name, parameters=common_params, vms_session=vms_session) + + # Assertion + create_view_kwargs = m_create_view.call_args.kwargs + assert create_view_kwargs["path"] == "/test-bucket" + + @patch("vast_csi.vms_session.VmsSession.ensure_user", MagicMock()) + @patch("vast_csi.vms_session.VmsSession.create_view") + def test_create_bucket_only_required_params(self, m_create_view, vms_session): + params = dict(root_export="/buckets", vip_pool_name="vippool-1") + bucket_name = "test-bucket" + + # Execution + self._create_bucket(name=bucket_name, parameters=params, vms_session=vms_session) + + # Assertion + assert m_create_view.call_args.kwargs == { + "path": "/buckets/test-bucket", + "protocols": ["S3"], + "policy_id": 1, + "bucket": "test-bucket", + "bucket_owner": "test-bucket", + "tenant_id": 1, + } + + @pytest.mark.parametrize("missing_param", ["root_export", "vip_pool_name"]) + def test_create_bucket_missing_required_params(self, missing_param, vms_session): + """Test missing required parameters""" + # Preparation + params = COMMON_PARAMS.copy() + del params[missing_param] + bucket_name = "test-bucket" + + # Execution + with pytest.raises(MissingParameter) as ex_context: + self._create_bucket(name=bucket_name, parameters=params, vms_session=vms_session) + + # Assertion + err = ex_context.value + assert "cannot be empty" in err.message + assert err.code == grpc.StatusCode.INVALID_ARGUMENT diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 00000000..a8f621e1 --- 
/dev/null +++ b/tests/test_utils.py @@ -0,0 +1,42 @@ +import pytest +from vast_csi.utils import is_ver_nfs4_present, generate_ip_range + + +@pytest.mark.parametrize( + "options, exepected", + [ + ("vers=4,soft=true,noac", True), + ("nfsvers=4,soft=true,noac", True), + ("vers=4.1,soft=true,noac", True), + ("vers=4.0,soft=true,noac", True), + ("soft=true,vers=4.0,noac", True), + ("soft=true,vers=4.0", True), + ("", False), + ("nfsverss=4,soft=true,noac", False), + ("vers=3,soft=true,noac", False), + ("avers=4,soft=true,noac", False), + ("nfsvers=3.4,soft=true,noac", False), + ("soft=true,vers=3,4noac", False), + ("noac,vers= 4,noac", False), + ("noac,vers = 4,noac", False), + ] +) +def test_parse_nfs4_mount_option(options, exepected): + """Test if nfsvers|vers=4 is parsed properly""" + assert is_ver_nfs4_present(options) == exepected + + +@pytest.mark.parametrize("ip_ranges, expected", [ + ( + [["15.0.0.1", "15.0.0.4"], ["10.0.0.27", "10.0.0.30"]], + ['15.0.0.1', '15.0.0.2', '15.0.0.3', '15.0.0.4', '10.0.0.27', '10.0.0.28', '10.0.0.29', '10.0.0.30'] + ), + ( + [["15.0.0.1", "15.0.0.1"], ["10.0.0.20", "10.0.0.20"]], + ['15.0.0.1', '10.0.0.20'] + ), + ([], []) +]) +def test_generate_ip_range(ip_ranges, expected): + ips = generate_ip_range(ip_ranges) + assert ips == expected diff --git a/tests/test_vms_session.py b/tests/test_vms_session.py new file mode 100644 index 00000000..d7e961da --- /dev/null +++ b/tests/test_vms_session.py @@ -0,0 +1,134 @@ +import pytest +from io import BytesIO +from unittest.mock import patch, PropertyMock, MagicMock +from vast_csi.server import CsiController +from requests import Response, Request, HTTPError +from vast_csi.exceptions import OperationNotSupported, ApiError +from easypy.semver import SemVer + + +@patch("vast_csi.configuration.Config.vms_user", PropertyMock("test")) +@patch("vast_csi.configuration.Config.vms_password", PropertyMock("test")) +@patch("vast_csi.vms_session.VmsSession.refresh_auth_token", MagicMock()) +class 
TestVmsSessionSuite: + + @pytest.mark.parametrize("cluster_version", [ + "4.3.9", "4.0.11.12", "3.4.6.123.1", "4.5.6-1", "4.6.0", "4.6.0-1", "4.6.0-1.1", "4.6.9" + ]) + def test_requisite_decorator(self, cluster_version, vms_session): + """Test `requisite` decorator produces exception when cluster version doesn't met requirements""" + # Preparation + fake_mgmt = PropertyMock(return_value=SemVer.loads_fuzzy(cluster_version)) + stripped_version = SemVer.loads_fuzzy(cluster_version).dumps() + + def raise_http_err(*args, **kwargs): + resp = Response() + resp.status_code = 404 + resp.raw = BytesIO(b"not found") + req = Request() + req.path_url = "/abc" + raise HTTPError(response=resp, request=req) + + # Execution + with patch("vast_csi.vms_session.VmsSession.sw_version", fake_mgmt): + with pytest.raises(OperationNotSupported) as exc: + vms_session.delete_folder("/abc", 1) + + # Assertion + assert f"Cluster does not support this operation - 'delete_folder'" \ + f" (needs 4.7-0, got {stripped_version})\n current_version = {stripped_version}\n" \ + f" op = delete_folder\n required_version = 4.7-0" in exc.value.render(color=False) + + def test_trash_api_disabled_helm_config(self, vms_session): + """Test trash api disable in helm chart cause Exception""" + # Preparation + vms_session.config.dont_use_trash_api = True + fake_mgmt = PropertyMock(return_value=SemVer.loads_fuzzy("4.7.0")) + + # Execution + with patch("vast_csi.vms_session.VmsSession.sw_version", fake_mgmt): + with pytest.raises(OperationNotSupported) as exc: + vms_session.delete_folder("/abc", 1) + + # Assertion + assert "Cannot delete folder via VMS: Disabled by Vast CSI settings" in exc.value.render(color=False) + + def test_trash_api_disabled_cluster_settings(self, vms_session): + """Test trash api disable on cluster cause Exception""" + # Preparation + vms_session.config.dont_use_trash_api = True + fake_mgmt = PropertyMock(return_value=SemVer.loads_fuzzy("5.0.0.25")) + + def raise_http_err(*args, **kwargs): + 
resp = Response() + resp.status_code = 400 + resp.raw = BytesIO(b"trash folder disabled") + raise ApiError(response=resp) + + # Execution + with ( + patch("vast_csi.vms_session.VmsSession.sw_version", fake_mgmt), + patch("vast_csi.vms_session.VmsSession.delete", side_effect=raise_http_err), + ): + with pytest.raises(OperationNotSupported) as exc: + vms_session.delete_folder("/abc", 1) + + # Assertion + assert "Cannot delete folder via VMS: Disabled by Vast CSI settings" in exc.value.render(color=False) + + def test_delete_folder_local_mounting_requires_configuration(self, vms_session): + """Test deleting the folder via local mounting requires deletionVipPool and deletionVipPolicy to be provided.""" + # Preparation + cont = CsiController() + vms_session.config.dont_use_trash_api = True + fake_mgmt = PropertyMock(return_value=SemVer.loads_fuzzy("4.6.0")) + + # Execution + with patch("vast_csi.vms_session.VmsSession.sw_version", fake_mgmt): + with pytest.raises(AssertionError) as exc: + cont._delete_data_from_storage(vms_session, "/abc", 1) + + # Assertion + assert "Ensure that deletionViewPolicy is properly configured" in str(exc.value) + + def test_delete_folder_unsuccesful_attempt_cache_result(self, vms_session): + """Test if Trash API has been failed it wont be executed second time.""" + # Preparation + cont = CsiController() + vms_session.config.dont_use_trash_api = False + vms_session.config.avoid_trash_api.reset(-1) + fake_mgmt = PropertyMock(return_value=SemVer.loads_fuzzy("4.7.0")) + + # Execution + def raise_http_err(*args, **kwargs): + resp = Response() + resp.status_code = 400 + resp.raw = BytesIO(b"trash folder disabled") + raise ApiError(response=resp) + + assert vms_session.config.avoid_trash_api.expired + # Execution + with ( + patch("vast_csi.vms_session.VmsSession.sw_version", fake_mgmt), + patch("vast_csi.vms_session.VmsSession.delete", side_effect=raise_http_err) as mocked_request, + ): + with pytest.raises(AssertionError) as exc: + 
cont._delete_data_from_storage(vms_session, "/abc", 1) + + assert mocked_request.call_count == 1 + assert not vms_session.config.avoid_trash_api.expired + + with pytest.raises(AssertionError) as exc: + cont._delete_data_from_storage(vms_session,"/abc", 1) + + assert mocked_request.call_count == 1 + assert not vms_session.config.avoid_trash_api.expired + + # reset timer. trash API should be executed again + vms_session.config.avoid_trash_api.reset(-1) + + with pytest.raises(AssertionError) as exc: + cont._delete_data_from_storage(vms_session,"/abc", 1) + + assert mocked_request.call_count == 2 + assert not vms_session.config.avoid_trash_api.expired diff --git a/vast_csi/__main__.py b/vast_csi/__main__.py index c6b4dadf..7ef6fa05 100644 --- a/vast_csi/__main__.py +++ b/vast_csi/__main__.py @@ -1,26 +1,12 @@ +import os import sys -import re import argparse -from easypy.colors import C from easypy.bunch import Bunch -from easypy.semver import SemVer - - -IS_INTERACTIVE = sys.stdin.isatty() - - -CSI_SIDECAR_VERSIONS = { - 'csi-provisioner': 'v1.6.0', # min k8s: v1.17 - 'csi-attacher': 'v3.1.0', # min k8s: v1.17 - 'csi-resizer': 'v1.1.0', # min k8s: v1.16 - 'csi-node-driver-registrar': 'v2.0.1', # min k8s: v1.13 -} def main(): parser = argparse.ArgumentParser( - description="Vast CSI Plugin", - usage="docker run -it --net=host -v `pwd`:/out template") + description="Vast CSI Plugin") parser.set_defaults(func=lambda *_, **__: parser.print_help()) subparsers = parser.add_subparsers() @@ -28,25 +14,26 @@ def main(): serve_parse = subparsers.add_parser("serve", help='Start the CSI Plugin Server (not for humans)') serve_parse.set_defaults(func=_serve) - template_parse = subparsers.add_parser("template", help='Generate a kubectl template for deploying this CSI plugin') - for p in "image hostname username password vippool export load-balancing pull-policy mount-options".split(): - template_parse.add_argument("--" + p) - template_parse.set_defaults(func=_template) - info_parse = 
subparsers.add_parser("info", help='Print versioning information for this CSI plugin') info_parse.add_argument("--output", default="json", choices=['json', 'yaml'], help="Output format") info_parse.set_defaults(func=_info) + info_parse = subparsers.add_parser("system_info", help='Print system information') + info_parse.set_defaults(func=_system_info) + + test_parse = subparsers.add_parser("test", help='Start unit tests') + test_parse.set_defaults(func=_test) + args = parser.parse_args(namespace=Bunch()) args.pop("func")(args) def _info(args): - from . server import Config + from . configuration import Config conf = Config() info = dict( name=conf.plugin_name, version=conf.plugin_version, commit=conf.git_commit, - supported_k8s_versions=open("k8s_supported.txt").read().split() + supported_k8s_versions=open("k8s_supported.txt").read().split(), ) if args.output == "yaml": import yaml @@ -57,128 +44,20 @@ def _info(args): else: assert False, f"invalid output format: {args.output}" +def _system_info(*_): + os.system("cat /etc/os-release") + + +def _test(args): + """Runs the tests without code coverage""" + import pytest + sys.exit(pytest.main(["-x", "tests", "-s", "-v"])) + def _serve(args): from . server import serve return serve() -def _template(args): - try: - fname = "vast-csi-deployment.yaml" - with open(f"/out/{fname}", "w") as file: - generate_deployment(file, **args) - print(C(f"\nWritten to WHITE<<{fname}>>\n")) - print("Inspect the file and then run:") - print(C(f">> CYAN<>\n")) - print(C("YELLOW<>\n")) - except KeyboardInterrupt: - return - - -def generate_deployment( - file, load_balancing=None, pull_policy=None, image=None, hostname=None, - username=None, password=None, vippool=None, export=None, mount_options=None): - - from . 
utils import RESTSession - from requests import HTTPError, ConnectionError - from base64 import b64encode - from prompt_toolkit.completion import WordCompleter - from prompt_toolkit.shortcuts import prompt as _prompt - from prompt_toolkit.styles import Style - - style = Style.from_dict({'': '#AAAABB', 'prompt': '#ffffff'}) - context = Bunch() - - def prompt(arg, message, **kwargs): - if not IS_INTERACTIVE: - raise Exception(f"Missing argument: {arg}") - return _prompt([('class:prompt', message)], style=style, **kwargs) - - print(C("\n\nWHITE<>\n\n")) - - context.IMAGE_NAME = image or prompt("image", "Name of this Docker Image: ") - - context.LB_STRATEGY = "roundrobin" - # opts = ['random', 'roundrobin'] - # context.LB_STRATEGY = prompt( - # "load_balancing" - # f"Load-Balancing Strategy ({'|'.join(opts)}): ", default="random", completer=WordCompleter(opts)) - - opts = ['Never', 'Always', 'IfNotPresent', 'Auto'] - context.PULL_POLICY = pull_policy or prompt( - "pull_policy", - f"Image Pull Policy ({'|'.join(opts)}): ", default="IfNotPresent", completer=WordCompleter(opts)) - - if context.PULL_POLICY.lower() == 'auto': - context.PULL_POLICY = 'null' - - exports = vippools = [] - while True: - context.VMS_HOST = hostname or prompt("hostname", "Vast Management hostname: ", default="vms") - username = username or prompt("username", "Vast Management username: ", default="admin") - password = password or prompt("password", "Vast Management password: ", is_password=True) - - context.DISABLE_SSL = '"false"' - ssl_verify = context.DISABLE_SSL != '"false"' - if not ssl_verify: - import urllib3 - urllib3.disable_warnings() - - vms = RESTSession( - base_url=f"https://{context.VMS_HOST}/api", - auth=(username, password), - ssl_verify=ssl_verify) - - try: - versions = vms.versions() - except (ConnectionError, HTTPError) as exc: - print(C(f"YELLOW<>: {exc}")) - if IS_INTERACTIVE and not prompt(None, "Hit (y) to ignore, any other key to retry: "): - continue - else: - break - else: 
- vippools = sorted(p.name for p in vms.vippools()) - latest_ver = max(versions, key=lambda v: v.created) - version = SemVer.loads(latest_ver.sys_version or "3.0.0") # in QA this is sometimes empty - if version >= SemVer(3, 4): - exports = sorted({(v.alias or v.path) for v in vms.views() if "NFS" in v.protocols}) - else: - print(C("RED<>"), "VMS Version:", version) - print(C("This plugin supports WHITE<>")) - raise SystemExit(5) - - print() - print(C("GREEN<>"), "VMS Version:", version) - print(" - VIP Pools:", ", ".join(vippools or ["(none)"])) - print(" - Exports:", ", ".join(exports or ["(none)"])) - print() - break - - context.VIP_POOL_NAME = vippool or prompt( - "vippool", - "Virtual IP Pool Name: ", default="vippool-1", - completer=WordCompleter(vippools), complete_while_typing=True) - - context.NFS_EXPORT = export or prompt( - "export", - "NFS Export Path: ", default="/k8s", - completer=WordCompleter(exports), complete_while_typing=True) - - context.MOUNT_OPTIONS = prompt( - "mount_options", - "Additional Mount Options: ", default="" - ) if mount_options is None else mount_options - - context.B64_USERNAME = b64encode(username.encode("utf8")).decode("utf8") - context.B64_PASSWORD = b64encode(password.encode("utf8")).decode("utf8") - - context.update(CSI_SIDECAR_VERSIONS) - - template = open("vast-csi.yaml").read() - print(re.sub("#.*", "", template.format(**context)).strip(), file=file) - - if __name__ == '__main__': main() diff --git a/vast_csi/configuration.py b/vast_csi/configuration.py new file mode 100644 index 00000000..bfc44ec2 --- /dev/null +++ b/vast_csi/configuration.py @@ -0,0 +1,96 @@ +import socket + +from plumbum import local +from plumbum.typed_env import TypedEnv + +from easypy.tokens import ( + Token, + CONTROLLER_AND_NODE, + CONTROLLER, + NODE, + COSI_PLUGIN +) +from .exceptions import LookupFieldError + +from easypy.caching import cached_property +from easypy.timing import Timer +from easypy.units import HOUR + + +class Config(TypedEnv): + 
class Path(TypedEnv.Str): + convert = staticmethod(local.path) + + vms_credentials_store = local.path("/opt/vms-auth") + plugin_name, plugin_version, git_commit, ci_pipe = ( + open("version.info").read().strip().split() + ) + plugin_name = TypedEnv.Str("X_CSI_PLUGIN_NAME", default=plugin_name) + + controller_root_mount = Path( + "X_CSI_CTRL_ROOT_MOUNT", default=local.path("/csi-volumes") + ) + mock_vast = TypedEnv.Bool("X_CSI_MOCK_VAST", default=False) + nfs_server = TypedEnv.Str("X_CSI_NFS_SERVER", default="127.0.0.1") + deletion_vip_pool = TypedEnv.Str("X_CSI_DELETION_VIP_POOL_NAME", default="k8s") + deletion_view_policy = TypedEnv.Str("X_CSI_DELETION_VIEW_POLICY", default="") + sanity_test_nfs_export = Path("X_CSI_NFS_EXPORT", default=local.path("/k8s")) + + log_level = TypedEnv.Str("X_CSI_LOG_LEVEL", default="info") + csi_sanity_test = TypedEnv.Bool("X_CSI_SANITY_TEST", default=False) + node_id = TypedEnv.Str("X_CSI_NODE_ID", default=socket.getfqdn()) + + vms_host = TypedEnv.Str("X_CSI_VMS_HOST", default="vast") + ssl_verify = TypedEnv.Bool("X_CSI_ENABLE_VMS_SSL_VERIFICATION", default=False) + truncate_volume_name = TypedEnv.Int("X_CSI_TRUNCATE_VOLUME_NAME", default=None) + worker_threads = TypedEnv.Int("X_CSI_WORKER_THREADS", default=10) + dont_use_trash_api = TypedEnv.Bool("X_CSI_DONT_USE_TRASH_API", default=False) + use_local_ip_for_mount = TypedEnv.Str("X_CSI_USE_LOCALIP_FOR_MOUNT", default="") + attach_required = TypedEnv.Bool("X_CSI_ATTACH_REQUIRED", default=True) + + _mode = TypedEnv.Str("X_CSI_MODE", default="controller_and_node") + _endpoint = TypedEnv.Str("CSI_ENDPOINT", default="unix:///var/run/csi.sock") + _mount_options = TypedEnv.Str("X_CSI_MOUNT_OPTIONS", default="") # For example: "port=2049,nolock,vers=3" + name_fmt = "csi:{namespace}:{name}:{id}" + + fake_quota_store = local.path("/tmp/volumes") + fake_snapshot_store = local.path("/tmp/snapshots") + + timeout = TypedEnv.Int("X_CSI_VMS_TIMEOUT", default=30) + + @cached_property + def 
vms_user(self): + if not self.vms_credentials_store['username'].exists(): + raise LookupFieldError( + field="username", + tip="Make sure username is present in global VMS credentials secret" + ) + return self.vms_credentials_store['username'].read().strip() + + @cached_property + def vms_password(self): + if not self.vms_credentials_store['password'].exists(): + raise LookupFieldError( + field="password", + tip="Make sure password is present in global VMS credentials secret" + ) + return self.vms_credentials_store['password'].read().strip() + + @property + def mount_options(self): + s = self._mount_options.strip() + return list({p for p in s.split(',') if p}) + + unmount_attempts = TypedEnv.Int("X_CSI_UNMOUNT_ATTEMPTS", default=10) + + @property + def mode(self): + mode = Token(self._mode.upper()) + assert mode in {CONTROLLER_AND_NODE, CONTROLLER, NODE, COSI_PLUGIN}, f"invalid mode: {mode}" + return mode + + @property + def endpoint(self): + return self._endpoint.strip("tcp://") + + avoid_trash_api = Timer(now=-1, expiration=HOUR) diff --git a/vast_csi/csi_types.py b/vast_csi/csi_types.py index 7f3158c6..5d64ca10 100644 --- a/vast_csi/csi_types.py +++ b/vast_csi/csi_types.py @@ -1,8 +1,11 @@ from __future__ import absolute_import +import grpc +from google.protobuf.timestamp_pb2 import Timestamp from google.protobuf import wrappers_pb2 as wrappers -from . 
import csi_pb2 +from .proto import csi_pb2 +from .proto import cosi_pb2 class EnumWrapper(object): @@ -42,7 +45,11 @@ def __getattr__(self, name): CtrlExpandResp = csi_pb2.ControllerExpandVolumeResponse CapabilitiesResp = csi_pb2.GetPluginCapabilitiesResponse -AccessModeType = EnumWrapper(csi_pb2.VolumeCapability.AccessMode.Mode) + +VolumeCapability = csi_pb2.VolumeCapability +MountVolume = VolumeCapability.MountVolume +AccessMode = VolumeCapability.AccessMode +AccessModeType = EnumWrapper(AccessMode.Mode) StageResp = csi_pb2.NodeStageVolumeResponse UnstageResp = csi_pb2.NodeUnstageVolumeResponse @@ -70,3 +77,23 @@ def __getattr__(self, name): UsageUnit = EnumWrapper(VolumeUsage.Unit) Topology = csi_pb2.Topology + +# COSI types +DriverGetInfoResp = cosi_pb2.DriverGetInfoResponse +DriverCreateBucketResp = cosi_pb2.DriverCreateBucketResponse +DriverGrantBucketAccessResp = cosi_pb2.DriverGrantBucketAccessResponse +DriverRevokeBucketAccessResp = cosi_pb2.DriverRevokeBucketAccessResponse +DriverDeleteBucketResp = cosi_pb2.DriverDeleteBucketResponse +Protocol = cosi_pb2.Protocol +S3 = cosi_pb2.S3 +S3SignatureVersion = cosi_pb2.S3SignatureVersion +CredentialDetails = cosi_pb2.CredentialDetails + +# gRPC statuses +FAILED_PRECONDITION = grpc.StatusCode.FAILED_PRECONDITION +INVALID_ARGUMENT = grpc.StatusCode.INVALID_ARGUMENT +ALREADY_EXISTS = grpc.StatusCode.ALREADY_EXISTS +NOT_FOUND = grpc.StatusCode.NOT_FOUND +ABORTED = grpc.StatusCode.ABORTED +UNKNOWN = grpc.StatusCode.UNKNOWN +OUT_OF_RANGE = grpc.StatusCode.OUT_OF_RANGE diff --git a/vast_csi/exceptions.py b/vast_csi/exceptions.py new file mode 100644 index 00000000..b921b282 --- /dev/null +++ b/vast_csi/exceptions.py @@ -0,0 +1,60 @@ +import grpc +from easypy.exceptions import TException + + +class Abort(Exception): + @property + def code(self): + return self.args[0] + + @property + def message(self): + return self.args[1] + + +class ApiError(TException): + template = "HTTP {response.status_code}: {response.text}" + + 
+class OperationNotSupported(TException): + template = "Cluster does not support this operation - {op!r} (needs {required_version}, got {current_version})" + + +class LookupFieldError(TException): + template = "Could not find {field}." + + +class MissingParameter(Abort): + def __init__(self, param: str): + self.param = param + + @property + def code(self): + return grpc.StatusCode.INVALID_ARGUMENT + + @property + def message(self): + return ( + f"Parameter {self.param!r} cannot be empty string or None." + f" Please provide a valid value for this parameter " + f"in the parameters section of StorageClass" + ) + + +class MountFailed(TException): + template = "Mounting {src} failed" + + +class BuilderFailed(Exception): + + @property + def message(self): + return self.args[0] + + +class SourceNotFound(BuilderFailed): + pass + + +class VolumeAlreadyExists(BuilderFailed): + pass diff --git a/vast_csi/proto/__init__.py b/vast_csi/proto/__init__.py new file mode 100644 index 00000000..96559726 --- /dev/null +++ b/vast_csi/proto/__init__.py @@ -0,0 +1 @@ +from . import csi_pb2, csi_pb2_grpc, cosi_pb2, cosi_pb2_grpc diff --git a/vast_csi/proto/cosi_pb2.py b/vast_csi/proto/cosi_pb2.py new file mode 100644 index 00000000..21858bb8 --- /dev/null +++ b/vast_csi/proto/cosi_pb2.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: cosi.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\ncosi.proto\x12\rcosi.v1alpha1\x1a google/protobuf/descriptor.proto\"R\n\x02S3\x12\x0e\n\x06region\x18\x01 \x01(\t\x12<\n\x11signature_version\x18\x02 \x01(\x0e\x32!.cosi.v1alpha1.S3SignatureVersion\"$\n\tAzureBlob\x12\x17\n\x0fstorage_account\x18\x01 \x01(\t\"L\n\x03GCS\x12\x18\n\x10private_key_name\x18\x01 \x01(\t\x12\x12\n\nproject_id\x18\x02 \x01(\t\x12\x17\n\x0fservice_account\x18\x03 \x01(\t\"\x85\x01\n\x08Protocol\x12\x1f\n\x02s3\x18\x01 \x01(\x0b\x32\x11.cosi.v1alpha1.S3H\x00\x12-\n\tazureBlob\x18\x02 \x01(\x0b\x32\x18.cosi.v1alpha1.AzureBlobH\x00\x12!\n\x03gcs\x18\x03 \x01(\x0b\x32\x12.cosi.v1alpha1.GCSH\x00\x42\x06\n\x04type\"\x83\x01\n\x11\x43redentialDetails\x12>\n\x07secrets\x18\x01 \x03(\x0b\x32-.cosi.v1alpha1.CredentialDetails.SecretsEntry\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x16\n\x14\x44riverGetInfoRequest\"%\n\x15\x44riverGetInfoResponse\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xaa\x01\n\x19\x44riverCreateBucketRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12L\n\nparameters\x18\x02 \x03(\x0b\x32\x38.cosi.v1alpha1.DriverCreateBucketRequest.ParametersEntry\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"]\n\x1a\x44riverCreateBucketResponse\x12\x11\n\tbucket_id\x18\x01 \x01(\t\x12,\n\x0b\x62ucket_info\x18\x02 
\x01(\x0b\x32\x17.cosi.v1alpha1.Protocol\"\xb9\x01\n\x19\x44riverDeleteBucketRequest\x12\x11\n\tbucket_id\x18\x01 \x01(\t\x12S\n\x0e\x64\x65lete_context\x18\x02 \x03(\x0b\x32;.cosi.v1alpha1.DriverDeleteBucketRequest.DeleteContextEntry\x1a\x34\n\x12\x44\x65leteContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1c\n\x1a\x44riverDeleteBucketResponse\"\x87\x02\n\x1e\x44riverGrantBucketAccessRequest\x12\x11\n\tbucket_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12>\n\x13\x61uthentication_type\x18\x03 \x01(\x0e\x32!.cosi.v1alpha1.AuthenticationType\x12Q\n\nparameters\x18\x04 \x03(\x0b\x32=.cosi.v1alpha1.DriverGrantBucketAccessRequest.ParametersEntry\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe1\x01\n\x1f\x44riverGrantBucketAccessResponse\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12T\n\x0b\x63redentials\x18\x02 \x03(\x0b\x32?.cosi.v1alpha1.DriverGrantBucketAccessResponse.CredentialsEntry\x1aT\n\x10\x43redentialsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12/\n\x05value\x18\x02 \x01(\x0b\x32 .cosi.v1alpha1.CredentialDetails:\x02\x38\x01\"\xec\x01\n\x1f\x44riverRevokeBucketAccessRequest\x12\x11\n\tbucket_id\x18\x01 \x01(\t\x12\x12\n\naccount_id\x18\x02 \x01(\t\x12\x66\n\x15revoke_access_context\x18\x03 \x03(\x0b\x32G.cosi.v1alpha1.DriverRevokeBucketAccessRequest.RevokeAccessContextEntry\x1a:\n\x18RevokeAccessContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\"\n 
DriverRevokeBucketAccessResponse*>\n\x12S3SignatureVersion\x12\x14\n\x10UnknownSignature\x10\x00\x12\x08\n\x04S3V2\x10\x01\x12\x08\n\x04S3V4\x10\x02*q\n\x19\x41nonymousBucketAccessMode\x12\x1b\n\x17UnknownBucketAccessMode\x10\x00\x12\x0b\n\x07Private\x10\x01\x12\x0c\n\x08ReadOnly\x10\x02\x12\r\n\tWriteOnly\x10\x03\x12\r\n\tReadWrite\x10\x04*E\n\x12\x41uthenticationType\x12\x1d\n\x19UnknownAuthenticationType\x10\x00\x12\x07\n\x03Key\x10\x01\x12\x07\n\x03IAM\x10\x02\x32h\n\x08Identity\x12\\\n\rDriverGetInfo\x12#.cosi.v1alpha1.DriverGetInfoRequest\x1a$.cosi.v1alpha1.DriverGetInfoResponse\"\x00\x32\xde\x03\n\x0bProvisioner\x12k\n\x12\x44riverCreateBucket\x12(.cosi.v1alpha1.DriverCreateBucketRequest\x1a).cosi.v1alpha1.DriverCreateBucketResponse\"\x00\x12k\n\x12\x44riverDeleteBucket\x12(.cosi.v1alpha1.DriverDeleteBucketRequest\x1a).cosi.v1alpha1.DriverDeleteBucketResponse\"\x00\x12x\n\x17\x44riverGrantBucketAccess\x12-.cosi.v1alpha1.DriverGrantBucketAccessRequest\x1a..cosi.v1alpha1.DriverGrantBucketAccessResponse\x12{\n\x18\x44riverRevokeBucketAccess\x12..cosi.v1alpha1.DriverRevokeBucketAccessRequest\x1a/.cosi.v1alpha1.DriverRevokeBucketAccessResponse:1\n\nalpha_enum\x12\x1c.google.protobuf.EnumOptions\x18\xdc\x08 \x01(\x08:<\n\x10\x61lpha_enum_value\x12!.google.protobuf.EnumValueOptions\x18\xdc\x08 \x01(\x08:3\n\x0b\x63osi_secret\x12\x1d.google.protobuf.FieldOptions\x18\xdb\x08 \x01(\x08:3\n\x0b\x61lpha_field\x12\x1d.google.protobuf.FieldOptions\x18\xdc\x08 \x01(\x08:7\n\ralpha_message\x12\x1f.google.protobuf.MessageOptions\x18\xdc\x08 \x01(\x08:5\n\x0c\x61lpha_method\x12\x1e.google.protobuf.MethodOptions\x18\xdc\x08 \x01(\x08:7\n\ralpha_service\x12\x1f.google.protobuf.ServiceOptions\x18\xdc\x08 \x01(\x08\x42:Z8sigs.k8s.io/container-object-storage-interface-spec;cosib\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'cosi_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + 
google_dot_protobuf_dot_descriptor__pb2.EnumOptions.RegisterExtension(alpha_enum) + google_dot_protobuf_dot_descriptor__pb2.EnumValueOptions.RegisterExtension(alpha_enum_value) + google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(cosi_secret) + google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(alpha_field) + google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(alpha_message) + google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(alpha_method) + google_dot_protobuf_dot_descriptor__pb2.ServiceOptions.RegisterExtension(alpha_service) + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'Z8sigs.k8s.io/container-object-storage-interface-spec;cosi' + _CREDENTIALDETAILS_SECRETSENTRY._options = None + _CREDENTIALDETAILS_SECRETSENTRY._serialized_options = b'8\001' + _DRIVERCREATEBUCKETREQUEST_PARAMETERSENTRY._options = None + _DRIVERCREATEBUCKETREQUEST_PARAMETERSENTRY._serialized_options = b'8\001' + _DRIVERDELETEBUCKETREQUEST_DELETECONTEXTENTRY._options = None + _DRIVERDELETEBUCKETREQUEST_DELETECONTEXTENTRY._serialized_options = b'8\001' + _DRIVERGRANTBUCKETACCESSREQUEST_PARAMETERSENTRY._options = None + _DRIVERGRANTBUCKETACCESSREQUEST_PARAMETERSENTRY._serialized_options = b'8\001' + _DRIVERGRANTBUCKETACCESSRESPONSE_CREDENTIALSENTRY._options = None + _DRIVERGRANTBUCKETACCESSRESPONSE_CREDENTIALSENTRY._serialized_options = b'8\001' + _DRIVERREVOKEBUCKETACCESSREQUEST_REVOKEACCESSCONTEXTENTRY._options = None + _DRIVERREVOKEBUCKETACCESSREQUEST_REVOKEACCESSCONTEXTENTRY._serialized_options = b'8\001' + _S3SIGNATUREVERSION._serialized_start=1851 + _S3SIGNATUREVERSION._serialized_end=1913 + _ANONYMOUSBUCKETACCESSMODE._serialized_start=1915 + _ANONYMOUSBUCKETACCESSMODE._serialized_end=2028 + _AUTHENTICATIONTYPE._serialized_start=2030 + _AUTHENTICATIONTYPE._serialized_end=2099 + _S3._serialized_start=63 + _S3._serialized_end=145 + _AZUREBLOB._serialized_start=147 + 
_AZUREBLOB._serialized_end=183 + _GCS._serialized_start=185 + _GCS._serialized_end=261 + _PROTOCOL._serialized_start=264 + _PROTOCOL._serialized_end=397 + _CREDENTIALDETAILS._serialized_start=400 + _CREDENTIALDETAILS._serialized_end=531 + _CREDENTIALDETAILS_SECRETSENTRY._serialized_start=485 + _CREDENTIALDETAILS_SECRETSENTRY._serialized_end=531 + _DRIVERGETINFOREQUEST._serialized_start=533 + _DRIVERGETINFOREQUEST._serialized_end=555 + _DRIVERGETINFORESPONSE._serialized_start=557 + _DRIVERGETINFORESPONSE._serialized_end=594 + _DRIVERCREATEBUCKETREQUEST._serialized_start=597 + _DRIVERCREATEBUCKETREQUEST._serialized_end=767 + _DRIVERCREATEBUCKETREQUEST_PARAMETERSENTRY._serialized_start=718 + _DRIVERCREATEBUCKETREQUEST_PARAMETERSENTRY._serialized_end=767 + _DRIVERCREATEBUCKETRESPONSE._serialized_start=769 + _DRIVERCREATEBUCKETRESPONSE._serialized_end=862 + _DRIVERDELETEBUCKETREQUEST._serialized_start=865 + _DRIVERDELETEBUCKETREQUEST._serialized_end=1050 + _DRIVERDELETEBUCKETREQUEST_DELETECONTEXTENTRY._serialized_start=998 + _DRIVERDELETEBUCKETREQUEST_DELETECONTEXTENTRY._serialized_end=1050 + _DRIVERDELETEBUCKETRESPONSE._serialized_start=1052 + _DRIVERDELETEBUCKETRESPONSE._serialized_end=1080 + _DRIVERGRANTBUCKETACCESSREQUEST._serialized_start=1083 + _DRIVERGRANTBUCKETACCESSREQUEST._serialized_end=1346 + _DRIVERGRANTBUCKETACCESSREQUEST_PARAMETERSENTRY._serialized_start=718 + _DRIVERGRANTBUCKETACCESSREQUEST_PARAMETERSENTRY._serialized_end=767 + _DRIVERGRANTBUCKETACCESSRESPONSE._serialized_start=1349 + _DRIVERGRANTBUCKETACCESSRESPONSE._serialized_end=1574 + _DRIVERGRANTBUCKETACCESSRESPONSE_CREDENTIALSENTRY._serialized_start=1490 + _DRIVERGRANTBUCKETACCESSRESPONSE_CREDENTIALSENTRY._serialized_end=1574 + _DRIVERREVOKEBUCKETACCESSREQUEST._serialized_start=1577 + _DRIVERREVOKEBUCKETACCESSREQUEST._serialized_end=1813 + _DRIVERREVOKEBUCKETACCESSREQUEST_REVOKEACCESSCONTEXTENTRY._serialized_start=1755 + 
_DRIVERREVOKEBUCKETACCESSREQUEST_REVOKEACCESSCONTEXTENTRY._serialized_end=1813 + _DRIVERREVOKEBUCKETACCESSRESPONSE._serialized_start=1815 + _DRIVERREVOKEBUCKETACCESSRESPONSE._serialized_end=1849 + _IDENTITY._serialized_start=2101 + _IDENTITY._serialized_end=2205 + _PROVISIONER._serialized_start=2208 + _PROVISIONER._serialized_end=2686 +# @@protoc_insertion_point(module_scope) diff --git a/vast_csi/proto/cosi_pb2_grpc.py b/vast_csi/proto/cosi_pb2_grpc.py new file mode 100644 index 00000000..6776265a --- /dev/null +++ b/vast_csi/proto/cosi_pb2_grpc.py @@ -0,0 +1,237 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from . import cosi_pb2 as cosi__pb2 + + +class IdentityStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.DriverGetInfo = channel.unary_unary( + '/cosi.v1alpha1.Identity/DriverGetInfo', + request_serializer=cosi__pb2.DriverGetInfoRequest.SerializeToString, + response_deserializer=cosi__pb2.DriverGetInfoResponse.FromString, + ) + + +class IdentityServicer(object): + """Missing associated documentation comment in .proto file.""" + + def DriverGetInfo(self, request, context): + """This call is meant to retrieve the unique provisioner Identity. + This identity will have to be set in BucketClaim.DriverName field in order to invoke this specific provisioner. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_IdentityServicer_to_server(servicer, server): + rpc_method_handlers = { + 'DriverGetInfo': grpc.unary_unary_rpc_method_handler( + servicer.DriverGetInfo, + request_deserializer=cosi__pb2.DriverGetInfoRequest.FromString, + response_serializer=cosi__pb2.DriverGetInfoResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'cosi.v1alpha1.Identity', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class Identity(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def DriverGetInfo(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/cosi.v1alpha1.Identity/DriverGetInfo', + cosi__pb2.DriverGetInfoRequest.SerializeToString, + cosi__pb2.DriverGetInfoResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + +class ProvisionerStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.DriverCreateBucket = channel.unary_unary( + '/cosi.v1alpha1.Provisioner/DriverCreateBucket', + request_serializer=cosi__pb2.DriverCreateBucketRequest.SerializeToString, + response_deserializer=cosi__pb2.DriverCreateBucketResponse.FromString, + ) + self.DriverDeleteBucket = channel.unary_unary( + '/cosi.v1alpha1.Provisioner/DriverDeleteBucket', + request_serializer=cosi__pb2.DriverDeleteBucketRequest.SerializeToString, + response_deserializer=cosi__pb2.DriverDeleteBucketResponse.FromString, + ) + self.DriverGrantBucketAccess = channel.unary_unary( + '/cosi.v1alpha1.Provisioner/DriverGrantBucketAccess', + request_serializer=cosi__pb2.DriverGrantBucketAccessRequest.SerializeToString, + response_deserializer=cosi__pb2.DriverGrantBucketAccessResponse.FromString, + ) + self.DriverRevokeBucketAccess = channel.unary_unary( + '/cosi.v1alpha1.Provisioner/DriverRevokeBucketAccess', + request_serializer=cosi__pb2.DriverRevokeBucketAccessRequest.SerializeToString, + response_deserializer=cosi__pb2.DriverRevokeBucketAccessResponse.FromString, + ) + + +class ProvisionerServicer(object): + """Missing associated documentation comment in .proto file.""" + + def DriverCreateBucket(self, request, context): + """This call is made to create the bucket in the backend. + This call is idempotent + 1. If a bucket that matches both name and parameters already exists, then OK (success) must be returned. + 2. If a bucket by same name, but different parameters is provided, then the appropriate error code ALREADY_EXISTS must be returned. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DriverDeleteBucket(self, request, context): + """This call is made to delete the bucket in the backend. + If the bucket has already been deleted, then no error should be returned. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DriverGrantBucketAccess(self, request, context): + """This call grants access to an account. The account_name in the request shall be used as a unique identifier to create credentials. + The account_id returned in the response will be used as the unique identifier for deleting this access when calling DriverRevokeBucketAccess. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DriverRevokeBucketAccess(self, request, context): + """This call revokes all access to a particular bucket from a principal. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ProvisionerServicer_to_server(servicer, server): + rpc_method_handlers = { + 'DriverCreateBucket': grpc.unary_unary_rpc_method_handler( + servicer.DriverCreateBucket, + request_deserializer=cosi__pb2.DriverCreateBucketRequest.FromString, + response_serializer=cosi__pb2.DriverCreateBucketResponse.SerializeToString, + ), + 'DriverDeleteBucket': grpc.unary_unary_rpc_method_handler( + servicer.DriverDeleteBucket, + request_deserializer=cosi__pb2.DriverDeleteBucketRequest.FromString, + response_serializer=cosi__pb2.DriverDeleteBucketResponse.SerializeToString, + ), + 'DriverGrantBucketAccess': grpc.unary_unary_rpc_method_handler( + servicer.DriverGrantBucketAccess, + request_deserializer=cosi__pb2.DriverGrantBucketAccessRequest.FromString, + response_serializer=cosi__pb2.DriverGrantBucketAccessResponse.SerializeToString, + ), + 'DriverRevokeBucketAccess': grpc.unary_unary_rpc_method_handler( + servicer.DriverRevokeBucketAccess, + request_deserializer=cosi__pb2.DriverRevokeBucketAccessRequest.FromString, + 
response_serializer=cosi__pb2.DriverRevokeBucketAccessResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'cosi.v1alpha1.Provisioner', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class Provisioner(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def DriverCreateBucket(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/cosi.v1alpha1.Provisioner/DriverCreateBucket', + cosi__pb2.DriverCreateBucketRequest.SerializeToString, + cosi__pb2.DriverCreateBucketResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DriverDeleteBucket(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/cosi.v1alpha1.Provisioner/DriverDeleteBucket', + cosi__pb2.DriverDeleteBucketRequest.SerializeToString, + cosi__pb2.DriverDeleteBucketResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DriverGrantBucketAccess(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/cosi.v1alpha1.Provisioner/DriverGrantBucketAccess', + cosi__pb2.DriverGrantBucketAccessRequest.SerializeToString, + cosi__pb2.DriverGrantBucketAccessResponse.FromString, + options, channel_credentials, + insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DriverRevokeBucketAccess(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/cosi.v1alpha1.Provisioner/DriverRevokeBucketAccess', + cosi__pb2.DriverRevokeBucketAccessRequest.SerializeToString, + cosi__pb2.DriverRevokeBucketAccessResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/vast_csi/proto/csi_pb2.py b/vast_csi/proto/csi_pb2.py new file mode 100644 index 00000000..f1e9a7cd --- /dev/null +++ b/vast_csi/proto/csi_pb2.py @@ -0,0 +1,486 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: csi.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\tcsi.proto\x12\x06\x63si.v1\x1a google/protobuf/descriptor.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\"\x16\n\x14GetPluginInfoRequest\"\xad\x01\n\x15GetPluginInfoResponse\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x16\n\x0evendor_version\x18\x02 \x01(\t\x12=\n\x08manifest\x18\x03 
\x03(\x0b\x32+.csi.v1.GetPluginInfoResponse.ManifestEntry\x1a/\n\rManifestEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1e\n\x1cGetPluginCapabilitiesRequest\"O\n\x1dGetPluginCapabilitiesResponse\x12.\n\x0c\x63\x61pabilities\x18\x01 \x03(\x0b\x32\x18.csi.v1.PluginCapability\"\xca\x03\n\x10PluginCapability\x12\x33\n\x07service\x18\x01 \x01(\x0b\x32 .csi.v1.PluginCapability.ServiceH\x00\x12\x44\n\x10volume_expansion\x18\x02 \x01(\x0b\x32(.csi.v1.PluginCapability.VolumeExpansionH\x00\x1a\xb4\x01\n\x07Service\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.csi.v1.PluginCapability.Service.Type\"t\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x16\n\x12\x43ONTROLLER_SERVICE\x10\x01\x12$\n VOLUME_ACCESSIBILITY_CONSTRAINTS\x10\x02\x12!\n\x18GROUP_CONTROLLER_SERVICE\x10\x03\x1a\x03\xa0\x42\x01\x1a|\n\x0fVolumeExpansion\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.csi.v1.PluginCapability.VolumeExpansion.Type\",\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06ONLINE\x10\x01\x12\x0b\n\x07OFFLINE\x10\x02\x42\x06\n\x04type\"\x0e\n\x0cProbeRequest\":\n\rProbeResponse\x12)\n\x05ready\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"\xf9\x04\n\x13\x43reateVolumeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x0e\x63\x61pacity_range\x18\x02 \x01(\x0b\x32\x15.csi.v1.CapacityRange\x12\x35\n\x13volume_capabilities\x18\x03 \x03(\x0b\x32\x18.csi.v1.VolumeCapability\x12?\n\nparameters\x18\x04 \x03(\x0b\x32+.csi.v1.CreateVolumeRequest.ParametersEntry\x12>\n\x07secrets\x18\x05 \x03(\x0b\x32(.csi.v1.CreateVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12:\n\x15volume_content_source\x18\x06 \x01(\x0b\x32\x1b.csi.v1.VolumeContentSource\x12?\n\x1a\x61\x63\x63\x65ssibility_requirements\x18\x07 \x01(\x0b\x32\x1b.csi.v1.TopologyRequirement\x12S\n\x12mutable_parameters\x18\x08 \x03(\x0b\x32\x32.csi.v1.CreateVolumeRequest.MutableParametersEntryB\x03\xa0\x42\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x38\n\x16MutableParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe3\x01\n\x13VolumeContentSource\x12>\n\x08snapshot\x18\x01 \x01(\x0b\x32*.csi.v1.VolumeContentSource.SnapshotSourceH\x00\x12:\n\x06volume\x18\x02 \x01(\x0b\x32(.csi.v1.VolumeContentSource.VolumeSourceH\x00\x1a%\n\x0eSnapshotSource\x12\x13\n\x0bsnapshot_id\x18\x01 \x01(\t\x1a!\n\x0cVolumeSource\x12\x11\n\tvolume_id\x18\x01 \x01(\tB\x06\n\x04type\"6\n\x14\x43reateVolumeResponse\x12\x1e\n\x06volume\x18\x01 \x01(\x0b\x32\x0e.csi.v1.Volume\"\xd9\x04\n\x10VolumeCapability\x12\x35\n\x05\x62lock\x18\x01 \x01(\x0b\x32$.csi.v1.VolumeCapability.BlockVolumeH\x00\x12\x35\n\x05mount\x18\x02 \x01(\x0b\x32$.csi.v1.VolumeCapability.MountVolumeH\x00\x12\x38\n\x0b\x61\x63\x63\x65ss_mode\x18\x03 \x01(\x0b\x32#.csi.v1.VolumeCapability.AccessMode\x1a\r\n\x0b\x42lockVolume\x1aO\n\x0bMountVolume\x12\x0f\n\x07\x66s_type\x18\x01 \x01(\t\x12\x13\n\x0bmount_flags\x18\x02 \x03(\t\x12\x1a\n\x12volume_mount_group\x18\x03 \x01(\t\x1a\xad\x02\n\nAccessMode\x12\x36\n\x04mode\x18\x01 \x01(\x0e\x32(.csi.v1.VolumeCapability.AccessMode.Mode\"\xe6\x01\n\x04Mode\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x16\n\x12SINGLE_NODE_WRITER\x10\x01\x12\x1b\n\x17SINGLE_NODE_READER_ONLY\x10\x02\x12\x1a\n\x16MULTI_NODE_READER_ONLY\x10\x03\x12\x1c\n\x18MULTI_NODE_SINGLE_WRITER\x10\x04\x12\x1b\n\x17MULTI_NODE_MULTI_WRITER\x10\x05\x12\"\n\x19SINGLE_NODE_SINGLE_WRITER\x10\x06\x1a\x03\xa0\x42\x01\x12!\n\x18SINGLE_NODE_MULTI_WRITER\x10\x07\x1a\x03\xa0\x42\x01\x42\r\n\x0b\x61\x63\x63\x65ss_type\"<\n\rCapacityRange\x12\x16\n\x0erequired_bytes\x18\x01 \x01(\x03\x12\x13\n\x0blimit_bytes\x18\x02 \x01(\x03\"\x88\x02\n\x06Volume\x12\x16\n\x0e\x63\x61pacity_bytes\x18\x01 \x01(\x03\x12\x11\n\tvolume_id\x18\x02 \x01(\t\x12\x39\n\x0evolume_context\x18\x03 
\x03(\x0b\x32!.csi.v1.Volume.VolumeContextEntry\x12\x33\n\x0e\x63ontent_source\x18\x04 \x01(\x0b\x32\x1b.csi.v1.VolumeContentSource\x12-\n\x13\x61\x63\x63\x65ssible_topology\x18\x05 \x03(\x0b\x32\x10.csi.v1.Topology\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"_\n\x13TopologyRequirement\x12#\n\trequisite\x18\x01 \x03(\x0b\x32\x10.csi.v1.Topology\x12#\n\tpreferred\x18\x02 \x03(\x0b\x32\x10.csi.v1.Topology\"m\n\x08Topology\x12\x30\n\x08segments\x18\x01 \x03(\x0b\x32\x1e.csi.v1.Topology.SegmentsEntry\x1a/\n\rSegmentsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x98\x01\n\x13\x44\x65leteVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12>\n\x07secrets\x18\x02 \x03(\x0b\x32(.csi.v1.DeleteVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x16\n\x14\x44\x65leteVolumeResponse\"\x8f\x03\n\x1e\x43ontrollerPublishVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\t\x12\x33\n\x11volume_capability\x18\x03 \x01(\x0b\x32\x18.csi.v1.VolumeCapability\x12\x10\n\x08readonly\x18\x04 \x01(\x08\x12I\n\x07secrets\x18\x05 \x03(\x0b\x32\x33.csi.v1.ControllerPublishVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12Q\n\x0evolume_context\x18\x06 \x03(\x0b\x32\x39.csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xae\x01\n\x1f\x43ontrollerPublishVolumeResponse\x12T\n\x0fpublish_context\x18\x01 \x03(\x0b\x32;.csi.v1.ControllerPublishVolumeResponse.PublishContextEntry\x1a\x35\n\x13PublishContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc3\x01\n 
ControllerUnpublishVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\t\x12K\n\x07secrets\x18\x03 \x03(\x0b\x32\x35.csi.v1.ControllerUnpublishVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"#\n!ControllerUnpublishVolumeResponse\"\x96\x05\n!ValidateVolumeCapabilitiesRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12T\n\x0evolume_context\x18\x02 \x03(\x0b\x32<.csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry\x12\x35\n\x13volume_capabilities\x18\x03 \x03(\x0b\x32\x18.csi.v1.VolumeCapability\x12M\n\nparameters\x18\x04 \x03(\x0b\x32\x39.csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry\x12L\n\x07secrets\x18\x05 \x03(\x0b\x32\x36.csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntryB\x03\x98\x42\x01\x12\x61\n\x12mutable_parameters\x18\x06 \x03(\x0b\x32@.csi.v1.ValidateVolumeCapabilitiesRequest.MutableParametersEntryB\x03\xa0\x42\x01\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x38\n\x16MutableParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8f\x05\n\"ValidateVolumeCapabilitiesResponse\x12G\n\tconfirmed\x18\x01 \x01(\x0b\x32\x34.csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed\x12\x0f\n\x07message\x18\x02 \x01(\t\x1a\x8e\x04\n\tConfirmed\x12_\n\x0evolume_context\x18\x01 \x03(\x0b\x32G.csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry\x12\x35\n\x13volume_capabilities\x18\x02 \x03(\x0b\x32\x18.csi.v1.VolumeCapability\x12X\n\nparameters\x18\x03 \x03(\x0b\x32\x44.csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry\x12l\n\x12mutable_parameters\x18\x04 
\x03(\x0b\x32K.csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.MutableParametersEntryB\x03\xa0\x42\x01\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x38\n\x16MutableParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"A\n\x12ListVolumesRequest\x12\x13\n\x0bmax_entries\x18\x01 \x01(\x05\x12\x16\n\x0estarting_token\x18\x02 \x01(\t\"\xa4\x02\n\x13ListVolumesResponse\x12\x32\n\x07\x65ntries\x18\x01 \x03(\x0b\x32!.csi.v1.ListVolumesResponse.Entry\x12\x12\n\nnext_token\x18\x02 \x01(\t\x1a\x62\n\x0cVolumeStatus\x12\x1a\n\x12published_node_ids\x18\x01 \x03(\t\x12\x36\n\x10volume_condition\x18\x02 \x01(\x0b\x32\x17.csi.v1.VolumeConditionB\x03\xa0\x42\x01\x1a\x61\n\x05\x45ntry\x12\x1e\n\x06volume\x18\x01 \x01(\x0b\x32\x0e.csi.v1.Volume\x12\x38\n\x06status\x18\x02 \x01(\x0b\x32(.csi.v1.ListVolumesResponse.VolumeStatus\"4\n\x1a\x43ontrollerGetVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t:\x03\xa0\x42\x01\"\xe3\x01\n\x1b\x43ontrollerGetVolumeResponse\x12\x1e\n\x06volume\x18\x01 \x01(\x0b\x32\x0e.csi.v1.Volume\x12@\n\x06status\x18\x02 \x01(\x0b\x32\x30.csi.v1.ControllerGetVolumeResponse.VolumeStatus\x1a]\n\x0cVolumeStatus\x12\x1a\n\x12published_node_ids\x18\x01 \x03(\t\x12\x31\n\x10volume_condition\x18\x02 \x01(\x0b\x32\x17.csi.v1.VolumeCondition:\x03\xa0\x42\x01\"\xc5\x02\n\x1d\x43ontrollerModifyVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12H\n\x07secrets\x18\x02 \x03(\x0b\x32\x32.csi.v1.ControllerModifyVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12X\n\x12mutable_parameters\x18\x03 \x03(\x0b\x32<.csi.v1.ControllerModifyVolumeRequest.MutableParametersEntry\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x38\n\x16MutableParametersEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01:\x03\xa0\x42\x01\"%\n\x1e\x43ontrollerModifyVolumeResponse:\x03\xa0\x42\x01\"\xed\x01\n\x12GetCapacityRequest\x12\x35\n\x13volume_capabilities\x18\x01 \x03(\x0b\x32\x18.csi.v1.VolumeCapability\x12>\n\nparameters\x18\x02 \x03(\x0b\x32*.csi.v1.GetCapacityRequest.ParametersEntry\x12-\n\x13\x61\x63\x63\x65ssible_topology\x18\x03 \x01(\x0b\x32\x10.csi.v1.Topology\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xaa\x01\n\x13GetCapacityResponse\x12\x1a\n\x12\x61vailable_capacity\x18\x01 \x01(\x03\x12\x38\n\x13maximum_volume_size\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12=\n\x13minimum_volume_size\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x03\xa0\x42\x01\"\"\n ControllerGetCapabilitiesRequest\"^\n!ControllerGetCapabilitiesResponse\x12\x39\n\x0c\x63\x61pabilities\x18\x01 \x03(\x0b\x32#.csi.v1.ControllerServiceCapability\"\x8b\x04\n\x1b\x43ontrollerServiceCapability\x12\x36\n\x03rpc\x18\x01 \x01(\x0b\x32\'.csi.v1.ControllerServiceCapability.RPCH\x00\x1a\xab\x03\n\x03RPC\x12:\n\x04type\x18\x01 \x01(\x0e\x32,.csi.v1.ControllerServiceCapability.RPC.Type\"\xe7\x02\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x18\n\x14\x43REATE_DELETE_VOLUME\x10\x01\x12\x1c\n\x18PUBLISH_UNPUBLISH_VOLUME\x10\x02\x12\x10\n\x0cLIST_VOLUMES\x10\x03\x12\x10\n\x0cGET_CAPACITY\x10\x04\x12\x1a\n\x16\x43REATE_DELETE_SNAPSHOT\x10\x05\x12\x12\n\x0eLIST_SNAPSHOTS\x10\x06\x12\x10\n\x0c\x43LONE_VOLUME\x10\x07\x12\x14\n\x10PUBLISH_READONLY\x10\x08\x12\x11\n\rEXPAND_VOLUME\x10\t\x12 \n\x1cLIST_VOLUMES_PUBLISHED_NODES\x10\n\x12\x19\n\x10VOLUME_CONDITION\x10\x0b\x1a\x03\xa0\x42\x01\x12\x13\n\nGET_VOLUME\x10\x0c\x1a\x03\xa0\x42\x01\x12!\n\x18SINGLE_NODE_MULTI_WRITER\x10\r\x1a\x03\xa0\x42\x01\x12\x16\n\rMODIFY_VOLUME\x10\x0e\x1a\x03\xa0\x42\x01\x42\x06\n\x04type\"\xa7\x02\n\x15\x43reateSnapshotRequest\x12\x18\n\x10source_volume_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 
\x01(\t\x12@\n\x07secrets\x18\x03 \x03(\x0b\x32*.csi.v1.CreateSnapshotRequest.SecretsEntryB\x03\x98\x42\x01\x12\x41\n\nparameters\x18\x04 \x03(\x0b\x32-.csi.v1.CreateSnapshotRequest.ParametersEntry\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"<\n\x16\x43reateSnapshotResponse\x12\"\n\x08snapshot\x18\x01 \x01(\x0b\x32\x10.csi.v1.Snapshot\"\xb6\x01\n\x08Snapshot\x12\x12\n\nsize_bytes\x18\x01 \x01(\x03\x12\x13\n\x0bsnapshot_id\x18\x02 \x01(\t\x12\x18\n\x10source_volume_id\x18\x03 \x01(\t\x12\x31\n\rcreation_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cready_to_use\x18\x05 \x01(\x08\x12\x1e\n\x11group_snapshot_id\x18\x06 \x01(\tB\x03\xa0\x42\x01\"\x9e\x01\n\x15\x44\x65leteSnapshotRequest\x12\x13\n\x0bsnapshot_id\x18\x01 \x01(\t\x12@\n\x07secrets\x18\x02 \x03(\x0b\x32*.csi.v1.DeleteSnapshotRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x18\n\x16\x44\x65leteSnapshotResponse\"\xe3\x01\n\x14ListSnapshotsRequest\x12\x13\n\x0bmax_entries\x18\x01 \x01(\x05\x12\x16\n\x0estarting_token\x18\x02 \x01(\t\x12\x18\n\x10source_volume_id\x18\x03 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x04 \x01(\t\x12?\n\x07secrets\x18\x05 \x03(\x0b\x32).csi.v1.ListSnapshotsRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8e\x01\n\x15ListSnapshotsResponse\x12\x34\n\x07\x65ntries\x18\x01 \x03(\x0b\x32#.csi.v1.ListSnapshotsResponse.Entry\x12\x12\n\nnext_token\x18\x02 \x01(\t\x1a+\n\x05\x45ntry\x12\"\n\x08snapshot\x18\x01 \x01(\x0b\x32\x10.csi.v1.Snapshot\"\x90\x02\n\x1d\x43ontrollerExpandVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12-\n\x0e\x63\x61pacity_range\x18\x02 
\x01(\x0b\x32\x15.csi.v1.CapacityRange\x12H\n\x07secrets\x18\x03 \x03(\x0b\x32\x32.csi.v1.ControllerExpandVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12\x33\n\x11volume_capability\x18\x04 \x01(\x0b\x32\x18.csi.v1.VolumeCapability\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"Y\n\x1e\x43ontrollerExpandVolumeResponse\x12\x16\n\x0e\x63\x61pacity_bytes\x18\x01 \x01(\x03\x12\x1f\n\x17node_expansion_required\x18\x02 \x01(\x08\"\xf5\x03\n\x16NodeStageVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12K\n\x0fpublish_context\x18\x02 \x03(\x0b\x32\x32.csi.v1.NodeStageVolumeRequest.PublishContextEntry\x12\x1b\n\x13staging_target_path\x18\x03 \x01(\t\x12\x33\n\x11volume_capability\x18\x04 \x01(\x0b\x32\x18.csi.v1.VolumeCapability\x12\x41\n\x07secrets\x18\x05 \x03(\x0b\x32+.csi.v1.NodeStageVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12I\n\x0evolume_context\x18\x06 \x03(\x0b\x32\x31.csi.v1.NodeStageVolumeRequest.VolumeContextEntry\x1a\x35\n\x13PublishContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x19\n\x17NodeStageVolumeResponse\"J\n\x18NodeUnstageVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x1b\n\x13staging_target_path\x18\x02 \x01(\t\"\x1b\n\x19NodeUnstageVolumeResponse\"\xa4\x04\n\x18NodePublishVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12M\n\x0fpublish_context\x18\x02 \x03(\x0b\x32\x34.csi.v1.NodePublishVolumeRequest.PublishContextEntry\x12\x1b\n\x13staging_target_path\x18\x03 \x01(\t\x12\x13\n\x0btarget_path\x18\x04 \x01(\t\x12\x33\n\x11volume_capability\x18\x05 \x01(\x0b\x32\x18.csi.v1.VolumeCapability\x12\x10\n\x08readonly\x18\x06 \x01(\x08\x12\x43\n\x07secrets\x18\x07 
\x03(\x0b\x32-.csi.v1.NodePublishVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12K\n\x0evolume_context\x18\x08 \x03(\x0b\x32\x33.csi.v1.NodePublishVolumeRequest.VolumeContextEntry\x1a\x35\n\x13PublishContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1b\n\x19NodePublishVolumeResponse\"D\n\x1aNodeUnpublishVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x13\n\x0btarget_path\x18\x02 \x01(\t\"\x1d\n\x1bNodeUnpublishVolumeResponse\"`\n\x19NodeGetVolumeStatsRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x13\n\x0bvolume_path\x18\x02 \x01(\t\x12\x1b\n\x13staging_target_path\x18\x03 \x01(\t\"x\n\x1aNodeGetVolumeStatsResponse\x12\"\n\x05usage\x18\x01 \x03(\x0b\x32\x13.csi.v1.VolumeUsage\x12\x36\n\x10volume_condition\x18\x02 \x01(\x0b\x32\x17.csi.v1.VolumeConditionB\x03\xa0\x42\x01\"\x91\x01\n\x0bVolumeUsage\x12\x11\n\tavailable\x18\x01 \x01(\x03\x12\r\n\x05total\x18\x02 \x01(\x03\x12\x0c\n\x04used\x18\x03 \x01(\x03\x12&\n\x04unit\x18\x04 \x01(\x0e\x32\x18.csi.v1.VolumeUsage.Unit\"*\n\x04Unit\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05\x42YTES\x10\x01\x12\n\n\x06INODES\x10\x02\"9\n\x0fVolumeCondition\x12\x10\n\x08\x61\x62normal\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t:\x03\xa0\x42\x01\"\x1c\n\x1aNodeGetCapabilitiesRequest\"R\n\x1bNodeGetCapabilitiesResponse\x12\x33\n\x0c\x63\x61pabilities\x18\x01 \x03(\x0b\x32\x1d.csi.v1.NodeServiceCapability\"\xbe\x02\n\x15NodeServiceCapability\x12\x30\n\x03rpc\x18\x01 \x01(\x0b\x32!.csi.v1.NodeServiceCapability.RPCH\x00\x1a\xea\x01\n\x03RPC\x12\x34\n\x04type\x18\x01 
\x01(\x0e\x32&.csi.v1.NodeServiceCapability.RPC.Type\"\xac\x01\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x18\n\x14STAGE_UNSTAGE_VOLUME\x10\x01\x12\x14\n\x10GET_VOLUME_STATS\x10\x02\x12\x11\n\rEXPAND_VOLUME\x10\x03\x12\x19\n\x10VOLUME_CONDITION\x10\x04\x1a\x03\xa0\x42\x01\x12!\n\x18SINGLE_NODE_MULTI_WRITER\x10\x05\x1a\x03\xa0\x42\x01\x12\x16\n\x12VOLUME_MOUNT_GROUP\x10\x06\x42\x06\n\x04type\"\x14\n\x12NodeGetInfoRequest\"s\n\x13NodeGetInfoResponse\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x1c\n\x14max_volumes_per_node\x18\x02 \x01(\x03\x12-\n\x13\x61\x63\x63\x65ssible_topology\x18\x03 \x01(\x0b\x32\x10.csi.v1.Topology\"\xb9\x02\n\x17NodeExpandVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x13\n\x0bvolume_path\x18\x02 \x01(\t\x12-\n\x0e\x63\x61pacity_range\x18\x03 \x01(\x0b\x32\x15.csi.v1.CapacityRange\x12\x1b\n\x13staging_target_path\x18\x04 \x01(\t\x12\x33\n\x11volume_capability\x18\x05 \x01(\x0b\x32\x18.csi.v1.VolumeCapability\x12\x45\n\x07secrets\x18\x06 \x03(\x0b\x32,.csi.v1.NodeExpandVolumeRequest.SecretsEntryB\x06\x98\x42\x01\xa0\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"2\n\x18NodeExpandVolumeResponse\x12\x16\n\x0e\x63\x61pacity_bytes\x18\x01 \x01(\x03\",\n%GroupControllerGetCapabilitiesRequest:\x03\xa0\x42\x01\"m\n&GroupControllerGetCapabilitiesResponse\x12>\n\x0c\x63\x61pabilities\x18\x01 \x03(\x0b\x32(.csi.v1.GroupControllerServiceCapability:\x03\xa0\x42\x01\"\xfc\x01\n GroupControllerServiceCapability\x12;\n\x03rpc\x18\x01 \x01(\x0b\x32,.csi.v1.GroupControllerServiceCapability.RPCH\x00\x1a\x8d\x01\n\x03RPC\x12?\n\x04type\x18\x01 \x01(\x0e\x32\x31.csi.v1.GroupControllerServiceCapability.RPC.Type\"E\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x30\n\'CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT\x10\x01\x1a\x03\xa0\x42\x01:\x03\xa0\x42\x01\x42\x06\n\x04type\"\xce\x02\n CreateVolumeGroupSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11source_volume_ids\x18\x02 
\x03(\t\x12K\n\x07secrets\x18\x03 \x03(\x0b\x32\x35.csi.v1.CreateVolumeGroupSnapshotRequest.SecretsEntryB\x03\x98\x42\x01\x12L\n\nparameters\x18\x04 \x03(\x0b\x32\x38.csi.v1.CreateVolumeGroupSnapshotRequest.ParametersEntry\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01:\x03\xa0\x42\x01\"]\n!CreateVolumeGroupSnapshotResponse\x12\x33\n\x0egroup_snapshot\x18\x01 \x01(\x0b\x32\x1b.csi.v1.VolumeGroupSnapshot:\x03\xa0\x42\x01\"\xa3\x01\n\x13VolumeGroupSnapshot\x12\x19\n\x11group_snapshot_id\x18\x01 \x01(\t\x12#\n\tsnapshots\x18\x02 \x03(\x0b\x32\x10.csi.v1.Snapshot\x12\x31\n\rcreation_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cready_to_use\x18\x04 \x01(\x08:\x03\xa0\x42\x01\"\xd5\x01\n DeleteVolumeGroupSnapshotRequest\x12\x19\n\x11group_snapshot_id\x18\x01 \x01(\t\x12\x14\n\x0csnapshot_ids\x18\x02 \x03(\t\x12K\n\x07secrets\x18\x03 \x03(\x0b\x32\x35.csi.v1.DeleteVolumeGroupSnapshotRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01:\x03\xa0\x42\x01\"(\n!DeleteVolumeGroupSnapshotResponse:\x03\xa0\x42\x01\"\xcf\x01\n\x1dGetVolumeGroupSnapshotRequest\x12\x19\n\x11group_snapshot_id\x18\x01 \x01(\t\x12\x14\n\x0csnapshot_ids\x18\x02 \x03(\t\x12H\n\x07secrets\x18\x03 \x03(\x0b\x32\x32.csi.v1.GetVolumeGroupSnapshotRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01:\x03\xa0\x42\x01\"Z\n\x1eGetVolumeGroupSnapshotResponse\x12\x33\n\x0egroup_snapshot\x18\x01 
\x01(\x0b\x32\x1b.csi.v1.VolumeGroupSnapshot:\x03\xa0\x42\x01\x32\xfa\x01\n\x08Identity\x12N\n\rGetPluginInfo\x12\x1c.csi.v1.GetPluginInfoRequest\x1a\x1d.csi.v1.GetPluginInfoResponse\"\x00\x12\x66\n\x15GetPluginCapabilities\x12$.csi.v1.GetPluginCapabilitiesRequest\x1a%.csi.v1.GetPluginCapabilitiesResponse\"\x00\x12\x36\n\x05Probe\x12\x14.csi.v1.ProbeRequest\x1a\x15.csi.v1.ProbeResponse\"\x00\x32\xbb\n\n\nController\x12K\n\x0c\x43reateVolume\x12\x1b.csi.v1.CreateVolumeRequest\x1a\x1c.csi.v1.CreateVolumeResponse\"\x00\x12K\n\x0c\x44\x65leteVolume\x12\x1b.csi.v1.DeleteVolumeRequest\x1a\x1c.csi.v1.DeleteVolumeResponse\"\x00\x12l\n\x17\x43ontrollerPublishVolume\x12&.csi.v1.ControllerPublishVolumeRequest\x1a\'.csi.v1.ControllerPublishVolumeResponse\"\x00\x12r\n\x19\x43ontrollerUnpublishVolume\x12(.csi.v1.ControllerUnpublishVolumeRequest\x1a).csi.v1.ControllerUnpublishVolumeResponse\"\x00\x12u\n\x1aValidateVolumeCapabilities\x12).csi.v1.ValidateVolumeCapabilitiesRequest\x1a*.csi.v1.ValidateVolumeCapabilitiesResponse\"\x00\x12H\n\x0bListVolumes\x12\x1a.csi.v1.ListVolumesRequest\x1a\x1b.csi.v1.ListVolumesResponse\"\x00\x12H\n\x0bGetCapacity\x12\x1a.csi.v1.GetCapacityRequest\x1a\x1b.csi.v1.GetCapacityResponse\"\x00\x12r\n\x19\x43ontrollerGetCapabilities\x12(.csi.v1.ControllerGetCapabilitiesRequest\x1a).csi.v1.ControllerGetCapabilitiesResponse\"\x00\x12Q\n\x0e\x43reateSnapshot\x12\x1d.csi.v1.CreateSnapshotRequest\x1a\x1e.csi.v1.CreateSnapshotResponse\"\x00\x12Q\n\x0e\x44\x65leteSnapshot\x12\x1d.csi.v1.DeleteSnapshotRequest\x1a\x1e.csi.v1.DeleteSnapshotResponse\"\x00\x12N\n\rListSnapshots\x12\x1c.csi.v1.ListSnapshotsRequest\x1a\x1d.csi.v1.ListSnapshotsResponse\"\x00\x12i\n\x16\x43ontrollerExpandVolume\x12%.csi.v1.ControllerExpandVolumeRequest\x1a&.csi.v1.ControllerExpandVolumeResponse\"\x00\x12\x63\n\x13\x43ontrollerGetVolume\x12\".csi.v1.ControllerGetVolumeRequest\x1a#.csi.v1.ControllerGetVolumeResponse\"\x03\xa0\x42\x01\x12l\n\x16\x43ontrollerModifyVolume\x12%.csi.v1.Controll
erModifyVolumeRequest\x1a&.csi.v1.ControllerModifyVolumeResponse\"\x03\xa0\x42\x01\x32\xf6\x03\n\x0fGroupController\x12\x81\x01\n\x1eGroupControllerGetCapabilities\x12-.csi.v1.GroupControllerGetCapabilitiesRequest\x1a..csi.v1.GroupControllerGetCapabilitiesResponse\"\x00\x12u\n\x19\x43reateVolumeGroupSnapshot\x12(.csi.v1.CreateVolumeGroupSnapshotRequest\x1a).csi.v1.CreateVolumeGroupSnapshotResponse\"\x03\xa0\x42\x01\x12u\n\x19\x44\x65leteVolumeGroupSnapshot\x12(.csi.v1.DeleteVolumeGroupSnapshotRequest\x1a).csi.v1.DeleteVolumeGroupSnapshotResponse\"\x03\xa0\x42\x01\x12l\n\x16GetVolumeGroupSnapshot\x12%.csi.v1.GetVolumeGroupSnapshotRequest\x1a&.csi.v1.GetVolumeGroupSnapshotResponse\"\x03\xa0\x42\x01\x1a\x03\xa0\x42\x01\x32\xda\x05\n\x04Node\x12T\n\x0fNodeStageVolume\x12\x1e.csi.v1.NodeStageVolumeRequest\x1a\x1f.csi.v1.NodeStageVolumeResponse\"\x00\x12Z\n\x11NodeUnstageVolume\x12 .csi.v1.NodeUnstageVolumeRequest\x1a!.csi.v1.NodeUnstageVolumeResponse\"\x00\x12Z\n\x11NodePublishVolume\x12 .csi.v1.NodePublishVolumeRequest\x1a!.csi.v1.NodePublishVolumeResponse\"\x00\x12`\n\x13NodeUnpublishVolume\x12\".csi.v1.NodeUnpublishVolumeRequest\x1a#.csi.v1.NodeUnpublishVolumeResponse\"\x00\x12]\n\x12NodeGetVolumeStats\x12!.csi.v1.NodeGetVolumeStatsRequest\x1a\".csi.v1.NodeGetVolumeStatsResponse\"\x00\x12W\n\x10NodeExpandVolume\x12\x1f.csi.v1.NodeExpandVolumeRequest\x1a .csi.v1.NodeExpandVolumeResponse\"\x00\x12`\n\x13NodeGetCapabilities\x12\".csi.v1.NodeGetCapabilitiesRequest\x1a#.csi.v1.NodeGetCapabilitiesResponse\"\x00\x12H\n\x0bNodeGetInfo\x12\x1a.csi.v1.NodeGetInfoRequest\x1a\x1b.csi.v1.NodeGetInfoResponse\"\x00:1\n\nalpha_enum\x12\x1c.google.protobuf.EnumOptions\x18\xa4\x08 \x01(\x08:<\n\x10\x61lpha_enum_value\x12!.google.protobuf.EnumValueOptions\x18\xa4\x08 \x01(\x08:2\n\ncsi_secret\x12\x1d.google.protobuf.FieldOptions\x18\xa3\x08 \x01(\x08:3\n\x0b\x61lpha_field\x12\x1d.google.protobuf.FieldOptions\x18\xa4\x08 
\x01(\x08:7\n\ralpha_message\x12\x1f.google.protobuf.MessageOptions\x18\xa4\x08 \x01(\x08:5\n\x0c\x61lpha_method\x12\x1e.google.protobuf.MethodOptions\x18\xa4\x08 \x01(\x08:7\n\ralpha_service\x12\x1f.google.protobuf.ServiceOptions\x18\xa4\x08 \x01(\x08\x42\x05Z\x03\x63sib\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'csi_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + google_dot_protobuf_dot_descriptor__pb2.EnumOptions.RegisterExtension(alpha_enum) + google_dot_protobuf_dot_descriptor__pb2.EnumValueOptions.RegisterExtension(alpha_enum_value) + google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(csi_secret) + google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(alpha_field) + google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(alpha_message) + google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(alpha_method) + google_dot_protobuf_dot_descriptor__pb2.ServiceOptions.RegisterExtension(alpha_service) + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'Z\003csi' + _GETPLUGININFORESPONSE_MANIFESTENTRY._options = None + _GETPLUGININFORESPONSE_MANIFESTENTRY._serialized_options = b'8\001' + _PLUGINCAPABILITY_SERVICE_TYPE.values_by_name["GROUP_CONTROLLER_SERVICE"]._options = None + _PLUGINCAPABILITY_SERVICE_TYPE.values_by_name["GROUP_CONTROLLER_SERVICE"]._serialized_options = b'\240B\001' + _CREATEVOLUMEREQUEST_PARAMETERSENTRY._options = None + _CREATEVOLUMEREQUEST_PARAMETERSENTRY._serialized_options = b'8\001' + _CREATEVOLUMEREQUEST_SECRETSENTRY._options = None + _CREATEVOLUMEREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _CREATEVOLUMEREQUEST_MUTABLEPARAMETERSENTRY._options = None + _CREATEVOLUMEREQUEST_MUTABLEPARAMETERSENTRY._serialized_options = b'8\001' + _CREATEVOLUMEREQUEST.fields_by_name['secrets']._options = None + 
_CREATEVOLUMEREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _CREATEVOLUMEREQUEST.fields_by_name['mutable_parameters']._options = None + _CREATEVOLUMEREQUEST.fields_by_name['mutable_parameters']._serialized_options = b'\240B\001' + _VOLUMECAPABILITY_ACCESSMODE_MODE.values_by_name["SINGLE_NODE_SINGLE_WRITER"]._options = None + _VOLUMECAPABILITY_ACCESSMODE_MODE.values_by_name["SINGLE_NODE_SINGLE_WRITER"]._serialized_options = b'\240B\001' + _VOLUMECAPABILITY_ACCESSMODE_MODE.values_by_name["SINGLE_NODE_MULTI_WRITER"]._options = None + _VOLUMECAPABILITY_ACCESSMODE_MODE.values_by_name["SINGLE_NODE_MULTI_WRITER"]._serialized_options = b'\240B\001' + _VOLUME_VOLUMECONTEXTENTRY._options = None + _VOLUME_VOLUMECONTEXTENTRY._serialized_options = b'8\001' + _TOPOLOGY_SEGMENTSENTRY._options = None + _TOPOLOGY_SEGMENTSENTRY._serialized_options = b'8\001' + _DELETEVOLUMEREQUEST_SECRETSENTRY._options = None + _DELETEVOLUMEREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _DELETEVOLUMEREQUEST.fields_by_name['secrets']._options = None + _DELETEVOLUMEREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY._options = None + _CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._options = None + _CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._serialized_options = b'8\001' + _CONTROLLERPUBLISHVOLUMEREQUEST.fields_by_name['secrets']._options = None + _CONTROLLERPUBLISHVOLUMEREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY._options = None + _CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY._serialized_options = b'8\001' + _CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY._options = None + _CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _CONTROLLERUNPUBLISHVOLUMEREQUEST.fields_by_name['secrets']._options = None + 
_CONTROLLERUNPUBLISHVOLUMEREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY._options = None + _VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY._serialized_options = b'8\001' + _VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY._options = None + _VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY._serialized_options = b'8\001' + _VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY._options = None + _VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _VALIDATEVOLUMECAPABILITIESREQUEST_MUTABLEPARAMETERSENTRY._options = None + _VALIDATEVOLUMECAPABILITIESREQUEST_MUTABLEPARAMETERSENTRY._serialized_options = b'8\001' + _VALIDATEVOLUMECAPABILITIESREQUEST.fields_by_name['secrets']._options = None + _VALIDATEVOLUMECAPABILITIESREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _VALIDATEVOLUMECAPABILITIESREQUEST.fields_by_name['mutable_parameters']._options = None + _VALIDATEVOLUMECAPABILITIESREQUEST.fields_by_name['mutable_parameters']._serialized_options = b'\240B\001' + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY._options = None + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY._serialized_options = b'8\001' + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY._options = None + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY._serialized_options = b'8\001' + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_MUTABLEPARAMETERSENTRY._options = None + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_MUTABLEPARAMETERSENTRY._serialized_options = b'8\001' + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED.fields_by_name['mutable_parameters']._options = None + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED.fields_by_name['mutable_parameters']._serialized_options = b'\240B\001' + _LISTVOLUMESRESPONSE_VOLUMESTATUS.fields_by_name['volume_condition']._options = None + 
_LISTVOLUMESRESPONSE_VOLUMESTATUS.fields_by_name['volume_condition']._serialized_options = b'\240B\001' + _CONTROLLERGETVOLUMEREQUEST._options = None + _CONTROLLERGETVOLUMEREQUEST._serialized_options = b'\240B\001' + _CONTROLLERGETVOLUMERESPONSE._options = None + _CONTROLLERGETVOLUMERESPONSE._serialized_options = b'\240B\001' + _CONTROLLERMODIFYVOLUMEREQUEST_SECRETSENTRY._options = None + _CONTROLLERMODIFYVOLUMEREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _CONTROLLERMODIFYVOLUMEREQUEST_MUTABLEPARAMETERSENTRY._options = None + _CONTROLLERMODIFYVOLUMEREQUEST_MUTABLEPARAMETERSENTRY._serialized_options = b'8\001' + _CONTROLLERMODIFYVOLUMEREQUEST.fields_by_name['secrets']._options = None + _CONTROLLERMODIFYVOLUMEREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _CONTROLLERMODIFYVOLUMEREQUEST._options = None + _CONTROLLERMODIFYVOLUMEREQUEST._serialized_options = b'\240B\001' + _CONTROLLERMODIFYVOLUMERESPONSE._options = None + _CONTROLLERMODIFYVOLUMERESPONSE._serialized_options = b'\240B\001' + _GETCAPACITYREQUEST_PARAMETERSENTRY._options = None + _GETCAPACITYREQUEST_PARAMETERSENTRY._serialized_options = b'8\001' + _GETCAPACITYRESPONSE.fields_by_name['minimum_volume_size']._options = None + _GETCAPACITYRESPONSE.fields_by_name['minimum_volume_size']._serialized_options = b'\240B\001' + _CONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["VOLUME_CONDITION"]._options = None + _CONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["VOLUME_CONDITION"]._serialized_options = b'\240B\001' + _CONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["GET_VOLUME"]._options = None + _CONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["GET_VOLUME"]._serialized_options = b'\240B\001' + _CONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["SINGLE_NODE_MULTI_WRITER"]._options = None + _CONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["SINGLE_NODE_MULTI_WRITER"]._serialized_options = b'\240B\001' + 
_CONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["MODIFY_VOLUME"]._options = None + _CONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["MODIFY_VOLUME"]._serialized_options = b'\240B\001' + _CREATESNAPSHOTREQUEST_SECRETSENTRY._options = None + _CREATESNAPSHOTREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _CREATESNAPSHOTREQUEST_PARAMETERSENTRY._options = None + _CREATESNAPSHOTREQUEST_PARAMETERSENTRY._serialized_options = b'8\001' + _CREATESNAPSHOTREQUEST.fields_by_name['secrets']._options = None + _CREATESNAPSHOTREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _SNAPSHOT.fields_by_name['group_snapshot_id']._options = None + _SNAPSHOT.fields_by_name['group_snapshot_id']._serialized_options = b'\240B\001' + _DELETESNAPSHOTREQUEST_SECRETSENTRY._options = None + _DELETESNAPSHOTREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _DELETESNAPSHOTREQUEST.fields_by_name['secrets']._options = None + _DELETESNAPSHOTREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _LISTSNAPSHOTSREQUEST_SECRETSENTRY._options = None + _LISTSNAPSHOTSREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _LISTSNAPSHOTSREQUEST.fields_by_name['secrets']._options = None + _LISTSNAPSHOTSREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY._options = None + _CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _CONTROLLEREXPANDVOLUMEREQUEST.fields_by_name['secrets']._options = None + _CONTROLLEREXPANDVOLUMEREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY._options = None + _NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY._serialized_options = b'8\001' + _NODESTAGEVOLUMEREQUEST_SECRETSENTRY._options = None + _NODESTAGEVOLUMEREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY._options = None + _NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY._serialized_options = 
b'8\001' + _NODESTAGEVOLUMEREQUEST.fields_by_name['secrets']._options = None + _NODESTAGEVOLUMEREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY._options = None + _NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY._serialized_options = b'8\001' + _NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY._options = None + _NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._options = None + _NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._serialized_options = b'8\001' + _NODEPUBLISHVOLUMEREQUEST.fields_by_name['secrets']._options = None + _NODEPUBLISHVOLUMEREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _NODEGETVOLUMESTATSRESPONSE.fields_by_name['volume_condition']._options = None + _NODEGETVOLUMESTATSRESPONSE.fields_by_name['volume_condition']._serialized_options = b'\240B\001' + _VOLUMECONDITION._options = None + _VOLUMECONDITION._serialized_options = b'\240B\001' + _NODESERVICECAPABILITY_RPC_TYPE.values_by_name["VOLUME_CONDITION"]._options = None + _NODESERVICECAPABILITY_RPC_TYPE.values_by_name["VOLUME_CONDITION"]._serialized_options = b'\240B\001' + _NODESERVICECAPABILITY_RPC_TYPE.values_by_name["SINGLE_NODE_MULTI_WRITER"]._options = None + _NODESERVICECAPABILITY_RPC_TYPE.values_by_name["SINGLE_NODE_MULTI_WRITER"]._serialized_options = b'\240B\001' + _NODEEXPANDVOLUMEREQUEST_SECRETSENTRY._options = None + _NODEEXPANDVOLUMEREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _NODEEXPANDVOLUMEREQUEST.fields_by_name['secrets']._options = None + _NODEEXPANDVOLUMEREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001\240B\001' + _GROUPCONTROLLERGETCAPABILITIESREQUEST._options = None + _GROUPCONTROLLERGETCAPABILITIESREQUEST._serialized_options = b'\240B\001' + _GROUPCONTROLLERGETCAPABILITIESRESPONSE._options = None + _GROUPCONTROLLERGETCAPABILITIESRESPONSE._serialized_options = b'\240B\001' + 
_GROUPCONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT"]._options = None + _GROUPCONTROLLERSERVICECAPABILITY_RPC_TYPE.values_by_name["CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT"]._serialized_options = b'\240B\001' + _GROUPCONTROLLERSERVICECAPABILITY._options = None + _GROUPCONTROLLERSERVICECAPABILITY._serialized_options = b'\240B\001' + _CREATEVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._options = None + _CREATEVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _CREATEVOLUMEGROUPSNAPSHOTREQUEST_PARAMETERSENTRY._options = None + _CREATEVOLUMEGROUPSNAPSHOTREQUEST_PARAMETERSENTRY._serialized_options = b'8\001' + _CREATEVOLUMEGROUPSNAPSHOTREQUEST.fields_by_name['secrets']._options = None + _CREATEVOLUMEGROUPSNAPSHOTREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _CREATEVOLUMEGROUPSNAPSHOTREQUEST._options = None + _CREATEVOLUMEGROUPSNAPSHOTREQUEST._serialized_options = b'\240B\001' + _CREATEVOLUMEGROUPSNAPSHOTRESPONSE._options = None + _CREATEVOLUMEGROUPSNAPSHOTRESPONSE._serialized_options = b'\240B\001' + _VOLUMEGROUPSNAPSHOT._options = None + _VOLUMEGROUPSNAPSHOT._serialized_options = b'\240B\001' + _DELETEVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._options = None + _DELETEVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _DELETEVOLUMEGROUPSNAPSHOTREQUEST.fields_by_name['secrets']._options = None + _DELETEVOLUMEGROUPSNAPSHOTREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _DELETEVOLUMEGROUPSNAPSHOTREQUEST._options = None + _DELETEVOLUMEGROUPSNAPSHOTREQUEST._serialized_options = b'\240B\001' + _DELETEVOLUMEGROUPSNAPSHOTRESPONSE._options = None + _DELETEVOLUMEGROUPSNAPSHOTRESPONSE._serialized_options = b'\240B\001' + _GETVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._options = None + _GETVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._serialized_options = b'8\001' + _GETVOLUMEGROUPSNAPSHOTREQUEST.fields_by_name['secrets']._options = None + 
_GETVOLUMEGROUPSNAPSHOTREQUEST.fields_by_name['secrets']._serialized_options = b'\230B\001' + _GETVOLUMEGROUPSNAPSHOTREQUEST._options = None + _GETVOLUMEGROUPSNAPSHOTREQUEST._serialized_options = b'\240B\001' + _GETVOLUMEGROUPSNAPSHOTRESPONSE._options = None + _GETVOLUMEGROUPSNAPSHOTRESPONSE._serialized_options = b'\240B\001' + _CONTROLLER.methods_by_name['ControllerGetVolume']._options = None + _CONTROLLER.methods_by_name['ControllerGetVolume']._serialized_options = b'\240B\001' + _CONTROLLER.methods_by_name['ControllerModifyVolume']._options = None + _CONTROLLER.methods_by_name['ControllerModifyVolume']._serialized_options = b'\240B\001' + _GROUPCONTROLLER._options = None + _GROUPCONTROLLER._serialized_options = b'\240B\001' + _GROUPCONTROLLER.methods_by_name['CreateVolumeGroupSnapshot']._options = None + _GROUPCONTROLLER.methods_by_name['CreateVolumeGroupSnapshot']._serialized_options = b'\240B\001' + _GROUPCONTROLLER.methods_by_name['DeleteVolumeGroupSnapshot']._options = None + _GROUPCONTROLLER.methods_by_name['DeleteVolumeGroupSnapshot']._serialized_options = b'\240B\001' + _GROUPCONTROLLER.methods_by_name['GetVolumeGroupSnapshot']._options = None + _GROUPCONTROLLER.methods_by_name['GetVolumeGroupSnapshot']._serialized_options = b'\240B\001' + _GETPLUGININFOREQUEST._serialized_start=120 + _GETPLUGININFOREQUEST._serialized_end=142 + _GETPLUGININFORESPONSE._serialized_start=145 + _GETPLUGININFORESPONSE._serialized_end=318 + _GETPLUGININFORESPONSE_MANIFESTENTRY._serialized_start=271 + _GETPLUGININFORESPONSE_MANIFESTENTRY._serialized_end=318 + _GETPLUGINCAPABILITIESREQUEST._serialized_start=320 + _GETPLUGINCAPABILITIESREQUEST._serialized_end=350 + _GETPLUGINCAPABILITIESRESPONSE._serialized_start=352 + _GETPLUGINCAPABILITIESRESPONSE._serialized_end=431 + _PLUGINCAPABILITY._serialized_start=434 + _PLUGINCAPABILITY._serialized_end=892 + _PLUGINCAPABILITY_SERVICE._serialized_start=578 + _PLUGINCAPABILITY_SERVICE._serialized_end=758 + 
_PLUGINCAPABILITY_SERVICE_TYPE._serialized_start=642 + _PLUGINCAPABILITY_SERVICE_TYPE._serialized_end=758 + _PLUGINCAPABILITY_VOLUMEEXPANSION._serialized_start=760 + _PLUGINCAPABILITY_VOLUMEEXPANSION._serialized_end=884 + _PLUGINCAPABILITY_VOLUMEEXPANSION_TYPE._serialized_start=840 + _PLUGINCAPABILITY_VOLUMEEXPANSION_TYPE._serialized_end=884 + _PROBEREQUEST._serialized_start=894 + _PROBEREQUEST._serialized_end=908 + _PROBERESPONSE._serialized_start=910 + _PROBERESPONSE._serialized_end=968 + _CREATEVOLUMEREQUEST._serialized_start=971 + _CREATEVOLUMEREQUEST._serialized_end=1604 + _CREATEVOLUMEREQUEST_PARAMETERSENTRY._serialized_start=1449 + _CREATEVOLUMEREQUEST_PARAMETERSENTRY._serialized_end=1498 + _CREATEVOLUMEREQUEST_SECRETSENTRY._serialized_start=1500 + _CREATEVOLUMEREQUEST_SECRETSENTRY._serialized_end=1546 + _CREATEVOLUMEREQUEST_MUTABLEPARAMETERSENTRY._serialized_start=1548 + _CREATEVOLUMEREQUEST_MUTABLEPARAMETERSENTRY._serialized_end=1604 + _VOLUMECONTENTSOURCE._serialized_start=1607 + _VOLUMECONTENTSOURCE._serialized_end=1834 + _VOLUMECONTENTSOURCE_SNAPSHOTSOURCE._serialized_start=1754 + _VOLUMECONTENTSOURCE_SNAPSHOTSOURCE._serialized_end=1791 + _VOLUMECONTENTSOURCE_VOLUMESOURCE._serialized_start=1793 + _VOLUMECONTENTSOURCE_VOLUMESOURCE._serialized_end=1826 + _CREATEVOLUMERESPONSE._serialized_start=1836 + _CREATEVOLUMERESPONSE._serialized_end=1890 + _VOLUMECAPABILITY._serialized_start=1893 + _VOLUMECAPABILITY._serialized_end=2494 + _VOLUMECAPABILITY_BLOCKVOLUME._serialized_start=2081 + _VOLUMECAPABILITY_BLOCKVOLUME._serialized_end=2094 + _VOLUMECAPABILITY_MOUNTVOLUME._serialized_start=2096 + _VOLUMECAPABILITY_MOUNTVOLUME._serialized_end=2175 + _VOLUMECAPABILITY_ACCESSMODE._serialized_start=2178 + _VOLUMECAPABILITY_ACCESSMODE._serialized_end=2479 + _VOLUMECAPABILITY_ACCESSMODE_MODE._serialized_start=2249 + _VOLUMECAPABILITY_ACCESSMODE_MODE._serialized_end=2479 + _CAPACITYRANGE._serialized_start=2496 + _CAPACITYRANGE._serialized_end=2556 + 
_VOLUME._serialized_start=2559 + _VOLUME._serialized_end=2823 + _VOLUME_VOLUMECONTEXTENTRY._serialized_start=2771 + _VOLUME_VOLUMECONTEXTENTRY._serialized_end=2823 + _TOPOLOGYREQUIREMENT._serialized_start=2825 + _TOPOLOGYREQUIREMENT._serialized_end=2920 + _TOPOLOGY._serialized_start=2922 + _TOPOLOGY._serialized_end=3031 + _TOPOLOGY_SEGMENTSENTRY._serialized_start=2984 + _TOPOLOGY_SEGMENTSENTRY._serialized_end=3031 + _DELETEVOLUMEREQUEST._serialized_start=3034 + _DELETEVOLUMEREQUEST._serialized_end=3186 + _DELETEVOLUMEREQUEST_SECRETSENTRY._serialized_start=1500 + _DELETEVOLUMEREQUEST_SECRETSENTRY._serialized_end=1546 + _DELETEVOLUMERESPONSE._serialized_start=3188 + _DELETEVOLUMERESPONSE._serialized_end=3210 + _CONTROLLERPUBLISHVOLUMEREQUEST._serialized_start=3213 + _CONTROLLERPUBLISHVOLUMEREQUEST._serialized_end=3612 + _CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY._serialized_start=1500 + _CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY._serialized_end=1546 + _CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._serialized_start=2771 + _CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._serialized_end=2823 + _CONTROLLERPUBLISHVOLUMERESPONSE._serialized_start=3615 + _CONTROLLERPUBLISHVOLUMERESPONSE._serialized_end=3789 + _CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY._serialized_start=3736 + _CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY._serialized_end=3789 + _CONTROLLERUNPUBLISHVOLUMEREQUEST._serialized_start=3792 + _CONTROLLERUNPUBLISHVOLUMEREQUEST._serialized_end=3987 + _CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY._serialized_start=1500 + _CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY._serialized_end=1546 + _CONTROLLERUNPUBLISHVOLUMERESPONSE._serialized_start=3989 + _CONTROLLERUNPUBLISHVOLUMERESPONSE._serialized_end=4024 + _VALIDATEVOLUMECAPABILITIESREQUEST._serialized_start=4027 + _VALIDATEVOLUMECAPABILITIESREQUEST._serialized_end=4689 + _VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY._serialized_start=2771 + 
_VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY._serialized_end=2823 + _VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY._serialized_start=1449 + _VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY._serialized_end=1498 + _VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY._serialized_start=1500 + _VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY._serialized_end=1546 + _VALIDATEVOLUMECAPABILITIESREQUEST_MUTABLEPARAMETERSENTRY._serialized_start=1548 + _VALIDATEVOLUMECAPABILITIESREQUEST_MUTABLEPARAMETERSENTRY._serialized_end=1604 + _VALIDATEVOLUMECAPABILITIESRESPONSE._serialized_start=4692 + _VALIDATEVOLUMECAPABILITIESRESPONSE._serialized_end=5347 + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED._serialized_start=4821 + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED._serialized_end=5347 + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY._serialized_start=2771 + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY._serialized_end=2823 + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY._serialized_start=1449 + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY._serialized_end=1498 + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_MUTABLEPARAMETERSENTRY._serialized_start=1548 + _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_MUTABLEPARAMETERSENTRY._serialized_end=1604 + _LISTVOLUMESREQUEST._serialized_start=5349 + _LISTVOLUMESREQUEST._serialized_end=5414 + _LISTVOLUMESRESPONSE._serialized_start=5417 + _LISTVOLUMESRESPONSE._serialized_end=5709 + _LISTVOLUMESRESPONSE_VOLUMESTATUS._serialized_start=5512 + _LISTVOLUMESRESPONSE_VOLUMESTATUS._serialized_end=5610 + _LISTVOLUMESRESPONSE_ENTRY._serialized_start=5612 + _LISTVOLUMESRESPONSE_ENTRY._serialized_end=5709 + _CONTROLLERGETVOLUMEREQUEST._serialized_start=5711 + _CONTROLLERGETVOLUMEREQUEST._serialized_end=5763 + _CONTROLLERGETVOLUMERESPONSE._serialized_start=5766 + _CONTROLLERGETVOLUMERESPONSE._serialized_end=5993 + 
_CONTROLLERGETVOLUMERESPONSE_VOLUMESTATUS._serialized_start=5895 + _CONTROLLERGETVOLUMERESPONSE_VOLUMESTATUS._serialized_end=5988 + _CONTROLLERMODIFYVOLUMEREQUEST._serialized_start=5996 + _CONTROLLERMODIFYVOLUMEREQUEST._serialized_end=6321 + _CONTROLLERMODIFYVOLUMEREQUEST_SECRETSENTRY._serialized_start=1500 + _CONTROLLERMODIFYVOLUMEREQUEST_SECRETSENTRY._serialized_end=1546 + _CONTROLLERMODIFYVOLUMEREQUEST_MUTABLEPARAMETERSENTRY._serialized_start=1548 + _CONTROLLERMODIFYVOLUMEREQUEST_MUTABLEPARAMETERSENTRY._serialized_end=1604 + _CONTROLLERMODIFYVOLUMERESPONSE._serialized_start=6323 + _CONTROLLERMODIFYVOLUMERESPONSE._serialized_end=6360 + _GETCAPACITYREQUEST._serialized_start=6363 + _GETCAPACITYREQUEST._serialized_end=6600 + _GETCAPACITYREQUEST_PARAMETERSENTRY._serialized_start=1449 + _GETCAPACITYREQUEST_PARAMETERSENTRY._serialized_end=1498 + _GETCAPACITYRESPONSE._serialized_start=6603 + _GETCAPACITYRESPONSE._serialized_end=6773 + _CONTROLLERGETCAPABILITIESREQUEST._serialized_start=6775 + _CONTROLLERGETCAPABILITIESREQUEST._serialized_end=6809 + _CONTROLLERGETCAPABILITIESRESPONSE._serialized_start=6811 + _CONTROLLERGETCAPABILITIESRESPONSE._serialized_end=6905 + _CONTROLLERSERVICECAPABILITY._serialized_start=6908 + _CONTROLLERSERVICECAPABILITY._serialized_end=7431 + _CONTROLLERSERVICECAPABILITY_RPC._serialized_start=6996 + _CONTROLLERSERVICECAPABILITY_RPC._serialized_end=7423 + _CONTROLLERSERVICECAPABILITY_RPC_TYPE._serialized_start=7064 + _CONTROLLERSERVICECAPABILITY_RPC_TYPE._serialized_end=7423 + _CREATESNAPSHOTREQUEST._serialized_start=7434 + _CREATESNAPSHOTREQUEST._serialized_end=7729 + _CREATESNAPSHOTREQUEST_SECRETSENTRY._serialized_start=1500 + _CREATESNAPSHOTREQUEST_SECRETSENTRY._serialized_end=1546 + _CREATESNAPSHOTREQUEST_PARAMETERSENTRY._serialized_start=1449 + _CREATESNAPSHOTREQUEST_PARAMETERSENTRY._serialized_end=1498 + _CREATESNAPSHOTRESPONSE._serialized_start=7731 + _CREATESNAPSHOTRESPONSE._serialized_end=7791 + _SNAPSHOT._serialized_start=7794 + 
_SNAPSHOT._serialized_end=7976 + _DELETESNAPSHOTREQUEST._serialized_start=7979 + _DELETESNAPSHOTREQUEST._serialized_end=8137 + _DELETESNAPSHOTREQUEST_SECRETSENTRY._serialized_start=1500 + _DELETESNAPSHOTREQUEST_SECRETSENTRY._serialized_end=1546 + _DELETESNAPSHOTRESPONSE._serialized_start=8139 + _DELETESNAPSHOTRESPONSE._serialized_end=8163 + _LISTSNAPSHOTSREQUEST._serialized_start=8166 + _LISTSNAPSHOTSREQUEST._serialized_end=8393 + _LISTSNAPSHOTSREQUEST_SECRETSENTRY._serialized_start=1500 + _LISTSNAPSHOTSREQUEST_SECRETSENTRY._serialized_end=1546 + _LISTSNAPSHOTSRESPONSE._serialized_start=8396 + _LISTSNAPSHOTSRESPONSE._serialized_end=8538 + _LISTSNAPSHOTSRESPONSE_ENTRY._serialized_start=8495 + _LISTSNAPSHOTSRESPONSE_ENTRY._serialized_end=8538 + _CONTROLLEREXPANDVOLUMEREQUEST._serialized_start=8541 + _CONTROLLEREXPANDVOLUMEREQUEST._serialized_end=8813 + _CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY._serialized_start=1500 + _CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY._serialized_end=1546 + _CONTROLLEREXPANDVOLUMERESPONSE._serialized_start=8815 + _CONTROLLEREXPANDVOLUMERESPONSE._serialized_end=8904 + _NODESTAGEVOLUMEREQUEST._serialized_start=8907 + _NODESTAGEVOLUMEREQUEST._serialized_end=9408 + _NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY._serialized_start=3736 + _NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY._serialized_end=3789 + _NODESTAGEVOLUMEREQUEST_SECRETSENTRY._serialized_start=1500 + _NODESTAGEVOLUMEREQUEST_SECRETSENTRY._serialized_end=1546 + _NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY._serialized_start=2771 + _NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY._serialized_end=2823 + _NODESTAGEVOLUMERESPONSE._serialized_start=9410 + _NODESTAGEVOLUMERESPONSE._serialized_end=9435 + _NODEUNSTAGEVOLUMEREQUEST._serialized_start=9437 + _NODEUNSTAGEVOLUMEREQUEST._serialized_end=9511 + _NODEUNSTAGEVOLUMERESPONSE._serialized_start=9513 + _NODEUNSTAGEVOLUMERESPONSE._serialized_end=9540 + _NODEPUBLISHVOLUMEREQUEST._serialized_start=9543 + _NODEPUBLISHVOLUMEREQUEST._serialized_end=10091 + 
_NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY._serialized_start=3736 + _NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY._serialized_end=3789 + _NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY._serialized_start=1500 + _NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY._serialized_end=1546 + _NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._serialized_start=2771 + _NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._serialized_end=2823 + _NODEPUBLISHVOLUMERESPONSE._serialized_start=10093 + _NODEPUBLISHVOLUMERESPONSE._serialized_end=10120 + _NODEUNPUBLISHVOLUMEREQUEST._serialized_start=10122 + _NODEUNPUBLISHVOLUMEREQUEST._serialized_end=10190 + _NODEUNPUBLISHVOLUMERESPONSE._serialized_start=10192 + _NODEUNPUBLISHVOLUMERESPONSE._serialized_end=10221 + _NODEGETVOLUMESTATSREQUEST._serialized_start=10223 + _NODEGETVOLUMESTATSREQUEST._serialized_end=10319 + _NODEGETVOLUMESTATSRESPONSE._serialized_start=10321 + _NODEGETVOLUMESTATSRESPONSE._serialized_end=10441 + _VOLUMEUSAGE._serialized_start=10444 + _VOLUMEUSAGE._serialized_end=10589 + _VOLUMEUSAGE_UNIT._serialized_start=10547 + _VOLUMEUSAGE_UNIT._serialized_end=10589 + _VOLUMECONDITION._serialized_start=10591 + _VOLUMECONDITION._serialized_end=10648 + _NODEGETCAPABILITIESREQUEST._serialized_start=10650 + _NODEGETCAPABILITIESREQUEST._serialized_end=10678 + _NODEGETCAPABILITIESRESPONSE._serialized_start=10680 + _NODEGETCAPABILITIESRESPONSE._serialized_end=10762 + _NODESERVICECAPABILITY._serialized_start=10765 + _NODESERVICECAPABILITY._serialized_end=11083 + _NODESERVICECAPABILITY_RPC._serialized_start=10841 + _NODESERVICECAPABILITY_RPC._serialized_end=11075 + _NODESERVICECAPABILITY_RPC_TYPE._serialized_start=10903 + _NODESERVICECAPABILITY_RPC_TYPE._serialized_end=11075 + _NODEGETINFOREQUEST._serialized_start=11085 + _NODEGETINFOREQUEST._serialized_end=11105 + _NODEGETINFORESPONSE._serialized_start=11107 + _NODEGETINFORESPONSE._serialized_end=11222 + _NODEEXPANDVOLUMEREQUEST._serialized_start=11225 + _NODEEXPANDVOLUMEREQUEST._serialized_end=11538 + 
_NODEEXPANDVOLUMEREQUEST_SECRETSENTRY._serialized_start=1500 + _NODEEXPANDVOLUMEREQUEST_SECRETSENTRY._serialized_end=1546 + _NODEEXPANDVOLUMERESPONSE._serialized_start=11540 + _NODEEXPANDVOLUMERESPONSE._serialized_end=11590 + _GROUPCONTROLLERGETCAPABILITIESREQUEST._serialized_start=11592 + _GROUPCONTROLLERGETCAPABILITIESREQUEST._serialized_end=11636 + _GROUPCONTROLLERGETCAPABILITIESRESPONSE._serialized_start=11638 + _GROUPCONTROLLERGETCAPABILITIESRESPONSE._serialized_end=11747 + _GROUPCONTROLLERSERVICECAPABILITY._serialized_start=11750 + _GROUPCONTROLLERSERVICECAPABILITY._serialized_end=12002 + _GROUPCONTROLLERSERVICECAPABILITY_RPC._serialized_start=11848 + _GROUPCONTROLLERSERVICECAPABILITY_RPC._serialized_end=11989 + _GROUPCONTROLLERSERVICECAPABILITY_RPC_TYPE._serialized_start=11920 + _GROUPCONTROLLERSERVICECAPABILITY_RPC_TYPE._serialized_end=11989 + _CREATEVOLUMEGROUPSNAPSHOTREQUEST._serialized_start=12005 + _CREATEVOLUMEGROUPSNAPSHOTREQUEST._serialized_end=12339 + _CREATEVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._serialized_start=1500 + _CREATEVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._serialized_end=1546 + _CREATEVOLUMEGROUPSNAPSHOTREQUEST_PARAMETERSENTRY._serialized_start=1449 + _CREATEVOLUMEGROUPSNAPSHOTREQUEST_PARAMETERSENTRY._serialized_end=1498 + _CREATEVOLUMEGROUPSNAPSHOTRESPONSE._serialized_start=12341 + _CREATEVOLUMEGROUPSNAPSHOTRESPONSE._serialized_end=12434 + _VOLUMEGROUPSNAPSHOT._serialized_start=12437 + _VOLUMEGROUPSNAPSHOT._serialized_end=12600 + _DELETEVOLUMEGROUPSNAPSHOTREQUEST._serialized_start=12603 + _DELETEVOLUMEGROUPSNAPSHOTREQUEST._serialized_end=12816 + _DELETEVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._serialized_start=1500 + _DELETEVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._serialized_end=1546 + _DELETEVOLUMEGROUPSNAPSHOTRESPONSE._serialized_start=12818 + _DELETEVOLUMEGROUPSNAPSHOTRESPONSE._serialized_end=12858 + _GETVOLUMEGROUPSNAPSHOTREQUEST._serialized_start=12861 + _GETVOLUMEGROUPSNAPSHOTREQUEST._serialized_end=13068 + 
_GETVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._serialized_start=1500 + _GETVOLUMEGROUPSNAPSHOTREQUEST_SECRETSENTRY._serialized_end=1546 + _GETVOLUMEGROUPSNAPSHOTRESPONSE._serialized_start=13070 + _GETVOLUMEGROUPSNAPSHOTRESPONSE._serialized_end=13160 + _IDENTITY._serialized_start=13163 + _IDENTITY._serialized_end=13413 + _CONTROLLER._serialized_start=13416 + _CONTROLLER._serialized_end=14755 + _GROUPCONTROLLER._serialized_start=14758 + _GROUPCONTROLLER._serialized_end=15260 + _NODE._serialized_start=15263 + _NODE._serialized_end=15993 +# @@protoc_insertion_point(module_scope) diff --git a/vast_csi/proto/csi_pb2_grpc.py b/vast_csi/proto/csi_pb2_grpc.py new file mode 100644 index 00000000..936814d9 --- /dev/null +++ b/vast_csi/proto/csi_pb2_grpc.py @@ -0,0 +1,1074 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from . import csi_pb2 as csi__pb2 + + +class IdentityStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.GetPluginInfo = channel.unary_unary( + '/csi.v1.Identity/GetPluginInfo', + request_serializer=csi__pb2.GetPluginInfoRequest.SerializeToString, + response_deserializer=csi__pb2.GetPluginInfoResponse.FromString, + ) + self.GetPluginCapabilities = channel.unary_unary( + '/csi.v1.Identity/GetPluginCapabilities', + request_serializer=csi__pb2.GetPluginCapabilitiesRequest.SerializeToString, + response_deserializer=csi__pb2.GetPluginCapabilitiesResponse.FromString, + ) + self.Probe = channel.unary_unary( + '/csi.v1.Identity/Probe', + request_serializer=csi__pb2.ProbeRequest.SerializeToString, + response_deserializer=csi__pb2.ProbeResponse.FromString, + ) + + +class IdentityServicer(object): + """Missing associated documentation comment in .proto file.""" + + def GetPluginInfo(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetPluginCapabilities(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Probe(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_IdentityServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetPluginInfo': grpc.unary_unary_rpc_method_handler( + servicer.GetPluginInfo, + request_deserializer=csi__pb2.GetPluginInfoRequest.FromString, + response_serializer=csi__pb2.GetPluginInfoResponse.SerializeToString, + ), + 'GetPluginCapabilities': grpc.unary_unary_rpc_method_handler( + servicer.GetPluginCapabilities, + 
request_deserializer=csi__pb2.GetPluginCapabilitiesRequest.FromString, + response_serializer=csi__pb2.GetPluginCapabilitiesResponse.SerializeToString, + ), + 'Probe': grpc.unary_unary_rpc_method_handler( + servicer.Probe, + request_deserializer=csi__pb2.ProbeRequest.FromString, + response_serializer=csi__pb2.ProbeResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'csi.v1.Identity', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class Identity(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def GetPluginInfo(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Identity/GetPluginInfo', + csi__pb2.GetPluginInfoRequest.SerializeToString, + csi__pb2.GetPluginInfoResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetPluginCapabilities(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Identity/GetPluginCapabilities', + csi__pb2.GetPluginCapabilitiesRequest.SerializeToString, + csi__pb2.GetPluginCapabilitiesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Probe(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, 
'/csi.v1.Identity/Probe', + csi__pb2.ProbeRequest.SerializeToString, + csi__pb2.ProbeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + +class ControllerStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateVolume = channel.unary_unary( + '/csi.v1.Controller/CreateVolume', + request_serializer=csi__pb2.CreateVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.CreateVolumeResponse.FromString, + ) + self.DeleteVolume = channel.unary_unary( + '/csi.v1.Controller/DeleteVolume', + request_serializer=csi__pb2.DeleteVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.DeleteVolumeResponse.FromString, + ) + self.ControllerPublishVolume = channel.unary_unary( + '/csi.v1.Controller/ControllerPublishVolume', + request_serializer=csi__pb2.ControllerPublishVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerPublishVolumeResponse.FromString, + ) + self.ControllerUnpublishVolume = channel.unary_unary( + '/csi.v1.Controller/ControllerUnpublishVolume', + request_serializer=csi__pb2.ControllerUnpublishVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerUnpublishVolumeResponse.FromString, + ) + self.ValidateVolumeCapabilities = channel.unary_unary( + '/csi.v1.Controller/ValidateVolumeCapabilities', + request_serializer=csi__pb2.ValidateVolumeCapabilitiesRequest.SerializeToString, + response_deserializer=csi__pb2.ValidateVolumeCapabilitiesResponse.FromString, + ) + self.ListVolumes = channel.unary_unary( + '/csi.v1.Controller/ListVolumes', + request_serializer=csi__pb2.ListVolumesRequest.SerializeToString, + response_deserializer=csi__pb2.ListVolumesResponse.FromString, + ) + self.GetCapacity = channel.unary_unary( + '/csi.v1.Controller/GetCapacity', + 
request_serializer=csi__pb2.GetCapacityRequest.SerializeToString, + response_deserializer=csi__pb2.GetCapacityResponse.FromString, + ) + self.ControllerGetCapabilities = channel.unary_unary( + '/csi.v1.Controller/ControllerGetCapabilities', + request_serializer=csi__pb2.ControllerGetCapabilitiesRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerGetCapabilitiesResponse.FromString, + ) + self.CreateSnapshot = channel.unary_unary( + '/csi.v1.Controller/CreateSnapshot', + request_serializer=csi__pb2.CreateSnapshotRequest.SerializeToString, + response_deserializer=csi__pb2.CreateSnapshotResponse.FromString, + ) + self.DeleteSnapshot = channel.unary_unary( + '/csi.v1.Controller/DeleteSnapshot', + request_serializer=csi__pb2.DeleteSnapshotRequest.SerializeToString, + response_deserializer=csi__pb2.DeleteSnapshotResponse.FromString, + ) + self.ListSnapshots = channel.unary_unary( + '/csi.v1.Controller/ListSnapshots', + request_serializer=csi__pb2.ListSnapshotsRequest.SerializeToString, + response_deserializer=csi__pb2.ListSnapshotsResponse.FromString, + ) + self.ControllerExpandVolume = channel.unary_unary( + '/csi.v1.Controller/ControllerExpandVolume', + request_serializer=csi__pb2.ControllerExpandVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerExpandVolumeResponse.FromString, + ) + self.ControllerGetVolume = channel.unary_unary( + '/csi.v1.Controller/ControllerGetVolume', + request_serializer=csi__pb2.ControllerGetVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerGetVolumeResponse.FromString, + ) + self.ControllerModifyVolume = channel.unary_unary( + '/csi.v1.Controller/ControllerModifyVolume', + request_serializer=csi__pb2.ControllerModifyVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerModifyVolumeResponse.FromString, + ) + + +class ControllerServicer(object): + """Missing associated documentation comment in .proto file.""" + + def CreateVolume(self, request, context): + 
"""Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ControllerPublishVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ControllerUnpublishVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ValidateVolumeCapabilities(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListVolumes(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetCapacity(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ControllerGetCapabilities(self, request, context): + """Missing associated documentation comment in .proto file.""" + 
context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateSnapshot(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteSnapshot(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListSnapshots(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ControllerExpandVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ControllerGetVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ControllerModifyVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ControllerServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateVolume': grpc.unary_unary_rpc_method_handler( + servicer.CreateVolume, + 
request_deserializer=csi__pb2.CreateVolumeRequest.FromString, + response_serializer=csi__pb2.CreateVolumeResponse.SerializeToString, + ), + 'DeleteVolume': grpc.unary_unary_rpc_method_handler( + servicer.DeleteVolume, + request_deserializer=csi__pb2.DeleteVolumeRequest.FromString, + response_serializer=csi__pb2.DeleteVolumeResponse.SerializeToString, + ), + 'ControllerPublishVolume': grpc.unary_unary_rpc_method_handler( + servicer.ControllerPublishVolume, + request_deserializer=csi__pb2.ControllerPublishVolumeRequest.FromString, + response_serializer=csi__pb2.ControllerPublishVolumeResponse.SerializeToString, + ), + 'ControllerUnpublishVolume': grpc.unary_unary_rpc_method_handler( + servicer.ControllerUnpublishVolume, + request_deserializer=csi__pb2.ControllerUnpublishVolumeRequest.FromString, + response_serializer=csi__pb2.ControllerUnpublishVolumeResponse.SerializeToString, + ), + 'ValidateVolumeCapabilities': grpc.unary_unary_rpc_method_handler( + servicer.ValidateVolumeCapabilities, + request_deserializer=csi__pb2.ValidateVolumeCapabilitiesRequest.FromString, + response_serializer=csi__pb2.ValidateVolumeCapabilitiesResponse.SerializeToString, + ), + 'ListVolumes': grpc.unary_unary_rpc_method_handler( + servicer.ListVolumes, + request_deserializer=csi__pb2.ListVolumesRequest.FromString, + response_serializer=csi__pb2.ListVolumesResponse.SerializeToString, + ), + 'GetCapacity': grpc.unary_unary_rpc_method_handler( + servicer.GetCapacity, + request_deserializer=csi__pb2.GetCapacityRequest.FromString, + response_serializer=csi__pb2.GetCapacityResponse.SerializeToString, + ), + 'ControllerGetCapabilities': grpc.unary_unary_rpc_method_handler( + servicer.ControllerGetCapabilities, + request_deserializer=csi__pb2.ControllerGetCapabilitiesRequest.FromString, + response_serializer=csi__pb2.ControllerGetCapabilitiesResponse.SerializeToString, + ), + 'CreateSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.CreateSnapshot, + 
request_deserializer=csi__pb2.CreateSnapshotRequest.FromString, + response_serializer=csi__pb2.CreateSnapshotResponse.SerializeToString, + ), + 'DeleteSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.DeleteSnapshot, + request_deserializer=csi__pb2.DeleteSnapshotRequest.FromString, + response_serializer=csi__pb2.DeleteSnapshotResponse.SerializeToString, + ), + 'ListSnapshots': grpc.unary_unary_rpc_method_handler( + servicer.ListSnapshots, + request_deserializer=csi__pb2.ListSnapshotsRequest.FromString, + response_serializer=csi__pb2.ListSnapshotsResponse.SerializeToString, + ), + 'ControllerExpandVolume': grpc.unary_unary_rpc_method_handler( + servicer.ControllerExpandVolume, + request_deserializer=csi__pb2.ControllerExpandVolumeRequest.FromString, + response_serializer=csi__pb2.ControllerExpandVolumeResponse.SerializeToString, + ), + 'ControllerGetVolume': grpc.unary_unary_rpc_method_handler( + servicer.ControllerGetVolume, + request_deserializer=csi__pb2.ControllerGetVolumeRequest.FromString, + response_serializer=csi__pb2.ControllerGetVolumeResponse.SerializeToString, + ), + 'ControllerModifyVolume': grpc.unary_unary_rpc_method_handler( + servicer.ControllerModifyVolume, + request_deserializer=csi__pb2.ControllerModifyVolumeRequest.FromString, + response_serializer=csi__pb2.ControllerModifyVolumeResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'csi.v1.Controller', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class Controller(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def CreateVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/CreateVolume', + csi__pb2.CreateVolumeRequest.SerializeToString, + csi__pb2.CreateVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DeleteVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/DeleteVolume', + csi__pb2.DeleteVolumeRequest.SerializeToString, + csi__pb2.DeleteVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ControllerPublishVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/ControllerPublishVolume', + csi__pb2.ControllerPublishVolumeRequest.SerializeToString, + csi__pb2.ControllerPublishVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ControllerUnpublishVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, 
'/csi.v1.Controller/ControllerUnpublishVolume', + csi__pb2.ControllerUnpublishVolumeRequest.SerializeToString, + csi__pb2.ControllerUnpublishVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ValidateVolumeCapabilities(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/ValidateVolumeCapabilities', + csi__pb2.ValidateVolumeCapabilitiesRequest.SerializeToString, + csi__pb2.ValidateVolumeCapabilitiesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ListVolumes(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/ListVolumes', + csi__pb2.ListVolumesRequest.SerializeToString, + csi__pb2.ListVolumesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetCapacity(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/GetCapacity', + csi__pb2.GetCapacityRequest.SerializeToString, + csi__pb2.GetCapacityResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ControllerGetCapabilities(request, + target, + options=(), + channel_credentials=None, + 
call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/ControllerGetCapabilities', + csi__pb2.ControllerGetCapabilitiesRequest.SerializeToString, + csi__pb2.ControllerGetCapabilitiesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CreateSnapshot(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/CreateSnapshot', + csi__pb2.CreateSnapshotRequest.SerializeToString, + csi__pb2.CreateSnapshotResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DeleteSnapshot(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/DeleteSnapshot', + csi__pb2.DeleteSnapshotRequest.SerializeToString, + csi__pb2.DeleteSnapshotResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ListSnapshots(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/ListSnapshots', + csi__pb2.ListSnapshotsRequest.SerializeToString, + csi__pb2.ListSnapshotsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, 
timeout, metadata) + + @staticmethod + def ControllerExpandVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/ControllerExpandVolume', + csi__pb2.ControllerExpandVolumeRequest.SerializeToString, + csi__pb2.ControllerExpandVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ControllerGetVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/ControllerGetVolume', + csi__pb2.ControllerGetVolumeRequest.SerializeToString, + csi__pb2.ControllerGetVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ControllerModifyVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Controller/ControllerModifyVolume', + csi__pb2.ControllerModifyVolumeRequest.SerializeToString, + csi__pb2.ControllerModifyVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + +class GroupControllerStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.GroupControllerGetCapabilities = channel.unary_unary( + '/csi.v1.GroupController/GroupControllerGetCapabilities', + request_serializer=csi__pb2.GroupControllerGetCapabilitiesRequest.SerializeToString, + response_deserializer=csi__pb2.GroupControllerGetCapabilitiesResponse.FromString, + ) + self.CreateVolumeGroupSnapshot = channel.unary_unary( + '/csi.v1.GroupController/CreateVolumeGroupSnapshot', + request_serializer=csi__pb2.CreateVolumeGroupSnapshotRequest.SerializeToString, + response_deserializer=csi__pb2.CreateVolumeGroupSnapshotResponse.FromString, + ) + self.DeleteVolumeGroupSnapshot = channel.unary_unary( + '/csi.v1.GroupController/DeleteVolumeGroupSnapshot', + request_serializer=csi__pb2.DeleteVolumeGroupSnapshotRequest.SerializeToString, + response_deserializer=csi__pb2.DeleteVolumeGroupSnapshotResponse.FromString, + ) + self.GetVolumeGroupSnapshot = channel.unary_unary( + '/csi.v1.GroupController/GetVolumeGroupSnapshot', + request_serializer=csi__pb2.GetVolumeGroupSnapshotRequest.SerializeToString, + response_deserializer=csi__pb2.GetVolumeGroupSnapshotResponse.FromString, + ) + + +class GroupControllerServicer(object): + """Missing associated documentation comment in .proto file.""" + + def GroupControllerGetCapabilities(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateVolumeGroupSnapshot(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteVolumeGroupSnapshot(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not 
implemented!') + raise NotImplementedError('Method not implemented!') + + def GetVolumeGroupSnapshot(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_GroupControllerServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GroupControllerGetCapabilities': grpc.unary_unary_rpc_method_handler( + servicer.GroupControllerGetCapabilities, + request_deserializer=csi__pb2.GroupControllerGetCapabilitiesRequest.FromString, + response_serializer=csi__pb2.GroupControllerGetCapabilitiesResponse.SerializeToString, + ), + 'CreateVolumeGroupSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.CreateVolumeGroupSnapshot, + request_deserializer=csi__pb2.CreateVolumeGroupSnapshotRequest.FromString, + response_serializer=csi__pb2.CreateVolumeGroupSnapshotResponse.SerializeToString, + ), + 'DeleteVolumeGroupSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.DeleteVolumeGroupSnapshot, + request_deserializer=csi__pb2.DeleteVolumeGroupSnapshotRequest.FromString, + response_serializer=csi__pb2.DeleteVolumeGroupSnapshotResponse.SerializeToString, + ), + 'GetVolumeGroupSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.GetVolumeGroupSnapshot, + request_deserializer=csi__pb2.GetVolumeGroupSnapshotRequest.FromString, + response_serializer=csi__pb2.GetVolumeGroupSnapshotResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'csi.v1.GroupController', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class GroupController(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def GroupControllerGetCapabilities(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.GroupController/GroupControllerGetCapabilities', + csi__pb2.GroupControllerGetCapabilitiesRequest.SerializeToString, + csi__pb2.GroupControllerGetCapabilitiesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CreateVolumeGroupSnapshot(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.GroupController/CreateVolumeGroupSnapshot', + csi__pb2.CreateVolumeGroupSnapshotRequest.SerializeToString, + csi__pb2.CreateVolumeGroupSnapshotResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DeleteVolumeGroupSnapshot(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.GroupController/DeleteVolumeGroupSnapshot', + csi__pb2.DeleteVolumeGroupSnapshotRequest.SerializeToString, + csi__pb2.DeleteVolumeGroupSnapshotResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetVolumeGroupSnapshot(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + 
wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.GroupController/GetVolumeGroupSnapshot', + csi__pb2.GetVolumeGroupSnapshotRequest.SerializeToString, + csi__pb2.GetVolumeGroupSnapshotResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + +class NodeStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.NodeStageVolume = channel.unary_unary( + '/csi.v1.Node/NodeStageVolume', + request_serializer=csi__pb2.NodeStageVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodeStageVolumeResponse.FromString, + ) + self.NodeUnstageVolume = channel.unary_unary( + '/csi.v1.Node/NodeUnstageVolume', + request_serializer=csi__pb2.NodeUnstageVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodeUnstageVolumeResponse.FromString, + ) + self.NodePublishVolume = channel.unary_unary( + '/csi.v1.Node/NodePublishVolume', + request_serializer=csi__pb2.NodePublishVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodePublishVolumeResponse.FromString, + ) + self.NodeUnpublishVolume = channel.unary_unary( + '/csi.v1.Node/NodeUnpublishVolume', + request_serializer=csi__pb2.NodeUnpublishVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodeUnpublishVolumeResponse.FromString, + ) + self.NodeGetVolumeStats = channel.unary_unary( + '/csi.v1.Node/NodeGetVolumeStats', + request_serializer=csi__pb2.NodeGetVolumeStatsRequest.SerializeToString, + response_deserializer=csi__pb2.NodeGetVolumeStatsResponse.FromString, + ) + self.NodeExpandVolume = channel.unary_unary( + '/csi.v1.Node/NodeExpandVolume', + request_serializer=csi__pb2.NodeExpandVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodeExpandVolumeResponse.FromString, + ) + self.NodeGetCapabilities = 
channel.unary_unary( + '/csi.v1.Node/NodeGetCapabilities', + request_serializer=csi__pb2.NodeGetCapabilitiesRequest.SerializeToString, + response_deserializer=csi__pb2.NodeGetCapabilitiesResponse.FromString, + ) + self.NodeGetInfo = channel.unary_unary( + '/csi.v1.Node/NodeGetInfo', + request_serializer=csi__pb2.NodeGetInfoRequest.SerializeToString, + response_deserializer=csi__pb2.NodeGetInfoResponse.FromString, + ) + + +class NodeServicer(object): + """Missing associated documentation comment in .proto file.""" + + def NodeStageVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeUnstageVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodePublishVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeUnpublishVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeGetVolumeStats(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeExpandVolume(self, request, context): + """Missing associated documentation comment in .proto file.""" + 
context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeGetCapabilities(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeGetInfo(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_NodeServicer_to_server(servicer, server): + rpc_method_handlers = { + 'NodeStageVolume': grpc.unary_unary_rpc_method_handler( + servicer.NodeStageVolume, + request_deserializer=csi__pb2.NodeStageVolumeRequest.FromString, + response_serializer=csi__pb2.NodeStageVolumeResponse.SerializeToString, + ), + 'NodeUnstageVolume': grpc.unary_unary_rpc_method_handler( + servicer.NodeUnstageVolume, + request_deserializer=csi__pb2.NodeUnstageVolumeRequest.FromString, + response_serializer=csi__pb2.NodeUnstageVolumeResponse.SerializeToString, + ), + 'NodePublishVolume': grpc.unary_unary_rpc_method_handler( + servicer.NodePublishVolume, + request_deserializer=csi__pb2.NodePublishVolumeRequest.FromString, + response_serializer=csi__pb2.NodePublishVolumeResponse.SerializeToString, + ), + 'NodeUnpublishVolume': grpc.unary_unary_rpc_method_handler( + servicer.NodeUnpublishVolume, + request_deserializer=csi__pb2.NodeUnpublishVolumeRequest.FromString, + response_serializer=csi__pb2.NodeUnpublishVolumeResponse.SerializeToString, + ), + 'NodeGetVolumeStats': grpc.unary_unary_rpc_method_handler( + servicer.NodeGetVolumeStats, + request_deserializer=csi__pb2.NodeGetVolumeStatsRequest.FromString, + response_serializer=csi__pb2.NodeGetVolumeStatsResponse.SerializeToString, + ), + 'NodeExpandVolume': 
grpc.unary_unary_rpc_method_handler( + servicer.NodeExpandVolume, + request_deserializer=csi__pb2.NodeExpandVolumeRequest.FromString, + response_serializer=csi__pb2.NodeExpandVolumeResponse.SerializeToString, + ), + 'NodeGetCapabilities': grpc.unary_unary_rpc_method_handler( + servicer.NodeGetCapabilities, + request_deserializer=csi__pb2.NodeGetCapabilitiesRequest.FromString, + response_serializer=csi__pb2.NodeGetCapabilitiesResponse.SerializeToString, + ), + 'NodeGetInfo': grpc.unary_unary_rpc_method_handler( + servicer.NodeGetInfo, + request_deserializer=csi__pb2.NodeGetInfoRequest.FromString, + response_serializer=csi__pb2.NodeGetInfoResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'csi.v1.Node', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class Node(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def NodeStageVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Node/NodeStageVolume', + csi__pb2.NodeStageVolumeRequest.SerializeToString, + csi__pb2.NodeStageVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def NodeUnstageVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Node/NodeUnstageVolume', + csi__pb2.NodeUnstageVolumeRequest.SerializeToString, + csi__pb2.NodeUnstageVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, 
timeout, metadata) + + @staticmethod + def NodePublishVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Node/NodePublishVolume', + csi__pb2.NodePublishVolumeRequest.SerializeToString, + csi__pb2.NodePublishVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def NodeUnpublishVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Node/NodeUnpublishVolume', + csi__pb2.NodeUnpublishVolumeRequest.SerializeToString, + csi__pb2.NodeUnpublishVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def NodeGetVolumeStats(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Node/NodeGetVolumeStats', + csi__pb2.NodeGetVolumeStatsRequest.SerializeToString, + csi__pb2.NodeGetVolumeStatsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def NodeExpandVolume(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Node/NodeExpandVolume', + csi__pb2.NodeExpandVolumeRequest.SerializeToString, + 
csi__pb2.NodeExpandVolumeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def NodeGetCapabilities(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Node/NodeGetCapabilities', + csi__pb2.NodeGetCapabilitiesRequest.SerializeToString, + csi__pb2.NodeGetCapabilitiesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def NodeGetInfo(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/csi.v1.Node/NodeGetInfo', + csi__pb2.NodeGetInfoRequest.SerializeToString, + csi__pb2.NodeGetInfoResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/vast_csi/quantity.py b/vast_csi/quantity.py new file mode 100644 index 00000000..bbd3596c --- /dev/null +++ b/vast_csi/quantity.py @@ -0,0 +1,73 @@ +# from https://github.com/kubernetes-client/python/blob/master/kubernetes/utils/quantity.py + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from decimal import Decimal, InvalidOperation + + +def parse_quantity(quantity): + """ + Parse kubernetes canonical form quantity like 200Mi to a decimal number. + Supported SI suffixes: + base1024: Ki | Mi | Gi | Ti | Pi | Ei + base1000: n | u | m | "" | k | M | G | T | P | E + See https://github.com/kubernetes/apimachinery/blob/master/pkg/api/resource/quantity.go + Input: + quantity: string. kubernetes canonical form quantity + Returns: + Decimal + Raises: + ValueError on invalid or unknown input + """ + if isinstance(quantity, (int, float, Decimal)): + return Decimal(quantity) + + exponents = {"n": -3, "u": -2, "m": -1, "K": 1, "k": 1, "M": 2, + "G": 3, "T": 4, "P": 5, "E": 6} + + quantity = str(quantity) + number = quantity + suffix = None + if len(quantity) >= 2 and quantity[-1] == "i": + if quantity[-2] in exponents: + number = quantity[:-2] + suffix = quantity[-2:] + elif len(quantity) >= 1 and quantity[-1] in exponents: + number = quantity[:-1] + suffix = quantity[-1:] + + try: + number = Decimal(number) + except InvalidOperation: + raise ValueError("Invalid number format: {}".format(number)) + + if suffix is None: + return number + + if suffix.endswith("i"): + base = 1024 + elif len(suffix) == 1: + base = 1000 + else: + raise ValueError("{} has unknown suffix".format(quantity)) + + # handly SI inconsistency + if suffix == "ki": + raise ValueError("{} has unknown suffix".format(quantity)) + + if suffix[0] not in exponents: + raise ValueError("{} has unknown suffix".format(quantity)) + + exponent = Decimal(exponents[suffix[0]]) + return number * (base ** exponent) diff --git a/vast_csi/server.py b/vast_csi/server.py index 22288b24..3c855446 100644 --- a/vast_csi/server.py +++ b/vast_csi/server.py @@ -14,84 +14,68 @@ """The Python implementation of the GRPC helloworld.Greeter server.""" import os -import socket +import uuid +from random import randint 
from concurrent import futures from functools import wraps from pprint import pformat +from datetime import datetime import inspect -from uuid import uuid4 +from tempfile import mkdtemp +import json +from json import JSONDecodeError +from plumbum import cmd from plumbum import local, ProcessExecutionError -from plumbum.typed_env import TypedEnv import grpc +from requests.exceptions import HTTPError -from easypy.tokens import Token, ROUNDROBIN, RANDOM, CONTROLLER_AND_NODE, CONTROLLER, NODE -from easypy.misc import kwargs_resilient, at_least +from easypy.tokens import CONTROLLER_AND_NODE, CONTROLLER, NODE, COSI_PLUGIN +from easypy.misc import kwargs_resilient from easypy.caching import cached_property -from easypy.collections import shuffled +from easypy.bunch import Bunch from easypy.exceptions import TException - -from . logging import logger, init_logging -from . utils import patch_traceback_format, RESTSession, ApiError, get_mount -from . import csi_pb2_grpc -from .csi_pb2_grpc import ControllerServicer, NodeServicer, IdentityServicer +from easypy.collections import separate +from easypy.humanize import yesno_to_bool + +from .logging import logger, init_logging +from .utils import ( + patch_traceback_format, + get_mount, + normalize_mount_options, + string_to_proto_timestamp, +) +from .proto import csi_pb2_grpc as csi_grpc +from .proto import cosi_pb2_grpc as cosi_grpc from . 
import csi_types as types - - -LOAD_BALANCING_STRATEGIES = {ROUNDROBIN, RANDOM} - - -class Config(TypedEnv): - - class Path(TypedEnv.Str): - convert = staticmethod(local.path) - - plugin_name, plugin_version, git_commit = open("version.info").read().strip().split() - - controller_root_mount = Path("X_CSI_CTRL_ROOT_MOUNT", default=local.path("/csi-volumes")) - mock_vast = TypedEnv.Bool("X_CSI_MOCK_VAST", default=False) - nfs_server = TypedEnv.Str("X_CSI_NFS_SERVER", default="127.0.0.1") - root_export = Path("X_CSI_NFS_EXPORT", default=local.path("/k8s")) - log_level = TypedEnv.Str("X_CSI_LOG_LEVEL", default="info") - csi_sanity_test = TypedEnv.Bool("X_CSI_SANITY_TEST", default=False) - node_id = TypedEnv.Str("X_CSI_NODE_ID", default=socket.getfqdn()) - - vms_host = TypedEnv.Str("X_CSI_VMS_HOST", default="vast") - vip_pool_name = TypedEnv.Str("X_CSI_VIP_POOL_NAME", default="k8s") - vms_user = TypedEnv.Str("X_CSI_VMS_USER", default="admin") - vms_password = TypedEnv.Str("X_CSI_VMS_PASSWORD", default="admin") - ssl_verify = TypedEnv.Bool("X_CSI_DISABLE_VMS_SSL_VERIFICATION", default=False) - volume_name_fmt = TypedEnv.Str("X_CSI_VOLUME_NAME_FMT", default="csi:{namespace}:{name}:{id}") - - unmount_attempts = TypedEnv.Int("X_CSI_UNMOUNT_ATTEMPTS", default=10) - - _mount_options = TypedEnv.Str("X_CSI_MOUNT_OPTIONS", default="") # For example: "port=2049,nolock,vers=3" - - @property - def mount_options(self): - s = self._mount_options.strip() - return list({p for p in s.split(',') if p}) - - _load_balancing = TypedEnv.Str("X_CSI_LB_STRATEGY", default="roundrobin") - _mode = TypedEnv.Str("CSI_MODE", default="controller_and_node") - _endpoint = TypedEnv.Str("CSI_ENDPOINT", default='unix:///var/run/csi.sock') - - @property - def load_balancing(self): - lb = Token(self._load_balancing.upper()) - if lb not in LOAD_BALANCING_STRATEGIES: - raise Exception(f"invalid load balancing strategy: {lb} (use {'|'.join(LOAD_BALANCING_STRATEGIES)})") - return lb - - @property - def 
mode(self): - mode = Token(self._mode.upper()) - assert mode in {CONTROLLER_AND_NODE, CONTROLLER, NODE}, f"invalid mode: {mode}" - return mode - - @property - def endpoint(self): - return self._endpoint.strip("tcp://") +from .csi_types import ( + FAILED_PRECONDITION, + INVALID_ARGUMENT, + ALREADY_EXISTS, + NOT_FOUND, + ABORTED, + UNKNOWN, + OUT_OF_RANGE, +) +from .volume_builder import ( + EmptyVolumeBuilder, + VolumeFromSnapshotBuilder, + VolumeFromVolumeBuilder, + TestVolumeBuilder, + StaticVolumeBuilder, +) +from .exceptions import ( + Abort, + ApiError, + MissingParameter, + MountFailed, + VolumeAlreadyExists, + SourceNotFound, + OperationNotSupported, + LookupFieldError, +) +from .vms_session import get_vms_session, VmsSession +from .configuration import Config CONF = None @@ -103,41 +87,27 @@ def endpoint(self): ################################################################ -FAILED_PRECONDITION = grpc.StatusCode.FAILED_PRECONDITION -INVALID_ARGUMENT = grpc.StatusCode.INVALID_ARGUMENT -ALREADY_EXISTS = grpc.StatusCode.ALREADY_EXISTS -NOT_FOUND = grpc.StatusCode.NOT_FOUND -ABORTED = grpc.StatusCode.ABORTED -UNKNOWN = grpc.StatusCode.UNKNOWN -OUT_OF_RANGE = grpc.StatusCode.OUT_OF_RANGE - SUPPORTED_ACCESS = [ types.AccessModeType.SINGLE_NODE_WRITER, - # types.AccessModeType.SINGLE_NODE_READER_ONLY, - # types.AccessModeType.MULTI_NODE_READER_ONLY, + types.AccessModeType.SINGLE_NODE_READER_ONLY, + types.AccessModeType.MULTI_NODE_READER_ONLY, # types.AccessModeType.MULTI_NODE_SINGLE_WRITER, types.AccessModeType.MULTI_NODE_MULTI_WRITER, ] -class MountFailed(TException): - template = "Mounting {src} failed" - - def mount(src, tgt, flags=""): - cmd = local.cmd.mount - flags = flags.split(",") - flags += CONF.mount_options + executable = cmd.mount + flags = [f.strip() for f in flags.split(",")] if CONF.mock_vast: flags += "port=2049,nolock,vers=3".split(",") + flags = list(filter(None, flags)) if flags: - cmd = cmd["-o", ",".join(flags)] + executable = 
executable["-o", ",".join(flags)] try: - cmd[src, tgt] & logger.pipe_info("mount >>") + executable['-v', src, tgt] & logger.pipe_info("mount >>") except ProcessExecutionError as exc: - if exc.retcode == 32: - raise MountFailed(detail=exc.stderr, src=src, tgt=tgt) - raise + raise MountFailed(detail=exc.stderr, src=src, tgt=tgt, mount_options=flags) def _validate_capabilities(capabilities): @@ -145,30 +115,21 @@ def _validate_capabilities(capabilities): if capability.access_mode.mode not in SUPPORTED_ACCESS: raise Abort( INVALID_ARGUMENT, - f'Unsupported access mode: {capability.access_mode.mode} (use {SUPPORTED_ACCESS})') - - if not capability.HasField('mount'): + f"Unsupported access mode: {capability.access_mode.mode} (use {SUPPORTED_ACCESS})", + ) + if capability.HasField("block"): + raise Abort(INVALID_ARGUMENT, "Block access type is not supported") + if not capability.HasField("mount"): pass elif not capability.mount.fs_type: pass elif capability.mount.fs_type != "ext4": raise Abort( INVALID_ARGUMENT, - f'Unsupported file system type: {capability.mount.fs_type}') - + f"Unsupported file system type: {capability.mount.fs_type}", + ) -class Abort(Exception): - - @property - def code(self): - return self.args[0] - - @property - def message(self): - return self.args[1] - - -class Instrumented(): +class Instrumented: SILENCED = ["Probe", "NodeGetCapabilities"] @@ -179,8 +140,10 @@ def logged(cls, func): log = logger.debug if (method in cls.SILENCED) else logger.info parameters = inspect.signature(func).parameters - required_params = { - name for name, p in parameters.items() if p.default is p.empty} + required_params, non_required_params = map( + set, separate(parameters, key=lambda k: parameters[k].default is inspect._empty) + ) + vms_session_args = inspect.signature(get_vms_session).parameters.keys() required_params.discard("self") func = kwargs_resilient(func) @@ -189,35 +152,65 @@ def logged(cls, func): def wrapper(self, request, context): peer = context.peer() 
params = {fld.name: value for fld, value in request.ListFields()} - missing = required_params - {"request", "context"} - set(params) + # secrets are not logged and not the part of function signature. + secrets = params.pop("secrets", {}) + missing_params = required_params - {"request", "context", "vms_session"} - set(params) log(f"{peer} >>> {method}:") if params: for line in pformat(params).splitlines(): - log(f" {line}") + log(f"({method}) {line}") + + if "vms_session" in required_params: + # If secret exist and method signature requires `vms_session` + # then `vms_session` with secret will be injected into function parameters + params["vms_session"] = get_vms_session(**{k: secrets.get(k) for k in vms_session_args}) + elif "vms_session" in non_required_params: + # Try to take vms_session from secret. Set None on error. + try: + params["vms_session"] = get_vms_session(**{k: secrets.get(k) for k in vms_session_args}) + except LookupFieldError: + params["vms_session"] = None try: - if missing: - msg = f'Missing required fields: {", ".join(sorted(missing))}' + if missing_params: + msg = f'Missing required fields: {", ".join(sorted(missing_params))}' logger.error(f"{peer} <<< {method}: {msg}") raise Abort(INVALID_ARGUMENT, msg) ret = func(self, request=request, context=context, **params) except Abort as exc: - logger.info(f'{peer} <<< {method} ABORTED with {exc.code} ("{exc.message}")') + logger.info( + f'{peer} <<< {method} ABORTED with {exc.code} ("{exc.message}")' + ) logger.debug("Traceback", exc_info=True) context.abort(exc.code, exc.message) + except HTTPError as exc: + reason = exc.response.reason + status_code = exc.response.status_code + text = exc.response.text.splitlines()[0] + resource = exc.request.path_url + logger.exception(f"Exception during {method}\n{exc.response.text}") + context.abort( + UNKNOWN, + f"[{method}]. Unable to accomplish request to {resource}. 
{text}, <{reason}({status_code})>" + ) + except TException as exc: + # Any exception inherited from TException + logger.exception(f"Exception during {method}") + context.abort(UNKNOWN, f"[{method}]. {exc.render(color=False)}") except Exception as exc: - err_key = f"<{uuid4()}>" - logger.exception(f"Exception during {method} ({err_key}): {type(exc)}") - context.abort(UNKNOWN, f"Exception during {method}: {err_key}") + logger.exception(f"Exception during {method}") + text = str(exc) + context.abort(UNKNOWN, f"[{method}]: {text}") if ret: log(f"{peer} <<< {method}:") for line in pformat(ret).splitlines(): log(f" {line}") log(f"{peer} --- {method}: Done") return ret + return wrapper @classmethod @@ -237,8 +230,7 @@ def __init_subclass__(cls): ################################################################ -class Identity(IdentityServicer, Instrumented): - +class CsiIdentity(csi_grpc.IdentityServicer, Instrumented): def __init__(self): self.capabilities = [] self.controller = None @@ -254,21 +246,13 @@ def GetPluginCapabilities(self, request, context): return types.CapabilitiesResp( capabilities=[ types.Capability(service=types.Service(type=cap)) - for cap in self.capabilities]) + for cap in self.capabilities + ] + ) def Probe(self, request, context): - if self.node: - return types.ProbeRespOK - elif CONF.mock_vast: - return types.ProbeRespOK - elif self.controller: - try: - self.controller.get_vip() - except ApiError as exc: - raise Abort(FAILED_PRECONDITION, str(exc)) - return types.ProbeRespOK - else: - return types.ProbeRespNotReady + return types.ProbeRespOK + ################################################################ @@ -278,86 +262,39 @@ def Probe(self, request, context): ################################################################ -class Controller(ControllerServicer, Instrumented): +class CsiController(csi_grpc.ControllerServicer, Instrumented): CAPABILITIES = [ types.CtrlCapabilityType.CREATE_DELETE_VOLUME, 
types.CtrlCapabilityType.PUBLISH_UNPUBLISH_VOLUME, - types.CtrlCapabilityType.LIST_VOLUMES, + # types.CtrlCapabilityType.LIST_VOLUMES, types.CtrlCapabilityType.EXPAND_VOLUME, - # types.CtrlCapabilityType.GET_CAPACITY, - # types.CtrlCapabilityType.CREATE_DELETE_SNAPSHOT, + types.CtrlCapabilityType.CREATE_DELETE_SNAPSHOT, # types.CtrlCapabilityType.LIST_SNAPSHOTS, - # types.CtrlCapabilityType.CLONE_VOLUME, + types.CtrlCapabilityType.CLONE_VOLUME, + # types.CtrlCapabilityType.GET_CAPACITY, # types.CtrlCapabilityType.PUBLISH_READONLY, ] - mock_db = local.path("/tmp/") - - @cached_property - def vms_session(self): - auth = CONF.vms_user, CONF.vms_password - return RESTSession( - base_url=f"https://{CONF.vms_host}/api", - auth=auth, ssl_verify=CONF.ssl_verify) - - _vip_round_robin_idx = -1 - - def get_vip(self): - if CONF.mock_vast: - return CONF.nfs_server - - vips = [vip for vip in self.vms_session.vips() if vip.vippool == CONF.vip_pool_name] - if not vips: - raise Exception(f"No vips in pool {CONF.vip_pool_name}") - - if CONF.load_balancing == ROUNDROBIN: - self._vip_round_robin_idx = (self._vip_round_robin_idx + 1) % len(vips) - vip = vips[self._vip_round_robin_idx] - elif CONF.load_balancing == RANDOM: - vip = shuffled(vips)[0] - else: - raise Exception(f"Invalid load_balancing mode: '{CONF.load_balancing}'") - - logger.info(f"Using {CONF.load_balancing} - chose {vip.title}, currently connected to {vip.cnode}") - return vip.ip - - def get_quota(self, volume_id): - quotas = self.vms_session.quotas(path__contains=str(CONF.root_export[volume_id])) - if not quotas: - return - elif len(quotas) > 1: - names = ", ".join(sorted(q.name for q in quotas)) - raise Exception(f"Too many quotas on {volume_id}: {names}") - else: - return quotas[0] - - @cached_property - def root_mount(self): - target_path = CONF.controller_root_mount - if not target_path.exists(): - target_path.mkdir() - target_path["NOT_MOUNTED"].touch() - logger.info(f"created successfully: {target_path}") - - if 
target_path["NOT_MOUNTED"].exists(): - nfs_server = self.get_vip() - mount_spec = f"{nfs_server}:{CONF.root_export}" - mount(mount_spec, target_path) - logger.info(f"mounted successfully: {target_path}") - - return target_path - def ControllerGetCapabilities(self): - return types.CtrlCapabilityResp(capabilities=[ - types.CtrlCapability(rpc=types.CtrlCapability.RPC(type=rpc)) - for rpc in self.CAPABILITIES]) - - def ValidateVolumeCapabilities(self, context, volume_id, volume_capabilities, volume_context=None, parameters=None): - vol = self.root_mount[volume_id] - if not vol.exists(): - raise Abort(NOT_FOUND, f'Volume {volume_id} does not exist') + return types.CtrlCapabilityResp( + capabilities=[ + types.CtrlCapability(rpc=types.CtrlCapability.RPC(type=rpc)) + for rpc in self.CAPABILITIES + ] + ) + def ValidateVolumeCapabilities( + self, + vms_session, + context, + volume_id, + volume_capabilities, + volume_context=None, + parameters=None, + ): + if not vms_session.get_quota(volume_id): + raise Abort(NOT_FOUND, f"Volume {volume_id} does not exist") try: _validate_capabilities(volume_capabilities) except Abort as exc: @@ -366,184 +303,251 @@ def ValidateVolumeCapabilities(self, context, volume_id, volume_capabilities, vo confirmed = types.ValidateResp.Confirmed( volume_context=volume_context, volume_capabilities=volume_capabilities, - parameters=parameters) + parameters=parameters, + ) return types.ValidateResp(confirmed=confirmed) - def ListVolumes(self, starting_token=None, max_entries=None): - - if starting_token: - try: - starting_inode = int(starting_token) - except ValueError: - raise Abort(ABORTED, "Invalid starting_token") - else: - starting_inode = 0 - - fields = {'entries': []} - - vols = (d for d in os.scandir(self.root_mount) if d.is_dir()) - vols = sorted(vols, key=lambda d: d.inode()) - if not vols: - logger.info(f"No volumes in {self.root_mount}") - return types.ListResp(**fields) - - logger.info(f"Got {len(vols)} volumes in {self.root_mount}") - 
start_idx = 0 - - logger.info(f"Skipping to {starting_inode}") - for start_idx, d in enumerate(vols): - if d.inode() > starting_inode: - break - - del vols[:start_idx] - - remain = 0 - if max_entries: - remain = at_least(0, len(vols) - max_entries) - vols = vols[:max_entries] - - if remain: - fields['next_token'] = str(vols[-1].inode()) - - fields['entries'] = [types.ListResp.Entry( - volume=self._to_volume(vol.name)) - for vol in vols] - - return types.ListResp(**fields) - - def _to_volume(self, vol_id): - vol_dir = self.root_mount[vol_id] - logger.info(f"{vol_dir}") - if not vol_dir.is_dir(): - logger.info(f"{vol_dir} is not dir") - return - with self.mock_db[vol_id].open("rb") as f: - vol = types.Volume() - vol.ParseFromString(f.read()) - return vol - - def CreateVolume(self, name, volume_capabilities, capacity_range=None, parameters=None): + def CreateVolume( + self, + vms_session, + name, + volume_capabilities, + capacity_range=None, + parameters=None, + volume_content_source=None, + ephemeral_volume_name=None, + ): _validate_capabilities(volume_capabilities) + parameters = parameters or dict() - volume_id = name - volume_name = f"csi-{volume_id}" - if parameters: - pvc_name = parameters.get("csi.storage.k8s.io/pvc/name") - pvc_namespace = parameters.get("csi.storage.k8s.io/pvc/namespace") - if pvc_namespace and pvc_name: - volume_name = CONF.volume_name_fmt.format(namespace=pvc_namespace, name=pvc_name, id=volume_id) - volume_name = volume_name[:64] # crop to Vast's max-length - - requested_capacity = capacity_range.required_bytes if capacity_range else 0 - volume_context = {} - + # Take appropriate builder for volume, snapshot or test builder if CONF.mock_vast: - volume = self._to_volume(volume_id) - if volume: - existing_capacity = volume.capacity_bytes - if existing_capacity != requested_capacity: - raise Abort( - ALREADY_EXISTS, - "Volume already exists with different capacity than requested" - f"({existing_capacity})") - vol_dir = 
self.root_mount[volume_id] - vol_dir.mkdir(exist_ok=True) - + builder_cls = TestVolumeBuilder else: - if quota := self.get_quota(volume_id): - existing_capacity = quota.hard_limit - if existing_capacity and existing_capacity != requested_capacity: - raise Abort( - ALREADY_EXISTS, - "Volume already exists with different capacity than requested" - f"({existing_capacity})") + if not volume_content_source: + builder_cls = EmptyVolumeBuilder + + elif volume_content_source.snapshot.snapshot_id: + builder_cls = VolumeFromSnapshotBuilder + + elif volume_content_source.volume.volume_id: + builder_cls = VolumeFromVolumeBuilder else: - data = dict( - create_dir=True, - name=volume_name, - path=str(CONF.root_export[volume_id]), + raise ValueError( + "Invalid condition. Either volume_content_source" + " or test environment variable should be provided" ) - if requested_capacity: - data.update(hard_limit=requested_capacity) - quota = self.vms_session.post("quotas", data=data) - - volume_context.update(quota_id=quota.id) - volume = types.Volume( - capacity_bytes=requested_capacity, volume_id=volume_id, - volume_context={k: str(v) for k, v in volume_context.items()}) + builder = builder_cls.from_parameters( + conf=CONF, + vms_session=vms_session, + name=name, + volume_capabilities=volume_capabilities, + capacity_range=capacity_range, + parameters=parameters, + volume_content_source=volume_content_source, + ephemeral_volume_name=ephemeral_volume_name + ) + # Create volume, volume from snapshot or mount local path (for testing purposes) + try: + volume = builder.build_volume() + except SourceNotFound as exc: + raise Abort(NOT_FOUND, exc.message) + except VolumeAlreadyExists as exc: + raise Abort(ALREADY_EXISTS, exc.message) + return types.CreateResp(volume=volume) - if CONF.mock_vast: - with self.mock_db[volume_id].open("wb") as f: - f.write(volume.SerializeToString()) + def _delete_data_from_storage(self, vms_session, path, tenant_id): + if CONF.avoid_trash_api.expired: + try: + 
logger.info(f"Attempting trash API to delete {path}") + vms_session.delete_folder(path, tenant_id) + return # Successfully deleted. Prevent using local mounting + except OperationNotSupported as exc: + logger.info(f"Trash API not available {exc}") + CONF.avoid_trash_api.reset() + + logger.info(f"Use local mounting to delete {path}") + path = local.path(path) + volume_id = path.name + assert CONF.deletion_view_policy, ( + "Ensure that deletionViewPolicy is properly " + "configured in your Helm configuration to perform local volume deletion." + ) + view_policy = vms_session.get_view_policy(policy_name=CONF.deletion_view_policy) + assert tenant_id == view_policy.tenant_id, ( + f"Volume and deletionViewPolicy must be in the same tenant. " + f"Make sure deletionViewPolicy belongs to tenant {tenant_id} or use Trash API for deletion." + ) + if CONF.use_local_ip_for_mount: + nfs_server_ip = CONF.use_local_ip_for_mount + else: + assert CONF.deletion_vip_pool, ( + "Ensure that deletionVipPool is properly " + "configured in your Helm configuration to perform local volume deletion." 
+ ) + nfs_server_ip = vms_session.get_vip(CONF.deletion_vip_pool, view_policy.tenant_id) + + logger.info(f"Creating temporary base view.") + with vms_session.temp_view(path.dirname, view_policy.id, view_policy.tenant_id) as base_view: + mount_spec = f"{nfs_server_ip}:{base_view.alias}" + mounted = False + tmpdir = local.path(mkdtemp()) # convert string to local.path + tmpdir['.csi-unmounted'].touch() - return types.CreateResp(volume=volume) + try: + mount(mount_spec, tmpdir, flags=",".join(CONF.mount_options)) + assert not tmpdir['.csi-unmounted'].exists() + mounted = True + + if tmpdir[volume_id].exists(): + logger.info(f"deleting {tmpdir[volume_id]}") + tmpdir[volume_id].delete() + logger.info(f"done deleting {tmpdir[volume_id]}") + else: + logger.info(f"already deleted {tmpdir[volume_id]}") + except FileNotFoundError as exc: + if 'No such file or directory' in str(exc): + logger.warning( + 'It appears that multiple processes are attempting to clean a single directory,' + ' leading to unforeseeable concurrent access to the identical file or directory.' + ' The cleaning process will be repeated.' + ) + raise Abort( + ABORTED, + f"Concurrent access to an identical file/directory has been detected." 
+ f" A new attempt will be made.", + ) + else: + raise + except OSError as exc: + if 'not empty' in str(exc): + for i, item in enumerate(tmpdir[volume_id].list()): + if i > 9: + logger.debug(" ...") + break + logger.warning(f" - {item}") + raise + finally: + if mounted: + cmd.umount['-v', tmpdir] & logger.pipe_info("umount >>", retcode=None) # don't fail if not mounted + os.remove(tmpdir['.csi-unmounted']) # will fail if still mounted somehow + os.rmdir(tmpdir) # will fail if not empty directory + + def DeleteVolume(self, vms_session, volume_id): + vms_session.ensure_snapshot_stream_deleted(f"strm-{volume_id}") + if quota := vms_session.get_quota(volume_id): + # this is a check we have to do until Vast provides access to orphaned snapshots (ORION-135599) + might_use_trash_folder = not CONF.dont_use_trash_api + if might_use_trash_folder and vms_session.has_snapshots(quota.path): + raise Exception(f"Unable to delete {volume_id} as it holds snapshots") + try: + self._delete_data_from_storage(vms_session, quota.path, quota.tenant_id) + except OSError as exc: + if 'not empty' not in str(exc): + raise + if snaps := vms_session.has_snapshots(quota.path): + # this is expected when the volume has snapshots + logger.info(f"{quota.path} will remain due to remaining {len(snaps)} snapshots") + else: + raise + logger.info(f"Data removed: {quota.path}") - def DeleteVolume(self, volume_id): - vol_dir = self.root_mount[volume_id] - vol_dir.delete() + vms_session.delete_view_by_path(quota.path) + logger.info(f"View removed: {quota.path}") - if not CONF.mock_vast: - quota = self.get_quota(volume_id) - if quota: - self.vms_session.delete(f"quotas/{quota.id}") - logger.info(f"Quota removed: {quota.id}") - else: - self.mock_db[volume_id].delete() + vms_session.delete_quota(quota.id) + logger.info(f"Quota removed: {quota.id}") - logger.info(f"Removed volume: {vol_dir}") + logger.info(f"Removed volume: {volume_id}") return types.DeleteResp() - def GetCapacity(self): - cap = 
os.statvfs(self.root_mount).f_favail - return types.CapacityResp(available_capacity=cap) - - def ControllerPublishVolume(self, node_id, volume_id, volume_capability): + def ControllerPublishVolume( + self, vms_session, node_id, volume_id, volume_capability, volume_context=None + ): + volume_context = dict(volume_context or dict()) _validate_capabilities([volume_capability]) - found = bool(self._to_volume(volume_id) if CONF.mock_vast else self.get_quota(volume_id)) - if not found: - raise Abort(NOT_FOUND, f"Unknown volume: {volume_id}") + if volume_id.startswith("/"): + # Assumed consuming existing volume where user specified full path to view in volumeHandle attribute. + if volume_id != "/": + # keep path consistent. + volume_id = volume_id.rstrip("/") + logger.info(f"Binding static volume: {volume_id}") + export_path = volume_context["root_export"] = volume_id + name = str(uuid.uuid5(uuid.NAMESPACE_DNS, volume_id)) + create_view = yesno_to_bool(volume_context.get("static_pv_create_views", "no")) + create_quota = yesno_to_bool(volume_context.get("static_pv_create_quotas", "no")) + builder = StaticVolumeBuilder.from_parameters( + conf=CONF, + vms_session=vms_session, + name=name, + volume_capabilities=[volume_capability], + parameters=volume_context, + create_view=create_view, + create_quota=create_quota, + ) + try: + volume_context = builder.build_volume() + except SourceNotFound as exc: + raise Abort(NOT_FOUND, exc.message) + except VolumeAlreadyExists as exc: + raise Abort(ALREADY_EXISTS, exc.message) + + else: + root_export = CONF.sanity_test_nfs_export if CONF.mock_vast else local.path(volume_context["root_export"]) + # Build export path for snapshot or volume + if snapshot_base_path := volume_context.get("snapshot_base_path"): + # Snapshot + export_path = str(root_export[snapshot_base_path]) + else: + # Volume + export_path = str(root_export[volume_id]) if CONF.csi_sanity_test and CONF.node_id != node_id: # for a test that tries to fake a non-existent node 
raise Abort(NOT_FOUND, f"Unknown volume: {node_id}") - nfs_server_ip = self.get_vip() + vip_pool_name = volume_context.get("vip_pool_name") + vip_pool_fqdn = volume_context.get("vip_pool_fqdn") + if vip_pool_fqdn: + nfs_server_ip = vip_pool_fqdn + elif vip_pool_name or CONF.mock_vast: + nfs_server_ip = vms_session.get_vip(vip_pool_name=vip_pool_name) + else: + nfs_server_ip = CONF.use_local_ip_for_mount + assert nfs_server_ip, f"{nfs_server_ip=}" + logger.info(f"Using local IP for mount: {nfs_server_ip}") return types.CtrlPublishResp( publish_context=dict( - export_path=str(CONF.root_export), + export_path=export_path, nfs_server_ip=nfs_server_ip, - )) + # volume_context is not accessible for static volumes. Pass mount_options through publish_context. + mount_options=volume_context.get("mount_options", ""), + ) + ) def ControllerUnpublishVolume(self, node_id, volume_id): return types.CtrlUnpublishResp() - def ControllerExpandVolume(self, volume_id, capacity_range): + def ControllerExpandVolume(self, vms_session, volume_id, capacity_range): requested_capacity = capacity_range.required_bytes - if CONF.mock_vast: - volume = self._to_volume(volume_id) - if volume: - existing_capacity = volume.capacity_bytes - else: - quota = self.get_quota(volume_id) - if quota: - existing_capacity = quota.hard_limit + if not (quota := vms_session.get_quota(volume_id)): + raise Abort(NOT_FOUND, f"Not found quota with id: {volume_id}") + existing_capacity = quota.hard_limit if requested_capacity <= existing_capacity: capacity_bytes = existing_capacity - elif CONF.mock_vast: - volume.capacity_bytes = capacity_bytes = requested_capacity else: try: - self.vms_session.patch(f"quotas/{quota.id}", data=dict(hard_limit=requested_capacity)) + vms_session.update_quota( + quota_id=quota.id, data=dict(hard_limit=requested_capacity) + ) except ApiError as exc: - raise Abort( - OUT_OF_RANGE, - f"Failed updating quota {quota.id}: {exc}") + raise Abort(OUT_OF_RANGE, f"Failed updating quota {quota.id}: 
{exc}") capacity_bytes = requested_capacity return types.CtrlExpandResp( @@ -551,6 +555,101 @@ def ControllerExpandVolume(self, volume_id, capacity_range): node_expansion_required=False, ) + def CreateSnapshot(self, vms_session, source_volume_id, name, parameters=None): + + parameters = parameters or dict() + volume_id = source_volume_id + if not (quota := vms_session.get_quota(volume_id)): + raise Abort(NOT_FOUND, f"Unknown volume: {volume_id}") + + if CONF.mock_vast: + + try: + with CONF.fake_snapshot_store[name].open("rb") as f: + snp = types.Snapshot() + snp.ParseFromString(f.read()) + if snp.source_volume_id != volume_id: + raise Abort( + ALREADY_EXISTS, f"Snapshot name '{name}' is already taken" + ) + except FileNotFoundError: + ts = types.Timestamp() + ts.FromDatetime(datetime.utcnow()) + snp = types.Snapshot( + size_bytes=0, # indicates 'unspecified' + snapshot_id=name, + source_volume_id=volume_id, + creation_time=ts, + ready_to_use=True, + ) + with CONF.fake_snapshot_store[name].open("wb") as f: + f.write(snp.SerializeToString()) + else: + # Create snapshot using the same path as quota has. 
+ path = quota.path + tenant_id = quota.tenant_id + snapshot_name = parameters["csi.storage.k8s.io/volumesnapshot/name"] + snapshot_namespace = parameters[ + "csi.storage.k8s.io/volumesnapshot/namespace" + ] + snapshot_name_fmt = parameters.get("snapshot_name_fmt", CONF.name_fmt) + snapshot_name = snapshot_name_fmt.format( + namespace=snapshot_namespace, name=snapshot_name, id=name + ) + snapshot_name = snapshot_name.replace(":", "-").replace("/", "-") + try: + snap = vms_session.ensure_snapshot(snapshot_name=snapshot_name, path=path, tenant_id=tenant_id) + except ApiError as exc: + handled = False + if exc.response.status_code == 400: + try: + [(k, [v])] = exc.response.json().items() + except (ValueError, JSONDecodeError): + pass + else: + if (k, v) == ("name", "This field must be unique."): + snap = vms_session.get_snapshot(snapshot_name=snapshot_name) + if snap.path.strip("/") != path.strip("/"): + raise Abort( + ALREADY_EXISTS, + f"Snapshot name '{name}' is already taken", + ) from None + else: + handled = True + if not handled: + raise Abort(INVALID_ARGUMENT, str(exc)) + + snp = types.Snapshot( + size_bytes=0, # indicates 'unspecified' + snapshot_id=str(snap.id), + source_volume_id=volume_id, + creation_time=string_to_proto_timestamp(snap.created), + ready_to_use=True, + ) + + return types.CreateSnapResp(snapshot=snp) + + def DeleteSnapshot(self, vms_session, snapshot_id): + if CONF.mock_vast: + CONF.fake_snapshot_store[snapshot_id].delete() + else: + snapshot = vms_session.get_snapshot(snapshot_id=snapshot_id) + vms_session.delete_snapshot(snapshot_id) + if vms_session.get_quota(path=snapshot.path, tenant_id=snapshot.tenant_id): + pass # quotas still exist + elif vms_session.has_snapshots(snapshot.path): + pass # other snapshots still exist + else: + logger.info(f"last snapshot for {snapshot.path}, and no more quotas - let's delete this directory") + self._delete_data_from_storage(vms_session, snapshot.path, snapshot.tenant_id) + + return 
types.DeleteSnapResp() + + @classmethod + def _to_volume_id(cls, path): + vol_id = str(local.path(path).relative_to(CONF.sanity_test_nfs_export)) + return None if vol_id.startswith("..") else vol_id + ################################################################ # @@ -559,23 +658,102 @@ def ControllerExpandVolume(self, volume_id, capacity_range): ################################################################ -class Node(NodeServicer, Instrumented): +class CsiNode(csi_grpc.NodeServicer, Instrumented): CAPABILITIES = [ # types.NodeCapabilityType.STAGE_UNSTAGE_VOLUME, - # types.NodeCapabilityType.GET_VOLUME_STATS, + types.NodeCapabilityType.GET_VOLUME_STATS, ] + @cached_property + def controller(self): + return CsiController() + def NodeGetCapabilities(self): - return types.NodeCapabilityResp(capabilities=[ - types.NodeCapability(rpc=types.NodeCapability.RPC(type=rpc)) - for rpc in self.CAPABILITIES]) + return types.NodeCapabilityResp( + capabilities=[ + types.NodeCapability(rpc=types.NodeCapability.RPC(type=rpc)) + for rpc in self.CAPABILITIES + ] + ) + + def NodePublishVolume( + self, + volume_id, + target_path, + vms_session=None, + volume_capability=None, + publish_context=None, + readonly=False, + volume_context=None, + ): + volume_context = volume_context or dict() + if ( + is_ephemeral := volume_context + and volume_context.get("csi.storage.k8s.io/ephemeral") == "true" + ): + from .quantity import parse_quantity + + if not vms_session: + raise Abort( + FAILED_PRECONDITION, + "Ephemeral Volume provisioning requires " + "configuring a global VMS credentials secret or nodePublishSecretRef secret reference." 
+ ) + eph_volume_name_fmt = volume_context.get("eph_volume_name_fmt", CONF.name_fmt) + if "size" in volume_context: + required_bytes = int(parse_quantity(volume_context["size"])) + capacity_range = Bunch(required_bytes=required_bytes) + else: + capacity_range = None + pod_uid = volume_context["csi.storage.k8s.io/pod.uid"] + pod_name = volume_context["csi.storage.k8s.io/pod.name"] + pod_namespace = volume_context["csi.storage.k8s.io/pod.namespace"] + eph_volume_name = eph_volume_name_fmt.format( + namespace=pod_namespace, name=pod_name, id=pod_uid + ) + self.controller.CreateVolume.__wrapped__( + self.controller, + vms_session=vms_session, + name=volume_id, + volume_capabilities=[], + ephemeral_volume_name=eph_volume_name, + capacity_range=capacity_range, + parameters=volume_context + ) + resp = self.controller.ControllerPublishVolume.__wrapped__( + self.controller, + vms_session=vms_session, + node_id=CONF.node_id, + volume_id=volume_id, + volume_capability=volume_capability, + volume_context=volume_context, + ) + publish_context = resp.publish_context + elif not volume_capability: + raise Abort(INVALID_ARGUMENT, "missing 'volume_capability'") + + if not publish_context: + assert not CONF.attach_required, "missing 'publish_context' when attach_required is enabled" + logger.info("attach_required is disabled, obtaining publish context") + resp = self.controller.ControllerPublishVolume.__wrapped__( + self.controller, + vms_session=vms_session, + node_id=CONF.node_id, + volume_id=volume_id, + volume_capability=volume_capability, + volume_context=volume_context, + ) + publish_context = resp.publish_context - def NodePublishVolume(self, volume_id, target_path, volume_capability, publish_context, readonly=False): nfs_server_ip = publish_context["nfs_server_ip"] - export_path = publish_context["export_path"] - source_path = local.path(export_path)[volume_id] - mount_spec = f"{nfs_server_ip}:{source_path}" + + schema = "1" if not volume_context else 
volume_context.get("schema", "1") + if schema == "2": + export_path = volume_context["export_path"] + else: + export_path = publish_context["export_path"] + mount_spec = f"{nfs_server_ip}:{export_path}" _validate_capabilities([volume_capability]) target_path = local.path(target_path) @@ -588,24 +766,44 @@ def NodePublishVolume(self, volume_id, target_path, volume_capability, publish_c if found_mount.device != mount_spec: raise Abort( ALREADY_EXISTS, - f"Volume already mounted from {found_mount.device} instead of {mount_spec}") + f"Volume already mounted from {found_mount.device} instead of {mount_spec}", + ) elif is_readonly != readonly: raise Abort( ALREADY_EXISTS, - f"Volume already mounted as {'readonly' if is_readonly else 'readwrite'}") + f"Volume already mounted as {'readonly' if is_readonly else 'readwrite'}", + ) else: logger.info(f"{volume_id} is already mounted: {found_mount}") return types.NodePublishResp() target_path.mkdir() + meta_file = target_path[".vast-csi-meta"] + payload = dict(volume_id=volume_id, is_ephemeral=is_ephemeral) + if is_ephemeral: + payload["vms_session"] = vms_session.serialize(salt=volume_id.encode()) + with meta_file.open("w") as f: + json.dump(payload, f) + os.chmod(meta_file, 0o600) logger.info(f"created: {target_path}") - flags = "ro" if readonly else "" - mount(mount_spec, target_path, flags=flags) - logger.info(f"mounted: {target_path}") + flags = ["ro"] if readonly else [] + if volume_capability.mount.mount_flags: + flags += volume_capability.mount.mount_flags + else: + flags += normalize_mount_options( + volume_context.get("mount_options", publish_context.get("mount_options", "")) + ) + try: + mount(mount_spec, target_path, flags=",".join(flags)) + logger.info(f"mounted: {target_path} flags: {flags}") + except Exception: + meta_file.delete() + raise + return types.NodePublishResp() - def NodeUnpublishVolume(self, target_path): + def NodeUnpublishVolume(self, volume_id, target_path, vms_session=None): target_path = 
local.path(target_path) if not target_path.exists(): @@ -614,26 +812,136 @@ def NodeUnpublishVolume(self, target_path): # make sure we're really unmounted before we delete anything for i in range(CONF.unmount_attempts): if not get_mount(target_path): + logger.info(f"{target_path} is not mounted") break try: local.cmd.umount(target_path) except ProcessExecutionError as exc: if "not mounted" in exc.stderr: - break # a race?... + logger.info(f"umount failed - {target_path} is not mounted (race?)") + break raise else: raise Abort( UNKNOWN, - f"Stuck in unmount loop of {target_path} too many times ({CONF.unmount_attempts})") + f"Stuck in unmount loop of {target_path} too many times ({CONF.unmount_attempts})", + ) logger.info(f"Deleting {target_path}") - os.rmdir(str(target_path)) # don't use plumbum's .delete to avoid the dangerous rmtree + if target_path[".vast-csi-meta"].exists(): + with target_path[".vast-csi-meta"].open("r") as f: + meta = json.load(f) + if meta.get("is_ephemeral"): + if vms_session_data := meta.get("vms_session"): + vms_session = VmsSession.deserialize( + salt=volume_id.encode(), encrypted_data=vms_session_data + ) + elif not vms_session: + raise Abort( + FAILED_PRECONDITION, + "Ephemeral Volume provisioning requires " + "configuring a global VMS credentials secret or nodePublishSecretRef secret reference." 
+ ) + self.controller.DeleteVolume.__wrapped__( + self.controller, vms_session=vms_session, volume_id=meta["volume_id"] + ) + + if target_path[".vast-csi-meta"].exists(): + os.remove(target_path[".vast-csi-meta"]) + os.rmdir(target_path) # don't use plumbum's .delete to avoid the dangerous rmtree logger.info(f"{target_path} removed successfully") return types.NodeUnpublishResp() def NodeGetInfo(self): return types.NodeInfoResp(node_id=CONF.node_id) + def NodeGetVolumeStats(self, volume_id, volume_path): + if not os.path.ismount(volume_path): + raise Abort(NOT_FOUND, f"{volume_path} is not a mountpoint") + # See http://man7.org/linux/man-pages/man2/statfs.2.html for details. + fstats = os.statvfs(volume_path) + return types.VolumeStatsResp( + usage=[ + types.VolumeUsage( + unit=types.UsageUnit.BYTES, + available=fstats.f_bavail * fstats.f_bsize, + total=fstats.f_blocks * fstats.f_bsize, + used=(fstats.f_blocks - fstats.f_bfree) * fstats.f_bsize, + ), + types.VolumeUsage( + unit=types.UsageUnit.INODES, + available=fstats.f_ffree, + total=fstats.f_files, + used=fstats.f_files - fstats.f_ffree, + ) + ] + ) + + +class CosiIdentity(cosi_grpc.IdentityServicer, Instrumented): + + def DriverGetInfo(self, request, context): + return types.DriverGetInfoResp(name=CONF.plugin_name) + + +class CosiProvisioner(cosi_grpc.ProvisionerServicer, Instrumented): + + def DriverCreateBucket(self, vms_session, name, parameters): + if (root_export := parameters.pop("root_export", None)) is None: + raise MissingParameter(param="root_export") + if not (vip_pool_name := parameters.pop("vip_pool_name", None)): + raise MissingParameter(param="vip_pool_name") + scheme = parameters.pop("scheme", "http") + + if CONF.truncate_volume_name: + name = name[:CONF.truncate_volume_name] # crop to Vast's max-length + + uid = randint(50000, 60000) + vms_session.ensure_user(uid=uid, name=name, allow_create_bucket=True) + view = vms_session.ensure_s3view(bucket_name=name, root_export=root_export, **parameters) 
+ port = 443 if scheme == "https" else 80 + vip = vms_session.get_vip(vip_pool_name=vip_pool_name, tenant_id=view.tenant_id) + # bucket_id contains bucket name and endpoint + # should be smth like test-bucket-caf9e0d0-0b9a-4b5e-8b0a-9b0brb0b4c0c@1@https://172.0.0.1:443 + return types.DriverCreateBucketResp( + bucket_id=f"{name}@{view.tenant_id}@{scheme}://{vip}:{port}", + bucket_info=types.Protocol( + s3=types.S3( + region="N/A", + signature_version=types.S3SignatureVersion.UnknownSignature + ) + ) + ) + + def DriverDeleteBucket(self, vms_session, bucket_id, delete_context): + bucket_id, _, _ = bucket_id.split('@') + if view := vms_session.get_view(bucket=bucket_id): + vms_session.delete_folder(view.path, view.tenant_id) + vms_session.delete_view_by_id(view.id) + if user := vms_session.get_user(bucket_id): + vms_session.delete_user(user.id) + return types.DriverDeleteBucketResp() + + def DriverGrantBucketAccess(self, vms_session, bucket_id, name): + bucket_id, _, endpoint = bucket_id.split('@') + user = vms_session.get_user(bucket_id) + creds = vms_session.generate_access_key(user.id) + credentials = dict( + s3=types.CredentialDetails( + secrets={"accessKeyID": creds.access_key, "accessSecretKey": creds.secret_key, "endpoint": endpoint} + ) + ) + return types.DriverGrantBucketAccessResp( + account_id=creds.access_key, + credentials=credentials + ) + + def DriverRevokeBucketAccess(self, vms_session, bucket_id, account_id): + bucket_id, _, _ = bucket_id.split('@') + if user := vms_session.get_user(bucket_id): + vms_session.delete_access_key(user.id, account_id) + return types.DriverRevokeBucketAccessResp() + ################################################################ # @@ -651,26 +959,37 @@ def serve(): if not CONF.ssl_verify: import urllib3 + urllib3.disable_warnings() - server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + server = grpc.server(futures.ThreadPoolExecutor(max_workers=CONF.worker_threads)) - identity = Identity() - 
csi_pb2_grpc.add_IdentityServicer_to_server(identity, server) + identity = CsiIdentity() + csi_grpc.add_IdentityServicer_to_server(identity, server) identity.capabilities.append(types.ExpansionType.ONLINE) if CONF.mode in {CONTROLLER, CONTROLLER_AND_NODE}: - identity.controller = Controller() + identity.controller = CsiController() identity.capabilities.append(types.ServiceType.CONTROLLER_SERVICE) - csi_pb2_grpc.add_ControllerServicer_to_server(identity.controller, server) + csi_grpc.add_ControllerServicer_to_server(identity.controller, server) + CONF.fake_quota_store.mkdir() + CONF.fake_snapshot_store.mkdir() if CONF.mode in {NODE, CONTROLLER_AND_NODE}: - identity.node = Node() - csi_pb2_grpc.add_NodeServicer_to_server(identity.node, server) + identity.node = CsiNode() + csi_grpc.add_NodeServicer_to_server(identity.node, server) + + # COSI + if CONF.mode == COSI_PLUGIN: + cosi_identity = CosiIdentity() + cosi_grpc.add_IdentityServicer_to_server(cosi_identity, server) + + cosi_provisioner = CosiProvisioner() + cosi_grpc.add_ProvisionerServicer_to_server(cosi_provisioner, server) server.add_insecure_port(CONF.endpoint) server.start() - logger.info(f"Server started as '{CONF.mode}', listening on {CONF.endpoint}") + logger.info(f"Server started as '{CONF.mode}', listening on {CONF.endpoint}, spawned threads {CONF.worker_threads}") server.wait_for_termination() diff --git a/vast_csi/utils.py b/vast_csi/utils.py index f73716a3..bb9b23d4 100644 --- a/vast_csi/utils.py +++ b/vast_csi/utils.py @@ -1,75 +1,11 @@ -from collections import defaultdict -import threading import re -import requests -import json +from datetime import datetime +from ipaddress import summarize_address_range, ip_address +from requests.exceptions import HTTPError # noqa -from pprint import pformat from plumbum import local from easypy.caching import locking_cache -from easypy.bunch import Bunch - -from . 
logging import logger - -LOCKS = defaultdict(lambda: threading.Lock()) - - -class ApiError(Exception): - pass - - -class RESTSession(requests.Session): - - def __init__(self, *args, auth, base_url, ssl_verify, **kwargs): - super().__init__(*args, **kwargs) - self.base_url = base_url.rstrip("/") - self.ssl_verify = ssl_verify - self.auth = auth - self.headers["Accept"] = "application/json" - self.headers["Content-Type"] = "application/json" - - def request(self, verb, api_method, *, params=None, **kwargs): - verb = verb.upper() - api_method = api_method.strip("/") - url = f"{self.base_url}/{api_method}/" - logger.info(f">>> [{verb}] {url}") - - if 'data' in kwargs: - kwargs['data'] = json.dumps(kwargs['data']) - - if params or kwargs: - for line in pformat(dict(kwargs, params=params)).splitlines(): - logger.info(f" {line}") - - ret = super().request(verb, url, verify=self.ssl_verify, params=params, **kwargs) - - if ret.status_code == 503 and ret.text: - logger.error(ret.text) - raise ApiError(ret.text) - - ret.raise_for_status() - - logger.info(f"<<< [{verb}] {url}") - if ret.content: - ret = Bunch.from_dict(ret.json()) - for line in pformat(ret).splitlines(): - logger.info(f" {line}") - else: - ret = None - logger.info(f"--- [{verb}] {url}: Done") - return ret - - def __getattr__(self, attr): - if attr.startswith("_"): - raise AttributeError(attr) - - def func(**params): - return self.request("get", attr, params=params) - - func.__name__ = attr - func.__qualname__ = f"{self.__class__.__qualname__}.{attr}" - setattr(self, attr, func) - return func +from . 
def normalize_mount_options(mount_options: str):
    """Split a comma-separated mount-option string into a list of options.

    Deduplicates while preserving first-seen order. The previous
    implementation collected options into a set, so the returned order —
    and therefore the generated mount command — was nondeterministic.
    Empty segments (e.g. from "a,,b" or a trailing comma) are dropped.
    """
    parts = (p for p in mount_options.strip().split(",") if p)
    # dict.fromkeys dedupes while keeping insertion order (guaranteed since 3.7)
    return list(dict.fromkeys(parts))


def string_to_proto_timestamp(str_ts: str):
    """Convert an ISO-8601 string (optionally 'Z'-suffixed) to a protobuf Timestamp."""
    t = datetime.fromisoformat(str_ts.rstrip("Z")).timestamp()
    return types.Timestamp(seconds=int(t), nanos=int(t % 1 * 1e9))


def is_ver_nfs4_present(mount_options: str) -> bool:
    """Return True if a vers=4* or nfsvers=4* option appears in `mount_options`."""
    for opt in mount_options.split(","):
        name, _sep, value = opt.partition("=")
        if name in ("vers", "nfsvers") and value.startswith("4"):
            return True
    return False


def generate_ip_range(ip_ranges):
    """Expand [[start_ip, end_ip], ...] pairs into a flat list of addresses.

    Each range is inclusive on both ends, where the first ip in a pair is the
    start and the second is the end,
    e.g. [["15.0.0.1", "15.0.0.4"]] -> ["15.0.0.1", ..., "15.0.0.4"].
    """
    return [
        ip.compressed
        for start_ip, end_ip in ip_ranges
        for net in summarize_address_range(ip_address(start_ip), ip_address(end_ip))
        for ip in net
    ]


def is_valid_ip(ip_str):
    """Return True if `ip_str` parses as a valid IPv4/IPv6 address."""
    try:
        ip_address(ip_str)
        return True
    except ValueError:
        return False
class CannotUseTrashAPI(OperationNotSupported):
    """Raised when folder deletion through the VMS Trash API is unavailable or disabled."""
    template = "Cannot delete folder via VMS: {reason}"


def _derive_key(salt):
    """Derive a 32-byte AES key from `salt` (bytes) with a single SHA-256 pass."""
    kdf = hashes.Hash(hashes.SHA256(), backend=default_backend())
    kdf.update(salt)
    return kdf.finalize()


@locking_cache
def get_vms_session(username=None, password=None, endpoint=None, ssl_cert=None):
    """Return a cached VMS session for these credentials (a mock session under mock_vast)."""
    config = Config()
    session_cls = TestVmsSession if config.mock_vast else VmsSession
    return session_cls.create(config=config, username=username, password=password, endpoint=endpoint, ssl_cert=ssl_cert)


class RESTSession(requests.Session):
    """Base HTTP session for the VMS REST API: URL building, request/response logging,
    payload JSON encoding, and transparent auth-token refresh on 403."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.headers["Accept"] = "application/json"
        self.headers["Content-Type"] = "application/json"
        self.headers["User-Agent"] = f"VastCSI/{config.plugin_version}.{config.ci_pipe}.{config.git_commit[:10]} ({config._mode.capitalize()}) {default_user_agent()}"
        self.headers['authorization'] = "Bearer #"  # will be updated on first request

    @retrying.debug(times=3, acceptable=retrying.Retry)
    def request(self, verb, api_method, *args, params=None, log_result=True, **kwargs):
        """Send `verb` to `<base_url>/<api_method>/<args...>/` and return the parsed body.

        Returns a Bunch for JSON responses, None for empty bodies.
        Raises ApiError on 400/503; retries after refreshing an expired token on 403.
        Pass log_result=False to keep sensitive payloads/responses out of the log.
        """
        verb = verb.upper()
        api_method = api_method.strip("/")
        url = [self.base_url, api_method]
        url.extend(args)
        url += [""]  # ensures a '/' at the end
        url = "/".join(str(p) for p in url)
        logger.info(f">>> [{verb}] {url}")

        if "data" in kwargs:
            kwargs["data"] = json.dumps(kwargs["data"])

        if params or kwargs:
            if log_result:
                for line in pformat(dict(kwargs, params=params)).splitlines():
                    logger.info(f" {line}")
            else:
                logger.info("*** request payload is hidden ***")

        kwargs.setdefault("timeout", self.config.timeout)

        # self.ssl_verify is provided by subclasses (CA-bundle path or False)
        ret = super().request(
            verb, url, verify=self.ssl_verify, params=params, **kwargs
        )
        if ret.status_code == 403 and "Token is invalid or expired" in ret.text:
            self.refresh_auth_token()
            raise retrying.Retry("refresh token")

        if ret.status_code in (400, 503):
            raise ApiError(response=ret)
        ret.raise_for_status()

        logger.info(f"<<< [{verb}] {url}")
        if ret.content:
            ret = Bunch.from_dict(ret.json())
            if log_result:
                for line in pformat(ret).splitlines():
                    logger.info(f" {line}")
            else:
                size = len(ret) if isinstance(ret, (dict, tuple, list, str)) else '-'
                # BUG FIX: was f"{type(ret)[{size}]}", which subscripts the type
                # object with a set literal and raises TypeError at runtime
                # whenever log_result=False and the response has a body.
                logger.info(f"{type(ret).__name__}[{size}]")
        else:
            ret = None
        logger.info(f"--- [{verb}] {url}: Done")
        return ret

    def __getattr__(self, attr):
        """Auto-generate GET helpers: session.quotas(name=...) -> GET /quotas/?name=...

        The generated function is memoized onto the instance so subsequent
        lookups bypass __getattr__.
        """
        if attr.startswith("_"):
            raise AttributeError(attr)

        def func(*args, log_result=True, **params):
            return self.request("get", attr, *args, params=params, log_result=log_result)

        func.__name__ = attr
        func.__qualname__ = f"{self.__class__.__qualname__}.{attr}"
        setattr(self, attr, func)
        return func
+ """ + def __init__(self, config, username, password, endpoint, ssl_cert): + super().__init__(config) + self.username = username + self.password = password + self.endpoint = endpoint + self.ssl_cert = ssl_cert + self.base_url = f"https://{endpoint}/api/v1" + # Modify the SSL verification CA bundle path established + # by the underlying Certifi library's defaults if ssl_verify==True. + certs_base_dir = "/etc/ssl/certs" + if ssl_cert: + # Store the certificate specified in StorageClass secret (unique for each StorageClass) + hash_obj = hashlib.sha256("".join([username, password, endpoint]).encode()) + unique_hash = hash_obj.hexdigest() + cert_path = f"{certs_base_dir}/{endpoint}-{unique_hash}.crt" + with open(cert_path, "w") as f: + f.write(ssl_cert) + logger.info(f"Generated new ssl certificate: {cert_path!r}") + else: + # Use certificate provided from global `sslCertsSecretName` secret (common for all StorageClasses) + # This way requests library can use mounted CA bundle or default system CA bundle under the same path. 
+ cert_path = f"{certs_base_dir}/ca-certificates.crt" + self.ssl_verify = (False, cert_path)[config.ssl_verify] + + def serialize(self, salt: str): + session_data = pickle.dumps((self.username, self.password, self.endpoint, self.ssl_cert)) + iv = os.urandom(16) + key = _derive_key(salt) + cipher = Cipher(algorithms.AES(key), modes.CFB(iv), backend=default_backend()) + encryptor = cipher.encryptor() + ciphertext = encryptor.update(session_data) + encryptor.finalize() + # Return IV and ciphertext (both base64 encoded for storage) + return base64.b64encode(iv + ciphertext).decode() + + @classmethod + def deserialize(cls, salt: str, encrypted_data: str): + encrypted_data = base64.b64decode(encrypted_data) + # Extract IV and ciphertext + iv = encrypted_data[:16] + ciphertext = encrypted_data[16:] + # Create cipher object + key = _derive_key(salt) + cipher = Cipher(algorithms.AES(key), modes.CFB(iv), backend=default_backend()) + decryptor = cipher.decryptor() + # Decrypt the data + plainbytes = decryptor.update(ciphertext) + decryptor.finalize() + username, password, endpoint, ssl_cert = pickle.loads(plainbytes) + return get_vms_session(username=username, password=password, endpoint=endpoint, ssl_cert=ssl_cert) + + @classmethod + def create(cls, config, username, password, endpoint, ssl_cert): + """ + Create instance of session. + username, password endpoint are optional and in context of csi driver comes from secret if passed as argument. + Otherwise, username, password and endpoint are taken from locally mounted secret (COSI case). + """ + # The presence of the name in the arguments already indicates + # that we have a StorageClass scope secret at this point. + # In other words, it's not a globally mounted secret. Other secret fields will be validated below. 
+ is_global = not bool(username) + if config.vms_credentials_store.exists() and is_global: + username = config.vms_user + password = config.vms_password + endpoint = config.vms_host + if not endpoint: + raise LookupFieldError(field="endpoint", tip="Make sure endpoint is specified in values.yaml.") + if not username: + raise LookupFieldError(field="username", tip="Make sure username is present in secret.") + if not password: + raise LookupFieldError(field="password", tip="Make sure password is present in secret.") + if not endpoint: + raise LookupFieldError(field="endpoint", tip="Make sure endpoint is present in secret.") + session = cls(config, username, password, endpoint, ssl_cert) + config_source = "mounted configuration" if is_global else "secret" + ssl_verification = "enabled" if session.ssl_verify else "disabled" + logger.info(f"VMS session has been instantiated from {config_source}. SSL verification {ssl_verification}.") + return session + + @property + @timecache(HOUR) + def sw_version(self) -> SemVer: + versions = self.versions(status='success')[0].sys_version + return SemVer.loads_fuzzy(versions) + + @requisite(semver="4.7.0") + def delete_folder(self, path: str, tenant_id: int): + """Delete remote cluster folder by provided path.""" + + if self.config.dont_use_trash_api: + # trash api usage is disabled by csi admin or trash api doesn't exist for cluster + raise CannotUseTrashAPI(reason="Disabled by Vast CSI settings (see 'dontUseTrashApi' in your Helm chart)") + + try: + self.delete("/folders/delete_folder/", data={"path": path, "tenant_id": tenant_id}) + except ApiError as e: + if "no such directory" in e.render(): + logger.info(f"Remote directory might have been removed earlier. 
({e})") + elif "trash folder disabled" in e.render(): + raise CannotUseTrashAPI(reason="Trash Folder Access is disabled (see Settings/Cluster/Features in VMS)") + else: + # unpredictable error + raise + + def refresh_auth_token(self): + try: + resp = super(RESTSession, self).request( + "POST", f"{self.base_url}/token/", verify=self.ssl_verify, timeout=5, + json={"username": self.username, "password": self.password} + ) + resp.raise_for_status() + token = resp.json()["access"] + self.headers['authorization'] = f"Bearer {token}" + except ConnectionError as e: + raise ApiError( + response=Bunch( + status_code=None, + text=f"The vms on the designated host {self.config.vms_host!r} " + f"cannot be accessed. Please verify the specified endpoint. " + f"origin error: {e}" + )) + self.usage_report() + + @requisite(semver="5.2.0", ignore=True) + @resilient.error(msg="failed to report usage to VMS") + def usage_report(self): + self.post("plugins/usage/", data={ + "vendor": "vastdata", "name": "vast-csi", + "version": self.config.plugin_version, "build": self.config.git_commit[:10] + }) + + # ---------------------------- + # View policies + def get_view_policy(self, policy_name: str): + """Get view policy by name. Raise exception if not found.""" + if res := self.viewpolicies(name=policy_name): + return res[0] + else: + raise Exception(f"No such view policy: {policy_name}. Please create policy manually") + + # ---------------------------- + # QoS policies + def get_qos_policy(self, policy_name: str): + """Get QoS policy by name. Raise exception if not found.""" + if res := self.qospolicies(name=policy_name): + return res[0] + else: + raise Exception(f"No such QoS policy: {policy_name}. 
Please create policy manually") + + # ---------------------------- + # Views + def get_view(self, **kwargs) -> Bunch: + """ + Get view that contain provided search kwargs eg path, bucket_name + """ + if views := self.views(**kwargs): + if len(views) > 1: + raise Exception(f"Too many views were found by condition {kwargs}: {views}") + return views[0] + + def ensure_view(self, path, protocols, view_policy, qos_policy): + if not (view := self.get_view(path=str(path), policy__name=view_policy)): + view_policy = self.get_view_policy(policy_name=view_policy) + if qos_policy: + qos_policy_id = self.get_qos_policy(qos_policy).id + else: + qos_policy_id = None + view = self.create_view( + path=path, protocols=protocols, policy_id=view_policy.id, + qos_policy_id=qos_policy_id, tenant_id=view_policy.tenant_id + ) + return view + + def ensure_s3view(self, bucket_name, root_export, **kwargs): + if not (view := self.get_view(bucket=bucket_name)): + view_policy = kwargs.pop("view_policy", "s3_default_policy") + protocols = kwargs.pop("protocols", None) or [] + if protocols: + protocols = [p.upper().strip() for p in protocols.split(",")] + if "S3" not in protocols: + protocols.append("S3") + view_policy = self.get_view_policy(policy_name=view_policy) + policy_id = view_policy.id + tenant_id = view_policy.tenant_id + root_export = root_export.strip("/") + path = f"/{root_export}/{bucket_name}" if root_export else f"/{bucket_name}" + for key in kwargs.keys(): + if kwargs[key] in ("true", "false"): + kwargs[key] = yesno_to_bool(kwargs[key]) + view = self.create_view( + bucket=bucket_name, bucket_owner=bucket_name, path=path, + protocols=protocols, policy_id=policy_id, tenant_id=tenant_id, + **kwargs + ) + return view + + def create_view(self, path: str, create_dir=True, **kwargs): + """ + Create new view on remove cluster + Args: + path: full system path to create view for. + **kwargs: additional view parameters. + Returns: + newly created view as dictionary. 
+ """ + data = {"path": str(path), "create_dir": create_dir, **kwargs} + if "SMB" in kwargs.get("protocols", []): + data["share"] = os.path.basename(path) + return Bunch.from_dict(self.post("views", data)) + + def delete_view_by_path(self, path: str): + """Delete view by provided path criteria.""" + if view := self.get_view(path=path): + self.delete_view_by_id(view.id) + + def delete_view_by_id(self, id_: int): + """Delete view by provided id""" + self.delete(f"views/{id_}") + + @contextmanager + def temp_view(self, path, policy_id, tenant_id) -> Bunch: + """ + Create temporary view with autogenerated alias and delite it on context manager exit. + """ + view = self.create_view(path=path, policy_id=policy_id, tenant_id=tenant_id, alias=f"/{uuid4()}") + try: + yield view + finally: + self.delete_view_by_id(view.id) + + # ---------------------------- + @timecache(5 * MINUTE) + def get_vip_pool(self, vip_pool_name: str) -> Bunch: + if not (vippools := self.vippools(name=vip_pool_name)): + raise Exception(f"No VIP Pool named '{vip_pool_name}'") + return vippools[0] + + # Vip pools + def get_vip(self, vip_pool_name: str, tenant_id: int = None): + """ + Get vip by provided vip_pool_name. + tenant_id is optional argument for validation. tenant_id usually + make sense only during volume deletion where deletionVipPool and deletionViewPolicy + is used. For such case additional validation might help to troubleshoot + tenant misconfiguration. + Returns: + Random vip ip from provided vip pool. + """ + vippool = self.get_vip_pool(vip_pool_name) + if isinstance(tenant_id, str): + # for tenant_id passed as volume context. 
+ tenant_id = int(tenant_id) + if tenant_id and vippool.tenant_id and vippool.tenant_id != tenant_id: + raise Exception( + f"Pool {vip_pool_name} belongs to tenant with id {vippool.tenant_id} but {tenant_id=} was requested" + ) + vips = generate_ip_range(vippool.ip_ranges) + assert vips, f"Pool {vip_pool_name} has no available vips" + vip = shuffled(vips)[0] + logger.info(f"Using - {vip}") + return vip + + # ---------------------------- + # Quotas + def create_quota(self, data): + """Create new quota""" + return self.post("quotas", data=data) + + def get_quota(self, volume_id=None, path=None, **kwargs): + """Get quota by provided query params.""" + if volume_id: + kwargs.update(path__contains=volume_id) + elif path: + path = path.rstrip("/") or "/" # for root path + kwargs.update(path=path) + quotas = self.quotas(**kwargs) + if not quotas: + return + elif len(quotas) > 1: + names = ", ".join(sorted(q.name for q in quotas)) + raise Exception(f"Too many quotas on {volume_id}: {names}") + else: + return quotas[0] + + def ensure_quota(self, volume_id, view_path, tenant_id, requested_capacity=None): + if quota := self.get_quota(path=view_path, tenant_id=tenant_id): + # Check if volume with provided name but another capacity already exists. 
+ if requested_capacity and quota.hard_limit != requested_capacity: + raise Exception( + "Volume already exists with different capacity than requested " + f"({quota.hard_limit})") + if quota.tenant_id != tenant_id: + raise Exception( + "Volume already exists with different tenancy ownership " + f"({quota.tenant_name})") + else: + data = dict( + name=volume_id, + path=view_path, + tenant_id=tenant_id + ) + if requested_capacity: + data.update(hard_limit=requested_capacity) + quota = self.create_quota(data=data) + return quota + + def update_quota(self, quota_id, data): + """Update existing quota.""" + self.patch(f"quotas/{quota_id}", data=data) + + def delete_quota(self, quota_id): + """Delete quota""" + self.delete(f"quotas/{quota_id}") + + # ---------------------------- + # Snapshots + def has_snapshots(self, path): + # we intentionally limit the number of results + ret = self.snapshots(path__startswith=path.rstrip("/"), page_size=10) + return ret.results + + def create_snapshot(self, name, path, tenant_id, expiration_delta=None): + """Create new snapshot.""" + data = dict(name=name, path=path, tenant_id=tenant_id) + if expiration_delta: + expiration_time = (datetime.utcnow() + expiration_delta).isoformat() + data["expiration_time"] = expiration_time + return Bunch(self.post("snapshots", data=data)) + + def get_snapshot(self, snapshot_name=None, snapshot_id=None): + """ + Get snapshot by name or by id. + Only one argument should be provided. 
+ """ + if snapshot_name: + if ret := self.snapshots(name=snapshot_name): + if len(ret) > 1: + raise Exception(f"Too many snapshots named {snapshot_name}: ({len(ret)})") + return ret[0] + else: + return self.snapshots(snapshot_id) + + def ensure_snapshot(self, snapshot_name, path, tenant_id, expiration_delta=None): + if snapshot := self.get_snapshot(snapshot_name=snapshot_name): + if snapshot.path.strip("/") != path.strip("/"): + raise Exception( + f"Snapshot already exists, but the specified path {path}" + f" does not correspond to the path of the snapshot {snapshot.path}" + ) + else: + path = path.rstrip("/") + "/" + snapshot = self.create_snapshot(name=snapshot_name, path=path, tenant_id=tenant_id, expiration_delta=expiration_delta) + return snapshot + + def delete_snapshot(self, snapshot_id): + self.delete(f"snapshots/{snapshot_id}") + + def get_snapshot_stream(self, name): + if res := self.globalsnapstreams(name=name): + return res[0] + + def stop_snapshot_stream(self, snapshot_stream_id): + self.patch(f"globalsnapstreams/{snapshot_stream_id}/stop") + + @requisite(semver="4.6.0", operation="create_globalsnapshotstream") + def ensure_snapshot_stream(self, snapshot_id, tenant_id, destination_path, snapshot_stream_name): + if not (snapshot_stream := self.get_snapshot_stream(name=snapshot_stream_name)): + data = dict( + loanee_root_path=destination_path, + name=snapshot_stream_name, + enabled=True, + loanee_tenant_id=tenant_id, # target tenant_id + ) + snapshot_stream = self.post(f"snapshots/{snapshot_id}/clone/", data) + return snapshot_stream + + @requisite(semver="4.6.0", ignore=True) + def ensure_snapshot_stream_deleted(self, snapshot_stream_name): + """ + Stop global snapshot stream in case it is not finished. + Snapshots with expiration time will be deleted as soon as snapshot stream is stopped. + """ + if snapshot_stream := self.get_snapshot_stream(snapshot_stream_name): + if snapshot_stream.status.state != "FINISHED": + # Just stop the stream. 
It will be deleted automatically upon stop request. + self.stop_snapshot_stream(snapshot_stream.id) + else: + self.delete(f"globalsnapstreams/{snapshot_stream.id}", data=dict(remove_dir=False)) + + def get_by_token(self, token): + """ + This method used to iterate over paginated resources (snapshots, quotas etc). + Where after first request to resource list token for next page is returned. + """ + return self.get(token) + + # ---------------------------- + # Users + def create_user(self, name, uid, allow_create_bucket=False, allow_delete_bucket=False): + return self.post("users", data={ + "name": name, "uid": uid, + "allow_create_bucket": allow_create_bucket, "allow_delete_bucket": allow_delete_bucket + }) + + def get_user(self, name): + if users := self.users(name=name): + return users[0] + + def ensure_user(self, name, uid, allow_create_bucket=False, allow_delete_bucket=False): + if user := self.get_user(name=name): + return user + return self.create_user( + name=name, uid=uid, allow_create_bucket=allow_create_bucket, allow_delete_bucket=allow_delete_bucket + ) + + def delete_user(self, user_id): + self.delete(f"users/{user_id}") + + def generate_access_key(self, user_id): + return self.post(f"users/{user_id}/access_keys/", log_result=False) + + def delete_access_key(self, user_id, access_key): + return self.delete(f"users/{user_id}/access_keys/", data={"access_key": access_key}, log_result=False) + + +class TestVmsSession(RESTSession): + """RestSession simulation for sanity tests""" + + def __init__(self, config): + super().__init__(config) + + @classmethod + def create(cls, config: Config, *_, **__): + return cls(config) + + def create_fake_quota(self, volume_id): + class FakeQuota: + + def __init__(self, volume_id): + super().__init__() + self._volume = types.Volume() + self._volume_id = volume_id + self.tenant_id = 1 + self.tenant_name = "test-tenant" + + def __str__(self): + return "<< FakeQuota >>" + + def __getattr__(self, item): + return 
getattr(self._volume, item) + + @property + def id(self): + return self + + @property + def path(self): + return local.path(os.environ["X_CSI_NFS_EXPORT"])[self._volume_id] + + @property + def hard_limit(self): + return 1000 + + return FakeQuota(volume_id=volume_id) + + def _mount(self, src, tgt, flags=""): + executable = cmd.mount + flags = [f.strip() for f in flags.split(",")] + flags += "port=2049,nolock,vers=3".split(",") + executable = executable["-o", ",".join(flags)] + try: + executable[src, tgt] & logger.pipe_info("mount >>") + except ProcessExecutionError as exc: + raise MountFailed(detail=exc.stderr, src=src, tgt=tgt) + + def _to_mock_volume(self, vol_id): + vol_dir = self._mock_mount[vol_id] + logger.info(f"{vol_dir}") + if not vol_dir.is_dir(): + logger.info(f"{vol_dir} is not dir") + return + with self.config.fake_quota_store[vol_id].open("rb") as f: + vol = self.create_fake_quota(volume_id=vol_id) + vol.ParseFromString(f.read()) + return vol + + @cached_property + def _mock_mount(self): + target_path = self.config.controller_root_mount + if not target_path.exists(): + target_path.mkdir() + + if not os.path.ismount(target_path): + mount_spec = f"{self.config.nfs_server}:{self.config.sanity_test_nfs_export}" + self._mount( + mount_spec, + target_path, + flags=",".join(self.config.mount_options), + ) + logger.info(f"mounted successfully: {target_path}") + + return target_path + + def get_vip(self, *_, **__) -> str: + return self.config.nfs_server + + def get_quota(self, volume_id: str) -> "FakeQuota": + """Create fake quota object which can simulate attributes of original Quota butch.""" + return self._to_mock_volume(volume_id) + + def delete_quota(self, quota: "FakeQuota"): + """ + Delete all folders and files under '/csi-volumes/ + Normally in this method quota id should be passed but here we abuse first position argument to + pass FakeQuota which were initialized before and has '_volume_id' attribute. 
+ """ + self.config.controller_root_mount[quota._volume_id].delete() + self.config.fake_quota_store[quota._volume_id].delete() + + @contextmanager + def temp_view(self, path, policy_id, tenant_id): + yield Bunch( + id=1, + alias=path, + tenant_id=tenant_id, + tenant_name="test-tenant" + ) + + def get_view(self, *_, **__): + return Bunch(id=1, policy_id=1, tenant_id=1) + + def get_view_policy(self, *_, **__): + return Bunch(id=1, tenant_id=1, tenant_name="test-tenant") + + def get_snapshot(self, *_, **__): + return [] + + def _empty(self, *_, **__): + """ + empty method for test scenarios + Method needs to be declared for compatibility with sanity tests. + """ + pass + + update_quota = _empty + delete_view_by_path = _empty + delete_view_by_id = _empty + ensure_snapshot_stream_deleted = _empty + refresh_auth_token = _empty + delete_folder = _empty + is_trash_api_usable = _empty + has_snapshots = _empty diff --git a/vast_csi/volume_builder.py b/vast_csi/volume_builder.py new file mode 100644 index 00000000..fc87bbf1 --- /dev/null +++ b/vast_csi/volume_builder.py @@ -0,0 +1,532 @@ +import os +import re +from dataclasses import dataclass +from abc import ABC +from base64 import b32encode +from random import getrandbits +from datetime import timedelta +from typing import Optional, final, TypeVar + +from easypy.bunch import Bunch + +from . import csi_types as types +from .csi_types import INVALID_ARGUMENT +from .utils import is_ver_nfs4_present +from plumbum import local + +from .exceptions import VolumeAlreadyExists, SourceNotFound, Abort, MissingParameter +from .utils import is_valid_ip +from .quantity import parse_quantity + +CreatedVolumeT = TypeVar("CreatedVolumeT") + + +class VolumeBuilderI(ABC): + """Base Volume Builder interface""" + + def build_volume_name(self, **kwargs) -> str: + """ + Final implementation should build volume name from provided argumenents and/or from other params + depends on conditions. + """ + ... 
+
+    def get_requested_capacity(self) -> int:
+        """Final implementation should return requested capacity based on provided params and/or other inputs"""
+        ...
+
+    def get_existing_capacity(self) -> int:
+        """Final implementation should return existing volume capacity based on provided params and/or other inputs"""
+        ...
+
+    def build_volume(self, **kwargs) -> CreatedVolumeT:
+        """
+        Final implementation should perform final actions for creating volume and/or return all necessary
+        data to create volume.
+        """
+        ...
+
+    @classmethod
+    def from_parameters(cls, *args, **kwargs):
+        """Parse context and return builder instance."""
+        ...
+
+
+@dataclass
+class BaseBuilder(VolumeBuilderI):
+    """Common builder with shared methods/attributes"""
+
+    # Required
+    vms_session: "RESTSession"
+    configuration: "CONF"
+    name: str  # Name of volume or snapshot
+    rw_access_mode: bool
+    root_export: str
+    volume_name_fmt: str
+    view_policy: str
+    mount_options: str
+
+    # Optional
+    volume_content_source: Optional[str] = None  # Either volume or snapshot
+    ephemeral_volume_name: Optional[str] = None
+    vip_pool_name: Optional[str] = None
+    vip_pool_fqdn: Optional[str] = None
+    qos_policy: Optional[str] = None
+    capacity_range: Optional[int] = None  # Optional desired volume capacity
+    pvc_name: Optional[str] = None
+    pvc_namespace: Optional[str] = None
+
+    @property
+    def mount_protocol(self) -> str:
+        return "NFS4" if is_ver_nfs4_present(self.mount_options) else "NFS"
+
+    @property
+    def volume_context(self) -> dict:
+        context = {
+            "root_export": self.root_export_abs,
+            "mount_options": self.mount_options,
+            "view_policy": self.view_policy,
+            "protocol": self.mount_protocol,
+        }
+        if self.vip_pool_name:
+            context["vip_pool_name"] = self.vip_pool_name
+        elif self.vip_pool_fqdn:
+            context["vip_pool_fqdn"] = self.vip_pool_fqdn_with_prefix
+        return context
+
+    @property
+    def view_path(self) -> str:
+        return os.path.join(self.root_export_abs, self.name)
+
+    @property
+    def 
root_export_abs(self) -> str: + return os.path.join("/", self.root_export) + + @property + def vip_pool_fqdn_with_prefix(self) -> str: + prefix = b32encode(getrandbits(16).to_bytes(2, "big")).decode("ascii").rstrip("=") + return f"{prefix}.{self.vip_pool_fqdn}" + + + @classmethod + def from_parameters( + cls, + conf, + vms_session, + name, + volume_capabilities, + capacity_range, + parameters, + volume_content_source, + ephemeral_volume_name, + ) -> "BaseBuilder": + """Parse context and return builder instance.""" + mount_options = cls._parse_mount_options(volume_capabilities) + rw_access_mode = cls._parse_access_mode(volume_capabilities) + root_export = cls._get_required_param(parameters, "root_export") + view_policy = cls._get_required_param(parameters, "view_policy") + + vip_pool_fqdn = parameters.get("vip_pool_fqdn") + vip_pool_name = parameters.get("vip_pool_name") + cls._validate_mount_src(vip_pool_name, vip_pool_fqdn, conf.use_local_ip_for_mount) + + volume_name_fmt = parameters.get("volume_name_fmt", conf.name_fmt) + qos_policy = parameters.get("qos_policy") + + return cls( + vms_session=vms_session, + configuration=conf, + name=name, + rw_access_mode=rw_access_mode, + capacity_range=capacity_range, + pvc_name=parameters.get("csi.storage.k8s.io/pvc/name"), + pvc_namespace=parameters.get("csi.storage.k8s.io/pvc/namespace"), + volume_content_source=volume_content_source, + ephemeral_volume_name=ephemeral_volume_name, + root_export=root_export, + volume_name_fmt=volume_name_fmt, + view_policy=view_policy, + vip_pool_name=vip_pool_name, + vip_pool_fqdn=vip_pool_fqdn, + mount_options=mount_options, + qos_policy=qos_policy, + ) + + @classmethod + def _get_required_param(cls, parameters, param_name): + """Get required parameter or raise MissingParameter exception.""" + value = parameters.get(param_name) + if value is None: + raise MissingParameter(param=param_name) + return value + + @classmethod + def _parse_mount_options(cls, volume_capabilities): + """Get mount 
options from volume capabilities.""" + try: + mount_capability = next(cap for cap in volume_capabilities if cap.HasField("mount")) + mount_flags = mount_capability.mount.mount_flags + mount_options = ",".join(mount_flags) + return ",".join(re.sub(r"[\[\]]", "", mount_options).replace(",", " ").split()) + except StopIteration: + return "" + + @classmethod + def _parse_access_mode(cls, volume_capabilities): + """Check if list of provided access modes contains read-write mode.""" + rw_access_modes = { + types.AccessModeType.SINGLE_NODE_WRITER, + types.AccessModeType.MULTI_NODE_MULTI_WRITER + } + return any( + cap.access_mode.mode in rw_access_modes + for cap in volume_capabilities if cap.HasField("access_mode") + ) + + @classmethod + def _validate_mount_src(cls, vip_pool_name, vip_pool_fqdn, local_ip_for_mount): + """Validate that only one of vip_pool_name, vip_pool_fqdn or local_ip_for_mount is provided.""" + if vip_pool_name and vip_pool_fqdn: + raise Abort( + INVALID_ARGUMENT, + "vip_pool_name and vip_pool_fqdn are mutually exclusive. Provide one of them." + ) + if not (vip_pool_name or vip_pool_fqdn) and not local_ip_for_mount: + raise Abort( + INVALID_ARGUMENT, + "either vip_pool_name, vip_pool_fqdn or use_local_ip_for_mount must be provided." 
+ ) + if local_ip_for_mount and not is_valid_ip(local_ip_for_mount): + raise Abort(INVALID_ARGUMENT, f"Local IP address: {local_ip_for_mount} is invalid") + + def get_requested_capacity(self) -> int: + """Return desired allocated capacity if provided, else return 0.""" + return self.capacity_range.required_bytes if self.capacity_range else 0 + + def build_volume_name(self) -> str: + """Build volume name using format csi:{namespace}:{name}:{id}""" + volume_id = self.name + if self.ephemeral_volume_name: + return self.ephemeral_volume_name + + if self.pvc_name and self.pvc_namespace: + volume_name = self.volume_name_fmt.format( + namespace=self.pvc_namespace, name=self.pvc_name, id=volume_id + ) + else: + volume_name = f"csi-{volume_id}" + + if self.configuration.truncate_volume_name: + volume_name = volume_name[:self.configuration.truncate_volume_name] + + return volume_name + + +# ---------------------------------------------------------------------------------------------------------------------- +# Final builders +# ---------------------------------------------------------------------------------------------------------------------- + +@final +class EmptyVolumeBuilder(BaseBuilder): + """Builder for k8s PersistentVolumeClaim, PersistentVolume etc.""" + + def build_volume(self) -> types.Volume: + """Main build entrypoint for volumes.""" + volume_name = self.build_volume_name() + requested_capacity = self.get_requested_capacity() + volume_context = self.volume_context + volume_context["volume_name"] = volume_name + + view = self.vms_session.ensure_view( + path=self.view_path, protocols=[self.mount_protocol], view_policy=self.view_policy, + qos_policy=self.qos_policy + ) + quota = self.vms_session.ensure_quota( + volume_id=volume_name, view_path=self.view_path, + tenant_id=view.tenant_id, requested_capacity=requested_capacity + ) + volume_context.update( + quota_id=str(quota.id), + view_id=str(view.id), + tenant_id=str(view.tenant_id) + ) + + return types.Volume( + 
capacity_bytes=requested_capacity, + volume_id=self.name, + volume_context=volume_context, + ) + + +@final +class VolumeFromVolumeBuilder(BaseBuilder): + """Cloning volumes from existing.""" + + def build_volume(self) -> types.Volume: + volume_name = self.build_volume_name() + requested_capacity = self.get_requested_capacity() + volume_context = self.volume_context + volume_context["volume_name"] = volume_name + + source_volume_id = self.volume_content_source.volume.volume_id + if not (source_quota := self.vms_session.get_quota(source_volume_id)): + raise SourceNotFound(f"Unknown volume: {source_volume_id}") + + source_path = source_quota.path + tenant_id = source_quota.tenant_id + snapshot_name = f"snp-{self.name}" + snapshot_stream_name = f"strm-{self.name}" + + snapshot = self.vms_session.ensure_snapshot( + snapshot_name=snapshot_name, path=source_path, + tenant_id=tenant_id, expiration_delta=timedelta(minutes=5) + ) + snapshot_stream = self.vms_session.ensure_snapshot_stream( + snapshot_id=snapshot.id, destination_path=self.view_path, tenant_id=tenant_id, + snapshot_stream_name=snapshot_stream_name, + ) + # View should go after snapshot stream. 
+ # Otherwise, snapshot stream action will detect folder already exist and will be rejected + view = self.vms_session.ensure_view( + path=self.view_path, protocols=[self.mount_protocol], + view_policy=self.view_policy, qos_policy=self.qos_policy + ) + quota = self.vms_session.ensure_quota( + volume_id=volume_name, view_path=self.view_path, + tenant_id=view.tenant_id, requested_capacity=requested_capacity + ) + volume_context.update( + quota_id=str(quota.id), view_id=str(view.id), + tenant_id=str(tenant_id), snapshot_stream_name=snapshot_stream.name + ) + + return types.Volume( + capacity_bytes=requested_capacity, + volume_id=self.name, + content_source=types.VolumeContentSource( + volume=types.VolumeSource(volume_id=source_volume_id) + ), + volume_context=volume_context, + ) + + +@final +class VolumeFromSnapshotBuilder(BaseBuilder): + """Builder for k8s Snapshots.""" + + def build_volume(self) -> types.Volume: + """ + Main entry point for snapshots. + Create snapshot representation. + """ + source_snapshot_id = self.volume_content_source.snapshot.snapshot_id + if not (snapshot := self.vms_session.get_snapshot(snapshot_id=source_snapshot_id)): + raise SourceNotFound(f"Unknown snapshot: {source_snapshot_id}") + volume_context = self.volume_context + + if self.rw_access_mode: + # Create volume from snapshot for READ_WRITE modes. + # quota and view will be created. + # The contents of the source snapshot will be replicated to view folder + # using an intermediate global snapshot stream. 
+ tenant_id = snapshot.tenant_id + volume_name = self.build_volume_name() + requested_capacity = self.get_requested_capacity() + volume_context["volume_name"] = volume_name + + snapshot_stream_name = f"strm-{self.name}" + snapshot_stream = self.vms_session.ensure_snapshot_stream( + snapshot_id=snapshot.id, destination_path=self.view_path, tenant_id=tenant_id, + snapshot_stream_name=snapshot_stream_name, + ) + view = self.vms_session.ensure_view( + path=self.view_path, protocols=[self.mount_protocol], + view_policy=self.view_policy, qos_policy=self.qos_policy + ) + quota = self.vms_session.ensure_quota( + volume_id=volume_name, view_path=self.view_path, + tenant_id=view.tenant_id, requested_capacity=requested_capacity + ) + volume_context.update( + quota_id=str(quota.id), view_id=str(view.id), + tenant_id=str(tenant_id), snapshot_stream_name=snapshot_stream.name + ) + + else: + # Create volume from snapshot for READ_ONLY modes. + # Such volume has no quota and view representation on VAST. + # Volume within pod will be directly mounted to snapshot source folder. + requested_capacity = 0 # read-only volumes from snapshots have no capacity. + snapshot_path = local.path(snapshot.path) + # Compute root_export from snapshot path. 
This value should be passed as context for appropriate + # mounting within 'ControllerPublishVolume' endpoint + self.root_export = snapshot_path.parent + path = snapshot_path / ".snapshot" / snapshot.name + snapshot_base_path = str(path.relative_to(self.root_export)) + volume_context.update(snapshot_base_path=snapshot_base_path, root_export=self.root_export) + + return types.Volume( + capacity_bytes=requested_capacity, + volume_id=self.name, + content_source=types.VolumeContentSource( + snapshot=types.SnapshotSource(snapshot_id=source_snapshot_id) + ), + volume_context=volume_context + ) + + +@final +@dataclass +class StaticVolumeBuilder(BaseBuilder): + create_view: bool = True + create_quota: bool = True + + @classmethod + def from_parameters( + cls, + conf, + vms_session, + name, + volume_capabilities, + parameters, + create_view, + create_quota, + ): + """Parse context and return builder instance""" + mount_options = cls._parse_mount_options(volume_capabilities) + rw_access_mode = cls._parse_access_mode(volume_capabilities) + root_export = parameters["root_export"] + # View policy is required only when view is about to be created. 
+ if not (view_policy := parameters.get("view_policy")) and create_view: + raise MissingParameter(param="view_policy") + vip_pool_fqdn = parameters.get("vip_pool_fqdn") + vip_pool_name = parameters.get("vip_pool_name") + cls._validate_mount_src(vip_pool_name, vip_pool_fqdn, conf.use_local_ip_for_mount) + volume_name_fmt = parameters.get("volume_name_fmt", conf.name_fmt) + qos_policy = parameters.get("qos_policy") + if "size" in parameters: + required_bytes = int(parse_quantity(parameters["size"])) + capacity_range = Bunch(required_bytes=required_bytes) + else: + capacity_range = None + return cls( + vms_session=vms_session, + configuration=conf, + name=name, + capacity_range=capacity_range, + rw_access_mode=rw_access_mode, + pvc_name=parameters.get("csi.storage.k8s.io/pvc/name"), + pvc_namespace=parameters.get("csi.storage.k8s.io/pvc/namespace"), + root_export=root_export, + volume_name_fmt=volume_name_fmt, + view_policy=view_policy, + vip_pool_name=vip_pool_name, + vip_pool_fqdn=vip_pool_fqdn, + mount_options=mount_options, + qos_policy=qos_policy, + create_view=create_view, + create_quota=create_quota + ) + + @property + def view_path(self): + return self.root_export_abs + + def build_volume(self) -> dict: + """ + Main build entrypoint for static volumes. + Create volume from pvc, pv etc. + """ + volume_name = self.build_volume_name() + volume_context = self.volume_context + volume_context["volume_name"] = volume_name + + if self.create_view: + # Check if view with expected system path already exists. 
+ view = self.vms_session.ensure_view( + path=self.view_path, protocols=[self.mount_protocol], + view_policy=self.view_policy, qos_policy=self.qos_policy + ) + else: + if not (view := self.vms_session.get_view(path=self.view_path)): + raise SourceNotFound(f"View {self.view_path} does not exist but claimed as existing.") + + volume_context.update(view_id=str(view.id), tenant_id=str(view.tenant_id)) + + if self.create_quota: + quota = self.vms_session.ensure_quota( + volume_id=volume_name, view_path=self.view_path, + tenant_id=view.tenant_id, requested_capacity=self.get_requested_capacity() + ) + volume_context.update(quota_id=str(quota.id)) + return volume_context + + +@final +class TestVolumeBuilder(BaseBuilder): + """Test volumes builder for sanity checks""" + + @classmethod + def from_parameters( + cls, + conf, + vms_session, + name, + volume_capabilities, + capacity_range, + parameters, + volume_content_source, + **kwargs, + ): + rw_access_mode = cls._parse_access_mode(volume_capabilities) + root_export = volume_name_fmt = view_policy = mount_options = "" + return cls( + vms_session=vms_session, + configuration=conf, + name=name, + capacity_range=capacity_range, + rw_access_mode=rw_access_mode, + volume_content_source=volume_content_source, + root_export=root_export, + volume_name_fmt=volume_name_fmt, + view_policy=view_policy, + mount_options=mount_options, + ) + + def build_volume_name(self) -> str: + pass + + def get_existing_capacity(self) -> Optional[int]: + volume = self.vms_session.get_quota(self.name) + if volume: + return volume.capacity_bytes + + def build_volume(self) -> types.Volume: + """Main build entrypoint for tests""" + if content_source := self.volume_content_source: + if content_source.snapshot.snapshot_id: + if not self.configuration.fake_snapshot_store[content_source.snapshot.snapshot_id].exists(): + raise SourceNotFound(f"Source snapshot does not exist: {content_source.snapshot.snapshot_id}") + elif content_source.volume.volume_id: + if not 
self.configuration.fake_quota_store[content_source.volume.volume_id].exists(): + raise SourceNotFound(f"Source volume does not exist: {content_source.volume.volume_id}") + + requested_capacity = self.get_requested_capacity() + if existing_capacity := self.get_existing_capacity(): + if existing_capacity != requested_capacity: + raise VolumeAlreadyExists( + "Volume already exists with different capacity than requested" + f"({existing_capacity})", + ) + + vol_dir = self.vms_session._mock_mount[self.name] + vol_dir.mkdir() + + volume = types.Volume( + capacity_bytes=requested_capacity, + volume_id=self.name, + ) + + with self.configuration.fake_quota_store[self.name].open("wb") as f: + f.write(volume.SerializeToString()) + return volume diff --git a/version.txt b/version.txt index c2f6de90..21222cee 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v2.0.5 +v2.5.0