Skip to content

Commit

Permalink
Merge pull request #61 from tnozicka/update-scripts
Browse files Browse the repository at this point in the history
Update CI scripts
  • Loading branch information
scylla-operator-bot[bot] authored Oct 31, 2024
2 parents 55f97bd + 7d76aeb commit 91fdd8f
Show file tree
Hide file tree
Showing 4 changed files with 185 additions and 87 deletions.
81 changes: 81 additions & 0 deletions hack/.ci/lib/e2e.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
#!/bin/bash
#
# Copyright (C) 2024 ScyllaDB
#
# CI helper library: collects e2e artifacts using scylla-operator's must-gather.

# -e/-E: abort on errors (inherited by functions/subshells via -E + inherit_errexit),
# -u: unset variables are errors, -x: trace commands into CI logs,
# pipefail: a pipeline fails if any stage fails.
set -euExo pipefail
shopt -s inherit_errexit

# Shared kubectl helpers (kubectl_apply, kubectl_create, wait-for-container-exit-with-logs).
source "$( dirname "${BASH_SOURCE[0]}" )/../../lib/kube.sh"

# Operator image used both for the sleeping sidecar and the must-gather container.
SO_IMAGE="${SO_IMAGE:-quay.io/scylladb/scylla-operator:latest}"

# gather-artifacts is a self-sufficient function that collects artifacts without depending on any external objects.
# $1 - target directory
function gather-artifacts {
  # Collect cluster artifacts with scylla-operator must-gather into target directory $1.
  # Runs a two-container pod: "must-gather" produces artifacts into a shared emptyDir,
  # "wait-for-artifacts" sleeps so the artifacts can be copied out after must-gather exits.
  # Exits 2 on bad arguments; exits with must-gather's code if the collection fails.

  # ${1:-} (not ${1+x}) so an empty argument is rejected too, matching the message.
  if [[ -z "${1:-}" ]]; then
    echo -e "Missing target directory.\nUsage: ${FUNCNAME[0]} target_directory" >&2
    exit 2
  fi

  # SO_IMAGE is defaulted at file scope but may be overridden to an empty string;
  # ${SO_IMAGE:-} catches both unset and empty (the old ${SO_IMAGE+x} check could not).
  if [[ -z "${SO_IMAGE:-}" ]]; then
    echo "SO_IMAGE can't be empty" >&2
    exit 2
  fi

  # Idempotent setup: render with --dry-run=client and server-side apply the result.
  kubectl create namespace gather-artifacts --dry-run=client -o=yaml | kubectl_apply -f=-
  kubectl create clusterrolebinding gather-artifacts --clusterrole=cluster-admin --serviceaccount=gather-artifacts:default --dry-run=client -o=yaml | kubectl_apply -f=-
  kubectl create -n=gather-artifacts pdb must-gather --selector='app=must-gather' --max-unavailable=0 --dry-run=client -o=yaml | kubectl_apply -f=-

  kubectl_create -n=gather-artifacts -f=- <<EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: must-gather
  name: must-gather
spec:
  restartPolicy: Never
  containers:
  - name: wait-for-artifacts
    command:
    - /usr/bin/sleep
    - infinity
    image: "${SO_IMAGE}"
    imagePullPolicy: Always
    volumeMounts:
    - name: artifacts
      mountPath: /tmp/artifacts
  - name: must-gather
    args:
    - must-gather
    - --all-resources
    - --loglevel=2
    - --dest-dir=/tmp/artifacts
    image: "${SO_IMAGE}"
    imagePullPolicy: Always
    volumeMounts:
    - name: artifacts
      mountPath: /tmp/artifacts
  volumes:
  - name: artifacts
    emptyDir: {}
EOF
  kubectl -n=gather-artifacts wait --for=condition=Ready pod/must-gather

  # Stream must-gather logs and block until its container terminates.
  local exit_code
  exit_code="$( wait-for-container-exit-with-logs gather-artifacts must-gather must-gather )"

  # Copy artifacts out through the still-running sidecar container.
  kubectl -n=gather-artifacts cp --retries=42 -c=wait-for-artifacts must-gather:/tmp/artifacts "${1}"
  ls -l "${1}"

  kubectl -n=gather-artifacts delete pod/must-gather --wait=false

  if [[ "${exit_code}" -ne "0" ]]; then
    echo "Collecting artifacts using must-gather failed" >&2
    exit "${exit_code}"
  fi
}

# EXIT-trap shim: collect must-gather artifacts into the CI artifacts directory.
function gather-artifacts-on-exit {
  local dest
  dest="${ARTIFACTS}/must-gather"
  gather-artifacts "${dest}"
}
133 changes: 61 additions & 72 deletions hack/.ci/run-e2e-gke.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,17 @@
set -euExo pipefail
shopt -s inherit_errexit

# ARTIFACTS is the CI-provided directory where logs and must-gather output land.
# NOTE(review): ${ARTIFACTS+x} only detects an unset variable; ARTIFACTS="" still
# passes although the message says "can't be empty" — confirm intended.
if [ -z "${ARTIFACTS+x}" ]; then
echo "ARTIFACTS can't be empty" > /dev/stderr
exit 2
fi

# Shared kubectl helpers and the artifact-collection library.
source "$( dirname "${BASH_SOURCE[0]}" )/../lib/kube.sh"
source "$( dirname "${BASH_SOURCE[0]}" )/lib/e2e.sh"
parent_dir="$( dirname "${BASH_SOURCE[0]}" )"

# Collect must-gather artifacts on every exit path, success or failure.
trap gather-artifacts-on-exit EXIT

if [ -z ${E2E_SUITE+x} ]; then
echo "E2E_SUITE can't be empty" > /dev/stderr
exit 2
Expand All @@ -31,87 +42,65 @@ export REENTRANT

field_manager=run-e2e-script

# Create-or-apply wrapper: strict creation in CI, server-side apply when re-running
# locally (REENTRANT=true). All changes are attributed to ${field_manager}.
function kubectl_create {
  if [[ "${REENTRANT}" == "true" ]]; then
    # Development iterations update existing objects in place.
    kubectl apply --server-side=true --field-manager="${field_manager}" --force-conflicts "$@"
  else
    # Actual CI runs must fail if an object with the same name already exists.
    kubectl create --field-manager="${field_manager}" "$@"
  fi
}

# Collects must-gather artifacts from the cluster into "${ARTIFACTS}/must-gather".
function gather-artifacts {
  # Run must-gather in a pod. The inline bash traps EXIT to touch /tmp/done and then
  # block until /tmp/exit appears, keeping the container alive until artifacts are copied out.
  kubectl -n e2e run --restart=Never --image="quay.io/scylladb/scylla-operator:latest" --labels='app=must-gather' --command=true must-gather -- bash -euExo pipefail -O inherit_errexit -c "function wait-for-artifacts { touch /tmp/done && until [[ -f '/tmp/exit' ]]; do sleep 1; done } && trap wait-for-artifacts EXIT && mkdir /tmp/artifacts && scylla-operator must-gather --all-resources --loglevel=2 --dest-dir=/tmp/artifacts"
  kubectl -n e2e wait --for=condition=Ready pod/must-gather

  # Setup artifacts transfer when finished and unblock the must-gather pod when done.
  (
    function unblock-must-gather-pod {
      kubectl -n e2e exec pod/must-gather -- bash -euEo pipefail -O inherit_errexit -c "touch /tmp/exit"
    }
    # Always release the pod, even if the copy below fails.
    trap unblock-must-gather-pod EXIT

    # Wait for the in-pod run to finish (/tmp/done), then copy the artifacts out.
    kubectl -n e2e exec pod/must-gather -- bash -euEo pipefail -O inherit_errexit -c "until [[ -f /tmp/done ]]; do sleep 1; done; ls -l /tmp/artifacts"
    kubectl -n e2e cp --retries=42 must-gather:/tmp/artifacts "${ARTIFACTS}/must-gather"
    ls -l "${ARTIFACTS}"
  ) &
  must_gather_bg_pid=$!

  # Stream logs while must-gather runs, then read the container's exit code.
  kubectl -n e2e logs -f pod/must-gather
  exit_code=$( kubectl -n e2e get pods/must-gather --output='jsonpath={.status.containerStatuses[0].state.terminated.exitCode}' )
  kubectl -n e2e delete pod/must-gather --wait=false

  if [[ "${exit_code}" != "0" ]]; then
    echo "Collecting artifacts using must-gather failed"
    exit "${exit_code}"
  fi

  # Barrier: ensure the background copy finished before returning.
  wait "${must_gather_bg_pid}"
}

# EXIT-trap handler: best-effort artifact collection that must not mask the
# script's own exit path.
function handle-exit {
  # Bug fix: the original fallback was `|| "Error gathering artifacts" > /dev/stderr`,
  # which tried to EXECUTE the message string as a command ("echo" was missing),
  # failing with "command not found" instead of printing the diagnostic.
  gather-artifacts || echo "Error gathering artifacts" >&2
}

# Collect artifacts on any exit path.
trap handle-exit EXIT

# Pre-create e2e namespace to be available to artifacts collection if something were to go wrong while deploying the stack.
kubectl create namespace e2e --dry-run=client -o yaml | kubectl_create -f -
kubectl create clusterrolebinding e2e --clusterrole=cluster-admin --serviceaccount=e2e:default --dry-run=client -o yaml | kubectl_create -f -

# Deploy the stack under test; fail fast if it takes longer than 10 minutes.
timeout -v 10m ./hack/ci-deploy.sh "${E2E_IMAGE}"

# Bump driver log verbosity so its logs are useful in CI artifacts.
kubectl -n=local-csi-driver patch daemonset/local-csi-driver --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--loglevel=4"}]'
kubectl -n=local-csi-driver rollout status daemonset/local-csi-driver

kubectl create -n e2e pdb my-pdb --selector='app=e2e' --min-available=1 --dry-run=client -o yaml | kubectl_create -f -

# Run the e2e suite in a pod. The inline bash traps EXIT to touch /tmp/done and block
# on /tmp/exit, keeping the container alive until artifacts are copied out (see below).
kubectl -n e2e run --restart=Never --image="${E2E_TESTS_IMAGE}" --labels='app=e2e' --command=true e2e -- bash -euExo pipefail -O inherit_errexit -c "function wait-for-artifacts { touch /tmp/done && until [[ -f '/tmp/exit' ]]; do sleep 1; done } && trap wait-for-artifacts EXIT && mkdir /tmp/artifacts && local-csi-driver-tests run '${E2E_SUITE}' --loglevel=2 --color=false --artifacts-dir=/tmp/artifacts"
kubectl -n e2e wait --for=condition=Ready pod/e2e

# Setup artifacts transfer when finished and unblock the e2e pod when done.
(
  function unblock-e2e-pod {
    kubectl -n e2e exec pod/e2e -- bash -euEo pipefail -O inherit_errexit -c "touch /tmp/exit"
  }
  trap unblock-e2e-pod EXIT

  kubectl -n e2e exec pod/e2e -- bash -euEo pipefail -O inherit_errexit -c "until [[ -f /tmp/done ]]; do sleep 1; done; ls -l /tmp/artifacts"
  kubectl -n e2e cp --retries=42 e2e:/tmp/artifacts "${ARTIFACTS}"
  ls -l "${ARTIFACTS}"
) &
e2e_bg_pid=$!

# Stream the test logs, then read the container's exit code once it terminates.
kubectl -n e2e logs -f pod/e2e
exit_code=$( kubectl -n e2e get pods/e2e --output='jsonpath={.status.containerStatuses[0].state.terminated.exitCode}' )
kubectl -n e2e delete pod/e2e --wait=false

# NOTE(review): "exit 2" runs in a subshell so it only sets that subshell's status;
# the script still aborts because the non-zero status trips `set -e` — confirm intended.
wait "${e2e_bg_pid}" || ( echo "Collecting e2e artifacts failed" && exit 2 )
# Pre-create e2e namespace to be available to artifacts collection if something were to go wrong while deploying the stack.
kubectl create namespace e2e --dry-run=client -o=yaml | kubectl_create -f=-
kubectl create clusterrolebinding e2e --clusterrole=cluster-admin --serviceaccount=e2e:default --dry-run=client -o=yaml | kubectl_create -f=-
kubectl create -n=e2e pdb my-pdb --selector='app=e2e' --min-available=1 --dry-run=client -o=yaml | kubectl_create -f=-

# The test pod has two containers sharing an emptyDir volume:
# - "e2e" runs the suite and writes artifacts into the shared volume;
# - "wait-for-artifacts" just sleeps so artifacts can still be copied out after "e2e" exits.
kubectl_create -n=e2e -f=- <<EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: e2e
  name: e2e
spec:
  restartPolicy: Never
  containers:
  - name: wait-for-artifacts
    command:
    - /usr/bin/sleep
    - infinity
    image: "${E2E_TESTS_IMAGE}"
    imagePullPolicy: Always
    volumeMounts:
    - name: artifacts
      mountPath: /tmp/artifacts
  - name: e2e
    command:
    - local-csi-driver-tests
    - run
    - "${E2E_SUITE}"
    - --loglevel=2
    - --color=false
    - --artifacts-dir=/tmp/artifacts
    image: "${E2E_TESTS_IMAGE}"
    imagePullPolicy: Always
    volumeMounts:
    - name: artifacts
      mountPath: /tmp/artifacts
  volumes:
  - name: artifacts
    emptyDir: {}
EOF
kubectl -n=e2e wait --for=condition=Ready pod/e2e

# Stream the test logs and block until the "e2e" container terminates; capture its exit code.
exit_code="$( wait-for-container-exit-with-logs e2e e2e e2e )"

# Copy artifacts out through the still-running sidecar container.
kubectl -n=e2e cp --retries=42 e2e:/tmp/artifacts -c=wait-for-artifacts "${ARTIFACTS}"
ls -l "${ARTIFACTS}"

kubectl -n=e2e delete pod/e2e --wait=false

if [[ "${exit_code}" != "0" ]]; then
echo "E2E tests failed"
exit "${exit_code}"
fi

# Reap any remaining background jobs before declaring success.
wait
echo "E2E tests finished successfully"
17 changes: 2 additions & 15 deletions hack/ci-deploy.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,28 +6,15 @@
# Usage: ${0} <driver_image_ref>

set -euxEo pipefail
shopt -s inherit_errexit

# Retry "kubectl -n <ns:$1> get <object:$2>" up to 30 times (~30s) until it first
# succeeds; gives up silently (status 0) if the object never shows up.
function wait-for-object-creation {
  local attempt
  for (( attempt = 1; attempt <= 30; attempt++ )); do
    if kubectl -n "${1}" get "${2}"; then
      break
    fi
    sleep 1
  done
}
source "$( dirname "${BASH_SOURCE[0]}" )/lib/kube.sh"

# Require the driver image ref argument.
# Bug fix: the original used `echo "...\n..." >&2 >/dev/null` — the second
# redirection re-pointed stdout at /dev/null AFTER >&2, so the message was
# discarded entirely; echo also printed the "\n" literally. printf to stderr fixes both.
if [[ -z ${1+x} ]]; then
  printf 'Missing driver image ref.\nUsage: %s <driver_image_ref>\n' "${0}" >&2
  exit 1
fi

# Create-or-apply wrapper: strict "create" during CI runs, plain "apply" when
# REENTRANT is set (local development iterations).
function kubectl_create {
  if [[ -n ${REENTRANT+x} ]]; then
    # For development iterations we want to update the objects.
    kubectl apply "$@"
  else
    # In an actual CI run we have to enforce that no two objects have the same name.
    kubectl create "$@"
  fi
}

# Output directory for rendered manifests/artifacts; defaults to a fresh temp dir.
ARTIFACTS_DIR=${ARTIFACTS_DIR:-$( mktemp -d )}
# Container image ref of the driver under test (presence validated above).
DRIVER_IMAGE_REF=${1}

Expand Down
41 changes: 41 additions & 0 deletions hack/lib/kube.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
#!/bin/bash
#
# Copyright (C) 2024 ScyllaDB
#
# Shared kubectl helper functions sourced by the CI scripts.

set -euExo pipefail
shopt -s inherit_errexit

# Field manager name recorded by kubectl create/apply for field-ownership tracking.
FIELD_MANAGER="${FIELD_MANAGER:-so-default}"

# Server-side apply with a stable field manager; --force-conflicts lets re-runs
# take ownership of fields previously owned by another manager.
function kubectl_apply {
  local -a apply_args=(
    apply
    --server-side=true
    --field-manager="${FIELD_MANAGER}"
    --force-conflicts
  )
  kubectl "${apply_args[@]}" "$@"
}

# Create-or-apply wrapper: strict "create" in CI (duplicate names must fail),
# server-side apply when REENTRANT is set (local development iterations).
function kubectl_create {
  if [[ -n "${REENTRANT+x}" ]]; then
    # For development iterations we want to update the objects.
    kubectl_apply "$@"
  else
    # In an actual CI run we have to enforce that no two objects have the same name.
    kubectl create --field-manager="${FIELD_MANAGER}" "$@"
  fi
}

# Poll until "kubectl -n <ns:$1> get <object:$2>" first succeeds, giving the object
# up to ~30 one-second attempts to appear; returns 0 either way.
function wait-for-object-creation {
  local remaining=30
  while [[ "${remaining}" -gt 0 ]]; do
    if kubectl -n "${1}" get "${2}"; then
      break
    fi
    sleep 1
    remaining=$(( remaining - 1 ))
  done
}

# $1 - namespace
# $2 - pod name
# $3 - container name
function wait-for-container-exit-with-logs {
  # Stream logs of container $3 in pod $2 (namespace $1) until the container
  # terminates, then print its exit code on stdout. Logs go to stderr so callers
  # can capture the exit code via command substitution.
  # Fix: exit_code is now local so it no longer leaks into the caller's scope
  # when the function is not invoked inside a command substitution.
  local exit_code=""
  while [[ "${exit_code}" == "" ]]; do
    # "logs -f" may fail while the pod is still starting or during log rotation; retry.
    kubectl -n="${1}" logs -f pod/"${2}" -c="${3}" >&2 || echo "kubectl logs failed before pod has finished, retrying..." >&2
    # Emits the exit code only once .state.terminated is populated for container $3.
    exit_code="$( kubectl -n="${1}" get pods/"${2}" --template='{{ range .status.containerStatuses }}{{ if and (eq .name "'"${3}"'") (ne .state.terminated.exitCode nil) }}{{ .state.terminated.exitCode }}{{ end }}{{ end }}' )"
  done
  echo -n "${exit_code}"
}

0 comments on commit 91fdd8f

Please sign in to comment.