Fix e2e-autoscale test case on OpenShift (#1927)
Signed-off-by: Ishwar Kanse <[email protected]>
Co-authored-by: Jacob Aronoff <[email protected]>
IshwarKanse and jaronoff97 committed Jul 19, 2023
1 parent d1ede76 commit 04dbbb4
Showing 5 changed files with 59 additions and 25 deletions.
2 changes: 1 addition & 1 deletion tests/e2e-autoscale/autoscale/03-install.yaml
@@ -18,4 +18,4 @@ spec:
 - -duration=1m
 - -workers=20
 restartPolicy: Never
-backoffLimit: 4
+backoffLimit: 4
2 changes: 1 addition & 1 deletion tests/e2e-autoscale/autoscale/05-assert.yaml
@@ -4,4 +4,4 @@ metadata:
   name: simplest-set-utilization
 status:
   scale:
-    replicas: 1
+    replicas: 1
25 changes: 20 additions & 5 deletions
@@ -34,17 +34,13 @@ func main() {
 	var hpaName string
 	var timeout time.Duration
 	var numMetrics int
-	var kubeconfigPath string
 	var cpuValue int
 	var memoryValue int
 	var scaleDownWindow int
 	var scaleUpWindow int
 
-	defaultKubeconfigPath := filepath.Join(homedir.HomeDir(), ".kube", "config")
-
 	pflag.DurationVar(&timeout, "timeout", 5*time.Minute, "The timeout for the check.")
 	pflag.StringVar(&hpaName, "hpa", "", "HPA to check")
-	pflag.StringVar(&kubeconfigPath, "kubeconfig-path", defaultKubeconfigPath, "Absolute path to the KubeconfigPath file")
 	pflag.IntVar(&numMetrics, "num-metrics", 1, "number of expected metrics in Spec")
 	pflag.IntVar(&cpuValue, "cpu-value", -1, "value for target CPU utilization")
 	pflag.IntVar(&memoryValue, "memory-value", -1, "value for target memory utilization")
@@ -57,7 +53,14 @@ func main() {
 		os.Exit(1)
 	}
 
-	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
+	kubeconfigPath := getKubeconfigPath()
+
+	configLoader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
+		&clientcmd.ConfigOverrides{},
+	)
+
+	config, err := configLoader.ClientConfig()
 	if err != nil {
 		fmt.Printf("Error reading the kubeconfig: %s\n", err)
 		os.Exit(1)
@@ -170,3 +173,15 @@ func main() {
 
 	fmt.Printf("%s is ready!\n", hpaName)
 }
+
+func getKubeconfigPath() string {
+	kubeconfigEnv := os.Getenv("KUBECONFIG")
+	if kubeconfigEnv != "" {
+		if _, err := os.Stat(kubeconfigEnv); err == nil {
+			return kubeconfigEnv
+		}
+	}
+
+	homeDir := homedir.HomeDir()
+	return filepath.Join(homeDir, ".kube", "config")
+}
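
For reference, the kubeconfig handling introduced above can be exercised on its own. The standalone sketch below (a hypothetical kubeconfig_sketch.go, not part of this commit) applies the same resolution order, preferring a readable KUBECONFIG path and falling back to $HOME/.kube/config, and hands the explicit path to clientcmd's non-interactive deferred loader to build a clientset:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

// getKubeconfigPath prefers a readable KUBECONFIG path and falls back to
// ~/.kube/config, mirroring the helper added in this commit.
func getKubeconfigPath() string {
	if env := os.Getenv("KUBECONFIG"); env != "" {
		if _, err := os.Stat(env); err == nil {
			return env
		}
	}
	return filepath.Join(homedir.HomeDir(), ".kube", "config")
}

func main() {
	// Deferred loading with an explicit path replaces the previous
	// --kubeconfig-path flag and BuildConfigFromFlags call.
	loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: getKubeconfigPath()},
		&clientcmd.ConfigOverrides{},
	)
	config, err := loader.ClientConfig()
	if err != nil {
		fmt.Printf("Error reading the kubeconfig: %s\n", err)
		os.Exit(1)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	// Query the server version as a simple connectivity check.
	version, err := client.Discovery().ServerVersion()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Printf("connected to %s (server version %s)\n", config.Host, version.GitVersion)
}

A possible alternative is clientcmd.NewDefaultClientConfigLoadingRules(), which also honors KUBECONFIG, but the explicit helper keeps the fallback behavior identical in both test binaries.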
30 changes: 18 additions & 12 deletions tests/e2e-autoscale/autoscale/wait-until-hpa-ready.go
@@ -22,7 +22,7 @@ import (
 	"time"
 
 	"github.com/spf13/pflag"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
@@ -37,19 +37,13 @@ func main() {
 	pflag.StringVar(&hpaName, "hpa", "", "HPA to check")
 	pflag.Parse()
 
-	kubeconfigEnv := os.Getenv("KUBECONFIG")
-	kubeconfigPath := filepath.Join(homedir.HomeDir(), ".kube", "config")
-	if kubeconfigEnv != "" {
-		if _, err := os.Stat(kubeconfigEnv); err != nil {
-			kubeconfigPath = kubeconfigEnv
-		}
-	}
-
 	if len(hpaName) == 0 {
 		fmt.Println("hpa flag is mandatory")
 		os.Exit(1)
 	}
 
+	kubeconfigPath := getKubeconfigPath()
+
 	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
 	if err != nil {
 		fmt.Printf("Error reading the kubeconfig: %s\n", err)
@@ -62,7 +56,7 @@ func main() {
 		os.Exit(1)
 	}
 
-	namespace, err := client.CoreV1().Namespaces().Get(context.Background(), os.Getenv("NAMESPACE"), metav1.GetOptions{})
+	namespace, err := client.CoreV1().Namespaces().Get(context.Background(), os.Getenv("NAMESPACE"), v1.GetOptions{})
 	if err != nil {
 		fmt.Println(err)
 		os.Exit(1)
@@ -80,13 +74,13 @@ func main() {
 		hpav2, err := hpaClientV2.Get(
 			c,
 			hpaName,
-			metav1.GetOptions{},
+			v1.GetOptions{},
 		)
 		if err != nil {
 			hpav1, err := hpaClientV1.Get(
 				c,
 				hpaName,
-				metav1.GetOptions{},
+				v1.GetOptions{},
 			)
 			if err != nil {
 				fmt.Printf("HPA %s not found\n", hpaName)
@@ -113,3 +107,15 @@ func main() {
 
 	fmt.Printf("%s is ready!\n", hpaName)
 }
+
+func getKubeconfigPath() string {
+	kubeconfigEnv := os.Getenv("KUBECONFIG")
+	if kubeconfigEnv != "" {
+		if _, err := os.Stat(kubeconfigEnv); err == nil {
+			return kubeconfigEnv
+		}
+	}
+
+	homeDir := homedir.HomeDir()
+	return filepath.Join(homeDir, ".kube", "config")
+}
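
wait-until-hpa-ready.go polls until the named HPA can be fetched, trying the autoscaling/v2 client first and falling back to autoscaling/v1. The sketch below illustrates that poll-with-fallback pattern as a reusable helper; the wait.PollUntilContextTimeout call, the 5-second interval, and the readiness conditions (CurrentMetrics for v2, CurrentCPUUtilizationPercentage for v1) are illustrative assumptions rather than the exact logic of the test:

// Package hpacheck is an illustrative sketch of the poll-with-fallback pattern
// used by wait-until-hpa-ready.go; it is not part of this commit.
package hpacheck

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// WaitForHPA polls until the named HPA exists and reports activity, trying the
// autoscaling/v2 API first and falling back to autoscaling/v1 on error.
func WaitForHPA(client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(context.Background(), 5*time.Second, timeout, true,
		func(ctx context.Context) (bool, error) {
			if hpa, err := client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(ctx, name, v1.GetOptions{}); err == nil {
				// Assumed readiness signal for v2: current metric values are being reported.
				return len(hpa.Status.CurrentMetrics) > 0, nil
			}
			hpa, err := client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(ctx, name, v1.GetOptions{})
			if err != nil {
				fmt.Printf("HPA %s not found yet\n", name)
				return false, nil // not fatal; keep polling until the timeout expires
			}
			// Assumed readiness signal for v1: an observed CPU utilization is present.
			return hpa.Status.CurrentCPUUtilizationPercentage != nil, nil
		})
}

Trying the v2 client first and only then the v1 client keeps the check usable on clusters that still serve only the older autoscaling API.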
25 changes: 19 additions & 6 deletions tests/e2e-openshift/Dockerfile
@@ -7,15 +7,28 @@ COPY . /tmp/opentelemetry-operator
 
 WORKDIR /tmp
 
+# Set the Go path and Go cache environment variables
+ENV GOPATH=/tmp/go
+ENV GOBIN=/tmp/go/bin
+ENV GOCACHE=/tmp/.cache/go-build
+ENV PATH=$PATH:$GOBIN
+
+# Create the /tmp/go/bin and build cache directories, and grant read and write permissions to all users
+RUN mkdir -p /tmp/go/bin $GOCACHE \
+  && chmod -R 777 /tmp/go/bin $GOPATH $GOCACHE
+
+# Install dependencies required by test cases and debugging
+RUN apt-get update && apt-get install -y jq vim libreadline-dev
+
 # Install kuttl
-RUN curl -L -o kuttl https://github.com/kudobuilder/kuttl/releases/download/v0.15.0/kubectl-kuttl_0.15.0_linux_x86_64 \
-  && chmod +x kuttl \
-  && mv kuttl /usr/local/bin/kuttl
+RUN curl -LO https://github.com/kudobuilder/kuttl/releases/download/v0.15.0/kubectl-kuttl_0.15.0_linux_x86_64 \
+  && chmod +x kubectl-kuttl_0.15.0_linux_x86_64 \
+  && mv kubectl-kuttl_0.15.0_linux_x86_64 /usr/local/bin/kuttl
 
 # Install kubectl and oc
-RUN curl -L -o oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/latest/openshift-client-linux.tar.gz \
-  && tar -xvzf oc.tar.gz \
-  && chmod +x kubectl oc \
+RUN curl -LO https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/latest/openshift-client-linux.tar.gz \
+  && tar -xzf openshift-client-linux.tar.gz \
+  && chmod +x oc kubectl \
 && mv oc kubectl /usr/local/bin/
 
 # Set the working directory
