From f0754cbd634ccae057da995b552ba2b9bb936e22 Mon Sep 17 00:00:00 2001
From: Diego Ciangottini
Date: Wed, 10 Jul 2024 11:30:56 +0200
Subject: [PATCH] Patches after 0.3.0 rc reviews (#254)

- Enhanced debug information for status reporting
- Print the version of the interlink binary
- The VK node now starts as NotReady, so pending pods are not polled until the remote side is OK
- The telemetry endpoint is now configurable
- CPU and memory allocatable resources are now taken correctly from the config file

Signed-off-by: Diego Ciangottini

---------

Signed-off-by: Diego Ciangottini
---
 .goreleaser.yaml                         |   2 +
 cmd/installer/templates/deployment.yaml  |  11 +--
 cmd/interlink/main.go                    |  18 +++-
 cmd/virtual-kubelet/main.go              |   9 +-
 docker/Dockerfile.interlink              |   2 +
 pkg/interlink/api/create.go              |  10 +--
 pkg/interlink/api/delete.go              |   6 +-
 pkg/interlink/api/func.go                |  24 +++---
 pkg/interlink/api/logs.go                |   4 +-
 pkg/interlink/api/status.go              |   6 +-
 pkg/virtualkubelet/config.go             |   8 +-
 pkg/virtualkubelet/execute.go            | 108 +++++++++++++-----------
 pkg/virtualkubelet/virtualkubelet.go     |  10 +--
 13 files changed, 124 insertions(+), 94 deletions(-)

diff --git a/.goreleaser.yaml b/.goreleaser.yaml
index 38d4c0d3..3117e9cf 100644
--- a/.goreleaser.yaml
+++ b/.goreleaser.yaml
@@ -20,6 +20,8 @@ builds:
     main: ./cmd/virtual-kubelet
   - id: "interlink"
     binary: interlink
+    hooks:
+      pre: bash -c "KUBELET_VERSION={{.Version}} ./cmd/virtual-kubelet/set-version.sh"
     env:
       - CGO_ENABLED=0
     goos:
diff --git a/cmd/installer/templates/deployment.yaml b/cmd/installer/templates/deployment.yaml
index 5ed90154..4154bc51 100644
--- a/cmd/installer/templates/deployment.yaml
+++ b/cmd/installer/templates/deployment.yaml
@@ -15,10 +15,10 @@ spec:
       labels:
         nodeName: {{.VKName}}
     spec:
-      #hostNetwork: true
+      dnsConfig:
+        nameservers:
+          - 8.8.8.8
       containers:
-      - name: jaeger
-        image: jaegertracing/all-in-one:1.51
       - name: inttw-vk
         image: ghcr.io/intertwin-eu/interlink/virtual-kubelet-inttw:{{.InterLinkVersion}}
         imagePullPolicy: Always
@@ -54,7 +54,6 @@ spec:
         env:
         - name: IAM_TOKEN_ENDPOINT
          value: {{.OAUTH.TokenURL}}
-          # TODO load env IAM client from secret
        - name: IAM_CLIENT_ID
          value: {{.OAUTH.ClientID}}
        - name: IAM_CLIENT_SECRET
@@ -87,6 +86,4 @@ spec:
          # Provide the name of the ConfigMap you want to mount.
          name: {{.VKName}}-config
       - name: token
-        hostPath:
-          path: /tmp
-          type: Directory
+        emptyDir: {}
diff --git a/cmd/interlink/main.go b/cmd/interlink/main.go
index f2b85846..8eeb8311 100644
--- a/cmd/interlink/main.go
+++ b/cmd/interlink/main.go
@@ -2,6 +2,8 @@ package main
 
 import (
 	"context"
+	"flag"
+	"fmt"
 	"net/http"
 	"strings"
 
@@ -9,15 +11,23 @@ import (
 	"github.com/virtual-kubelet/virtual-kubelet/log"
 	logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus"
 
-	commonIL "github.com/intertwin-eu/interlink/pkg/interlink"
+	types "github.com/intertwin-eu/interlink/pkg/interlink"
 	"github.com/intertwin-eu/interlink/pkg/interlink/api"
+	"github.com/intertwin-eu/interlink/pkg/virtualkubelet"
 )
 
 func main() {
+	printVersion := flag.Bool("version", false, "show version")
+	flag.Parse()
+
+	if *printVersion {
+		fmt.Println(virtualkubelet.KubeletVersion)
+		return
+	}
+
 	var cancel context.CancelFunc
-	api.PodStatuses.Statuses = make(map[string]commonIL.PodStatus)
+	api.PodStatuses.Statuses = make(map[string]types.PodStatus)
 
-	interLinkConfig, err := commonIL.NewInterLinkConfig()
+	interLinkConfig, err := types.NewInterLinkConfig()
 	if err != nil {
 		panic(err)
 	}
@@ -36,6 +46,8 @@ func main() {
 
 	log.G(ctx).Info(interLinkConfig)
 
+	log.G(ctx).Info("interLink version: ", virtualkubelet.KubeletVersion)
+
 	sidecarEndpoint := ""
 	if strings.HasPrefix(interLinkConfig.Sidecarurl, "unix://") {
 		sidecarEndpoint = interLinkConfig.Sidecarurl
diff --git a/cmd/virtual-kubelet/main.go b/cmd/virtual-kubelet/main.go
index fe5919ab..fa1c2b14 100644
--- a/cmd/virtual-kubelet/main.go
+++ b/cmd/virtual-kubelet/main.go
@@ -115,7 +115,14 @@ func initProvider() (func(context.Context) error, error) {
 	// probably connect directly to the service through dns.
 	ctx, cancel := context.WithTimeout(ctx, time.Second)
 	defer cancel()
-	conn, err := grpc.DialContext(ctx, "localhost:4317",
+
+	otlpEndpoint := os.Getenv("TELEMETRY_ENDPOINT")
+
+	if otlpEndpoint == "" {
+		otlpEndpoint = "localhost:4317"
+	}
+
+	conn, err := grpc.DialContext(ctx, otlpEndpoint,
 		// Note the use of insecure transport here. TLS is recommended in production.
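 		// Example (hypothetical values): a collector deployed as a cluster Service can be reached by
 		// setting TELEMETRY_ENDPOINT=otel-collector.monitoring.svc:4317 in the VK container env;
 		// when the variable is unset, the dial falls back to the previous default, localhost:4317.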
 		grpc.WithTransportCredentials(insecure.NewCredentials()),
 		grpc.WithBlock(),
diff --git a/docker/Dockerfile.interlink b/docker/Dockerfile.interlink
index 3163d204..8731532e 100644
--- a/docker/Dockerfile.interlink
+++ b/docker/Dockerfile.interlink
@@ -10,6 +10,8 @@ ENV GOCACHE="/go/build-cache"
 
 RUN mkdir -p $GOMODCACHE && mkdir -p $GOCACHE
 
+ARG VERSION
+RUN bash -c "KUBELET_VERSION=${VERSION} ./cmd/virtual-kubelet/set-version.sh"
 RUN go mod tidy
 
 RUN CGO_ENABLED=0 GOOS=linux go build -o bin/interlink cmd/interlink/main.go
diff --git a/pkg/interlink/api/create.go b/pkg/interlink/api/create.go
index 908e4b3a..9fddda70 100644
--- a/pkg/interlink/api/create.go
+++ b/pkg/interlink/api/create.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/containerd/containerd/log"
 
-	commonIL "github.com/intertwin-eu/interlink/pkg/interlink"
+	types "github.com/intertwin-eu/interlink/pkg/interlink"
 )
 
 // CreateHandler collects and rearranges all needed ConfigMaps/Secrets/EmptyDirs to ship them to the sidecar, then sends a response to the client
@@ -25,8 +25,8 @@ func (h *InterLinkHandler) CreateHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	var req *http.Request              //request to forward to sidecar
-	var pod commonIL.PodCreateRequests //request for interlink
+	var req *http.Request           //request to forward to sidecar
+	var pod types.PodCreateRequests //request for interlink
 	err = json.Unmarshal(bodyBytes, &pod)
 	if err != nil {
 		statusCode = http.StatusInternalServerError
@@ -35,9 +35,9 @@ func (h *InterLinkHandler) CreateHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	var retrievedData []commonIL.RetrievedPodData
+	var retrievedData []types.RetrievedPodData
 
-	data := commonIL.RetrievedPodData{}
+	data := types.RetrievedPodData{}
 	if h.Config.ExportPodData {
 		data, err = getData(h.Ctx, h.Config, pod)
 		if err != nil {
diff --git a/pkg/interlink/api/delete.go b/pkg/interlink/api/delete.go
index 864d9dbb..f2d1ca2d 100644
--- a/pkg/interlink/api/delete.go
+++ b/pkg/interlink/api/delete.go
@@ -9,7 +9,7 @@ import (
 	"github.com/containerd/containerd/log"
 	v1 "k8s.io/api/core/v1"
 
-	commonIL "github.com/intertwin-eu/interlink/pkg/interlink"
+	types "github.com/intertwin-eu/interlink/pkg/interlink"
 )
 
 // DeleteHandler deletes the cached status for the provided Pod and forwards the request to the sidecar
@@ -64,8 +64,8 @@ func (h *InterLinkHandler) DeleteHandler(w http.ResponseWriter, r *http.Request)
 		w.WriteHeader(http.StatusOK)
 	}
 	log.G(h.Ctx).Debug("InterLink: " + string(returnValue))
-	var returnJson []commonIL.PodStatus
-	returnJson = append(returnJson, commonIL.PodStatus{PodName: pod.Name, PodUID: string(pod.UID), PodNamespace: pod.Namespace})
+	var returnJson []types.PodStatus
+	returnJson = append(returnJson, types.PodStatus{PodName: pod.Name, PodUID: string(pod.UID), PodNamespace: pod.Namespace})
 
 	bodyBytes, err = json.Marshal(returnJson)
 	if err != nil {
diff --git a/pkg/interlink/api/func.go b/pkg/interlink/api/func.go
index 02c7de61..7a078d82 100644
--- a/pkg/interlink/api/func.go
+++ b/pkg/interlink/api/func.go
@@ -8,12 +8,12 @@ import (
 	"github.com/containerd/containerd/log"
 	v1 "k8s.io/api/core/v1"
 
-	commonIL "github.com/intertwin-eu/interlink/pkg/interlink"
+	types "github.com/intertwin-eu/interlink/pkg/interlink"
 )
 
 type MutexStatuses struct {
 	mu       sync.Mutex
-	Statuses map[string]commonIL.PodStatus
+	Statuses map[string]types.PodStatus
 }
 
 var PodStatuses MutexStatuses
 
@@ -21,18 +21,18 @@ var PodStatuses MutexStatuses
 // getData retrieves ConfigMaps, Secrets and EmptyDirs from the provided pod by calling the retrieveData function.
 // The config is needed by the retrieveData function.
 // The function aggregates the return values of the retrieveData function in a commonIL.RetrievedPodData variable and returns it, along with the first encountered error.
-func getData(ctx context.Context, config commonIL.InterLinkConfig, pod commonIL.PodCreateRequests) (commonIL.RetrievedPodData, error) {
+func getData(ctx context.Context, config types.InterLinkConfig, pod types.PodCreateRequests) (types.RetrievedPodData, error) {
 	log.G(ctx).Debug(pod.ConfigMaps)
-	var retrievedData commonIL.RetrievedPodData
+	var retrievedData types.RetrievedPodData
 	retrievedData.Pod = pod.Pod
 
 	for _, container := range pod.Pod.Spec.InitContainers {
 		log.G(ctx).Info("- Retrieving Secrets and ConfigMaps for the Docker Sidecar. InitContainer: " + container.Name)
 		log.G(ctx).Debug(container.VolumeMounts)
 		data, err := retrieveData(ctx, config, pod, container)
 		if err != nil {
 			log.G(ctx).Error(err)
-			return commonIL.RetrievedPodData{}, err
+			return types.RetrievedPodData{}, err
 		}
 		retrievedData.Containers = append(retrievedData.Containers, data)
 	}
@@ -43,7 +43,7 @@ func getData(ctx context.Context, config commonIL.
 		data, err := retrieveData(ctx, config, pod, container)
 		if err != nil {
 			log.G(ctx).Error(err)
-			return commonIL.RetrievedPodData{}, err
+			return types.RetrievedPodData{}, err
 		}
 		retrievedData.Containers = append(retrievedData.Containers, data)
 	}
@@ -54,8 +54,8 @@ func getData(ctx context.Context, config commonIL.
 // retrieveData retrieves ConfigMaps, Secrets and EmptyDirs.
 // The config is needed to specify the EmptyDirs mounting point.
 // It returns the retrieved data in a variable of type commonIL.RetrievedContainer and the first encountered error.
-func retrieveData(ctx context.Context, config commonIL.InterLinkConfig, pod commonIL.PodCreateRequests, container v1.Container) (commonIL.RetrievedContainer, error) {
-	retrievedData := commonIL.RetrievedContainer{}
+func retrieveData(ctx context.Context, config types.InterLinkConfig, pod types.PodCreateRequests, container v1.Container) (types.RetrievedContainer, error) {
+	retrievedData := types.RetrievedContainer{}
 	for _, mountVar := range container.VolumeMounts {
 		log.G(ctx).Debug("-- Retrieving data for mountpoint " + mountVar.Name)
 
@@ -114,7 +114,7 @@ func checkIfCached(uid string) bool {
 }
 
 // updateStatuses locks and updates the PodStatuses map with the statuses contained in the returnedStatuses slice
-func updateStatuses(returnedStatuses []commonIL.PodStatus) {
+func updateStatuses(returnedStatuses []types.PodStatus) {
 	PodStatuses.mu.Lock()
 
 	for _, new := range returnedStatuses {
diff --git a/pkg/interlink/api/logs.go b/pkg/interlink/api/logs.go
index c53a0f1b..9189091b 100644
--- a/pkg/interlink/api/logs.go
+++ b/pkg/interlink/api/logs.go
@@ -10,7 +10,7 @@ import (
 
 	"github.com/containerd/containerd/log"
 
-	commonIL "github.com/intertwin-eu/interlink/pkg/interlink"
+	types "github.com/intertwin-eu/interlink/pkg/interlink"
 )
 
 func (h *InterLinkHandler) GetLogsHandler(w http.ResponseWriter, r *http.Request) {
@@ -22,7 +22,7 @@
 	}
 	log.G(h.Ctx).Info("InterLink: unmarshal GetLogs request")
-	var req2 commonIL.LogStruct //incoming request. To be used in interlink API. req is directly forwarded to sidecar
+	var req2 types.LogStruct //incoming request. To be used in interlink API. req is directly forwarded to sidecar
 	err = json.Unmarshal(bodyBytes, &req2)
 	if err != nil {
 		statusCode = http.StatusInternalServerError
diff --git a/pkg/interlink/api/status.go b/pkg/interlink/api/status.go
index 92dde171..da8502bd 100644
--- a/pkg/interlink/api/status.go
+++ b/pkg/interlink/api/status.go
@@ -10,7 +10,7 @@ import (
 	"github.com/containerd/containerd/log"
 	v1 "k8s.io/api/core/v1"
 
-	commonIL "github.com/intertwin-eu/interlink/pkg/interlink"
+	types "github.com/intertwin-eu/interlink/pkg/interlink"
 )
 
 func (h *InterLinkHandler) StatusHandler(w http.ResponseWriter, r *http.Request) {
@@ -29,8 +29,8 @@
 	}
 
 	var podsToBeChecked []*v1.Pod
-	var returnedStatuses []commonIL.PodStatus //returned from the query to the sidecar
-	var returnPods []commonIL.PodStatus       //returned to the vk
+	var returnedStatuses []types.PodStatus //returned from the query to the sidecar
+	var returnPods []types.PodStatus       //returned to the vk
 
 	PodStatuses.mu.Lock()
 	for _, pod := range pods {
diff --git a/pkg/virtualkubelet/config.go b/pkg/virtualkubelet/config.go
index ef8c2868..a7f68339 100644
--- a/pkg/virtualkubelet/config.go
+++ b/pkg/virtualkubelet/config.go
@@ -2,7 +2,7 @@ package virtualkubelet
 
 // VirtualKubeletConfig holds the whole configuration
 type VirtualKubeletConfig struct {
-	Interlinkurl      string `yaml:"InterlinkURL"`
+	InterlinkURL      string `yaml:"InterlinkURL"`
 	Interlinkport     string `yaml:"InterlinkPort"`
 	VKConfigPath      string `yaml:"VKConfigPath"`
 	VKTokenFile       string `yaml:"VKTokenFile"`
@@ -11,8 +11,8 @@ type VirtualKubeletConfig struct {
 	PodIP             string `yaml:"PodIP"`
 	VerboseLogging    bool   `yaml:"VerboseLogging"`
 	ErrorsOnlyLogging bool   `yaml:"ErrorsOnlyLogging"`
-	CPU               string `yaml:"cpu,omitempty"`
-	Memory            string `yaml:"memory,omitempty"`
-	Pods              string `yaml:"pods,omitempty"`
+	CPU               string `yaml:"CPU,omitempty"`
+	Memory            string `yaml:"Memory,omitempty"`
+	Pods              string `yaml:"Pods,omitempty"`
 	GPU               string `yaml:"nvidia.com/gpu,omitempty"`
 }
diff --git a/pkg/virtualkubelet/execute.go b/pkg/virtualkubelet/execute.go
index 06bfaeb0..b07c90cb 100644
--- a/pkg/virtualkubelet/execute.go
+++ b/pkg/virtualkubelet/execute.go
@@ -17,7 +17,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	commonIL "github.com/intertwin-eu/interlink/pkg/interlink"
+	types "github.com/intertwin-eu/interlink/pkg/interlink"
 )
 
 func doRequest(req *http.Request, token string) (*http.Response, error) {
@@ -44,7 +44,7 @@ func getSidecarEndpoint(ctx context.Context, interLinkURL string, interLinkPort
 
 // PingInterLink pings the InterLink API and returns true if there's an answer. The second return value is derived from the API's answer.
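 // Note (from the commit description): the VK node now starts NotReady, and pending pods are not polled until this ping reports that the remote side is OK.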
 func PingInterLink(ctx context.Context, config VirtualKubeletConfig) (bool, int, error) {
-	interLinkEndpoint := getSidecarEndpoint(ctx, config.Interlinkurl, config.Interlinkport)
+	interLinkEndpoint := getSidecarEndpoint(ctx, config.InterlinkURL, config.Interlinkport)
 	log.G(ctx).Info("Pinging: " + interLinkEndpoint + "/pinglink")
 	retVal := -1
 	req, err := http.NewRequest(http.MethodPost, interLinkEndpoint+"/pinglink", nil)
@@ -90,7 +90,7 @@ func updateCacheRequest(ctx context.Context, config VirtualKubeletConfig, pod v1
 		return err
 	}
 
-	interLinkEndpoint := getSidecarEndpoint(ctx, config.Interlinkurl, config.Interlinkport)
+	interLinkEndpoint := getSidecarEndpoint(ctx, config.InterlinkURL, config.Interlinkport)
 	reader := bytes.NewReader(bodyBytes)
 	req, err := http.NewRequest(http.MethodPost, interLinkEndpoint+"/updateCache", reader)
 	if err != nil {
@@ -116,9 +116,9 @@
 
 // createRequest performs a REST call to the InterLink API when a Pod is registered to the VK. It marshals the pod with the already retrieved ConfigMaps and Secrets and sends it to InterLink.
 // Returns the call response expressed in bytes and/or the first encountered error
-func createRequest(ctx context.Context, config VirtualKubeletConfig, pod commonIL.PodCreateRequests, token string) ([]byte, error) {
-	interLinkEndpoint := getSidecarEndpoint(ctx, config.Interlinkurl, config.Interlinkport)
-	var returnValue, _ = json.Marshal(commonIL.PodStatus{})
+func createRequest(ctx context.Context, config VirtualKubeletConfig, pod types.PodCreateRequests, token string) ([]byte, error) {
+	interLinkEndpoint := getSidecarEndpoint(ctx, config.InterlinkURL, config.Interlinkport)
+	var returnValue, _ = json.Marshal(types.PodStatus{})
 
 	bodyBytes, err := json.Marshal(pod)
 	if err != nil {
@@ -155,7 +155,7 @@
 
 // deleteRequest performs a REST call to the InterLink API when a Pod is deleted from the VK. It marshals the standard v1.Pod struct and sends it to InterLink.
 // Returns the call response expressed in bytes and/or the first encountered error
 func deleteRequest(ctx context.Context, config VirtualKubeletConfig, pod *v1.Pod, token string) ([]byte, error) {
-	interLinkEndpoint := getSidecarEndpoint(ctx, config.Interlinkurl, config.Interlinkport)
+	interLinkEndpoint := getSidecarEndpoint(ctx, config.InterlinkURL, config.Interlinkport)
 	bodyBytes, err := json.Marshal(pod)
 	if err != nil {
 		log.G(context.Background()).Error(err)
@@ -185,7 +185,7 @@ func deleteRequest(ctx context.Context, config VirtualKubeletConfig, pod *v1.Pod
 		return nil, err
 	}
 	log.G(context.Background()).Info(string(returnValue))
-	var response []commonIL.PodStatus
+	var response []types.PodStatus
 	err = json.Unmarshal(returnValue, &response)
 	if err != nil {
 		log.G(context.Background()).Error(err)
@@ -200,7 +200,7 @@ func deleteRequest(ctx context.Context, config VirtualKubeletConfig, pod *v1.Pod
 // Returns the call response expressed in bytes and/or the first encountered error
 func statusRequest(ctx context.Context, config VirtualKubeletConfig, podsList []*v1.Pod, token string) ([]byte, error) {
 	var returnValue []byte
-	interLinkEndpoint := getSidecarEndpoint(ctx, config.Interlinkurl, config.Interlinkport)
+	interLinkEndpoint := getSidecarEndpoint(ctx, config.InterlinkURL, config.Interlinkport)
 
 	bodyBytes, err := json.Marshal(podsList)
 	if err != nil {
@@ -237,8 +237,8 @@ func statusRequest(ctx context.Context, config VirtualKubeletConfig, podsList []
 // LogRetrieval performs a REST call to the InterLink API when the user asks for a log retrieval. Compared to create/delete/status requests, a much smaller struct is marshalled and sent.
 // This struct only includes the minimum data set needed to identify the job/container to get the logs from.
 // Returns the call response and/or the first encountered error
-func LogRetrieval(ctx context.Context, config VirtualKubeletConfig, logsRequest commonIL.LogStruct) (io.ReadCloser, error) {
-	interLinkEndpoint := getSidecarEndpoint(ctx, config.Interlinkurl, config.Interlinkport)
+func LogRetrieval(ctx context.Context, config VirtualKubeletConfig, logsRequest types.LogStruct) (io.ReadCloser, error) {
+	interLinkEndpoint := getSidecarEndpoint(ctx, config.InterlinkURL, config.Interlinkport)
 	b, err := os.ReadFile(config.VKTokenFile) // just pass the file name
 	if err != nil {
 		log.G(ctx).Fatal(err)
@@ -288,8 +288,8 @@ func RemoteExecution(ctx context.Context, config VirtualKubeletConfig, p *Virtua
 
 	switch mode {
 	case CREATE:
-		var req commonIL.PodCreateRequests
-		var resp commonIL.CreateStruct
+		var req types.PodCreateRequests
+		var resp types.CreateStruct
 		req.Pod = *pod
 
 		startTime := time.Now()
@@ -396,11 +396,12 @@
 
 // checkPodsStatus is called by the VK at regular intervals to query InterLink for Pods' status.
 // It appends all available pods registered to the VK to a slice and passes that slice to the statusRequest function.
 // After statusRequest returns a response, this function uses that response to update every Pod and Container status.
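 // The returned []types.PodStatus mirrors what the remote side reported, so the caller can reconcile it with the VK's local cache.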
-func checkPodsStatus(ctx context.Context, p *VirtualKubeletProvider, podsList []*v1.Pod, token string, config VirtualKubeletConfig) ([]commonIL.PodStatus, error) {
-	var ret []commonIL.PodStatus
+func checkPodsStatus(ctx context.Context, p *VirtualKubeletProvider, podsList []*v1.Pod, token string, config VirtualKubeletConfig) ([]types.PodStatus, error) {
+	var ret []types.PodStatus
 
 	//commented out because it's too verbose. uncomment to see all registered pods
 	//log.G(ctx).Debug(p.pods)
 
+	// retrieve pod status from remote interlink
 	returnVal, err := statusRequest(ctx, config, podsList, token)
 	if err != nil {
 		return nil, err
 	}
@@ -413,79 +414,88 @@
 		return nil, err
 	}
 
+	// if there is a pod status available, go ahead and match it with the latest state available in etcd
 	if podsList != nil {
-		for _, podStatus := range ret {
+		for _, podRemoteStatus := range ret {
+
+			log.G(ctx).Debug(fmt.Sprintln("Get status from remote status len: ", len(podRemoteStatus.Containers)))
 			// avoid asking for status too early, when etcd has not been updated
-			if podStatus.PodName != "" {
+			if podRemoteStatus.PodName != "" {
 
-				pod, err := p.GetPod(ctx, podStatus.PodNamespace, podStatus.PodName)
+				// get the pod reference from cluster etcd
+				podRefInCluster, err := p.GetPod(ctx, podRemoteStatus.PodNamespace, podRemoteStatus.PodName)
 				if err != nil {
 					log.G(ctx).Warning(err)
 					continue
 				}
+				log.G(ctx).Debug(fmt.Sprintln("Get pod from k8s cluster status: ", podRefInCluster.Status.ContainerStatuses))
 
-				if podStatus.PodUID == string(pod.UID) {
+				// if the PodUID matches the one in etcd, we are talking about the same pod
+				if podRemoteStatus.PodUID == string(podRefInCluster.UID) {
 					podRunning := false
 					podErrored := false
 					podCompleted := false
 					failedReason := ""
-					for _, containerStatus := range podStatus.Containers {
+
+					// for each container of the pod, check whether there is a previous state known by K8s
+					for _, containerRemoteStatus := range podRemoteStatus.Containers {
 						index := 0
 						foundCt := false
 
-						for i, checkedContainer := range pod.Status.ContainerStatuses {
-							if checkedContainer.Name == containerStatus.Name {
+						for i, checkedContainer := range podRefInCluster.Status.ContainerStatuses {
+							if checkedContainer.Name == containerRemoteStatus.Name {
 								foundCt = true
 								index = i
 							}
 						}
 
+						// if it is the first time checking the container, append it to the pod containers; otherwise just update the correct item
 						if !foundCt {
-							pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, containerStatus)
+							podRefInCluster.Status.ContainerStatuses = append(podRefInCluster.Status.ContainerStatuses, containerRemoteStatus)
 						} else {
-							pod.Status.ContainerStatuses[index] = containerStatus
+							podRefInCluster.Status.ContainerStatuses[index] = containerRemoteStatus
 						}
 
-						// if plugin cannot return any running container set the status to terminated
+						log.G(ctx).Debug(containerRemoteStatus.State.Running)
+
+						// if the plugin cannot return any non-terminated container, set the status to terminated
 						// if the exit code is != 0, get the error and set the error reason + remember to set the pod to failed
-						if containerStatus.State.Terminated != nil {
-							log.G(ctx).Debug("Pod " + podStatus.PodName + ": Service " + containerStatus.Name + " is not running on Plugin side")
+						if containerRemoteStatus.State.Terminated != nil {
+							log.G(ctx).Debug("Pod " + podRemoteStatus.PodName + ": Service " + containerRemoteStatus.Name + " is not running on Plugin side")
 							podCompleted = true
-							pod.Status.ContainerStatuses[index].State.Terminated.Reason = "Completed"
-							if containerStatus.State.Terminated.ExitCode != 0 {
+							podRefInCluster.Status.ContainerStatuses[index].State.Terminated.Reason = "Completed"
+							if containerRemoteStatus.State.Terminated.ExitCode != 0 {
 								podErrored = true
-								failedReason = "Error: " + strconv.Itoa(int(containerStatus.State.Terminated.ExitCode))
-								pod.Status.ContainerStatuses[index].State.Terminated.Reason = failedReason
-								log.G(ctx).Error("Container " + containerStatus.Name + " exited with error: " + strconv.Itoa(int(containerStatus.State.Terminated.ExitCode)))
+								failedReason = "Error: " + strconv.Itoa(int(containerRemoteStatus.State.Terminated.ExitCode))
+								podRefInCluster.Status.ContainerStatuses[index].State.Terminated.Reason = failedReason
+								log.G(ctx).Error("Container " + containerRemoteStatus.Name + " exited with error: " + strconv.Itoa(int(containerRemoteStatus.State.Terminated.ExitCode)))
 							}
-						} else if containerStatus.State.Waiting != nil {
-							log.G(ctx).Info("Pod " + podStatus.PodName + ": Service " + containerStatus.Name + " is setting up on Sidecar")
+						} else if containerRemoteStatus.State.Waiting != nil {
+							log.G(ctx).Info("Pod " + podRemoteStatus.PodName + ": Service " + containerRemoteStatus.Name + " is setting up on Sidecar")
 							podRunning = true
-						} else if containerStatus.State.Running != nil {
+						} else if containerRemoteStatus.State.Running != nil {
 							podRunning = true
-							log.G(ctx).Debug("Pod " + podStatus.PodName + ": Service " + containerStatus.Name + " is running on Sidecar")
-
+							log.G(ctx).Debug("Pod " + podRemoteStatus.PodName + ": Service " + containerRemoteStatus.Name + " is running on Sidecar")
 						}
 
 					// if this is the first time you see a container running/errored/completed, update the status of the pod.
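+					// (note: the guards below avoid re-setting a phase the pod already has, so PodReady conditions are not appended again on every poll)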
-					if podRunning && pod.Status.Phase != v1.PodRunning {
-						pod.Status.Phase = v1.PodRunning
-						pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue})
-					} else if podErrored && pod.Status.Phase != v1.PodFailed {
-						pod.Status.Phase = v1.PodFailed
-						pod.Status.Reason = failedReason
-					} else if podCompleted && pod.Status.Phase != v1.PodSucceeded {
-						pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionFalse})
-						pod.Status.Phase = v1.PodSucceeded
-						pod.Status.Reason = "Completed"
+					if podRunning && podRefInCluster.Status.Phase != v1.PodRunning {
+						podRefInCluster.Status.Phase = v1.PodRunning
+						podRefInCluster.Status.Conditions = append(podRefInCluster.Status.Conditions, v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue})
+					} else if podErrored && podRefInCluster.Status.Phase != v1.PodFailed {
+						podRefInCluster.Status.Phase = v1.PodFailed
+						podRefInCluster.Status.Reason = failedReason
+					} else if podCompleted && podRefInCluster.Status.Phase != v1.PodSucceeded {
+						podRefInCluster.Status.Conditions = append(podRefInCluster.Status.Conditions, v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionFalse})
+						podRefInCluster.Status.Phase = v1.PodSucceeded
+						podRefInCluster.Status.Reason = "Completed"
 					}
 				}
 			} else {
 				// if you don't know the UID yet, collect the status and update the status cache
-				list, err := p.clientSet.CoreV1().Pods(podStatus.PodNamespace).List(ctx, metav1.ListOptions{})
+				list, err := p.clientSet.CoreV1().Pods(podRemoteStatus.PodNamespace).List(ctx, metav1.ListOptions{})
 				if err != nil {
 					log.G(ctx).Error(err)
 					return nil, err
@@ -494,7 +504,7 @@
 
 				pods := list.Items
 				for _, pod := range pods {
-					if string(pod.UID) == podStatus.PodUID {
+					if string(pod.UID) == podRemoteStatus.PodUID {
 						err = updateCacheRequest(ctx, config, pod, token)
 						if err != nil {
 							log.G(ctx).Error(err)
diff --git a/pkg/virtualkubelet/virtualkubelet.go b/pkg/virtualkubelet/virtualkubelet.go
index bda0e7b2..586cfe1c 100644
--- a/pkg/virtualkubelet/virtualkubelet.go
+++ b/pkg/virtualkubelet/virtualkubelet.go
@@ -22,7 +22,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
 
-	commonIL "github.com/intertwin-eu/interlink/pkg/interlink"
+	types "github.com/intertwin-eu/interlink/pkg/interlink"
 )
 
 const (
@@ -621,7 +621,7 @@ func nodeConditions() []v1.NodeCondition {
 	return []v1.NodeCondition{
 		{
 			Type:               "Ready",
-			Status:             v1.ConditionTrue,
+			Status:             v1.ConditionFalse,
 			LastHeartbeatTime:  metav1.Now(),
 			LastTransitionTime: metav1.Now(),
 			Reason:             "KubeletPending",
@@ -653,7 +653,7 @@ func nodeConditions() []v1.NodeCondition {
 		},
 		{
 			Type:               "NetworkUnavailable",
-			Status:             v1.ConditionFalse,
+			Status:             v1.ConditionTrue,
 			LastHeartbeatTime:  metav1.Now(),
 			LastTransitionTime: metav1.Now(),
 			Reason:             "RouteCreated",
@@ -756,12 +756,12 @@ func (p *VirtualKubeletProvider) GetLogs(ctx context.Context, namespace, podName
 		log.G(ctx).Error(err)
 	}
 
-	logsRequest := commonIL.LogStruct{
+	logsRequest := types.LogStruct{
 		Namespace:     namespace,
 		PodUID:        string(p.pods[key].UID),
 		PodName:       podName,
 		ContainerName: containerName,
-		Opts:          commonIL.ContainerLogOpts(opts),
+		Opts:          types.ContainerLogOpts(opts),
 	}
 
 	return LogRetrieval(ctx, p.config, logsRequest)
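
A minimal VK config sketch exercising the renamed resource keys from pkg/virtualkubelet/config.go. Everything below is illustrative (the endpoint, paths, and sizing are assumptions, not part of this patch); only the key names, now capitalized to match the fixed yaml tags, come from the changes above:

    InterlinkURL: "https://interlink.example.com"   # hypothetical endpoint
    InterlinkPort: "3000"
    VKTokenFile: "/opt/interlink/token"             # hypothetical path
    ServiceAccount: "interlink"
    Namespace: "interlink"
    VerboseLogging: true
    ErrorsOnlyLogging: false
    CPU: "8"          # allocatable CPUs advertised by the VK node
    Memory: "16Gi"    # allocatable memory advertised by the VK node
    Pods: "50"        # schedulable pod slots advertised by the VK node
    nvidia.com/gpu: "0"

With the goreleaser hook and Dockerfile ARG above embedding KUBELET_VERSION at build time, `interlink -version` prints the embedded version and exits, and the same version is logged at startup. The telemetry endpoint is taken from the TELEMETRY_ENDPOINT environment variable, falling back to localhost:4317.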