From ea1b17b11cbd9b024906a2c5364ddc2d1401bd13 Mon Sep 17 00:00:00 2001
From: Jericho Keyne
Date: Wed, 24 May 2023 13:20:26 -0600
Subject: [PATCH] Adding ingress support. Should address #47

---
 src/app/ingress.rs      | 196 ++++++++++++++++++++++++++++++++++++++++
 src/app/mod.rs          |  12 ++-
 src/handlers/mod.rs     |  16 ++++
 src/network/kube_api.rs |  21 +++--
 src/network/mod.rs      |   4 +
 src/ui/resource_tabs.rs |  67 ++++++++++++++
 test_data/ingress.yaml  |  90 ++++++++++++++++++
 7 files changed, 399 insertions(+), 7 deletions(-)
 create mode 100644 src/app/ingress.rs
 create mode 100644 test_data/ingress.yaml

diff --git a/src/app/ingress.rs b/src/app/ingress.rs
new file mode 100644
index 00000000..5a60781a
--- /dev/null
+++ b/src/app/ingress.rs
@@ -0,0 +1,196 @@
+use k8s_openapi::{
+  api::networking::v1::{Ingress, IngressBackend, IngressRule, IngressStatus},
+  chrono::Utc,
+};
+
+use super::{
+  models::KubeResource,
+  utils::{self, UNKNOWN},
+};
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct KubeIngress {
+  pub namespace: String,
+  pub name: String,
+  pub ingress_class: String,
+  pub address: String,
+  pub paths: String,
+  pub default_backend: String,
+  pub age: String,
+  k8s_obj: Ingress,
+}
+
+impl From<Ingress> for KubeIngress {
+  fn from(ingress: Ingress) -> Self {
+    let (ingress_class, rules, default_backend) = match &ingress.spec {
+      Some(spec) => {
+        let class_name = match &spec.ingress_class_name {
+          Some(c) => c.clone(),
+          None => UNKNOWN.into(),
+        };
+        (
+          class_name,
+          get_rules(&spec.rules),
+          format_backend(&spec.default_backend),
+        )
+      }
+      None => (String::default(), None, String::default()),
+    };
+    let name = match &ingress.metadata.name {
+      Some(n) => n.clone(),
+      None => UNKNOWN.into(),
+    };
+    let namespace = match &ingress.metadata.namespace {
+      Some(n) => n.clone(),
+      None => UNKNOWN.into(),
+    };
+    let paths = match rules {
+      Some(r) => r,
+      None => String::default(),
+    };
+    Self {
+      name,
+      namespace,
+      ingress_class,
+      address: get_addresses(&ingress.status),
+      paths,
+      default_backend,
+      age: utils::to_age(ingress.metadata.creation_timestamp.as_ref(), Utc::now()),
+      k8s_obj: utils::sanitize_obj(ingress),
+    }
+  }
+}
+
+impl KubeResource<Ingress> for KubeIngress {
+  fn get_k8s_obj(&self) -> &Ingress {
+    &self.k8s_obj
+  }
+}
+
+fn format_backend(backend: &Option<IngressBackend>) -> String {
+  match backend {
+    Some(backend) => {
+      if let Some(resource) = &backend.resource {
+        return resource.name.to_string();
+      }
+      if let Some(service) = &backend.service {
+        match &service.port {
+          Some(port) => {
+            if let Some(name) = &port.name {
+              format!("{}:{}", service.name, name)
+            } else if let Some(number) = &port.number {
+              return format!("{}:{}", service.name, number);
+            } else {
+              return String::default();
+            }
+          }
+          None => String::default(),
+        }
+      } else {
+        String::default()
+      }
+    }
+    None => String::default(),
+  }
+}
+
+fn get_rules(i_rules: &Option<Vec<IngressRule>>) -> Option<String> {
+  i_rules.as_ref().map(|rules| {
+    rules
+      .iter()
+      .map(|i_rule| {
+        let mut rule = i_rule.host.clone().unwrap_or("*".to_string());
+        if let Some(http) = &i_rule.http {
+          http.paths.iter().for_each(|path| {
+            rule = format!(
+              "{}{}►{}",
+              rule,
+              &path.path.clone().unwrap_or("/*".to_string()),
+              format_backend(&Some(path.backend.clone()))
+            );
+          });
+        }
+        rule
+      })
+      .collect::<Vec<String>>()
+      .join(" ")
+  })
+}
+
+fn get_addresses(i_status: &Option<IngressStatus>) -> String {
+  match i_status {
+    Some(status) => match &status.load_balancer {
+      Some(lb) => match &lb.ingress {
+        Some(ingress) => ingress
+          .iter()
+          .map(|i| {
+            if let Some(h) = &i.hostname {
+              h.to_string()
+            } else if let Some(ip) = &i.ip {
+              ip.to_string()
+            } else {
+              "".to_string()
+            }
+          })
+          .collect::<Vec<String>>()
+          .join(" "),
+        None => String::default(),
+      },
+      None => String::default(),
+    },
+    None => String::default(),
+  }
+}
+
+#[cfg(test)]
+mod tests {
+  use super::*;
+  use crate::app::test_utils::*;
+
+  #[test]
+  fn test_ingresses_from_api() {
+    let (ingresses, ingress_list): (Vec<KubeIngress>, Vec<_>) =
+      convert_resource_from_file("ingress");
+
+    assert_eq!(ingresses.len(), 3);
+    assert_eq!(
+      ingresses[0],
+      KubeIngress {
+        name: "ingdefault".into(),
+        namespace: "default".into(),
+        age: utils::to_age(Some(&get_time("2023-05-24T16:14:32Z")), Utc::now()),
+        k8s_obj: ingress_list[0].clone(),
+        ingress_class: "default".into(),
+        address: "".into(),
+        paths: "foo.com/►svc:8080".into(),
+        default_backend: "defaultsvc:http".into(),
+      }
+    );
+    assert_eq!(
+      ingresses[1],
+      KubeIngress {
+        name: "test".into(),
+        namespace: "default".into(),
+        age: utils::to_age(Some(&get_time("2023-05-24T16:20:48Z")), Utc::now()),
+        k8s_obj: ingress_list[1].clone(),
+        ingress_class: "nginx".into(),
+        address: "192.168.49.2".into(),
+        paths: "".into(),
+        default_backend: "test:5701".into(),
+      }
+    );
+    assert_eq!(
+      ingresses[2],
+      KubeIngress {
+        name: "test-ingress".into(),
+        namespace: "dev".into(),
+        age: utils::to_age(Some(&get_time("2023-05-24T16:22:23Z")), Utc::now()),
+        k8s_obj: ingress_list[2].clone(),
+        ingress_class: "nginx".into(),
+        address: "192.168.49.2".into(),
+        paths: "demo.apps.mlopshub.com/►hello-service:80".into(),
+        default_backend: "".into(),
+      }
+    );
+  }
+}
diff --git a/src/app/mod.rs b/src/app/mod.rs
index 8559298c..cc29c6c7 100644
--- a/src/app/mod.rs
+++ b/src/app/mod.rs
@@ -3,6 +3,7 @@ pub(crate) mod contexts;
 pub(crate) mod cronjobs;
 pub(crate) mod daemonsets;
 pub(crate) mod deployments;
+pub(crate) mod ingress;
 pub(crate) mod jobs;
 pub(crate) mod key_binding;
 pub(crate) mod metrics;
@@ -31,6 +32,7 @@ use self::{
   cronjobs::KubeCronJob,
   daemonsets::KubeDaemonSet,
   deployments::KubeDeployment,
+  ingress::KubeIngress,
   jobs::KubeJob,
   key_binding::DEFAULT_KEYBINDING,
   metrics::KubeNodeMetrics,
@@ -78,6 +80,7 @@ pub enum ActiveBlock {
   RoleBindings,
   ClusterRoles,
   ClusterRoleBinding,
+  Ingress,
   More,
 }
 
@@ -137,6 +140,7 @@ pub struct Data {
   pub role_bindings: StatefulTable,
   pub cluster_roles: StatefulTable,
   pub cluster_role_binding: StatefulTable,
+  pub ingress: StatefulTable<KubeIngress>,
 }
 
 /// selected data items
@@ -214,6 +218,7 @@ impl Default for Data {
       role_bindings: StatefulTable::new(),
       cluster_roles: StatefulTable::new(),
      cluster_role_binding: StatefulTable::new(),
+      ingress: StatefulTable::new(),
    }
  }
 }
@@ -346,7 +351,7 @@ impl Default for App {
           ActiveBlock::ClusterRoleBinding,
         ),
         // ("Service Accounts".into(), ActiveBlock::RplCtrl),
-        // ("Ingresses".into(), ActiveBlock::RplCtrl),
+        ("Ingresses".into(), ActiveBlock::Ingress),
         // ("Network Policies".into(), ActiveBlock::RplCtrl),
       ]),
       show_info_bar: true,
@@ -540,6 +545,7 @@ impl App {
     self.dispatch(IoEvent::GetRoleBindings).await;
     self.dispatch(IoEvent::GetClusterRoles).await;
     self.dispatch(IoEvent::GetClusterRoleBinding).await;
+    self.dispatch(IoEvent::GetIngress).await;
     self.dispatch(IoEvent::GetMetrics).await;
   }
 
@@ -593,6 +599,9 @@ impl App {
       ActiveBlock::ClusterRoleBinding => {
         self.dispatch(IoEvent::GetClusterRoleBinding).await;
       }
+      ActiveBlock::Ingress => {
+        self.dispatch(IoEvent::GetIngress).await;
+      }
       ActiveBlock::Logs => {
         if !self.is_streaming {
           // do not tail to avoid duplicates
@@ -755,6 +764,7 @@ mod tests {
       sync_io_rx.recv().await.unwrap(),
       IoEvent::GetClusterRoleBinding
     );
+    assert_eq!(sync_io_rx.recv().await.unwrap(), IoEvent::GetIngress);
     assert_eq!(sync_io_rx.recv().await.unwrap(), IoEvent::GetMetrics);
     assert_eq!(sync_io_rx.recv().await.unwrap(), IoEvent::GetNamespaces);
     assert_eq!(sync_io_rx.recv().await.unwrap(), IoEvent::GetNodes);
diff --git a/src/handlers/mod.rs b/src/handlers/mod.rs
index e1ae7e2d..14af77cc 100644
--- a/src/handlers/mod.rs
+++ b/src/handlers/mod.rs
@@ -521,6 +521,21 @@ async fn handle_route_events(key: Key, app: &mut App) {
         .await;
       }
     }
+    ActiveBlock::Ingress => {
+      if let Some(res) = handle_block_action(key, &mut app.data.ingress) {
+        let _ok = handle_describe_decode_or_yaml_action(
+          key,
+          app,
+          &res,
+          IoCmdEvent::GetDescribe {
+            kind: "ingress".to_owned(),
+            value: res.name.to_owned(),
+            ns: None,
+          },
+        )
+        .await;
+      }
+    }
     ActiveBlock::Contexts | ActiveBlock::Utilization | ActiveBlock::Help => { /* Do nothing */ }
   }
 }
@@ -588,6 +603,7 @@ async fn handle_block_scroll(app: &mut App, up: bool, is_mouse: bool, page: bool
     ActiveBlock::RoleBindings => app.data.role_bindings.handle_scroll(up, page),
     ActiveBlock::ClusterRoles => app.data.cluster_roles.handle_scroll(up, page),
     ActiveBlock::ClusterRoleBinding => app.data.cluster_role_binding.handle_scroll(up, page),
+    ActiveBlock::Ingress => app.data.ingress.handle_scroll(up, page),
     ActiveBlock::Contexts => app.data.contexts.handle_scroll(up, page),
     ActiveBlock::Utilization => app.data.metrics.handle_scroll(up, page),
     ActiveBlock::Help => app.help_docs.handle_scroll(up, page),
diff --git a/src/network/kube_api.rs b/src/network/kube_api.rs
index 466764a7..a3dbee1e 100644
--- a/src/network/kube_api.rs
+++ b/src/network/kube_api.rs
@@ -1,13 +1,14 @@
 use std::fmt;
 
 use anyhow::anyhow;
-use k8s_openapi::api::apps::v1::{DaemonSet, Deployment, ReplicaSet, StatefulSet};
-use k8s_openapi::api::batch::v1::{CronJob, Job};
-use k8s_openapi::api::core::v1::{
-  ConfigMap, Namespace, Node, Pod, ReplicationController, Secret, Service,
+use k8s_openapi::api::{
+  apps::v1::{DaemonSet, Deployment, ReplicaSet, StatefulSet},
+  batch::v1::{CronJob, Job},
+  core::v1::{ConfigMap, Namespace, Node, Pod, ReplicationController, Secret, Service},
+  networking::v1::Ingress,
+  rbac::v1::{ClusterRole, ClusterRoleBinding, Role, RoleBinding},
+  storage::v1::StorageClass,
 };
-use k8s_openapi::api::rbac::v1::{ClusterRole, ClusterRoleBinding, Role, RoleBinding};
-use k8s_openapi::api::storage::v1::StorageClass;
 use kube::{
   api::{ListMeta, ListParams, ObjectList},
   config::Kubeconfig,
@@ -26,6 +27,7 @@ use crate::app::{
   cronjobs::KubeCronJob,
   daemonsets::KubeDaemonSet,
   deployments::KubeDeployment,
+  ingress::KubeIngress,
   jobs::KubeJob,
   metrics::{self, KubeNodeMetrics},
   nodes::KubeNode,
@@ -322,6 +324,13 @@ impl<'a> Network<'a> {
     app.data.cluster_role_binding.set_items(items);
   }
 
+  pub async fn get_ingress(&self) {
+    let items: Vec<KubeIngress> = self.get_namespaced_resources(Ingress::into).await;
+
+    let mut app = self.app.lock().await;
+    app.data.ingress.set_items(items);
+  }
+
   /// calls the kubernetes API to list the given resource for either selected namespace or all namespaces
   async fn get_namespaced_resources(&self, map_fn: F) -> Vec
   where
diff --git a/src/network/mod.rs b/src/network/mod.rs
index 5e50fc32..98fc4b5c 100644
--- a/src/network/mod.rs
+++ b/src/network/mod.rs
@@ -32,6 +32,7 @@ pub enum IoEvent {
   GetRoleBindings,
   GetClusterRoles,
   GetClusterRoleBinding,
+  GetIngress,
   GetMetrics,
   RefreshClient,
 }
@@ -174,6 +175,9 @@ impl<'a> Network<'a> {
       IoEvent::GetClusterRoleBinding => {
         self.get_cluster_role_binding().await;
       }
+      IoEvent::GetIngress => {
+        self.get_ingress().await;
+      }
     };
 
     let mut app = self.app.lock().await;
diff --git a/src/ui/resource_tabs.rs b/src/ui/resource_tabs.rs
index 56e83969..fd775f12 100644
--- a/src/ui/resource_tabs.rs
+++ b/src/ui/resource_tabs.rs
@@ -39,6 +39,7 @@ static ROLES_TITLE: &str = "Roles";
 static ROLE_BINDINGS_TITLE: &str = "RoleBindings";
 static CLUSTER_ROLES_TITLE: &str = "ClusterRoles";
 static CLUSTER_ROLES_BINDING_TITLE: &str = "ClusterRoleBinding";
+static INGRESS_TITLE: &str = "Ingresses";
 
 static DESCRIBE_ACTIVE: &str = "-> Describe ";
 static YAML_ACTIVE: &str = "-> YAML ";
@@ -92,6 +93,7 @@ fn draw_more<B: Backend>(block: ActiveBlock, f: &mut Frame<'_, B>, app: &mut App
     ActiveBlock::RoleBindings => draw_role_bindings_tab(block, f, app, area),
     ActiveBlock::ClusterRoles => draw_cluster_roles_tab(block, f, app, area),
     ActiveBlock::ClusterRoleBinding => draw_cluster_role_binding_tab(block, f, app, area),
+    ActiveBlock::Ingress => draw_ingress_tab(block, f, app, area),
     ActiveBlock::Describe | ActiveBlock::Yaml => {
       let mut prev_route = app.get_prev_route();
       if prev_route.active_block == block {
@@ -106,6 +108,7 @@ fn draw_more<B: Backend>(block: ActiveBlock, f: &mut Frame<'_, B>, app: &mut App
         ActiveBlock::RoleBindings => draw_role_bindings_tab(block, f, app, area),
         ActiveBlock::ClusterRoles => draw_cluster_roles_tab(block, f, app, area),
         ActiveBlock::ClusterRoleBinding => draw_cluster_role_binding_tab(block, f, app, area),
+        ActiveBlock::Ingress => draw_ingress_tab(block, f, app, area),
         _ => { /* do nothing */ }
       }
     }
@@ -1224,6 +1227,70 @@ fn draw_cluster_role_binding_block<B: Backend>(f: &mut Frame<'_, B>, app: &mut A
   );
 }
 
+fn draw_ingress_tab<B: Backend>(
+  block: ActiveBlock,
+  f: &mut Frame<'_, B>,
+  app: &mut App,
+  area: Rect,
+) {
+  draw_resource_tab!(
+    INGRESS_TITLE,
+    block,
+    f,
+    app,
+    area,
+    draw_ingress_tab,
+    draw_ingress_block,
+    app.data.ingress
+  );
+}
+
+fn draw_ingress_block<B: Backend>(f: &mut Frame<'_, B>, app: &mut App, area: Rect) {
+  let title = get_resource_title(app, INGRESS_TITLE, "", app.data.ingress.items.len());
+
+  draw_resource_block(
+    f,
+    area,
+    ResourceTableProps {
+      title,
+      inline_help: DESCRIBE_YAML_AND_ESC_HINT.into(),
+      resource: &mut app.data.ingress,
+      table_headers: vec![
+        "Namespace",
+        "Name",
+        "Ingress class",
+        "Paths",
+        "Default backend",
+        "Addresses",
+        "Age",
+      ],
+      column_widths: vec![
+        Constraint::Percentage(10),
+        Constraint::Percentage(20),
+        Constraint::Percentage(10),
+        Constraint::Percentage(25),
+        Constraint::Percentage(10),
+        Constraint::Percentage(10),
+        Constraint::Percentage(10),
+      ],
+    },
+    |c| {
+      Row::new(vec![
+        Cell::from(c.namespace.to_owned()),
+        Cell::from(c.name.to_owned()),
+        Cell::from(c.ingress_class.to_owned()),
+        Cell::from(c.paths.to_owned()),
+        Cell::from(c.default_backend.to_owned()),
+        Cell::from(c.address.to_owned()),
+        Cell::from(c.age.to_owned()),
+      ])
+      .style(style_primary(app.light_theme))
+    },
+    app.light_theme,
+    app.is_loading,
+  );
+}
+
 /// common for all resources
 fn draw_describe_block<B: Backend>(
   f: &mut Frame<'_, B>,
diff --git a/test_data/ingress.yaml b/test_data/ingress.yaml
new file mode 100644
index 00000000..7bc3742c
--- /dev/null
+++ b/test_data/ingress.yaml
@@ -0,0 +1,90 @@
+apiVersion: v1
+items:
+- apiVersion: networking.k8s.io/v1
+  kind: Ingress
+  metadata:
+    creationTimestamp: "2023-05-24T16:14:32Z"
+    generation: 1
+    name: ingdefault
+    namespace: default
+    managedFields: []
+    resourceVersion: "39551"
+    uid: 5facbac2-4678-4702-a731-4c616c0805b2
+  spec:
+    defaultBackend:
+      service:
+        name: defaultsvc
+        port:
+          name: http
+    ingressClassName: default
+    rules:
+    - host: foo.com
+      http:
+        paths:
+        - backend:
+            service:
+              name: svc
+              port:
+                number: 8080
+          path: /
+          pathType: Prefix
+    tls:
+    - hosts:
+      - foo.com
+      secretName: secret1
+  status:
+    loadBalancer: {}
+- apiVersion: networking.k8s.io/v1
+  kind: Ingress
+  metadata:
+    creationTimestamp: "2023-05-24T16:20:48Z"
+    generation: 1
+    name: test
+    namespace: default
+    resourceVersion: "40022"
+    managedFields: []
+    uid: b56ea7ab-5f82-4112-bd7b-cf5fa1d31891
+  spec:
+    defaultBackend:
+      service:
+        name: test
+        port:
+          number: 5701
+    ingressClassName: nginx
+  status:
+    loadBalancer:
+      ingress:
+      - ip: 192.168.49.2
+- apiVersion: networking.k8s.io/v1
+  kind: Ingress
+  metadata:
+    annotations:
+      kubectl.kubernetes.io/last-applied-configuration: |
+        {"apiVersion":"networking.k8s.io/v1","kind":"Ingress","metadata":{"annotations":{},"name":"test-ingress","namespace":"dev"},"spec":{"ingressClassName":"nginx","rules":[{"host":"demo.apps.mlopshub.com","http":{"paths":[{"backend":{"service":{"name":"hello-service","port":{"number":80}}},"path":"/","pathType":"Prefix"}]}}]}}
+    creationTimestamp: "2023-05-24T16:22:23Z"
+    managedFields: []
+    generation: 1
+    name: test-ingress
+    namespace: dev
+    resourceVersion: "40095"
+    uid: 75a4a34a-0859-4e8f-857b-c6769c8f79e0
+  spec:
+    ingressClassName: nginx
+    rules:
+    - host: demo.apps.mlopshub.com
+      http:
+        paths:
+        - backend:
+            service:
+              name: hello-service
+              port:
+                number: 80
+          path: /
+          pathType: Prefix
+  status:
+    loadBalancer:
+      ingress:
+      - ip: 192.168.49.2
+kind: List
+metadata:
+  resourceVersion: ""