From 96b22f1571974edd4cb979c80631a691e4bf0ea4 Mon Sep 17 00:00:00 2001 From: wwatson Date: Tue, 2 Mar 2021 14:53:42 -0500 Subject: [PATCH 1/3] #601 kubectl apply now used in kubectl client --- .../sample-minimal-cnf/cnf-conformance.yml | 1 + src/tasks/litmus_setup.cr | 5 +-- src/tasks/utils/kubectl_client.cr | 35 ++++++++++++++----- src/tasks/workload/microservice.cr | 6 ++-- src/tasks/workload/resilience.cr | 18 +++++----- 5 files changed, 44 insertions(+), 21 deletions(-) diff --git a/sample-cnfs/sample-minimal-cnf/cnf-conformance.yml b/sample-cnfs/sample-minimal-cnf/cnf-conformance.yml index c98444404..d213f3fa7 100644 --- a/sample-cnfs/sample-minimal-cnf/cnf-conformance.yml +++ b/sample-cnfs/sample-minimal-cnf/cnf-conformance.yml @@ -6,3 +6,4 @@ container_names: rolling_downgrade_test_tag: 1.6.7 rolling_version_change_test_tag: latest rollback_from_tag: latest +release_name: coredns-1614713069 diff --git a/src/tasks/litmus_setup.cr b/src/tasks/litmus_setup.cr index f1bfe611f..58d94797e 100644 --- a/src/tasks/litmus_setup.cr +++ b/src/tasks/litmus_setup.cr @@ -6,8 +6,9 @@ require "./utils/utils.cr" desc "Install LitmusChaos" task "install_litmus" do |_, args| - litmus_install = `kubectl apply -f https://litmuschaos.github.io/litmus/litmus-operator-v1.11.0.yaml` - puts "#{litmus_install}" if check_verbose(args) + # litmus_install = `kubectl apply -f https://litmuschaos.github.io/litmus/litmus-operator-v1.11.0.yaml` + KubectlClient::Apply.file("https://litmuschaos.github.io/litmus/litmus-operator-v1.11.0.yaml") + # puts "#{litmus_install}" if check_verbose(args) end module LitmusManager diff --git a/src/tasks/utils/kubectl_client.cr b/src/tasks/utils/kubectl_client.cr index c335c3599..12f04ebab 100644 --- a/src/tasks/utils/kubectl_client.cr +++ b/src/tasks/utils/kubectl_client.cr @@ -60,21 +60,38 @@ module KubectlClient end module Apply def self.file(file_name) : Bool + # LOGGING.info "apply file: #{file_name}" + # apply = `kubectl apply -f #{file_name}` + # apply_status = $?.success? + # LOGGING.debug "kubectl apply resp: #{apply}" + # LOGGING.debug "apply? #{apply_status}" + # apply_status LOGGING.info "apply file: #{file_name}" - apply = `kubectl apply -f #{file_name}` + status = Process.run("kubectl apply -f #{file_name}", + shell: true, + output: output = IO::Memory.new, + error: stderr = IO::Memory.new) + LOGGING.info "KubectlClient.apply output: #{output.to_s}" + LOGGING.info "KubectlClient.apply stderr: #{stderr.to_s}" + # {status: status, output: output, error: stderr} apply_status = $?.success? - LOGGING.debug "kubectl apply resp: #{apply}" - LOGGING.debug "apply? #{apply_status}" - apply_status end end module Delete def self.file(file_name) - delete = `kubectl delete -f #{file_name}` - delete_status = $?.success? - LOGGING.debug "#{delete}" - LOGGING.debug "delete? #{delete_status}" - delete_status + # delete = `kubectl delete -f #{file_name}` + # delete_status = $?.success? + # LOGGING.debug "#{delete}" + # LOGGING.debug "delete? 
#{delete_status}" + # delete_status + # LOGGING.info "delete file: #{file_name}" + status = Process.run("kubectl delete -f #{file_name}", + shell: true, + output: output = IO::Memory.new, + error: stderr = IO::Memory.new) + LOGGING.info "KubectlClient.delete output: #{output.to_s}" + LOGGING.info "KubectlClient.delete stderr: #{stderr.to_s}" + {status: status, output: output, error: stderr} end end module Set diff --git a/src/tasks/workload/microservice.cr b/src/tasks/workload/microservice.cr index 0fa1602a7..fa3ab0722 100644 --- a/src/tasks/workload/microservice.cr +++ b/src/tasks/workload/microservice.cr @@ -63,7 +63,8 @@ task "reasonable_startup_time" do |_, args| raise "Manifest file not supported yet" end - kubectl_apply = `kubectl apply -f #{yml_file_path}/reasonable_startup_test.yml --namespace=startup-test` + # kubectl_apply = `kubectl apply -f #{yml_file_path}/reasonable_startup_test.yml --namespace=startup-test` + KubectlClient::Apply.file("#{yml_file_path}/reasonable_startup_test.yml --namespace=startup-test") is_kubectl_applied = $?.success? template_ymls = Helm::Manifest.parse_manifest_as_ymls("#{yml_file_path}/reasonable_startup_test.yml") @@ -102,7 +103,8 @@ task "reasonable_startup_time" do |_, args| ensure LOGGING.debug "Reasonable startup cleanup" delete_namespace = `kubectl delete namespace startup-test --force --grace-period 0 2>&1 >/dev/null` - rollback_non_namespaced = `kubectl apply -f #{yml_file_path}/reasonable_startup_orig.yml` + # rollback_non_namespaced = `kubectl apply -f #{yml_file_path}/reasonable_startup_orig.yml` + KubectlClient::Apply.file("#{yml_file_path}/reasonable_startup_orig.yml") # KubectlClient::Get.wait_for_install(deployment_name, wait_count=180) end end diff --git a/src/tasks/workload/resilience.cr b/src/tasks/workload/resilience.cr index 67a69f612..e742092da 100644 --- a/src/tasks/workload/resilience.cr +++ b/src/tasks/workload/resilience.cr @@ -183,12 +183,14 @@ task "pod_network_latency", ["install_litmus"] do |_, args| test_passed = false end if test_passed - install_experiment = `kubectl apply -f https://hub.litmuschaos.io/api/chaos/1.11.1?file=charts/generic/pod-network-latency/experiment.yaml` - install_rbac = `kubectl apply -f https://hub.litmuschaos.io/api/chaos/1.11.1?file=charts/generic/pod-network-latency/rbac.yaml` + KubectlClient::Apply.file("https://hub.litmuschaos.io/api/chaos/1.11.1?file=charts/generic/pod-network-latency/experiment.yaml") + # install_experiment = `kubectl apply -f https://hub.litmuschaos.io/api/chaos/1.11.1?file=charts/generic/pod-network-latency/experiment.yaml` + KubectlClient::Apply.file("https://hub.litmuschaos.io/api/chaos/1.11.1?file=charts/generic/pod-network-latency/rbac.yaml") + # install_rbac = `kubectl apply -f https://hub.litmuschaos.io/api/chaos/1.11.1?file=charts/generic/pod-network-latency/rbac.yaml` annotate = `kubectl annotate --overwrite deploy/#{resource["name"]} litmuschaos.io/chaos="true"` - puts "#{install_experiment}" if check_verbose(args) - puts "#{install_rbac}" if check_verbose(args) - puts "#{annotate}" if check_verbose(args) + # puts "#{install_experiment}" if check_verbose(args) + # puts "#{install_rbac}" if check_verbose(args) + # puts "#{annotate}" if check_verbose(args) chaos_experiment_name = "pod-network-latency" test_name = "#{resource["name"]}-conformance-#{Time.local.to_unix}" @@ -197,9 +199,9 @@ task "pod_network_latency", ["install_litmus"] do |_, args| template = Crinja.render(chaos_template_pod_network_latency, {"chaos_experiment_name"=> "#{chaos_experiment_name}", 
"deployment_label" => "#{KubectlClient::Get.resource_spec_labels(resource["kind"], resource["name"]).as_h.first_key}", "deployment_label_value" => "#{KubectlClient::Get.resource_spec_labels(resource["kind"], resource["name"]).as_h.first_value}", "test_name" => test_name}) chaos_config = `echo "#{template}" > "#{destination_cnf_dir}/#{chaos_experiment_name}-chaosengine.yml"` puts "#{chaos_config}" if check_verbose(args) - run_chaos = `kubectl apply -f "#{destination_cnf_dir}/#{chaos_experiment_name}-chaosengine.yml"` - puts "#{run_chaos}" if check_verbose(args) - + # run_chaos = `kubectl apply -f "#{destination_cnf_dir}/#{chaos_experiment_name}-chaosengine.yml"` + # puts "#{run_chaos}" if check_verbose(args) + KubectlClient::Apply.file("#{destination_cnf_dir}/#{chaos_experiment_name}-chaosengine.yml") LitmusManager.wait_for_test(test_name,chaos_experiment_name,args) LitmusManager.check_chaos_verdict(chaos_result_name,chaos_experiment_name,args) end From 6f14af29a17926fe1433bea24e2b3c4dd04cde16 Mon Sep 17 00:00:00 2001 From: wwatson Date: Tue, 2 Mar 2021 15:03:01 -0500 Subject: [PATCH 2/3] Revert "#545 cnf_manager now uses empty array for looping over resources that dont have a container" This reverts commit 90081613d5149484635e0411511c6c8b3810f238. Conflicts: src/tasks/utils/cnf_manager.cr --- src/tasks/workload/configuration_lifecycle.cr | 65 +++++++------------ 1 file changed, 24 insertions(+), 41 deletions(-) diff --git a/src/tasks/workload/configuration_lifecycle.cr b/src/tasks/workload/configuration_lifecycle.cr index 7f833e2dd..64c5897da 100644 --- a/src/tasks/workload/configuration_lifecycle.cr +++ b/src/tasks/workload/configuration_lifecycle.cr @@ -110,33 +110,6 @@ task "readiness" do |_, args| end end - -desc "Retrieve the manifest for the CNF's helm chart" -task "retrieve_manifest" do |_, args| - # TODO put this in a function - CNFManager::Task.task_runner(args) do |args| - VERBOSE_LOGGING.info "retrieve_manifest" if check_verbose(args) - # config = cnf_conformance_yml - config = CNFManager.parsed_config_file(CNFManager.ensure_cnf_conformance_yml_path(args.named["cnf-config"].as(String))) - # deployment_name = config.get("deployment_name").as_s - #TODO loop through all services - service_name = "#{config.get("service_name").as_s?}" - # VERBOSE_LOGGING.debug "Deployment_name: #{deployment_name}" if check_verbose(args) - VERBOSE_LOGGING.debug service_name if check_verbose(args) - destination_cnf_dir = CNFManager.cnf_destination_dir(CNFManager.ensure_cnf_conformance_dir(args.named["cnf-config"].as(String))) - # TODO move to kubectl client - # deployment = `kubectl get deployment #{deployment_name} -o yaml > #{destination_cnf_dir}/manifest.yml` - # KubectlClient::Get.save_manifest(deployment_name, "#{destination_cnf_dir}/manifest.yml") - # VERBOSE_LOGGING.debug deployment if check_verbose(args) - unless service_name.empty? 
- # TODO move to kubectl client - service = `kubectl get service #{service_name} -o yaml > #{destination_cnf_dir}/service.yml` - end - VERBOSE_LOGGING.debug service if check_verbose(args) - service - end -end - rolling_version_change_test_names.each do |tn| pretty_test_name = tn.split(/:|_/).join(" ") pretty_test_name_capitalized = tn.split(/:|_/).map(&.capitalize).join(" ") @@ -269,26 +242,36 @@ task "rollback" do |_, args| end desc "Does the CNF use NodePort" -task "nodeport_not_used", ["retrieve_manifest"] do |_, args| - task_response = CNFManager::Task.task_runner(args) do |args, config| +task "nodeport_not_used" do |_, args| + # TODO rename task_runner to multi_cnf_task_runner + CNFManager::Task.task_runner(args) do |args, config| VERBOSE_LOGGING.info "nodeport_not_used" if check_verbose(args) LOGGING.debug "cnf_config: #{config}" release_name = config.cnf_config[:release_name] service_name = config.cnf_config[:service_name] destination_cnf_dir = config.cnf_config[:destination_cnf_dir] - #TODO loop through all resources that have a kind of service - if File.exists?("#{destination_cnf_dir}/service.yml") - service = Totem.from_file "#{destination_cnf_dir}/service.yml" - VERBOSE_LOGGING.debug service.inspect if check_verbose(args) - service_type = service.get("spec").as_h["type"].as_s - VERBOSE_LOGGING.debug service_type if check_verbose(args) - if service_type == "NodePort" - upsert_failed_task("nodeport_not_used", "✖️ FAILURE: NodePort is being used") - else - upsert_passed_task("nodeport_not_used", "✔️ PASSED: NodePort is not used") + task_response = CNFManager.workload_resource_test(args, config, check_containers:false) do |resource, container, initialized| + LOGGING.info "nodeport_not_used resource: #{resource}" + if resource["kind"].as_s.downcase == "service" + LOGGING.info "resource kind: #{resource}" + service = KubectlClient::Get.resource(resource[:kind], resource[:name]) + LOGGING.debug "service: #{service}" + service_type = service.dig?("spec", "type") + LOGGING.info "service_type: #{service_type}" + VERBOSE_LOGGING.debug service_type if check_verbose(args) + if service_type == "NodePort" + #TODO make a service selector and display the related resources + # that are tied to this service + puts "resource service: #{resource} has a NodePort that is being used".colorize(:red) + test_passed=false + end + test_passed end - else + end + if task_response upsert_passed_task("nodeport_not_used", "✔️ PASSED: NodePort is not used") + else + upsert_failed_task("nodeport_not_used", "✖️ FAILURE: NodePort is being used") end end end @@ -433,7 +416,7 @@ def configmap_template end desc "Does the CNF use immutable configmaps?" 
-task "immutable_configmap", ["retrieve_manifest"] do |_, args| +task "immutable_configmap" do |_, args| task_response = CNFManager::Task.task_runner(args) do |args, config| VERBOSE_LOGGING.info "immutable_configmap" if check_verbose(args) LOGGING.debug "cnf_config: #{config}" From 23761a60db7a3042eff1b8b04634867d0b86a34b Mon Sep 17 00:00:00 2001 From: wwatson Date: Tue, 2 Mar 2021 16:02:55 -0500 Subject: [PATCH 3/3] #601 nodeport_not_used now has check_service set to true (regression) --- src/tasks/workload/configuration_lifecycle.cr | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tasks/workload/configuration_lifecycle.cr b/src/tasks/workload/configuration_lifecycle.cr index 64c5897da..9e980f190 100644 --- a/src/tasks/workload/configuration_lifecycle.cr +++ b/src/tasks/workload/configuration_lifecycle.cr @@ -250,7 +250,7 @@ task "nodeport_not_used" do |_, args| release_name = config.cnf_config[:release_name] service_name = config.cnf_config[:service_name] destination_cnf_dir = config.cnf_config[:destination_cnf_dir] - task_response = CNFManager.workload_resource_test(args, config, check_containers:false) do |resource, container, initialized| + task_response = CNFManager.workload_resource_test(args, config, check_containers:false, check_service: true) do |resource, container, initialized| LOGGING.info "nodeport_not_used resource: #{resource}" if resource["kind"].as_s.downcase == "service" LOGGING.info "resource kind: #{resource}"