fix(terraform): Fixing issues with checks CKV_AZURE_226 & CKV_AZURE_227 (#5638)

* Fixing CKV_AZURE_226

* Fixed issue with CKV_AZURE_227

* fix linting and test

---------

Co-authored-by: Thomas Defise <[email protected]>
Co-authored-by: gruebel <[email protected]>
3 people committed Oct 16, 2023
1 parent 0f3c5ad commit 0d94461
Showing 5 changed files with 132 additions and 61 deletions.
@@ -1,4 +1,3 @@

from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck

@@ -18,11 +17,19 @@ def __init__(self) -> None:
id = "CKV_AZURE_227"
supported_resources = ("azurerm_kubernetes_cluster", "azurerm_kubernetes_cluster_node_pool")
categories = (CheckCategories.KUBERNETES,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,
missing_block_result=CheckResult.FAILED)
super().__init__(
name=name,
id=id,
categories=categories,
supported_resources=supported_resources,
missing_block_result=CheckResult.FAILED,
)

def get_inspected_key(self) -> str:
return "enable_host_encryption"
if self.entity_type == "azurerm_kubernetes_cluster":
return "default_node_pool/[0]/enable_host_encryption"
else:
return "enable_host_encryption"


check = AKSEncryptionAtHostEnabled()
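The fix above makes get_inspected_key depend on the resource type: on azurerm_kubernetes_cluster the host-encryption flag lives inside the nested default_node_pool block, while on azurerm_kubernetes_cluster_node_pool it is a top-level argument. The sketch below is an illustration only, not checkov's internal resolver; it shows how a "default_node_pool/[0]/enable_host_encryption" style key can be walked over a parsed resource configuration. The cluster_conf and node_pool_conf dicts are hypothetical example data shaped roughly like parsed HCL, with attribute values wrapped in lists.

# Illustration only -- not checkov's internal implementation. It resolves a
# nested inspected key such as "default_node_pool/[0]/enable_host_encryption"
# against a parsed resource configuration: "[N]" segments select a list index,
# every other segment selects a dict key.
from typing import Any


def resolve_inspected_key(conf: Any, inspected_key: str) -> Any:
    value = conf
    for part in inspected_key.split("/"):
        if part.startswith("[") and part.endswith("]"):
            value = value[int(part[1:-1])]  # list index segment, e.g. "[0]"
        else:
            value = value[part]             # dict key segment, e.g. "default_node_pool"
    return value


# Hypothetical example data (assumption: values wrapped in lists, as in parsed HCL):
cluster_conf = {"default_node_pool": [{"enable_host_encryption": [True]}]}
node_pool_conf = {"enable_host_encryption": [True]}

print(resolve_inspected_key(cluster_conf, "default_node_pool/[0]/enable_host_encryption"))  # [True]
print(resolve_inspected_key(node_pool_conf, "enable_host_encryption"))                      # [True]

Because the same check class covers both supported resource types, branching on self.entity_type keeps one check instead of splitting it into two nearly identical classes.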
@@ -25,7 +25,7 @@ def __init__(self) -> None:
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

def get_inspected_key(self) -> str:
return "os_disk_type"
return "default_node_pool/[0]/os_disk_type"

def get_expected_value(self) -> Any:
return "Ephemeral"
@@ -1,96 +1,111 @@
resource "azurerm_kubernetes_cluster" "pass" {
name = "example-aks1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
dns_prefix = "exampleaks1"
enable_host_encryption = true
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1

default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
name = "default"

enable_host_encryption = true
vm_size = "Standard_E4ads_v5"
os_disk_type = "Ephemeral"
zones = [1, 2, 3]
only_critical_addons_enabled = true

type = "VirtualMachineScaleSets"
vnet_subnet_id = var.subnet_id
enable_auto_scaling = true
max_count = 6
min_count = 2
orchestrator_version = local.kubernetes_version
}

identity {
type = "SystemAssigned"
}

tags = {
Environment = "Production"
}
}

resource "azurerm_kubernetes_cluster_node_pool" "pass" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1
enable_host_encryption = true
enable_host_encryption = true

tags = {
Environment = "Production"
}
}

resource "azurerm_kubernetes_cluster" "fail" {
name = "example-aks1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
dns_prefix = "exampleaks1"

default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
}

identity {
type = "SystemAssigned"
}
resource "azurerm_kubernetes_cluster" "fail1" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1

tags = {
Environment = "Production"
}

default_node_pool {
name = "default"

enable_host_encryption = false
vm_size = "Standard_E4ads_v5"
zones = [1, 2, 3]
only_critical_addons_enabled = true

type = "VirtualMachineScaleSets"
vnet_subnet_id = var.subnet_id
enable_auto_scaling = true
max_count = 6
min_count = 2
orchestrator_version = local.kubernetes_version
}

}

resource "azurerm_kubernetes_cluster_node_pool" "fail" {
resource "azurerm_kubernetes_cluster_node_pool" "fail1" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1
enable_host_encryption = false

tags = {
Environment = "Production"
}
}

resource "azurerm_kubernetes_cluster" "fail1" {
name = "example-aks1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
dns_prefix = "exampleaks1"
enable_host_encryption = false

default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
}
resource "azurerm_kubernetes_cluster" "fail2" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1

identity {
type = "SystemAssigned"
default_node_pool {
name = "default"

vm_size = "Standard_E4ads_v5"
os_disk_type = "Ephemeral"
zones = [1, 2, 3]
only_critical_addons_enabled = true

type = "VirtualMachineScaleSets"
vnet_subnet_id = var.subnet_id
enable_auto_scaling = true
max_count = 6
min_count = 2
orchestrator_version = local.kubernetes_version
}

tags = {
Environment = "Production"
}
}

resource "azurerm_kubernetes_cluster_node_pool" "fail1" {
resource "azurerm_kubernetes_cluster_node_pool" "fail2" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1
enable_host_encryption = false

tags = {
Environment = "Production"
@@ -3,11 +3,24 @@ resource "azurerm_kubernetes_cluster" "pass" {
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1
os_disk_type = "Ephemeral"

tags = {
Environment = "Production"
default_node_pool {
name = "default"

enable_host_encryption = true
vm_size = "Standard_E4ads_v5"
os_disk_type = "Ephemeral"
zones = [1, 2, 3]
only_critical_addons_enabled = true

type = "VirtualMachineScaleSets"
vnet_subnet_id = var.subnet_id
enable_auto_scaling = true
max_count = 6
min_count = 2
orchestrator_version = local.kubernetes_version
}

}

resource "azurerm_kubernetes_cluster" "fail" {
@@ -19,6 +32,23 @@ resource "azurerm_kubernetes_cluster" "fail" {
tags = {
Environment = "Production"
}

default_node_pool {
name = "default"

enable_host_encryption = true
vm_size = "Standard_E4ads_v5"
zones = [1, 2, 3]
only_critical_addons_enabled = true

type = "VirtualMachineScaleSets"
vnet_subnet_id = var.subnet_id
enable_auto_scaling = true
max_count = 6
min_count = 2
orchestrator_version = local.kubernetes_version
}

}

resource "azurerm_kubernetes_cluster" "fail2" {
@@ -31,4 +61,23 @@ resource "azurerm_kubernetes_cluster" "fail2" {
tags = {
Environment = "Production"
}

default_node_pool {
name = "default"

enable_host_encryption = true
vm_size = "Standard_E4ads_v5"
os_disk_type = "Managed"
zones = [1, 2, 3]
only_critical_addons_enabled = true

type = "VirtualMachineScaleSets"
vnet_subnet_id = var.subnet_id
enable_auto_scaling = true
max_count = 6
min_count = 2
orchestrator_version = local.kubernetes_version
}


}
@@ -22,15 +22,15 @@ def test(self):
'azurerm_kubernetes_cluster_node_pool.pass'
}
failing_resources = {
'azurerm_kubernetes_cluster.fail',
'azurerm_kubernetes_cluster.fail1',
'azurerm_kubernetes_cluster_node_pool.fail',
'azurerm_kubernetes_cluster.fail2',
'azurerm_kubernetes_cluster_node_pool.fail1',
'azurerm_kubernetes_cluster_node_pool.fail2',
}
skipped_resources = {}

passed_check_resources = set([c.resource for c in report.passed_checks])
failed_check_resources = set([c.resource for c in report.failed_checks])
passed_check_resources = {c.resource for c in report.passed_checks}
failed_check_resources = {c.resource for c in report.failed_checks}

self.assertEqual(summary['passed'], len(passing_resources))
self.assertEqual(summary['failed'], len(failing_resources))
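The test now expects two failing clusters and two failing node pools, matching the renamed fixture resources, and the set([... for ...]) calls were tightened into set comprehensions. The following self-contained sketch shows the same assertion pattern in isolation; FakeRecord and FakeReport are stand-ins invented here, not checkov's runner or test harness.

# Self-contained illustration of the pass/fail assertion pattern used above,
# with fake report objects instead of checkov's runner.
import unittest


class FakeRecord:
    def __init__(self, resource: str) -> None:
        self.resource = resource


class FakeReport:
    def __init__(self, passed, failed) -> None:
        self.passed_checks = [FakeRecord(r) for r in passed]
        self.failed_checks = [FakeRecord(r) for r in failed]


class TestAssertionPattern(unittest.TestCase):
    def test_pass_fail_sets(self) -> None:
        report = FakeReport(
            passed=["azurerm_kubernetes_cluster.pass", "azurerm_kubernetes_cluster_node_pool.pass"],
            failed=[
                "azurerm_kubernetes_cluster.fail1",
                "azurerm_kubernetes_cluster.fail2",
                "azurerm_kubernetes_cluster_node_pool.fail1",
                "azurerm_kubernetes_cluster_node_pool.fail2",
            ],
        )
        passing_resources = {
            "azurerm_kubernetes_cluster.pass",
            "azurerm_kubernetes_cluster_node_pool.pass",
        }
        failing_resources = {
            "azurerm_kubernetes_cluster.fail1",
            "azurerm_kubernetes_cluster.fail2",
            "azurerm_kubernetes_cluster_node_pool.fail1",
            "azurerm_kubernetes_cluster_node_pool.fail2",
        }

        # Same pattern as the real test: collect resource IDs with set
        # comprehensions, then compare against the expected sets.
        passed_check_resources = {c.resource for c in report.passed_checks}
        failed_check_resources = {c.resource for c in report.failed_checks}

        self.assertEqual(passing_resources, passed_check_resources)
        self.assertEqual(failing_resources, failed_check_resources)


if __name__ == "__main__":
    unittest.main()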
