Skip to content

Deploying Template to an Existing Virtual Network

Kevin Mack edited this page Jun 19, 2019 · 1 revision

One common question: the template deploys to a new virtual network in Azure by default, but how do I deploy it to an existing virtual network in Azure? Here are the steps:

Step 1.) Go to the network.tf file, and comment out all the code (keyboard shortcut: Ctrl-A to select all, then Ctrl-K, Ctrl-C to comment).

Step 2.) We need to get the resource ids of the subnet that you want to deploy the cluster into. To do this, go to the terminal and run the following command against azure government using the CLI: Azure CLI - Network / Subnet

az network vnet subnet list -g {Resource Group} --vnet-name {Name of VNET}

This will get you a JSON dump of all the subnets in the vnet you want to deploy to, and each entry will look like the following. Grab the "id" value — this is the resource ID of the subnet:

{
    "addressPrefix": "XX.X.X.X/24",
    "addressPrefixes": null,
    "delegations": [],
    "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/{resource-Group}/providers/Microsoft.Network/virtualNetworks/{virtual-network}/subnets/App",
    "interfaceEndpoints": null,
    "ipConfigurationProfiles": null,
    "ipConfigurations": null,
    "name": "App",
    "networkSecurityGroup": null,
    "provisioningState": "Succeeded",
    "purpose": null,
    "resourceGroup": "{resource-Group}",
    "resourceNavigationLinks": null,
    "routeTable": null,
    "serviceAssociationLinks": null,
    "serviceEndpointPolicies": null,
    "serviceEndpoints": [
      {
        "locations": [
          "usgovarizona",
          "usgovtexas"
        ],
        "provisioningState": "Succeeded",
        "service": "Microsoft.Storage"
      }
    ],
    "type": "Microsoft.Network/virtualNetworks/subnets"
  }

Now go into lkma.tf and update the following values (line 178)

# Kubernetes master (lkma) VM module.
# When targeting an existing VNET, the only value that needs to change is
# subnet_id: replace it with the "id" of the Management subnet returned by
# `az network vnet subnet list`.
module "lkma" {
  source = "modules/create_vm_linux"
  os_code = "l"
  instance_type = "kmas"
  ssh_key = "${var.ssh_key}"
  # Availability-set size, instance count, and public-IP count all derive from
  # the same "lkma" entry in var.instance_counts (default 0 = not deployed).
  number_of_vms_in_avset = "${lookup(var.instance_counts, "lkma", 0)}"
  platform_fault_domain_count = "${var.platform_fault_domain_count}"
  environment_code = "${var.environment_code}"
  deployment_code = "${var.deployment_code}"
  location_code = "${var.location_code}"
  azure_location = "${var.azure_location}"
  instance_count = "${lookup(var.instance_counts, "lkma", 0)}"
  pip_count = "${lookup(var.instance_counts, "lkma", 0)}"
  vm_size = "${lookup(var.instance_sizes, "lkma", "")}"
  # <-- Replace with the resource ID of your existing Management subnet.
  subnet_id = "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/{resource-Group}/providers/Microsoft.Network/virtualNetworks/{virtual-network}/subnets/Management"
  resource_group_name = "${azurerm_resource_group.management.name}"
  # concat(...*.id, list("")) guards element() against an empty list when the
  # NSG count is 0 (pre-0.12 Terraform pattern).
  network_security_group_id = "${lookup(var.instance_counts, "lkma", 0) == 0 ? "" : element(concat(azurerm_network_security_group.lkma.*.id, list("")), 0)}"
  storage_type = "${lookup(var.storage_type, "lkma", var.storage_type_default)}"
  os_disk_image_id = "${data.azurerm_image.ubuntu.id}"
  os_disk_size = "${lookup(var.os_disk_sizes, "lkma", var.os_disk_size_default)}"
  data_disk_count = "${lookup(var.data_disk_counts, "lkma", 0)}"
  data_disk_size = "${lookup(var.data_disk_sizes, "lkma", 0)}"
  # Bootstraps the master: passes the AAD app id, its generated password, the
  # Key Vault name, and the tenant id to kubemaster.sh on the VM.
  vm_extensions_command = "sudo /var/tmp/kubemaster.sh '${element(concat(azurerm_azuread_application.kub-ad-app-kv1.*.application_id, list("")), 0)}' '${element(concat(random_string.kub-rs-pd-kv.*.result, list("")), 0)}' '${var.environment_code}${var.deployment_code}${var.location_code}lkub-kv1' '${var.keyvault_tenantid}'"
}

And then go to lkwn.tf, and update the same (line 164)

# Kubernetes worker-node (lkwn) VM module.
# When targeting an existing VNET, the only value that needs to change is
# subnet_id: replace it with the "id" of the Data subnet returned by
# `az network vnet subnet list`.
module "lkwn" {
  source = "modules/create_vm_linux"
  os_code = "l"
  instance_type = "kwn"
  ssh_key = "${var.ssh_key}"
  number_of_vms_in_avset = "${lookup(var.instance_counts, "lkwn", 0)}"
  platform_fault_domain_count = "${var.platform_fault_domain_count}"
  environment_code = "${var.environment_code}"
  deployment_code = "${var.deployment_code}"
  location_code = "${var.location_code}"
  azure_location = "${var.azure_location}"
  instance_count = "${lookup(var.instance_counts, "lkwn", 0)}"
  pip_count = "${lookup(var.instance_counts, "lkwn", 0)}"
  vm_size = "${lookup(var.instance_sizes, "lkwn", "")}"
  # <-- Replace with the resource ID of your existing Data subnet.
  subnet_id = "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/{resource-Group}/providers/Microsoft.Network/virtualNetworks/{virtual-network}/subnets/Data"
  # Workers are added to the internal load balancer's backend pool.
  lb_pools_ids = ["${lookup(var.instance_counts, "lkwn", 0) == 0 ? "" : element(concat(azurerm_lb_backend_address_pool.lb_int_backend_pool.*.id, list("")), 0)}"]
  resource_group_name = "${azurerm_resource_group.data.name}"
  network_security_group_id = "${lookup(var.instance_counts, "lkwn", 0) == 0 ? "" : element(concat(azurerm_network_security_group.lkwn.*.id, list("")), 0)}"
  storage_type = "${lookup(var.storage_type, "lkwn", var.storage_type_default)}"
  os_disk_image_id = "${data.azurerm_image.ubuntu.id}"
  os_disk_size = "${lookup(var.os_disk_sizes, "lkwn", var.os_disk_size_default)}"
  data_disk_count = "${lookup(var.data_disk_counts, "lkwn", 0)}"
  data_disk_size = "${lookup(var.data_disk_sizes, "lkwn", 0)}"
  # NOTE(review): the guard condition keys on "lkma" (master count), not
  # "lkwn" — presumably intentional so the worker bootstrap script only runs
  # when a master exists; confirm against the deployment order described below.
  vm_extensions_command = "${lookup(var.instance_counts, "lkma", 0) == 0 ? "" : "sudo /var/tmp/kubenode.sh '${element(concat(azurerm_azuread_application.kub-ad-app-kv1.*.application_id,list("")), 0)}' '${element(concat(random_string.kub-rs-pd-kv.*.result, list("")), 0)}' '${var.environment_code}${var.deployment_code}${var.location_code}lkub-kv1' '${var.keyvault_tenantid}'"}"
}

Then zero out the template (redeploy with all instance counts set to 0) to make sure all resources are cleaned up. Then re-run the template with lkma = 1, and finally re-run the template with lkwn = {X} to deploy the cluster.

Clone this wiki locally