I struggle with the way Terraform handles for_each loops and count, and I can't figure out how to use a locals block to achieve this. How can I grab the data disk IDs from a resource created with a for_each loop? The VM is also created with for_each, and I can't work out how to obtain the managed_disk_id value to pass into the azurerm_virtual_machine_data_disk_attachment resource block.
main.tf
<code> ###############################################################################################
# Linux VMs #
###############################################################################################
resource "azurerm_virtual_machine" "spark-linux" {
  for_each              = var.spark_vms
  name                  = each.value.vm_name
  location              = var.location
  resource_group_name   = var.iaas_rg_name
  network_interface_ids = [azurerm_network_interface.spark-nic[each.key].id]
  vm_size               = each.value.vm_size
  availability_set_id   = azurerm_availability_set.spark_av_set.id
  tags                  = merge(var.base_tags, var.spark_vm_tags)

  identity {
    type = "SystemAssigned"
  }

  boot_diagnostics {
    enabled     = true
    storage_uri = ""
  }

  storage_os_disk {
    name              = "${each.value.vm_name}-osDisk"
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = var.prem_os_managed_disk_type
  }

  os_profile {
    computer_name  = each.value.vm_name
    admin_username = var.admin_username
    admin_password = var.admin_password
  }

  storage_image_reference {
    publisher = var.linux_publisher
    offer     = var.linux_offer
    sku       = var.linux_sku
    version   = var.linux_os_version
  }

  os_profile_linux_config {
    disable_password_authentication = false
  }

  delete_os_disk_on_termination    = true
  delete_data_disks_on_termination = true
}
resource "azurerm_managed_disk" "data" {
for_each = var.spark_vms
name = resource "azurerm_managed_disk" "data" {
for_each = var.spark_vms
name = format("${each.value.vm_name}-DataDisk--%02d, ${each.valuevm_data_disk_count}")
location = var.location
resource_group_name = var.iaas_rg_name
storage_account_type = each.value.vm_data_disk_type
create_option = "Empty"
disk_size_gb = each.value.vm_data_disk_size
tags = var.spark_vm_tags
}
location = var.location
resource_group_name = var.iaas_rg_name
storage_account_type = each.value.vm_data_disk_type
create_option = "Empty"
disk_size_gb = each.value.vm_data_disk_size
tags = var.spark_vm_tags
}
resource "azurerm_virtual_machine_data_disk_attachment" "data" {
for_each = var.spark_vms
managed_disk_id = azurerm_managed_disk.data.*.id
virtual_machine_id = azurerm_virtual_machine[each.value.vm_name].vm.id
lun = 10
caching = "ReadWrite"
}
</code>
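The last block is where I'm stuck. From reading the for_each documentation, I think both resources have to be indexed by the same map key, so my best guess for the attachment (assuming just one data disk per VM for now) looks like the sketch below, but I'm not sure this is right:
<code>resource "azurerm_virtual_machine_data_disk_attachment" "data" {
  for_each = var.spark_vms

  # both resources iterate over var.spark_vms, so each.key ("vm1", "vm2", ...)
  # should line up across the VM, the disk, and this attachment
  managed_disk_id    = azurerm_managed_disk.data[each.key].id
  virtual_machine_id = azurerm_virtual_machine.spark-linux[each.key].id
  lun                = 10
  caching            = "ReadWrite"
}
</code>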
terraform.tfvars – spark_vm variables
<code>spark_vms = {
  "vm1" = {
    vm_name            = "SPARK-VM-005"
    vm_ip_address      = "172.24.30.5"
    vm_size            = "Standard_D8s_v3"
    vm_data_disk_count = 1
    vm_data_disk_size  = 256
    vm_data_disk_type  = "Premium_LRS"
  }
  "vm2" = {
    vm_name            = "SPARK-VM-006"
    vm_ip_address      = "172.24.30.6"
    vm_size            = "Standard_D8s_v3"
    vm_data_disk_count = 1
    vm_data_disk_size  = 256
    vm_data_disk_type  = "Premium_LRS"
  }
  "vm3" = {
    vm_name            = "SPARK-VM-007"
    vm_ip_address      = "172.24.30.7"
    vm_size            = "Standard_D8s_v3"
    vm_data_disk_count = 1
    vm_data_disk_size  = 256
    vm_data_disk_type  = "Premium_LRS"
  }
}
</code>
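Since each VM also has a vm_data_disk_count, I assume I will eventually need a locals block that flattens var.spark_vms into one entry per disk, so that both the disk and the attachment can for_each over it. This is only a rough, untested sketch of what I think that might look like (spark_data_disks and its field names are placeholders I made up):
<code>locals {
  # one map entry per (VM, disk index) pair, e.g. "vm1-0", "vm2-0", ...
  spark_data_disks = {
    for disk in flatten([
      for vm_key, vm in var.spark_vms : [
        for i in range(vm.vm_data_disk_count) : {
          map_key = "${vm_key}-${i}"
          vm_key  = vm_key
          vm_name = vm.vm_name
          index   = i
          size    = vm.vm_data_disk_size
          type    = vm.vm_data_disk_type
        }
      ]
    ]) : disk.map_key => disk
  }
}

# The managed disk would then use for_each = local.spark_data_disks as well,
# so each attachment can look up its disk by the composite key and its VM by vm_key.
resource "azurerm_virtual_machine_data_disk_attachment" "data" {
  for_each = local.spark_data_disks

  managed_disk_id    = azurerm_managed_disk.data[each.key].id
  virtual_machine_id = azurerm_virtual_machine.spark-linux[each.value.vm_key].id
  lun                = each.value.index + 10
  caching            = "ReadWrite"
}
</code>
Does this look like the right direction, or is there a simpler way to reference the disk IDs directly?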