Hi
Can you please check the error below and suggest a solution?
#############################
on Modules/HDInsight/main.tf line 88, in resource "azurerm_hdinsight_kafka_cluster" "hdi_kafka_cluster":
88: min_tls_version = "1.2"
An argument named "min_tls_version" is not expected here.
##[error]Bash exited with code ‘1’.
##[error]Bash wrote one or more lines to the standard error stream.
##[error]
Error: Unsupported argument
#############################
# HDInsight Kafka cluster — one instance per entry in local.hd_insight_cluster.
resource "azurerm_hdinsight_kafka_cluster" "hdi_kafka_cluster" {
  # Build a temporary map keyed by cluster name so for_each addresses are stable.
  for_each = { for v in local.hd_insight_cluster : v.name => v }

  name                = each.value.name
  resource_group_name = each.value.resource_group
  location            = each.value.location
  cluster_version     = each.value.cluster_version
  tier                = each.value.tier

  # FIX: HDInsight cluster resources use "tls_min_version", NOT "min_tls_version"
  # (that name belongs to azurerm_storage_account). This is what caused the
  # "Unsupported argument" error. Requires azurerm provider >= 2.28.
  # NOTE(review): changing this value forces cluster re-creation.
  tls_min_version = "1.2"

  component_version {
    kafka = each.value.component_version
  }

  gateway {
    enabled  = each.value.gateway.enabled
    username = each.value.gateway.username
    # Gateway password pulled from the shared Key Vault secret map.
    password = var.cluster_kv_ksc_map["Standard"].secrets["hdi-gw-password"].value
  }

  # Default Data Lake Gen2 filesystem backing the cluster, accessed via a
  # user-assigned managed identity (all three IDs resolved from locals).
  storage_account_gen2 {
    is_default                   = true
    filesystem_id                = local.sa_dl_g2_fs_ids[each.value.storage_account_gen2.sa_data_lake_gen2_fs_name]
    storage_resource_id          = local.storage_account_ids[each.value.storage_account_gen2.storage_account_name]
    managed_identity_resource_id = local.user_msi_ids[each.value.storage_account_gen2.user_msi_name]
  }

  roles {
    head_node {
      vm_size            = each.value.head_node.vm_size
      username           = each.value.head_node.username
      ssh_keys           = [var.cluster_kv_ksc_map["Standard"].secrets["ssh-pub-key"].value]
      virtual_network_id = local.vnet_ids[each.value.head_node.vnet_name]
      subnet_id          = var.subnet_ids[each.value.head_node.snet_name]
    }
    worker_node {
      vm_size                  = each.value.worker_node.vm_size
      username                 = each.value.worker_node.username
      ssh_keys                 = [var.cluster_kv_ksc_map["Standard"].secrets["ssh-pub-key"].value]
      virtual_network_id       = local.vnet_ids[each.value.worker_node.vnet_name]
      subnet_id                = var.subnet_ids[each.value.worker_node.snet_name]
      target_instance_count    = each.value.worker_node.target_instance_count
      number_of_disks_per_node = each.value.worker_node.number_of_disks_per_node
    }
    zookeeper_node {
      vm_size            = each.value.zookeeper_node.vm_size
      username           = each.value.zookeeper_node.username
      ssh_keys           = [var.cluster_kv_ksc_map["Standard"].secrets["ssh-pub-key"].value]
      virtual_network_id = local.vnet_ids[each.value.zookeeper_node.vnet_name]
      subnet_id          = var.subnet_ids[each.value.zookeeper_node.snet_name]
    }
  }

  # Ship cluster logs/metrics to the shared Log Analytics workspace.
  monitor {
    log_analytics_workspace_id = var.log_analytics.workspace_id
    primary_key                = var.log_analytics.primary_shared_key
  }

  lifecycle {
    # prevent_destroy = true  # uncomment to guard against accidental deletion
    # Azure may auto-bump these after creation; ignore drift to avoid
    # spurious plans / forced replacements.
    ignore_changes = [
      cluster_version,
      component_version[0].kafka,
    ]
  }

  # Possible future generalization: drive TLS version per cluster from config.
  # tls_min_version = each.value.tls_min_version

  tags = var.tags

  # Identity and filesystem must exist before the cluster can bind to them.
  depends_on = [
    module.dlf2_msi.user_msi,
    module.sa_data_lake_gen2_fs.sa_data_lake_gen2_fs,
  ]
}