How to use depends_on while using a Terraform module

Hello all,

Module directory: /opt/oke → this contains oke.tf

oke.tf:

resource "tls_private_key" "public_private_key_pair" {
  algorithm = "RSA"
}

resource "oci_containerengine_cluster" "oci_oke_cluster" {
  compartment_id     = var.compartment_ocid
  kubernetes_version = var.k8s_version
  name               = var.oke_cluster_name
  vcn_id             = var.use_existing_vcn ? var.vcn_id : oci_core_vcn.oke_vcn[0].id
  #kms_key_id         = var.kms_key_id

  dynamic "endpoint_config" {
    for_each = var.vcn_native ? [1] : []
    content {
      is_public_ip_enabled = var.is_api_endpoint_subnet_public
      subnet_id            = var.use_existing_vcn ? var.api_endpoint_subnet_id : oci_core_subnet.oke_api_endpoint_subnet[0].id
      nsg_ids              = var.use_existing_vcn ? var.api_endpoint_nsg_ids : []
    }
  }

  dynamic "cluster_pod_network_options" {
    for_each = var.oci_vcn_ip_native == true ? [1] : []
    content {
      cni_type = "OCI_VCN_IP_NATIVE"
    }
  }

  options {
    #service_lb_subnet_ids = [var.use_existing_vcn ? var.lb_subnet_id : oci_core_subnet.oke_lb_subnet[0].id]

    add_ons {
      is_kubernetes_dashboard_enabled = var.cluster_options_add_ons_is_kubernetes_dashboard_enabled
      is_tiller_enabled               = var.cluster_options_add_ons_is_tiller_enabled
    }

    admission_controller_options {
      is_pod_security_policy_enabled = var.cluster_options_admission_controller_options_is_pod_security_policy_enabled
    }

    dynamic "kubernetes_network_config" {
      for_each = var.oci_vcn_ip_native == true ? [] : [1]
      content {
        pods_cidr     = var.pods_cidr
        services_cidr = var.services_cidr
      }
    }
  }
  #defined_tags = var.defined_tags
}

resource "oci_containerengine_node_pool" "oci_oke_node_pool" {
  cluster_id         = oci_containerengine_cluster.oci_oke_cluster.id
  compartment_id     = var.compartment_ocid
  kubernetes_version = var.k8s_version
  name               = var.pool_name
  node_shape         = var.node_shape

  initial_node_labels {
    key   = var.node_pool_initial_node_labels_key
    value = var.node_pool_initial_node_labels_value
  }

  node_source_details {
    image_id = var.node_image_id == "" ? element([for source in data.oci_containerengine_node_pool_option.oci_oke_node_pool_option.sources : source.image_id if length(regexall("Oracle-Linux-${var.node_linux_version}-20[0-9]*.*", source.source_name)) > 0], 0) : var.node_image_id
    #image_id                = var.node_image_id == "" ? element([for source in data.oci_containerengine_node_pool_option.oci_oke_node_pool_option.sources : source.image_id if length(regexall(local.node_image_regex, source.source_name)) > 0], 0) : var.node_image_id
    source_type             = "IMAGE"
    boot_volume_size_in_gbs = var.node_pool_boot_volume_size_in_gbs
  }


  ssh_public_key = var.ssh_public_key != "" ? var.ssh_public_key : tls_private_key.public_private_key_pair.public_key_openssh

  node_config_details {
    dynamic "placement_configs" {
      iterator = pc_iter
      for_each = local.availability_domains #data.oci_identity_availability_domains.ADs.availability_domains
      content {
        availability_domain = pc_iter.value.name
        subnet_id           = var.use_existing_vcn ? var.nodepool_subnet_id : oci_core_subnet.oke_nodepool_subnet[0].id
      }
    }
    size = var.node_count
    # defined_tags = var.defined_tags

    dynamic "node_pool_pod_network_option_details" {
      for_each = var.oci_vcn_ip_native == true ? [1] : []
      content {
        cni_type          = "OCI_VCN_IP_NATIVE"
        max_pods_per_node = var.max_pods_per_node
        pod_nsg_ids       = var.use_existing_nsg ? var.pods_nsg_ids : []
        pod_subnet_ids    = var.use_existing_vcn ? [var.pods_subnet_id] : []
      }
    }
  }

  dynamic "node_shape_config" {
    for_each = length(regexall("Flex", var.node_shape)) > 0 ? [1] : []
    content {
      ocpus         = var.node_ocpus
      memory_in_gbs = var.node_memory
    }
  }

  dynamic "node_eviction_node_pool_settings" {
    for_each = var.node_eviction_node_pool_settings == true ? [1] : []
    content {

      eviction_grace_duration              = var.eviction_grace_duration
      is_force_delete_after_grace_duration = var.is_force_delete_after_grace_duration
    }
  }

  #defined_tags = var.defined_tags
}

The directory where I am using the oke module is /opt/oke/examples/oci-native.
Here we have oke.tf and main.tf.

oke.tf:

module "oci-oke" {
  #source                              = "github.com/oracle-devrel/terraform-oci-arch-oke"
  source                        = "../../"
  tenancy_ocid                  = var.tenancy_ocid
  compartment_ocid              = var.compartment_ocid
  oke_cluster_name              = var.oke_cluster_name
  k8s_version                   = var.k8s_version
  pool_name                     = var.pool_name
  node_shape                    = var.node_shape
  node_ocpus                    = var.node_ocpus
  node_memory                   = var.node_memory
  node_count                    = var.node_count
  node_linux_version            = var.node_linux_version
  use_existing_vcn              = true
  vcn_id                        = var.vcn_ocid
  is_api_endpoint_subnet_public = false
  api_endpoint_subnet_id        = var.my_api_endpoint_subnet_ocid
  nodepool_subnet_id                   = var.my_pods_nodepool_subnet_ocid
  oci_vcn_ip_native                    = true
  max_pods_per_node                    = 10
  pods_subnet_id                       = var.pods_subnet_ocid
  node_eviction_node_pool_settings     = true
  eviction_grace_duration              = "PT0M"
  is_force_delete_after_grace_duration = true
}

main.tf:

provider "oci" {
  region = "us-phoenix-1"
}


provider "kubectl" {
  config_path = "/tmp/oke_cluster_kubeconfig"
}


resource "null_resource" "configure_kubectl" {
  provisioner "local-exec" {
    command = "kubectl config use-context $(kubectl config get-contexts --kubeconfig /tmp/oke_cluster_kubeconfig --no-headers -o name) --kubeconfig /tmp/oke_cluster_kubeconfig"
  }

  ## How do I add depends_on to this null_resource so that it waits until the OKE cluster is created?
  depends_on = [ <how do I point to the module's resources in oke.tf, i.e. oci_oke_cluster and oci_oke_node_pool?> ]
}

Hi @anuddeeph1,

In your child module you can declare output values that export relevant information about the objects you want to use as dependencies. I’m not familiar enough with OCI or “OKE” to know which values would be most useful, so I’ve guessed that the IDs might be interesting for the examples below:

output "cluster_id" {
  value = oci_containerengine_cluster.oci_oke_cluster.id
}

output "node_pool_id" {
  value = oci_containerengine_node_pool.oci_oke_node_pool.id
}

These two declarations make those IDs available to your root module, where you’ve defined null_resource.configure_kubectl. In your case it doesn’t seem like you actually need the values themselves, but you can still use the output values as dependencies for the resource:

resource "null_resource" "configure_kubectl" {
  provisioner "local-exec" {
    command = "kubectl config use-context $(kubectl config get-contexts --kubeconfig /tmp/oke_cluster_kubeconfig --no-headers -o name) --kubeconfig /tmp/oke_cluster_kubeconfig"
  }

  depends_on = [
    module.oci-oke.cluster_id,
    module.oci-oke.node_pool_id,
  ]
}

Each of these two output values depends on one of the resources in the module, so anything that depends on the output values also transitively depends on those resources, and that should achieve the required ordering.
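As an aside, I believe that on Terraform v0.13 or later you can also list the module call itself in depends_on, which makes the resource wait for everything declared inside the module rather than just the two objects those outputs refer to. A minimal sketch of that variant:

resource "null_resource" "configure_kubectl" {
  provisioner "local-exec" {
    command = "kubectl config use-context $(kubectl config get-contexts --kubeconfig /tmp/oke_cluster_kubeconfig --no-headers -o name) --kubeconfig /tmp/oke_cluster_kubeconfig"
  }

  # Waits for every resource inside the module, not just the cluster and node pool.
  depends_on = [module.oci-oke]
}

The output-value approach is more precise because it names exactly the objects you care about, but the whole-module form can be convenient when you really do mean “after everything in the module is done”.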

Note that it’s a bit suspicious to be using a provisioner to access information about a resource you’ve just declared. Again, I don’t know enough about OKE to say whether that’s a limitation of the OKE API or of the OCI provider, but typically I’d expect the OCI provider to export the information needed to connect to the cluster you declared. Provisioners are a last resort, so if there’s any other way to achieve the same result I’d recommend that approach instead.
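For example, I believe the OCI provider offers a data source that returns the cluster’s kubeconfig, which would let Terraform write the file itself and remove the need for the local-exec step. I haven’t tested this against OKE, so treat the following only as a sketch of the idea; it assumes the cluster_id output shown above and the hashicorp/local provider:

data "oci_containerengine_cluster_kube_config" "oke" {
  cluster_id = module.oci-oke.cluster_id
}

resource "local_file" "oke_kubeconfig" {
  # Writing the kubeconfig from the data source avoids the provisioner entirely,
  # and the ordering is implicit: this file can only be created after the cluster
  # exists, because it refers to the cluster's ID through the module output.
  content  = data.oci_containerengine_cluster_kube_config.oke.content
  filename = "/tmp/oke_cluster_kubeconfig"
}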