Terraform plan is asking for a required argument

after half a year of not doing anything with my little Terraform play project, I wanted to do a `terraform plan` on it…

 terraform refresh
data.pass_password.hetznerdns_token: Reading...
data.pass_password.postgres_password: Reading...
data.pass_password.hetznerdns_token: Read complete after 2s [id=misc/roehrscloud/hetznerdns_token]
data.hetznerdns_zone.dns_zone: Reading...
data.hetznerdns_zone.dns_zone: Read complete after 0s [id=uHmbKKQesBHZ7KkrPxrRhE]
data.pass_password.postgres_password: Read complete after 2s [id=misc/roehrscloud/postgres_password]
╷
│ Error: Missing required argument
│
│ The argument "address" is required, but was not set.
╵

How can I know which resource is missing the `address` argument? And this used to work before. Here is my main.tf:

# Pin exact provider releases so `terraform init` keeps resolving the same
# versions this configuration was originally written against.
terraform {
  required_providers {
    # Manages Docker images, containers, networks and volumes on the remote host.
    docker = {
      source  = "kreuzwerker/docker"
      version = "2.22.0"
    }

    # Used for the one-shot `null_resource.prepare` provisioning step below.
    null = {
      source  = "hashicorp/null"
      version = "3.1.1"
    }

    # Reads secrets from a local pass(1) password store.
    pass = {
      source  = "camptocamp/pass"
      version = "2.0.0"
    }

    # Manages DNS records in Hetzner DNS zones.
    hetznerdns = {
      source  = "timohirt/hetznerdns"
      version = "2.1.0"
    }

  }
}

# passwords
# The pass provider takes no arguments here; secrets are looked up by path
# in the local password store.
provider "pass" {}

# Database superuser password injected into the postgres container below.
data "pass_password" "postgres_password" {
  path = "misc/${var.name}/postgres_password"
}

# API token used to configure the hetznerdns provider.
data "pass_password" "hetznerdns_token" {
  path = "misc/${var.name}/hetznerdns_token"
}

# docker provider
# All docker_* resources are managed over SSH on the remote host
# ("foobar" is a placeholder hostname).
provider "docker" {
  host = "ssh://foobar"
}

# variables
# Project name; used as a prefix for container/network/volume names and in
# filesystem paths on the remote host.
variable "name" {
  type    = string
  default = "foobar"
}

# Host port that caddy's HTTPS listener (container port 443) is published on.
variable "external_port" {
  type        = string
  description = "caddy external port"
  default     = "443"
}

# prepare
# One-shot remote provisioning: create the directory layout on the target
# host and upload the Caddyfile that the caddy container bind-mounts.
resource "null_resource" "prepare" {
  # SSH connection used by both provisioners below.
  connection {
    type  = "ssh"
    host  = "foobar"
    user  = "root"
    agent = true
  }

  # Directories for the config file, the postgres data dir and the
  # nextcloud web root (all bind-mounted into containers further down).
  provisioner "remote-exec" {
    inline = [
      "mkdir -p /etc/${var.name}",
      "mkdir -p /srv/${var.name}/db",
      "mkdir -p /srv/${var.name}/nextcloud"
    ]

  }

  # Caddy reverse-proxy configuration; bind-mounted into the caddy
  # container at /etc/caddy/Caddyfile (see docker_container.caddy).
  provisioner "file" {
    destination = "/etc/${var.name}/Caddyfile"
    content     = <<EOF
foobar {
  rewrite /.well-known/carddav /remote.php/dav
  rewrite /.well-known/caldav /remote.php/dav

  reverse_proxy foobar:80
}

collabora.xn--rhrsdorf-n4a.de {
  reverse_proxy foobar:9980
}
    EOF
  }
}

# dns
# Token comes from the pass store lookup above.
provider "hetznerdns" {
  apitoken = data.pass_password.hetznerdns_token.password
}

# Look up the existing DNS zone by name so records can be attached to it.
data "hetznerdns_zone" "dns_zone" {
  name = "foobar"
}

# A record for the cloud (Nextcloud) host; 0.0.0.0 is a redacted IP.
resource "hetznerdns_record" "cloud" {
  zone_id = data.hetznerdns_zone.dns_zone.id
  name    = "cloud"
  value   = "0.0.0.0"
  type    = "A"
  ttl     = 60
}

# A record for the collabora host; 0.0.0.0 is a redacted IP.
resource "hetznerdns_record" "collabora" {
  zone_id = data.hetznerdns_zone.dns_zone.id
  name    = "collabora"
  value   = "0.0.0.0"
  type    = "A"
  ttl     = 60
}

# network
# Internal network shared by all service containers
# (caddy, nextcloud, postgres, redis, collabora).
resource "docker_network" "internal" {
  name = "${var.name}_internal"
}

# External network; only caddy joins it, as the public-facing entry point.
resource "docker_network" "external" {
  name = "${var.name}_external"
}

resource "docker_image" "caddy" {
  name = "caddy:2.6.1-alpine"
}

# Persistent Caddy state; mounted at /data in the caddy container.
resource "docker_volume" "caddy_data" {
  name = "${var.name}_caddy_data"
}

# Reverse proxy in front of all services. depends_on ensures the
# bind-mounted Caddyfile exists on the host before the container starts.
resource "docker_container" "caddy" {
  name       = "${var.name}_caddy"
  image      = docker_image.caddy.image_id
  depends_on = [null_resource.prepare]

  # Joined to both networks: internal to reach the backends, external as
  # the public entry point.
  networks_advanced {
    name = docker_network.internal.name
  }

  networks_advanced {
    name = docker_network.external.name
  }

  # HTTPS; the host-side port is remappable via var.external_port.
  ports {
    internal = "443"
    external = var.external_port
  }

  # Plain HTTP.
  ports {
    internal = "80"
    external = "80"
  }


  # Caddyfile uploaded by null_resource.prepare.
  volumes {
    host_path      = "/etc/${var.name}/Caddyfile"
    container_path = "/etc/caddy/Caddyfile"
  }

  # Persistent Caddy state.
  volumes {
    volume_name    = docker_volume.caddy_data.name
    container_path = "/data"
  }
}

# nextcloud
resource "docker_image" "nextcloud" {
  name = "nextcloud:stable"
}

# Nextcloud app server; only on the internal network, reached through caddy.
resource "docker_container" "nextcloud" {
  name  = "${var.name}_nextcloud"
  image = docker_image.nextcloud.image_id

  networks_advanced {
    name = docker_network.internal.name
  }

  # Web root / application data, persisted on the host.
  volumes {
    host_path      = "/srv/${var.name}/nextcloud"
    container_path = "/var/www/html"
  }
}

# postgres
resource "docker_image" "postgres" {
  name = "postgres:14.5-alpine"
}

# Database container. NOTE(review): the password read from pass also ends
# up in plain text in the Terraform state file — keep the state protected.
resource "docker_container" "postgres" {
  name  = "${var.name}_db"
  image = docker_image.postgres.image_id
  env = [
    "POSTGRES_PASSWORD=${data.pass_password.postgres_password.password}",
    "POSTGRES_USER=nextcloud"
  ]

  networks_advanced {
    name = docker_network.internal.name
  }

  # Database data directory, persisted on the host.
  volumes {
    host_path      = "/srv/${var.name}/db"
    container_path = "/var/lib/postgresql/data"
  }
}

# redis
resource "docker_image" "redis" {
  name = "redis:7.0.5-alpine"
}

# Redis on the internal network only; no volume, so its data is ephemeral.
# Presumably serves as Nextcloud's cache/locking backend — not visible here.
resource "docker_container" "redis" {
  name  = "${var.name}_redis"
  image = docker_image.redis.image_id

  networks_advanced {
    name = docker_network.internal.name
  }
}

# collabora
resource "docker_image" "collabora" {
  name = "collabora/code:22.05.6.3.1"
}

# Collabora Online office server; caddy proxies the collabora.* site to
# port 9980 (see the Caddyfile in null_resource.prepare).
resource "docker_container" "collabora" {
  name  = "${var.name}_collabora"
  image = docker_image.collabora.image_id

  networks_advanced {
    name = docker_network.internal.name
  }

  # Extra Linux capability granted to the container.
  capabilities {
    add = ["MKNOD"]
  }

  # SSL is disabled inside the container and marked as terminated upstream
  # (TLS is handled by caddy). "qrgergerge" looks like a redacted domain.
  env = [
    "domain=qrgergerge",
    "extra_params=--o:ssl.enable=false --o:ssl.termination=true",
  ]
}

Are you using a current release of Terraform? The diagnostic output should show which configuration block is responsible for the error. It is strange that you have specific provider versions pinned, yet something seems to have changed in the meantime.

You can also set TF_LOG_CORE=trace to try and see what is happening at the moment the error is generated, which may help correlate the error with the provider or resource.

I'm using version 1.4.2.

I used TF_LOG_CORE=trace, but the only time an "address" shows up in the grep output, it's ip_address for the Docker provider.

Doing a GitHub search for the error message — `The argument "address" is required, but was not set.` — and reviewing the issues leads to a pretty strong signal that this is a complaint about provider configuration.

In particular, Error message without context when validating provider configuration · Issue #28482 · hashicorp/terraform · GitHub is still open about the error reporting being inadequate in this case.

Knowing that it’s likely provider configuration that’s the problem here, hopefully gives you an angle to investigate.

Note that there is a terraform providers CLI command you can use to have Terraform summarize its understanding of what providers you have configured - useful to confirm Terraform is processing the configuration you believe it should be.

My guess, based on the bits of the config and logs you’ve shared, is that the address argument being complained about is the docker provider’s address nested within a registry_auth block.