Azure NetApp Files: switching a volume between capacity pools deletes and re-creates it in Terraform

I am setting up Azure NetApp Files; the setup includes the following resources:

Az NetApp Account => Capacity Pool => Volumes

In the Azure portal, if we have multiple capacity pools, we can move a volume from one capacity pool to another (for example, from the Standard tier to the Premium tier or vice versa).

Below is the main.tf:

data "azurerm_resource_group" "anf" {
  name = var.rg_name
}

data "azurerm_virtual_network" "anf" {
  name                = var.vnet_name
  resource_group_name = var.vnet_rg_name
}

data "azurerm_subnet" "anf" {
  name                 = var.vnet_subnet_name
  virtual_network_name = var.vnet_name
  resource_group_name  = var.vnet_rg_name
}

resource "azurerm_netapp_account" "anf" {
  name                = var.anf_account_name
  location            = data.azurerm_resource_group.anf.location
  resource_group_name = data.azurerm_resource_group.anf.name

  tags = var.default_tags

  active_directory {
    username            = var.anf_ad_config.username
    password            = var.anf_ad_config.password
    smb_server_name     = var.anf_ad_config.smb_server_name
    dns_servers         = var.anf_ad_config.dns_servers
    domain              = var.anf_ad_config.domain
    organizational_unit = var.anf_ad_config.ou_path
  }

  lifecycle {
    ignore_changes = [
      # Ignore changes to tags, e.g. because a management agent
      # updates these based on some ruleset managed elsewhere.
      tags
    ]
  }
}

resource "azurerm_netapp_pool" "anf" {
  for_each = { for idx, val in var.anf_cpool : idx => val }

  name                = each.value.name
  location            = data.azurerm_resource_group.anf.location
  resource_group_name = data.azurerm_resource_group.anf.name
  account_name        = azurerm_netapp_account.anf.name
  service_level       = each.value.service_level
  size_in_tb          = each.value.size

  tags = var.default_tags

  lifecycle {
    ignore_changes = [
      # Ignore changes to tags, e.g. because a management agent
      # updates these based on some ruleset managed elsewhere.
      tags
    ]
  }

  depends_on = [ azurerm_netapp_account.anf ]
}

resource "azurerm_netapp_volume" "anf" {
  lifecycle {
    prevent_destroy = true
    ignore_changes = [
      # Ignore changes to tags, e.g. because a management agent
      # updates these based on some ruleset managed elsewhere.
      tags,
      zone
    ]
  }

  for_each                   = { for idx, val in var.anf_volume : idx => val }
  name                       = each.value.name
  location                   = data.azurerm_resource_group.anf.location
  resource_group_name        = data.azurerm_resource_group.anf.name
  account_name               = azurerm_netapp_account.anf.name
  pool_name                  = each.value.anf_cpool_name
  volume_path                = each.value.name
  service_level              = each.value.anf_cpool_service_level
  subnet_id                  = data.azurerm_subnet.anf.id
  network_features           = each.value.network_features
  protocols                  = each.value.protocols
  storage_quota_in_gb        = each.value.storage_quota_in_gb
  snapshot_directory_visible = each.value.snapshot_directory_visible

  tags = var.default_tags

  dynamic "data_protection_snapshot_policy" {
    for_each = each.value.snapshot_policy_name != null ? [each.value.snapshot_policy_name] : []
    content {
      snapshot_policy_id = local.policy_lookup_table[data_protection_snapshot_policy.value].id
    }
  }

  depends_on = [ 
    azurerm_netapp_pool.anf, 
    azurerm_netapp_snapshot_policy.snapshot_pol_daily_to_weekly, 
    azurerm_netapp_snapshot_policy.snapshot_pol_daily_to_monthly 
  ]

}

locals {
  policy_lookup_table = {
    snapshot_pol_daily_to_weekly = azurerm_netapp_snapshot_policy.snapshot_pol_daily_to_weekly
    snapshot_pol_daily_to_monthly = azurerm_netapp_snapshot_policy.snapshot_pol_daily_to_monthly
  }
}

resource "azurerm_netapp_snapshot_policy" "snapshot_pol_daily_to_weekly" {
  name                = "snapshot_pol_daily_to_weekly"
  location                   = data.azurerm_resource_group.anf.location
  resource_group_name        = data.azurerm_resource_group.anf.name
  account_name               = azurerm_netapp_account.anf.name
  enabled             = true

  daily_schedule {
    snapshots_to_keep = 10
    hour              = 20
    minute            = 15
  }

  weekly_schedule {
    snapshots_to_keep = 4
    days_of_week      = ["Monday", "Wednesday"]
    hour              = 23
    minute            = 0
  }

  tags = var.default_tags
  lifecycle {
    ignore_changes = [
      # Ignore changes to tags, e.g. because a management agent
      # updates these based on some ruleset managed elsewhere.
      tags
    ]
  }
}

resource "azurerm_netapp_snapshot_policy" "snapshot_pol_daily_to_monthly" {
  name                = "snapshot_pol_daily_to_monthly"
  location                   = data.azurerm_resource_group.anf.location
  resource_group_name        = data.azurerm_resource_group.anf.name
  account_name               = azurerm_netapp_account.anf.name
  enabled             = true

  daily_schedule {
    snapshots_to_keep = 10
    hour              = 20
    minute            = 15
  }

  weekly_schedule {
    snapshots_to_keep = 4
    days_of_week      = ["Monday", "Wednesday"]
    hour              = 23
    minute            = 0
  }

  monthly_schedule {
    snapshots_to_keep = 2
    days_of_month     = [1, 15]
    hour              = 21
    minute            = 15
  }

  tags = var.default_tags
  lifecycle {
    ignore_changes = [
      # Ignore changes to tags, e.g. because a management agent
      # updates these based on some ruleset managed elsewhere.
      tags
    ]
  }
}

Here are the variable values:

  rg_location        = "eastus"
  rg_name            = "anf"
  vnet_rg_name       = "rg-eu-vnet"
  vnet_name          = "vnet-anf"
  vnet_subnet_name   = "vnet-anf-subnet"
  default_tags = {
    managedby = "terraform"
  }
  anf_account_name   = "tf-anf"
  anf_cpool = [
    {
      name          = "tf-std-cpool"
      service_level = "Standard"
      size          = 4
    },
    {
      name          = "tf-pre-cpool"
      service_level = "Premium"
      size          = 4
    }
  ]
  anf_volume = [
    {
      name                       = "vol1"
      storage_quota_in_gb        = 100 # Min 100
      protocols                  = ["CIFS"]
      network_features           = "Standard"
      anf_cpool_name             = "tf-std-cpool"
      anf_cpool_service_level    = "Standard"
      snapshot_directory_visible = false
      snapshot_policy_name       = null
    },
    {
      name                       = "vol2"
      storage_quota_in_gb        = 120 # Min 100
      protocols                  = ["CIFS"]
      network_features           = "Standard"
      anf_cpool_name             = "tf-pre-cpool"
      anf_cpool_service_level    = "Standard"
      snapshot_directory_visible = false
      snapshot_policy_name       = null
    }
  ]

If I change the vol2 association from the Premium-tier capacity pool to the Standard-tier one, Terraform deletes and re-creates the volume, whereas in the portal you can move volumes between pools without any hassle; the exact tfvars change is shown below.
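For reference, this is the only change in the vol2 entry (all other values are unchanged from the list above), and it is enough to make the plan show the volume as requiring replacement:

    {
      name                       = "vol2"
      storage_quota_in_gb        = 120 # Min 100
      protocols                  = ["CIFS"]
      network_features           = "Standard"
      anf_cpool_name             = "tf-std-cpool" # was "tf-pre-cpool"
      anf_cpool_service_level    = "Standard"
      snapshot_directory_visible = false
      snapshot_policy_name       = null
    }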

Am I missing anything here to accomplish that in the Terraform?

Thanks.

A quick search on GitHub suggests this is a long-standing issue with the provider: Support for [Azure NetApp Files - Dynamic Service Level Change] · Issue #12102 · hashicorp/terraform-provider-azurerm · GitHub


Is there any workaround for this? Is it possible to use the Terraform AzAPI provider to work around this issue?
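One possible direction, as an untested sketch rather than a confirmed fix: the portal's pool move corresponds to a poolChange action on the volume in the Azure NetApp Files REST API, and the AzAPI provider can invoke resource actions with azapi_resource_action. Something along these lines might work, reusing the resources from the question; the API version and the exact action/body shape are assumptions to verify against the current REST API reference, and newer azapi versions expect body as a plain HCL object rather than a jsonencode string:

resource "azapi_resource_action" "move_vol2_to_standard_pool" {
  # Untested sketch: call the ANF "poolChange" action on the volume.
  type        = "Microsoft.NetApp/netAppAccounts/capacityPools/volumes@2023-05-01"
  resource_id = azurerm_netapp_volume.anf["1"].id # vol2; the key depends on your for_each
  action      = "poolChange"
  method      = "POST"

  body = jsonencode({
    newPoolResourceId = azurerm_netapp_pool.anf["0"].id # target Standard pool; index depends on anf_cpool ordering
  })
}

Even if that call succeeds, the azurerm_netapp_volume configuration and state would still reference the old pool, so you would have to update pool_name in the config (or add it to ignore_changes) to stop the next plan from trying to recreate the volume; until issue #12102 is addressed, the azurerm provider treats a pool_name change as requiring a new resource, which is exactly the behaviour described above.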