Dependency issue when destroying EMR cluster

Here is a snippet of the relevant EMR Terraform code (0.11-style syntax):

# main.tf
resource "aws_emr_cluster" "cluster" {
  name          = "${local.name}-emr"
  release_label = "${var.emr_version}"
  applications  = "${var.emr_applications}"

  additional_info        = "${var.emr_additional_info}"
  termination_protection = "${var.emr_termination_protection}"
  log_uri                = "s3://i${local.short_env}-logs-${var.region}/"
  ebs_root_volume_size   = "${var.emr_ebs_root_volume_size}"
  autoscaling_role       = "${aws_iam_role.emr_autoscaling_role.arn}"
  configurations_json    = "${data.template_file.configuration.rendered}"
  service_role           = "${local.emr_service_role}"
  security_configuration = "${aws_emr_security_configuration.security.name}"

  keep_job_flow_alive_when_no_steps = "${var.emr_keep_job_flow_alive_when_no_steps}"

  tags = "${merge(local.tags, map("Name", "${local.name}"))}"
}

resource "aws_emr_instance_group" "task" {
  name           = "${local.name}-emr-task-instance-group"
  cluster_id     = "${aws_emr_cluster.cluster.id}"
  instance_count = "${var.emr_task_instance_group_count}"
  instance_type  = "${var.emr_task_instance_group_type}"

  ebs_config {
    size                 = "${var.emr_task_instance_group_ebs_config_size}"
    type                 = "${var.emr_task_instance_group_ebs_config_type}"
    iops                 = "${var.emr_task_instance_group_ebs_config_iops}"
    volumes_per_instance = "${var.emr_task_instance_group_ebs_config_volumes_per_instance}"
  }

  autoscaling_policy  = "${data.template_file.task_cluster_autoscaling.rendered}"
  configurations_json = "${data.template_file.configuration.rendered}"

  lifecycle {
    ignore_changes = [
      "configurations_json",
    ]
  }
}

resource "aws_emr_security_configuration" "security" {
  name = "${local.name}-emr-security-configuration"

  configuration = <<EOF
{
  "EncryptionConfiguration": {
    "EnableAtRestEncryption": true,
    "AtRestEncryptionConfiguration": {
      "S3EncryptionConfiguration": {
        "EncryptionMode": "SSE-S3"
      },
      "LocalDiskEncryptionConfiguration" : {
        "EnableEbsEncryption": true,
        "EncryptionKeyProviderType": "AwsKms",
        "AwsKmsKey": "${aws_kms_key.kms_key_emr.arn}"
      }
    },
    "EnableInTransitEncryption": true,
    "InTransitEncryptionConfiguration": {
      "TLSCertificateConfiguration": {
        "CertificateProviderType": "PEM",
        "S3Object": "s3://${aws_s3_bucket_object.cert_zip.bucket}/${aws_s3_bucket_object.cert_zip.key}"
      }
    }
  }
}
EOF
}

This line creates an implicit dependency between the EMR cluster and the EMR security configuration, which should make Terraform destroy the cluster before the security configuration:

security_configuration = "${aws_emr_security_configuration.security.name}"
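
To my understanding, this is equivalent to declaring the dependency explicitly, so the destroy ordering should already be enforced (I don't actually use depends_on here; this is just to illustrate):

resource "aws_emr_cluster" "cluster" {
  # ... arguments as above ...

  # Redundant with the implicit dependency created by referencing
  # aws_emr_security_configuration.security.name:
  depends_on = ["aws_emr_security_configuration.security"]
}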

The problem occurs when I destroy: I get the following error every time.

1 error occurred:
	* aws_emr_security_configuration.security (destroy): 1 error occurred:
	* aws_emr_security_configuration.security: InvalidRequestException: Security configuration 'ip-etl-emr-security-configuration' cannot be deleted because it is in use by active clusters.
{
  RespMetadata: {
    StatusCode: 400,
    RequestID: "f94d3b2d-53ae-48ec-84dd-0ee8caa5a767"
  },
  ErrorCode: "SECURITY_CONFIGURATION_IN_USE",
  Message_: "Security configuration 'ip-etl-emr-security-configuration' cannot be deleted because it is in use by active clusters."
}

The destroy always succeeds when I issue another destroy immediately after the initial failure. My guess is that cluster termination is asynchronous, so the security configuration is still attached to the terminating cluster when Terraform tries to delete it. Why doesn't Terraform wait for the cluster to finish terminating? It is very annoying.
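
In case it's relevant, the only workaround I can think of is a destroy-time provisioner on the security configuration that pauses before deletion. The 180-second sleep below is a guess on my part, not a timing I've confirmed with EMR:

resource "aws_emr_security_configuration" "security" {
  # ... existing arguments as above ...

  # Hypothetical workaround: wait before deleting this resource so the
  # terminated cluster has time to release the security configuration.
  provisioner "local-exec" {
    when    = "destroy"
    command = "sleep 180"
  }
}

I'd rather understand the root cause than rely on an arbitrary sleep, though.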

Thanks in advance!