Attaching a storage volume to AWS Batch using Terraform

Hi everyone, I am running a Docker container in AWS Batch, and the job involves generating and writing a large file to disk — it fails with a "Not enough space" error when writing. I am trying to add an additional volume using a launch template, but the volume is not being added. Here is a minimal config which I am using:

resource "aws_launch_template" "compute_env_launch_template_test" {
  name_prefix = "compute_env_launch_template_test"

  # Override the AMI's root device so launched instances get a larger disk.
  # NOTE(review): /dev/xvda is the root device name of the ECS-optimized
  # Amazon Linux 2 AMI — confirm it matches the AMI this compute
  # environment actually launches (older AL1 ECS AMIs use a separate
  # /dev/xvdcz Docker volume instead).
  block_device_mappings {
    device_name = "/dev/xvda"

    ebs {
      volume_type = "gp2"
      volume_size = 500
    }
  }

  # Tags applied to the launched EC2 instances (not the template itself).
  tag_specifications {
    resource_type = "instance"

    tags = {
      Name = "BatchVolume"
    }
  }
}

resource "aws_batch_compute_environment" "compute_test" {
  compute_environment_name = "compute_test"

  compute_resources {
    instance_role = ### instance role ###

    instance_type = ["c4.xlarge"]

    max_vcpus = 16
    min_vcpus = 0

    security_group_ids = [
      aws_security_group.default.id,
    ]

    subnets = [
      aws_subnet.default.id,
    ]

    type = "EC2"

    launch_template {
      launch_template_id = aws_launch_template.compute_env_launch_template_test.id

      # BUG FIX: AWS Batch resolves "$Latest" to a concrete template version
      # only once, when the compute environment is created, and never re-reads
      # it — so later launch-template edits (e.g. the 500 GiB volume) are
      # silently ignored. Referencing latest_version instead makes Terraform
      # see a diff whenever the template changes and replace the compute
      # environment, which forces Batch to pick up the new template.
      version = aws_launch_template.compute_env_launch_template_test.latest_version
    }
  }

  # Replace-before-delete so the job queue never points at a deleted CE
  # while the replacement is being created.
  lifecycle {
    create_before_destroy = true
  }

  service_role = aws_iam_role.aws_batch_service_role.arn
  type         = "MANAGED"

  # Ensure the service role's policy is attached before Batch tries to use it;
  # otherwise the CE can be created in an INVALID state.
  depends_on = [aws_iam_role_policy_attachment.aws_batch_service_role]
}

# Queue that feeds jobs into the test compute environment.
resource "aws_batch_job_queue" "default_queue_test" {
  name     = "default_queue_test"
  priority = 1
  state    = "ENABLED"

  compute_environments = [
    aws_batch_compute_environment.compute_test.arn,
  ]
}

resource "aws_batch_job_definition" "default_jobs_test" {
  name                  = "default_jobs_test"
  type                  = "container"
  platform_capabilities = ["EC2"]

  # BUG FIX: the original heredoc JSON was missing a comma after the
  # "image" entry, which makes container_properties invalid JSON and
  # fails at apply time. jsonencode() builds guaranteed-valid JSON and
  # handles quoting/escaping of interpolated values automatically.
  container_properties = jsonencode({
    executionRoleArn = aws_iam_role.ecsTaskExecutionRole.arn
    image            = "### container image ###"
    memory           = 4096
    vcpus            = 2
  })
}

While the job is running, I can see a volume being created, but it only has 100 GiB of storage. Should I map the volume to a different device name? Or am I missing a step, such as mounting the volume inside the container?

I think the problem is in the launch template. I have tried different launch template configurations, but the volume is still only 100 GiB. I also tried mapping the device at /dev/xvdcz (following the AWS docs), but it still does not change. Is it possible that I am missing something in the permissions/IAM roles?