Hi,
# configured aws provider with proper credentials
provider "aws" {
  # region                   = "us-west-1"
  # shared_credentials_files = ["/Users/rahulwagh/.aws/credentials"]
  # shared_credentials_files = ["%USERPROFILE%.aws/credentials"]
  # profile                  = "default"
}
# create default vpc if one does not exist
resource "aws_default_vpc" "default_vpc" {
  tags = {
    Name = "default vpc"
  }
}
# use data source to get all availability zones in the region
data "aws_availability_zones" "available_zones" {}
# create default subnet if one does not exist
resource "aws_default_subnet" "default_az1" {
  availability_zone = data.aws_availability_zones.available_zones.names[0]
  tags = {
    Name = "default subnet"
  }
}
# create security group for the ec2 instance
resource "aws_security_group" "ec2_security_group_terraform" {
  name        = "ec2 security group terraform"
  description = "allow access on ports 80 and 22"
  vpc_id      = aws_default_vpc.default_vpc.id

  ingress {
    description = "http access"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "ssh access"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "ec2 security group terraform"
  }
}
# use data source to get the most recent ubuntu 20.04 ami
data "aws_ami" "ubuntu" {
  most_recent = true
  # owners = ["amazon"]

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  owners = ["0997xxxxxx77"] # Canonical
}
# launch the ec2 instance and install website
resource "aws_instance" "ec2_instance" {
  ami                    = data.aws_ami.ubuntu.id
  instance_type          = "t2.xlarge"
  # instance_count = "2"
  subnet_id              = aws_default_subnet.default_az1.id
  vpc_security_group_ids = [aws_security_group.ec2_security_group_terraform.id]
  key_name               = "terraformkey"
  user_data              = file("kubectl.sh")
  # user_data = file("${path.module}/install_k8s.sh")

  tags = {
    Name = "k8s install"
  }

  # provisioner "file" {
  #   source      = "/mnt/d/DevOps/Courses/terraform/AWS/Others/install_k8s.sh"
  #   destination = "/home/ubuntu/install_k8s.sh"
  # }

  # Change permissions on the bash script and execute it.
  # provisioner "remote-exec" {
  #   inline = [
  #     "sudo chmod +x /home/ubuntu/install_k8s.sh",
  #     # "sudo su - root 'bash test.sh' &"
  #     "sudo /home/ubuntu/install_k8s.sh",
  #   ]
  # }
}
# Login to the ec2-user with the aws key.
# connection {
#   type        = "ssh"
#   user        = "ec2-user"
#   password    = ""
#   private_key = file(var.keyPath)
#   host        = self.public_ip
# }
# print the ec2's public ipv4 address
output "public_ipv4_address" {
  value = aws_instance.ec2_instance.public_ip
}
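For reference, the idea is to read that public IP back after a successful apply with something like:
terraform output public_ipv4_address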
After running terraform plan I got the errors below:
Error: fetching Availability Zones: AuthFailure: AWS was not able to validate the provided access credentials
│ status code: 401, request id: 644xxxxx24d-b855-xxxxx-9da7-xxxxxxxx
│
│ with data.aws_availability_zones.available_zones,
│ on main.tf line 21, in data "aws_availability_zones" "available_zones":
│ 21: data "aws_availability_zones" "available_zones" {}
│
╵
╷
│ Error: reading EC2 AMIs: AuthFailure: AWS was not able to validate the provided access credentials
│ status code: 401, request id: 7acxxxx64c-xx-455c-a8e5-xxxxxx
│
│ with data.aws_ami.ubuntu,
│ on main.tf line 70, in data "aws_ami" "ubuntu":
│ 70: data "aws_ami" "ubuntu" {
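A quick way to check whether the rejected keys work at all outside Terraform (same shell, same exported variables) would be something like:
# confirm AWS accepts the exported keys
aws sts get-caller-identity
# repeat the call Terraform is failing on
aws ec2 describe-availability-zones --region us-west-1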
I have passed the AWS credentials using environment variables, as shown below:
aws configure list
      Name                    Value             Type    Location
      ----                    -----             ----    --------
   profile                <not set>             None    None
access_key     ****************5ZYF              env
secret_key     ****************EwWm              env
    region                us-west-1              env    ['AWS_REGION', 'AWS_DEFAULT_REGION']
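To be explicit, "passed using environment variables" means they were exported in the shell before running Terraform, roughly like this (values are placeholders matching the masked output above):
export AWS_ACCESS_KEY_ID="****************5ZYF"
export AWS_SECRET_ACCESS_KEY="****************EwWm"
export AWS_REGION="us-west-1"
export AWS_DEFAULT_REGION="us-west-1"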
osboxes@osboxes:~/Desktop/terraform$ terraform version
Terraform v1.5.5
on linux_amd64
+ provider registry.terraform.io/hashicorp/aws v5.13.1
Even after changing the region from us-west-1 to ap-south-1 it is still not working.
My access key and secret key are in us-west-1.
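The region switch mentioned above was done by re-exporting the same variables before running terraform plan again, roughly (assuming the environment-variable route rather than uncommenting the provider's region argument):
export AWS_REGION="ap-south-1"
export AWS_DEFAULT_REGION="ap-south-1"
terraform plan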
I'm new to Terraform and AWS, so please correct me where I'm wrong.