Hey everyone,
I'm trying to debug what's happening and I can't figure out what I'm doing wrong. As the title says, the goal is to patch the EC2 instances via SSM every Monday and put the output of the patching run into an S3 bucket.
The result is puzzling:
- the instances are not being patched correctly
- the output never makes it into the bucket (the folder structure is created, but the folders are all empty)
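In case it helps, this is roughly what I run to see what actually landed in the bucket and what Run Command reported per instance (the command ID is a placeholder):

# check what actually landed under the output prefix
aws s3 ls s3://my-gitlab-runners/update/ --recursive

# per-instance Run Command results
aws ssm list-command-invocations --command-id "11111111-2222-3333-4444-555555555555" --details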
My Terraform code for the patch management looks like this:
# SSM Patch Management
resource "aws_iam_policy" "gitlab_runners_maintenance" {
  name        = "gitlab_runners_ssm_maintenance"
  path        = "/service-role/"
  description = "Permissions for the SSM tasks to patch EC2 with Gitlab Runners"

  policy = templatefile("${path.module}/data/gitlab_runners_maintenance_iam_policy.json", {
    SNS_TOPIC = local.sns_general_topic
  })
}
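I haven't pasted the referenced JSON template, and the sketch below is not my exact file, but the only templated value is the topic ARN, which gets interpolated along these lines:

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "sns:Publish",
      "Resource": "${SNS_TOPIC}"
    }
  ]
}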
## Bucket configuration: create the bucket and enable versioning
resource "aws_s3_bucket" "update_command_output_bucket" {
  bucket = "my-gitlab-runners"
}

resource "aws_s3_bucket_versioning" "versioning_bucket" {
  bucket = aws_s3_bucket.update_command_output_bucket.id

  versioning_configuration {
    status = "Enabled"
  }
}

## IAM policy for writing to the bucket
data "aws_iam_policy_document" "gitlab_runners_write" {
  statement {
    actions = [
      "s3:ListBucket"
    ]
    resources = [
      aws_s3_bucket.update_command_output_bucket.arn
    ]
  }

  statement {
    actions = [
      "s3:*Object"
    ]
    resources = [
      "${aws_s3_bucket.update_command_output_bucket.arn}/*"
    ]
  }
}

resource "aws_iam_policy" "gitlab_runners_write" {
  name   = "gitlab_runners-write"
  policy = data.aws_iam_policy_document.gitlab_runners_write.json
}
## End of bucket config
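For reference, that policy document should render to roughly this JSON:

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:ListBucket",
      "Resource": "arn:aws:s3:::my-gitlab-runners"
    },
    {
      "Effect": "Allow",
      "Action": "s3:*Object",
      "Resource": "arn:aws:s3:::my-gitlab-runners/*"
    }
  ]
}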
resource "aws_iam_role" "gitlab_runners_maintenance" {
name = "gitlab_runners_ssm_maintenance"
path = "/service-role/"
managed_policy_arns = [
aws_iam_policy.gitlab_runners_maintenance.arn,
aws_iam_policy.gitlab_runners_write.arn
]
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "ssm.amazonaws.com"
}
}
]
})
}
locals {
gitlab_runners_ids = [for instance in aws_instance.gitlab_runners : instance.id]
}
data "aws_ssm_patch_baseline" "amazon_linux2" {
owner = "AWS"
name_prefix = "AWS-AmazonLinux2DefaultPatchBaseline"
operating_system = "AMAZON_LINUX_2"
}
resource "aws_ssm_patch_group" "this" {
baseline_id = data.aws_ssm_patch_baseline.amazon_linux2.id
patch_group = "gitlab_runners"
}
resource "aws_ssm_maintenance_window" "this" {
name = "maintenance_gitlab_runners"
schedule = "cron(0 0 1 ? * MON *)"
schedule_timezone = "UTC"
allow_unassociated_targets = false
duration = 1
cutoff = 0
}
resource "aws_ssm_maintenance_window_target" "this" {
window_id = aws_ssm_maintenance_window.this.id
name = "gitlab_runners_target"
description = "Maintenance window target for Gitlab Runners Ec2"
resource_type = "INSTANCE"
owner_information = "Gitlab Runners"
targets {
key = "InstanceIds"
values = local.gitlab_runners_ids
}
}
resource "aws_ssm_maintenance_window_task" "install_patches" {
name = "PatchInstances"
max_concurrency = 1
max_errors = 0
priority = 1
service_role_arn = aws_iam_role.gitlab_runners_maintenance.arn
task_arn = "AWS-RunPatchBaseline"
task_type = "RUN_COMMAND"
window_id = aws_ssm_maintenance_window.this.id
targets {
key = "InstanceIds"
values = local.gitlab_runners_ids
}
task_invocation_parameters {
run_command_parameters {
output_s3_bucket = aws_s3_bucket.update_command_output_bucket.id
output_s3_key_prefix = "update"
timeout_seconds = 600
document_version = "1"
service_role_arn = aws_iam_role.gitlab_runners_maintenance.arn
notification_config {
notification_arn = local.sns_general_topic
notification_events = ["TimedOut", "Cancelled", "Failed"]
notification_type = "Command"
}
parameter {
name = "Operation"
values = ["Install"]
}
parameter {
name = "RebootOption"
values = ["RebootIfNeeded"]
}
}
}
}
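This is how I've been checking whether the window actually fires on schedule (the window ID is a placeholder):

aws ssm describe-maintenance-window-executions --window-id "mw-0123456789abcdef0"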
Maybe something is wrong with the policy attached to the role? But I'm already allowing all the object-level actions on the bucket.
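If it's useful, I was planning to sanity-check the role's effective permissions with something like this (the account ID is a placeholder, and the action list is just my guess at what matters):

aws iam simulate-principal-policy \
  --policy-source-arn "arn:aws:iam::123456789012:role/service-role/gitlab_runners_ssm_maintenance" \
  --action-names s3:PutObject s3:ListBucket \
  --resource-arns "arn:aws:s3:::my-gitlab-runners/update/test" "arn:aws:s3:::my-gitlab-runners"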
Thanks for any input!