From af564e959c2151e060d301351702c89095d0452b Mon Sep 17 00:00:00 2001
From: Lucas Renan
Date: Fri, 22 Aug 2025 18:09:33 +0200
Subject: [PATCH 1/2] Add ignore_task_fleet_capacity_drifts to task_instance_fleet

---
 main.tf      | 7 +++++++
 variables.tf | 6 ++++++
 2 files changed, 13 insertions(+)

diff --git a/main.tf b/main.tf
index 1ed6e6c..3fea381 100644
--- a/main.tf
+++ b/main.tf
@@ -406,6 +406,13 @@ resource "aws_emr_instance_fleet" "this" {
   name                      = try(each.value.name, null)
   target_on_demand_capacity = try(each.value.target_on_demand_capacity, null)
   target_spot_capacity      = try(each.value.target_spot_capacity, null)
+
+  lifecycle {
+    ignore_changes = var.ignore_task_fleet_capacity_drifts ? [
+      target_on_demand_capacity,
+      target_spot_capacity
+    ] : []
+  }
 }
 
 ################################################################################
diff --git a/variables.tf b/variables.tf
index 0ad0bf6..4e3ab1a 100644
--- a/variables.tf
+++ b/variables.tf
@@ -209,6 +209,12 @@ variable "task_instance_group" {
   default     = {}
 }
 
+variable "ignore_task_fleet_capacity_drifts" {
+  description = "Whether to ignore capacity drifts for the task instance fleet. If set to `true`, Terraform will not resize the fleet when `target_on_demand_capacity` or `target_spot_capacity` drift from the configured values (e.g. due to managed scaling). Default value is `false`"
+  type        = bool
+  default     = false
+}
+
 ################################################################################
 # Managed Scaling Policy
 ################################################################################

From c8b8d25c0b54e3d166dcafa7b6ee4c70b7cd5e82 Mon Sep 17 00:00:00 2001
From: Lucas Renan
Date: Fri, 22 Aug 2025 18:56:34 +0200
Subject: [PATCH 2/2] Add support for ignore_task_fleet_capacity_drifts to task_instance_fleet

---
 README.md |  2 ++
 main.tf   | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 74 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index fa5bb55..a7aeafb 100644
--- a/README.md
+++ b/README.md
@@ -348,6 +348,7 @@ No modules.
 |------|------|
 | [aws_emr_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/emr_cluster) | resource |
 | [aws_emr_instance_fleet.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/emr_instance_fleet) | resource |
+| [aws_emr_instance_fleet.this_ignore_capacity_drifts](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/emr_instance_fleet) | resource |
 | [aws_emr_instance_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/emr_instance_group) | resource |
 | [aws_emr_managed_scaling_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/emr_managed_scaling_policy) | resource |
 | [aws_emr_security_configuration.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/emr_security_configuration) | resource |
@@ -407,6 +408,7 @@ No modules.
 | [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
 | [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
 | [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name is used as a prefix | `bool` | `true` | no |
+| [ignore\_task\_fleet\_capacity\_drifts](#input\_ignore\_task\_fleet\_capacity\_drifts) | Whether to ignore capacity drifts for the task instance fleet. If set to `true`, Terraform will not resize the fleet when `target_on_demand_capacity` or `target_spot_capacity` drift from the configured values (e.g. due to managed scaling). Default value is `false` | `bool` | `false` | no |
 | [is\_private\_cluster](#input\_is\_private\_cluster) | Identifies whether the cluster is created in a private subnet | `bool` | `true` | no |
 | [keep\_job\_flow\_alive\_when\_no\_steps](#input\_keep\_job\_flow\_alive\_when\_no\_steps) | Switch on/off run cluster with no steps or when all steps are complete (default is on) | `bool` | `null` | no |
 | [kerberos\_attributes](#input\_kerberos\_attributes) | Kerberos configuration for the cluster | `any` | `{}` | no |
diff --git a/main.tf b/main.tf
index 3fea381..e78513b 100644
--- a/main.tf
+++ b/main.tf
@@ -342,7 +342,74 @@ resource "aws_emr_cluster" "this" {
 ################################################################################
 
 resource "aws_emr_instance_fleet" "this" {
-  for_each = { for k, v in [var.task_instance_fleet] : k => v if var.create && length(var.task_instance_fleet) > 0 }
+  for_each = { for k, v in [var.task_instance_fleet] : k => v if var.create && length(var.task_instance_fleet) > 0 && !var.ignore_task_fleet_capacity_drifts }
+
+  cluster_id = aws_emr_cluster.this[0].id
+
+  dynamic "instance_type_configs" {
+    for_each = try(each.value.instance_type_configs, [])
+
+    content {
+      bid_price                                  = try(instance_type_configs.value.bid_price, null)
+      bid_price_as_percentage_of_on_demand_price = try(instance_type_configs.value.bid_price_as_percentage_of_on_demand_price, 60)
+
+      dynamic "configurations" {
+        for_each = try(instance_type_configs.value.configurations, [])
+
+        content {
+          classification = try(configurations.value.classification, null)
+          properties     = try(configurations.value.properties, null)
+        }
+      }
+
+      dynamic "ebs_config" {
+        for_each = try(instance_type_configs.value.ebs_config, [])
+
+        content {
+          iops                 = try(ebs_config.value.iops, null)
+          size                 = try(ebs_config.value.size, 64)
+          type                 = try(ebs_config.value.type, "gp3")
+          volumes_per_instance = try(ebs_config.value.volumes_per_instance, null)
+        }
+      }
+
+      instance_type     = instance_type_configs.value.instance_type
+      weighted_capacity = try(instance_type_configs.value.weighted_capacity, null)
+    }
+  }
+
+  dynamic "launch_specifications" {
+    for_each = try([each.value.launch_specifications], [])
+
+    content {
+      dynamic "on_demand_specification" {
+        for_each = try([launch_specifications.value.on_demand_specification], [])
+
+        content {
+          allocation_strategy = try(on_demand_specification.value.allocation_strategy, "lowest-price")
+        }
+      }
+
+      dynamic "spot_specification" {
+        for_each = try([launch_specifications.value.spot_specification], [])
+
+        content {
+          allocation_strategy      = try(spot_specification.value.allocation_strategy, "capacity-optimized")
+          block_duration_minutes   = try(launch_specifications.value.spot_specification.block_duration_minutes, null)
+          timeout_action           = try(launch_specifications.value.spot_specification.timeout_action, "SWITCH_TO_ON_DEMAND")
+          timeout_duration_minutes = try(launch_specifications.value.spot_specification.timeout_duration_minutes, 60)
+        }
+      }
+    }
+  }
+
+  name                      = try(each.value.name, null)
+  target_on_demand_capacity = try(each.value.target_on_demand_capacity, null)
+  target_spot_capacity      = try(each.value.target_spot_capacity, null)
+}
+
+resource "aws_emr_instance_fleet" "this_ignore_capacity_drifts" {
+  for_each = { for k, v in [var.task_instance_fleet] : k => v if var.create && length(var.task_instance_fleet) > 0 && var.ignore_task_fleet_capacity_drifts }
 
   cluster_id = aws_emr_cluster.this[0].id
 
@@ -408,10 +475,10 @@
   target_spot_capacity      = try(each.value.target_spot_capacity, null)
 
   lifecycle {
-    ignore_changes = var.ignore_task_fleet_capacity_drifts ? [
-      target_on_demand_capacity,
-      target_spot_capacity
-    ] : []
+    ignore_changes = [
+      target_on_demand_capacity,
+      target_spot_capacity
+    ]
   }
 }
 
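
Terraform requires the `ignore_changes` argument of a `lifecycle` block to be a static list, so it cannot be toggled through `var.ignore_task_fleet_capacity_drifts` inside a single resource; the second commit therefore replaces the conditional from the first commit with a dedicated `aws_emr_instance_fleet.this_ignore_capacity_drifts` resource and lets the flag select which of the two otherwise-identical resources is created via `for_each`. A minimal usage sketch from a calling root module follows; the module source and every input value are illustrative assumptions, not something defined by this patch series.

# Illustrative usage sketch; module source and all values are assumptions.
module "emr" {
  source = "terraform-aws-modules/emr/aws"

  # ... other cluster inputs elided ...

  task_instance_fleet = {
    name                      = "task-fleet"
    target_on_demand_capacity = 1
    target_spot_capacity      = 2

    instance_type_configs = [
      {
        instance_type     = "m5.xlarge"
        weighted_capacity = 1
      }
    ]
  }

  # With the flag enabled, the fleet is created as
  # aws_emr_instance_fleet.this_ignore_capacity_drifts, whose lifecycle block
  # ignores drift on target_on_demand_capacity and target_spot_capacity.
  ignore_task_fleet_capacity_drifts = true
}

Note that toggling the flag on an existing cluster moves the task fleet to a different resource address, so Terraform will plan a replacement unless the state is moved first (for example with `terraform state mv`).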