diff --git a/CHANGELOG.md b/CHANGELOG.md
index 09a369445c..59caab31e7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ UPGRADE NOTES:
 
 ENHANCEMENTS:
 
+* The conditional `enabled` field is now supported in the `lifecycle` block for all resource types. ([#3042](https://github.com/opentofu/opentofu/pull/3042))
 * OpenTofu will now suggest using `-exclude` if a provider reports that it cannot create a plan for a particular resource instance due to values that won't be known until the apply phase. ([#2643](https://github.com/opentofu/opentofu/pull/2643))
 * `tofu validate` now supports running in a module that contains provider configuration_aliases. ([#2905](https://github.com/opentofu/opentofu/pull/2905))
 * The `regex` and `regexall` functions now support using `\p` and `\P` sequences with the long-form names for Unicode general character properties. For example, `\p{Letter}` now has the same meaning as `\p{L}`. ([#3166](https://github.com/opentofu/opentofu/pull/3166))
diff --git a/internal/configs/parser_config_test.go b/internal/configs/parser_config_test.go
index 5abd6157cc..2dd0e643c1 100644
--- a/internal/configs/parser_config_test.go
+++ b/internal/configs/parser_config_test.go
@@ -128,13 +128,19 @@ func TestParserLoadConfigFileFailureMessages(t *testing.T) {
             "invalid-files/resource-count-and-for_each.tf",
             hcl.DiagError,
             `Invalid combination of "count" and "for_each"`,
-            `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`,
+            `The "count" and "for_each" meta-arguments are mutually-exclusive. Only one may be used to be explicit about the number of resources to be created.`,
+        },
+        {
+            "invalid-files/resource-count-and-for_each-and-enabled.tf",
+            hcl.DiagError,
+            `Invalid combination of "count", "enabled", and "for_each"`,
+            `The "count", "enabled", and "for_each" meta-arguments are mutually-exclusive. Only one may be used to be explicit about the number of resources to be created.`,
         },
         {
             "invalid-files/data-count-and-for_each.tf",
             hcl.DiagError,
             `Invalid combination of "count" and "for_each"`,
-            `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`,
+            `The "count" and "for_each" meta-arguments are mutually-exclusive. Only one may be used to be explicit about the number of resources to be created.`,
         },
         {
             "invalid-files/resource-lifecycle-badbool.tf",
diff --git a/internal/configs/resource.go b/internal/configs/resource.go
index b8ebd6a6dc..6cf991296e 100644
--- a/internal/configs/resource.go
+++ b/internal/configs/resource.go
@@ -7,6 +7,8 @@ package configs
 
 import (
     "fmt"
+    "sort"
+    "strings"
 
     "github.com/hashicorp/hcl/v2"
     "github.com/hashicorp/hcl/v2/gohcl"
@@ -27,6 +29,7 @@ type Resource struct {
     Type    string
     Config  hcl.Body
     Count   hcl.Expression
+    Enabled hcl.Expression
     ForEach hcl.Expression
 
     ProviderConfigRef *ProviderConfigRef
@@ -149,21 +152,18 @@ func decodeResourceBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagno
         })
     }
 
+    repetitionArgs := 0
+    var countRng, forEachRng, enabledRng hcl.Range
     if attr, exists := content.Attributes["count"]; exists {
         r.Count = attr.Expr
+        countRng = attr.NameRange
+        repetitionArgs++
     }
 
     if attr, exists := content.Attributes["for_each"]; exists {
         r.ForEach = attr.Expr
-        // Cannot have count and for_each on the same resource block
-        if r.Count != nil {
-            diags = append(diags, &hcl.Diagnostic{
-                Severity: hcl.DiagError,
-                Summary:  `Invalid combination of "count" and "for_each"`,
-                Detail:   `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`,
-                Subject:  &attr.NameRange,
-            })
-        }
+        forEachRng = attr.NameRange
+        repetitionArgs++
     }
 
     if attr, exists := content.Attributes["provider"]; exists {
@@ -204,6 +204,12 @@ func decodeResourceBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagno
                 r.Managed.CreateBeforeDestroySet = true
             }
 
+            if attr, exists := lcContent.Attributes["enabled"]; exists {
+                r.Enabled = attr.Expr
+                enabledRng = attr.NameRange
+                repetitionArgs++
+            }
+
             if attr, exists := lcContent.Attributes["prevent_destroy"]; exists {
                 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy)
                 diags = append(diags, valDiags...)
@@ -357,6 +363,17 @@ func decodeResourceBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagno
         }
     }
 
+    if repetitionArgs > 1 {
+        complainRng, complainMsg := complainRngAndMsg(countRng, enabledRng, forEachRng)
+
+        diags = append(diags, &hcl.Diagnostic{
+            Severity: hcl.DiagError,
+            Summary:  fmt.Sprintf(`Invalid combination of %s`, complainMsg),
+            Detail:   fmt.Sprintf(`The %s meta-arguments are mutually-exclusive. Only one may be used to be explicit about the number of resources to be created.`, complainMsg),
+            Subject:  complainRng,
+        })
+    }
+
     // Now we can validate the connection block references if there are any destroy provisioners.
     // TODO: should we eliminate standalone connection blocks?
     if r.Managed.Connection != nil {
@@ -371,6 +388,43 @@ func decodeResourceBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagno
     return r, diags
 }
 
+func complainRngAndMsg(countRng, enabledRng, forEachRng hcl.Range) (*hcl.Range, string) {
+    var complainRngs []hcl.Range
+    var complainAttrs []string
+    if !countRng.Empty() {
+        complainRngs = append(complainRngs, countRng)
+        complainAttrs = append(complainAttrs, "\"count\"")
+    }
+    if !enabledRng.Empty() {
+        complainRngs = append(complainRngs, enabledRng)
+        complainAttrs = append(complainAttrs, "\"enabled\"")
+    }
+    if !forEachRng.Empty() {
+        complainRngs = append(complainRngs, forEachRng)
+        complainAttrs = append(complainAttrs, "\"for_each\"")
+    }
+
+    // Sort the ranges by their position in the source so we can tell which
+    // attribute appeared first; that one is treated as the valid one.
+    sort.SliceStable(complainRngs, func(i, j int) bool {
+        return complainRngs[i].Start.Byte < complainRngs[j].Start.Byte
+    })
+
+    lastIndex := len(complainAttrs) - 1
+    complainRng := complainRngs[lastIndex]
+
+    var complainMsg string
+    if len(complainAttrs) >= 3 {
+        // Join with an Oxford comma, adding "and" before the last attribute
+        complainAttrs[lastIndex] = "and " + complainAttrs[lastIndex]
+        complainMsg = strings.Join(complainAttrs, ", ")
+    } else {
+        complainMsg = strings.Join(complainAttrs, " and ")
+    }
+
+    return &complainRng, complainMsg
+}
+
 func decodeDataBlock(block *hcl.Block, override, nested bool) (*Resource, hcl.Diagnostics) {
     var diags hcl.Diagnostics
     r := &Resource{
@@ -421,7 +475,7 @@ func decodeDataBlock(block *hcl.Block, override, nested bool) (*Resource, hcl.Di
             diags = append(diags, &hcl.Diagnostic{
                 Severity: hcl.DiagError,
                 Summary:  `Invalid combination of "count" and "for_each"`,
-                Detail:   `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`,
+                Detail:   `The "count" and "for_each" meta-arguments are mutually-exclusive. Only one may be used to be explicit about the number of resources to be created.`,
                 Subject:  &attr.NameRange,
             })
         }
@@ -502,6 +556,11 @@ func decodeDataBlock(block *hcl.Block, override, nested bool) (*Resource, hcl.Di
         // managed resources only, so we can emit a common error message
         // for any given attributes that HCL accepted.
         for name, attr := range lcContent.Attributes {
+            // Enabled is a special case; it is allowed for data resources
+            if name == "enabled" {
+                r.Enabled = attr.Expr
+                continue
+            }
             diags = append(diags, &hcl.Diagnostic{
                 Severity: hcl.DiagError,
                 Summary:  "Invalid data resource lifecycle argument",
@@ -1078,6 +1137,9 @@ var resourceLifecycleBlockSchema = &hcl.BodySchema{
         {
             Name: "replace_triggered_by",
         },
+        {
+            Name: "enabled",
+        },
     },
     Blocks: []hcl.BlockHeaderSchema{
         {Type: "precondition"},
diff --git a/internal/configs/testdata/invalid-files/resource-count-and-for_each-and-enabled.tf b/internal/configs/testdata/invalid-files/resource-count-and-for_each-and-enabled.tf
new file mode 100644
index 0000000000..93a0017193
--- /dev/null
+++ b/internal/configs/testdata/invalid-files/resource-count-and-for_each-and-enabled.tf
@@ -0,0 +1,7 @@
+resource "test" "foo" {
+  lifecycle {
+    enabled = true
+  }
+  count = 2
+  for_each = ["a"]
+}
diff --git a/internal/instances/expander.go b/internal/instances/expander.go
index 8874b909ae..cd9a788493 100644
--- a/internal/instances/expander.go
+++ b/internal/instances/expander.go
@@ -87,6 +87,12 @@ func (e *Expander) SetResourceCount(moduleAddr addrs.ModuleInstance, resourceAdd
     e.setResourceExpansion(moduleAddr, resourceAddr, expansionCount(count))
 }
 
+// SetResourceEnabled records that the given resource inside the given module
+// uses the "enabled" repetition argument, with the given value.
+func (e *Expander) SetResourceEnabled(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, enabled bool) {
+    e.setResourceExpansion(moduleAddr, resourceAddr, expansionEnabled(enabled))
+}
+
 // SetResourceForEach records that the given resource inside the given module
 // uses the "for_each" repetition argument, with the given map value.
 //
diff --git a/internal/instances/expansion_mode.go b/internal/instances/expansion_mode.go
index b1850df766..5f59f8a36d 100644
--- a/internal/instances/expansion_mode.go
+++ b/internal/instances/expansion_mode.go
@@ -42,6 +42,24 @@ func (e expansionSingle) repetitionData(key addrs.InstanceKey) RepetitionData {
     return RepetitionData{}
 }
 
+// expansionEnabled is the expansion corresponding to the "enabled" argument,
+// producing either no instances or one instance with no instance key.
+type expansionEnabled bool
+
+func (e expansionEnabled) instanceKeys() []addrs.InstanceKey {
+    if !bool(e) {
+        return nil
+    }
+    return singleKeys
+}
+
+func (e expansionEnabled) repetitionData(key addrs.InstanceKey) RepetitionData {
+    if key != addrs.NoKey {
+        panic("cannot use instance key with non-repeating object")
+    }
+    return RepetitionData{}
+}
+
 // expansionCount is the expansion corresponding to the "count" argument.
 type expansionCount int
diff --git a/internal/tofu/context_apply2_test.go b/internal/tofu/context_apply2_test.go
index cb541613ea..4a051f141a 100644
--- a/internal/tofu/context_apply2_test.go
+++ b/internal/tofu/context_apply2_test.go
@@ -6198,3 +6198,160 @@ func TestMergePlanAndApplyVariables(t *testing.T) {
         })
     }
 }
+
+func TestContext2Apply_enabledForResource(t *testing.T) {
+    m := testModule(t, "apply-enabled-resource")
+    p := &MockProvider{
+        GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{
+            ResourceTypes: map[string]providers.Schema{
+                "test": {
+                    Block: &configschema.Block{
+                        Attributes: map[string]*configschema.Attribute{
+                            "name": {
+                                Type:     cty.String,
+                                Required: true,
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    }
+    p.PlanResourceChangeFn = func(prcr providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
+        return providers.PlanResourceChangeResponse{
+            PlannedState: prcr.ProposedNewState,
+        }
+    }
+    p.ApplyResourceChangeFn = func(arcr providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
+        return providers.ApplyResourceChangeResponse{
+            NewState: arcr.PlannedState,
+        }
+    }
+    tfCtx := testContext2(t, &ContextOpts{
+        Providers: map[addrs.Provider]providers.Factory{
+            addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+        },
+    })
+    resourceInstAddr := addrs.Resource{
+        Mode: addrs.ManagedResourceMode,
+        Type: "test",
+        Name: "test",
+    }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)
+    outputAddr := addrs.OutputValue{
+        Name: "result",
+    }.Absolute(addrs.RootModuleInstance)
+
+    // We'll overwrite this after each round, but it starts empty.
+    state := states.NewState()
+
+    {
+        t.Logf("First round: var.on = false")
+
+        plan, diags := tfCtx.Plan(context.Background(), m, state, &PlanOpts{
+            Mode: plans.NormalMode,
+            SetVariables: InputValues{
+                "on": &InputValue{
+                    Value: cty.False,
+                },
+            },
+        })
+        assertNoDiagnostics(t, diags)
+
+        if change := plan.Changes.ResourceInstance(resourceInstAddr); change != nil {
+            t.Fatalf("unexpected plan for %s (should be disabled)", resourceInstAddr)
+        }
+
+        newState, diags := tfCtx.Apply(context.Background(), plan, m)
+        assertNoDiagnostics(t, diags)
+
+        if instState := newState.ResourceInstance(resourceInstAddr); instState != nil {
+            t.Fatalf("unexpected state entry for %s (should be disabled)", resourceInstAddr)
+        }
+
+        outputState := newState.OutputValue(outputAddr)
+        if outputState == nil {
+            t.Errorf("missing state entry for %s", outputAddr)
+        } else if got := outputState.Value.Index(cty.Zero); !got.IsNull() {
+            t.Errorf("unexpected value for %s %#v; want null", outputAddr, got)
+        }
+
+        state = newState // "persist" the state for the next round
+    }
+    {
+        t.Logf("Second round: var.on = true")
+
+        plan, diags := tfCtx.Plan(context.Background(), m, state, &PlanOpts{
+            Mode: plans.NormalMode,
+            SetVariables: InputValues{
+                "on": &InputValue{
+                    Value: cty.True,
+                },
+            },
+        })
+        assertNoDiagnostics(t, diags)
+
+        change := plan.Changes.ResourceInstance(resourceInstAddr)
+        if change == nil {
+            t.Fatalf("missing plan for %s", resourceInstAddr)
+        }
+        if got, want := change.Action, plans.Create; got != want {
+            t.Fatalf("plan for %s has wrong action %s; want %s", resourceInstAddr, got, want)
+        }
+
+        newState, diags := tfCtx.Apply(context.Background(), plan, m)
+        assertNoDiagnostics(t, diags)
+
+        instState := newState.ResourceInstance(resourceInstAddr)
+        if instState == nil {
+            t.Fatalf("missing state entry for %s", resourceInstAddr)
+        }
+
+        outputState := newState.OutputValue(outputAddr)
+        if outputState == nil {
+            t.Errorf("missing state entry for %s", outputAddr)
+        } else if got, want := outputState.Value.Index(cty.Zero), cty.StringVal("boop"); !want.RawEquals(got) {
+            t.Errorf("unexpected value for %s\ngot: %#v\nwant: %#v", outputAddr, got, want)
+        }
+
+        state = newState // "persist" the state for the next round
+    }
+    {
+        t.Logf("Third round: var.on = false, again")
+
+        plan, diags := tfCtx.Plan(context.Background(), m, state, &PlanOpts{
+            Mode: plans.NormalMode,
+            SetVariables: InputValues{
+                "on": &InputValue{
+                    Value: cty.False,
+                },
+            },
+        })
+        assertNoDiagnostics(t, diags)
+
+        change := plan.Changes.ResourceInstance(resourceInstAddr)
+        if change == nil {
+            t.Fatalf("missing plan for %s", resourceInstAddr)
+        }
+        if got, want := change.Action, plans.Delete; got != want {
+            t.Fatalf("plan for %s has wrong action %s; want %s", resourceInstAddr, got, want)
+        }
+
+        if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseEnabledFalse; got != want {
+            t.Errorf("wrong action reason for %s %s; want %s", resourceInstAddr, got, want)
+        }
+
+        newState, diags := tfCtx.Apply(context.Background(), plan, m)
+        assertNoDiagnostics(t, diags)
+
+        if instState := newState.ResourceInstance(resourceInstAddr); instState != nil {
+            t.Fatalf("unexpected state entry for %s (should be disabled)", resourceInstAddr)
+        }
+
+        outputState := newState.OutputValue(outputAddr)
+        if outputState == nil {
+            t.Errorf("missing state entry for %s", outputAddr)
+        } else if got := outputState.Value.Index(cty.Zero); !got.IsNull() {
+            t.Errorf("unexpected value for %s %#v; want null", outputAddr, got)
+        }
+    }
+}
diff --git a/internal/tofu/eval_expansion.go b/internal/tofu/eval_expansion.go
index 62c4b6a70e..418db14fb0 100644
--- a/internal/tofu/eval_expansion.go
+++ b/internal/tofu/eval_expansion.go
@@ -38,6 +38,10 @@ func evaluateForEachExpression(ctx context.Context, expr hcl.Expression, evalCtx
     return evalchecks.EvaluateForEachExpression(expr, evalContextScope(ctx, evalCtx), excludeableAddr)
 }
 
+func evaluateEnabledExpression(ctx context.Context, expr hcl.Expression, evalCtx EvalContext) (bool, tfdiags.Diagnostics) {
+    return evalchecks.EvaluateEnabledExpression(expr, evalContextScope(ctx, evalCtx))
+}
+
 func evaluateForEachExpressionValue(ctx context.Context, expr hcl.Expression, evalCtx EvalContext, allowUnknown bool, allowTuple bool, excludeableAddr addrs.Targetable) (cty.Value, tfdiags.Diagnostics) {
     return evalchecks.EvaluateForEachExpressionValue(expr, evalContextScope(ctx, evalCtx), allowUnknown, allowTuple, excludeableAddr)
 }
diff --git a/internal/tofu/node_resource_abstract.go b/internal/tofu/node_resource_abstract.go
index f0f96c475a..94cd382478 100644
--- a/internal/tofu/node_resource_abstract.go
+++ b/internal/tofu/node_resource_abstract.go
@@ -516,6 +516,16 @@ func (n *NodeAbstractResource) writeResourceState(ctx context.Context, evalCtx E
         state.SetResourceProvider(addr, n.ResolvedProvider.ProviderConfig)
         expander.SetResourceCount(addr.Module, n.Addr.Resource, count)
 
+    case n.Config != nil && n.Config.Enabled != nil:
+        enabled, enabledDiags := evaluateEnabledExpression(ctx, n.Config.Enabled, evalCtx)
+        diags = diags.Append(enabledDiags)
+        if enabledDiags.HasErrors() {
+            return diags
+        }
+
+        state.SetResourceProvider(addr, n.ResolvedProvider.ProviderConfig)
+        expander.SetResourceEnabled(addr.Module, n.Addr.Resource, enabled)
+
     case n.Config != nil && n.Config.ForEach != nil:
         forEach, forEachDiags := evaluateForEachExpression(ctx, n.Config.ForEach, evalCtx, addr)
         diags = diags.Append(forEachDiags)
diff --git a/internal/tofu/node_resource_plan_instance.go b/internal/tofu/node_resource_plan_instance.go
index 441f241de5..366d5b697f 100644
--- a/internal/tofu/node_resource_plan_instance.go
+++ b/internal/tofu/node_resource_plan_instance.go
@@ -365,18 +365,12 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx context.Conte
     }
 
     // Plan the instance, unless we're in the refresh-only mode
+    expander := evalCtx.InstanceExpander()
     if !n.skipPlanChanges {
         // add this instance to n.forceReplace if replacement is triggered by
         // another change
 
-        repData := instances.RepetitionData{}
-        switch k := addr.Resource.Key.(type) {
-        case addrs.IntKey:
-            repData.CountIndex = k.Value()
-        case addrs.StringKey:
-            repData.EachKey = k.Value()
-            repData.EachValue = cty.DynamicVal
-        }
+        repData := expander.GetResourceInstanceRepetitionData(n.Addr)
 
         diags = diags.Append(n.replaceTriggered(ctx, evalCtx, repData))
         if diags.HasErrors() {
diff --git a/internal/tofu/node_resource_plan_orphan.go b/internal/tofu/node_resource_plan_orphan.go
index e216d54f7f..af5499f8de 100644
--- a/internal/tofu/node_resource_plan_orphan.go
+++ b/internal/tofu/node_resource_plan_orphan.go
@@ -271,11 +271,9 @@ func (n *NodePlannableResourceInstanceOrphan) deleteActionReason(evalCtx EvalCon
     switch n.Addr.Resource.Key.(type) {
     case nil: // no instance key at all
-        // TODO: Conditional enable work
-        // Uncomment this when Enabled field exists on Cfg
-        // if cfg.Enabled != nil {
-        //     return plans.ResourceInstanceDeleteBecauseEnabledFalse
-        // }
+        if cfg.Enabled != nil {
+            return plans.ResourceInstanceDeleteBecauseEnabledFalse
+        }
         if cfg.Count != nil || cfg.ForEach != nil {
             return plans.ResourceInstanceDeleteBecauseWrongRepetition
         }
diff --git a/internal/tofu/testdata/apply-enabled-resource/apply-enabled-resource.tf b/internal/tofu/testdata/apply-enabled-resource/apply-enabled-resource.tf
new file mode 100644
index 0000000000..06bed85d15
--- /dev/null
+++ b/internal/tofu/testdata/apply-enabled-resource/apply-enabled-resource.tf
@@ -0,0 +1,19 @@
+variable "on" {
+  type = bool
+}
+
+resource "test" "test" {
+  lifecycle {
+    enabled = var.on
+  }
+
+  name = "boop"
+}
+
+output "result" {
+  // This is in a 1-tuple just because OpenTofu treats a fully-null
+  // root module output value as if it wasn't declared at all,
+  // but we want to make sure we're actually testing the result
+  // of this resource directly.
+  value = [one(test.test[*].name)]
+}