mirror of
https://github.com/hashicorp/terraform.git
synced 2026-02-20 00:13:30 -05:00
Some checks failed
build / Determine intended Terraform version (push) Has been cancelled
build / Determine Go toolchain version (push) Has been cancelled
Quick Checks / Unit Tests (push) Has been cancelled
Quick Checks / Race Tests (push) Has been cancelled
Quick Checks / End-to-end Tests (push) Has been cancelled
Quick Checks / Code Consistency Checks (push) Has been cancelled
build / Generate release metadata (push) Has been cancelled
build / Build for freebsd_386 (push) Has been cancelled
build / Build for linux_386 (push) Has been cancelled
build / Build for openbsd_386 (push) Has been cancelled
build / Build for windows_386 (push) Has been cancelled
build / Build for darwin_amd64 (push) Has been cancelled
build / Build for freebsd_amd64 (push) Has been cancelled
build / Build for linux_amd64 (push) Has been cancelled
build / Build for openbsd_amd64 (push) Has been cancelled
build / Build for solaris_amd64 (push) Has been cancelled
build / Build for windows_amd64 (push) Has been cancelled
build / Build for freebsd_arm (push) Has been cancelled
build / Build for linux_arm (push) Has been cancelled
build / Build for darwin_arm64 (push) Has been cancelled
build / Build for linux_arm64 (push) Has been cancelled
build / Build for windows_arm64 (push) Has been cancelled
build / Build Docker image for linux_386 (push) Has been cancelled
build / Build Docker image for linux_amd64 (push) Has been cancelled
build / Build Docker image for linux_arm (push) Has been cancelled
build / Build Docker image for linux_arm64 (push) Has been cancelled
build / Build e2etest for linux_386 (push) Has been cancelled
build / Build e2etest for windows_386 (push) Has been cancelled
build / Build e2etest for darwin_amd64 (push) Has been cancelled
build / Build e2etest for linux_amd64 (push) Has been cancelled
build / Build e2etest for windows_amd64 (push) Has been cancelled
build / Build e2etest for linux_arm (push) Has been cancelled
build / Build e2etest for darwin_arm64 (push) Has been cancelled
build / Build e2etest for linux_arm64 (push) Has been cancelled
build / Run e2e test for linux_386 (push) Has been cancelled
build / Run e2e test for windows_386 (push) Has been cancelled
build / Run e2e test for darwin_amd64 (push) Has been cancelled
build / Run e2e test for linux_amd64 (push) Has been cancelled
build / Run e2e test for windows_amd64 (push) Has been cancelled
build / Run e2e test for linux_arm (push) Has been cancelled
build / Run e2e test for linux_arm64 (push) Has been cancelled
build / Run terraform-exec test for linux amd64 (push) Has been cancelled
200 lines
8 KiB
Go
200 lines
8 KiB
Go
// Copyright (c) HashiCorp, Inc.
|
|
// SPDX-License-Identifier: BUSL-1.1
|
|
|
|
package graph
|
|
|
|
import (
|
|
"fmt"
|
|
"log"
|
|
"path/filepath"
|
|
|
|
"github.com/hashicorp/terraform/internal/addrs"
|
|
"github.com/hashicorp/terraform/internal/configs"
|
|
"github.com/hashicorp/terraform/internal/lang"
|
|
"github.com/hashicorp/terraform/internal/moduletest"
|
|
teststates "github.com/hashicorp/terraform/internal/moduletest/states"
|
|
"github.com/hashicorp/terraform/internal/plans"
|
|
"github.com/hashicorp/terraform/internal/providers"
|
|
"github.com/hashicorp/terraform/internal/states"
|
|
"github.com/hashicorp/terraform/internal/terraform"
|
|
"github.com/hashicorp/terraform/internal/tfdiags"
|
|
)
|
|
|
|
// testApply defines how to execute a run block representing an apply command.
//
// It plans the module, bails out early if planning fails, applies the plan,
// evaluates the run block's assertions against the applied state, and finally
// records the updated state under the run's state key.
//
// See also: (n *NodeTestRun).testPlan
func (n *NodeTestRun) testApply(ctx *EvalContext, variables terraform.InputValues, providers map[addrs.RootProviderConfig]providers.Interface, waiter *operationWaiter) {
	file, run := n.File(), n.run
	config := run.ModuleConfig
	// key identifies which tracked file state this run reads and updates.
	key := n.run.Config.StateKey

	// FilterVariablesToModule only returns warnings, so we don't check the
	// returned diags for errors.
	setVariables, testOnlyVariables, setVariableDiags := FilterVariablesToModule(run.ModuleConfig, variables)
	run.Diagnostics = run.Diagnostics.Append(setVariableDiags)

	// ignore diags because validate has covered it
	tfCtx, _ := terraform.NewContext(n.opts.ContextOpts)

	// execute the terraform plan operation
	_, plan, planDiags := plan(ctx, tfCtx, file.Config, run.Config, run.ModuleConfig, setVariables, providers, waiter)

	// Any error during the planning prevents our apply from
	// continuing which is an error.
	planDiags = moduletest.ExplainExpectedFailures(run.Config, planDiags)
	run.Diagnostics = run.Diagnostics.Append(planDiags)
	if planDiags.HasErrors() {
		run.Status = moduletest.Error
		return
	}

	// Since we're carrying on and executing the apply operation as well, we're
	// just going to do some post processing of the diagnostics. We remove the
	// warnings generated from check blocks, as the apply operation will either
	// reproduce them or fix them and we don't want fixed diagnostics to be
	// reported and we don't want duplicates either.
	var filteredDiags tfdiags.Diagnostics
	for _, diag := range run.Diagnostics {
		if rule, ok := addrs.DiagnosticOriginatesFromCheckRule(diag); ok && rule.Container.CheckableKind() == addrs.CheckableCheck {
			continue
		}
		filteredDiags = filteredDiags.Append(diag)
	}
	run.Diagnostics = filteredDiags

	// execute the apply operation
	applyScope, updated, applyDiags := apply(tfCtx, run.Config, run.ModuleConfig, plan, moduletest.Running, variables, providers, waiter)

	// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
	// We'll also update the run status based on the presence of errors or missing expected failures.
	status, applyDiags := checkForMissingExpectedFailures(ctx, run.Config, applyDiags)
	run.Diagnostics = run.Diagnostics.Append(applyDiags)
	run.Status = run.Status.Merge(status)
	if status == moduletest.Error {
		// Even though the apply operation failed, the graph may have done
		// partial updates and the returned state should reflect this.
		ctx.SetFileState(key, run, updated, teststates.StateReasonNone)
		return
	}

	if ctx.Verbose() {
		schemas, diags := tfCtx.Schemas(config, updated)

		// If we're going to fail to render the plan, let's not fail the overall
		// test. It can still have succeeded. So we'll add the diagnostics, but
		// still report the test status as a success.
		if diags.HasErrors() {
			// This is very unlikely.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Warning,
				"Failed to print verbose output",
				fmt.Sprintf("Terraform failed to print the verbose output for %s, other diagnostics will contain more details as to why.", filepath.Join(file.Name, run.Name))))
		} else {
			run.Verbose = &moduletest.Verbose{
				Plan:         nil, // We don't have a plan to show in apply mode.
				State:        updated,
				Config:       config,
				Providers:    schemas.Providers,
				Provisioners: schemas.Provisioners,
			}
		}

		run.Diagnostics = run.Diagnostics.Append(diags)
	}

	// Evaluate the run block directly in the graph context to validate the
	// assertions of the run. We also pass in all the previous contexts so
	// this run block can refer to outputs from previous run blocks.
	newStatus, outputVals, moreDiags := ctx.EvaluateRun(run.Config, run.ModuleConfig.Module, applyScope, testOnlyVariables)
	run.Status = run.Status.Merge(newStatus)
	run.Diagnostics = run.Diagnostics.Append(moreDiags)
	run.Outputs = outputVals

	// Only update the most recent run and state if the state was
	// actually updated by this change. We want to use the run that
	// most recently updated the tracked state as the cleanup
	// configuration.
	ctx.SetFileState(key, run, updated, teststates.StateReasonNone)
}
|
|
|
|
func apply(tfCtx *terraform.Context, run *configs.TestRun, module *configs.Config, plan *plans.Plan, progress moduletest.Progress, variables terraform.InputValues, providers map[addrs.RootProviderConfig]providers.Interface, waiter *operationWaiter) (*lang.Scope, *states.State, tfdiags.Diagnostics) {
|
|
log.Printf("[TRACE] TestFileRunner: called apply for %s", run.Name)
|
|
|
|
var diags tfdiags.Diagnostics
|
|
|
|
// If things get cancelled while we are executing the apply operation below
|
|
// we want to print out all the objects that we were creating so the user
|
|
// can verify we managed to tidy everything up possibly.
|
|
//
|
|
// Unfortunately, this creates a race condition as the apply operation can
|
|
// edit the plan (by removing changes once they are applied) while at the
|
|
// same time our cancellation process will try to read the plan.
|
|
//
|
|
// We take a quick copy of the changes we care about here, which will then
|
|
// be used in place of the plan when we print out the objects to be created
|
|
// as part of the cancellation process.
|
|
var created []*plans.ResourceInstanceChangeSrc
|
|
for _, change := range plan.Changes.Resources {
|
|
if change.Action != plans.Create {
|
|
continue
|
|
}
|
|
created = append(created, change)
|
|
}
|
|
|
|
// We only need to pass ephemeral variables to the apply operation, as the
|
|
// plan has already been evaluated with the full set of variables.
|
|
ephemeralVariables := make(terraform.InputValues)
|
|
for k, v := range module.Root.Module.Variables {
|
|
if v.EphemeralSet {
|
|
if value, ok := variables[k]; ok {
|
|
ephemeralVariables[k] = value
|
|
}
|
|
}
|
|
}
|
|
|
|
applyOpts := &terraform.ApplyOpts{
|
|
SetVariables: ephemeralVariables,
|
|
ExternalProviders: providers,
|
|
AllowRootEphemeralOutputs: true,
|
|
}
|
|
|
|
waiter.update(tfCtx, progress, created)
|
|
log.Printf("[DEBUG] TestFileRunner: starting apply for %s", run.Name)
|
|
updated, newScope, applyDiags := tfCtx.ApplyAndEval(plan, module, applyOpts)
|
|
log.Printf("[DEBUG] TestFileRunner: completed apply for %s", run.Name)
|
|
diags = diags.Append(applyDiags)
|
|
|
|
return newScope, updated, diags
|
|
}
|
|
|
|
// checkForMissingExpectedFailures checks for missing expected failures in the diagnostics.
|
|
// It updates the run status based on the presence of errors or missing expected failures.
|
|
func checkForMissingExpectedFailures(ctx *EvalContext, config *configs.TestRun, originals tfdiags.Diagnostics) (moduletest.Status, tfdiags.Diagnostics) {
|
|
// Retrieve and append diagnostics that are either unrelated to expected failures
|
|
// or report missing expected failures.
|
|
unexpectedDiags := moduletest.ValidateExpectedFailures(config, originals)
|
|
|
|
status := moduletest.Pass
|
|
for _, diag := range unexpectedDiags {
|
|
// // If any diagnostic indicates a missing expected failure, set the run status to fail.
|
|
if ok := moduletest.DiagnosticFromMissingExpectedFailure(diag); ok {
|
|
status = status.Merge(moduletest.Fail)
|
|
continue
|
|
}
|
|
|
|
// upgrade the run status to error if there still are other errors in the diagnostics
|
|
if diag.Severity() == tfdiags.Error {
|
|
status = status.Merge(moduletest.Error)
|
|
break
|
|
}
|
|
}
|
|
|
|
if ctx.Verbose() {
|
|
// in verbose mode, we still add all the original diagnostics for
|
|
// display even if they are expected.
|
|
return status, originals
|
|
} else {
|
|
return status, unexpectedDiags
|
|
}
|
|
}
|