terraform/internal/command/command_test.go

// Copyright IBM Corp. 2014, 2026
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"syscall"
"testing"
"github.com/google/go-cmp/cmp"
svchost "github.com/hashicorp/terraform-svchost"
"github.com/hashicorp/terraform-svchost/disco"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/internal/addrs"
backendInit "github.com/hashicorp/terraform/internal/backend/init"
backendLocal "github.com/hashicorp/terraform/internal/backend/local"
"github.com/hashicorp/terraform/internal/command/views"
"github.com/hashicorp/terraform/internal/command/workdir"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/configs/configload"
"github.com/hashicorp/terraform/internal/configs/configschema"
"github.com/hashicorp/terraform/internal/copy"
"github.com/hashicorp/terraform/internal/depsfile"
"github.com/hashicorp/terraform/internal/getproviders"
"github.com/hashicorp/terraform/internal/initwd"
_ "github.com/hashicorp/terraform/internal/logging"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/plans/planfile"
"github.com/hashicorp/terraform/internal/providers"
testing_provider "github.com/hashicorp/terraform/internal/providers/testing"
"github.com/hashicorp/terraform/internal/registry"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/states/statefile"
"github.com/hashicorp/terraform/internal/states/statemgr"
"github.com/hashicorp/terraform/internal/terminal"
"github.com/hashicorp/terraform/version"
)
// These are the directories for our test data and fixtures.
var (
fixtureDir = "./testdata"
testDataDir = "./testdata"
)
func init() {
test = true
// Initialize the backends
backendInit.Init(nil)
// Expand the data and fixture dirs on init because
// we change the working directory in some tests.
var err error
fixtureDir, err = filepath.Abs(fixtureDir)
if err != nil {
panic(err)
}
testDataDir, err = filepath.Abs(testDataDir)
if err != nil {
panic(err)
}
}
func TestMain(m *testing.M) {
// Make sure the backends are initialized, since our tests tend to assume it.
backendInit.Init(nil)
os.Exit(m.Run())
}
// tempWorkingDir constructs a workdir.Dir object referring to a newly-created
// temporary directory. The temporary directory is automatically removed when
// the test and all its subtests complete.
//
// Although workdir.Dir is built to support arbitrary base directories, the
// not-yet-migrated behaviors in command.Meta tend to expect the root module
// directory to be the real process working directory, and so if you intend
// to use the result inside a command.Meta object you must use a pattern
// similar to the following when initializing your test:
//
// wd := tempWorkingDir(t)
// t.Chdir(wd.RootModuleDir())
//
// Note that t.Chdir() modifies global state for the test process, and so a
// test using this pattern is incompatible with use of t.Parallel().
func tempWorkingDir(t *testing.T) *workdir.Dir {
t.Helper()
dirPath := t.TempDir()
t.Logf("temporary directory %s", dirPath)
return workdir.NewDir(dirPath)
}
// tempWorkingDirFixture is like tempWorkingDir but it also copies the content
// from a fixture directory into the temporary directory before returning it.
//
// The same caveats about the working directory apply as for tempWorkingDir. See
// the tempWorkingDir commentary for an example of how to use this function
// along with t.TempDir and t.Chdir from the testing library to meet the
// expectations of command.Meta legacy functionality.
func tempWorkingDirFixture(t *testing.T, fixtureName string) *workdir.Dir {
t.Helper()
dirPath := testTempDir(t)
t.Logf("temporary directory %s with fixture %q", dirPath, fixtureName)
fixturePath := testFixturePath(fixtureName)
testCopyDir(t, fixturePath, dirPath)
// NOTE: testCopyDir immediately aborts the test on failure, but because
// the temporary directory comes from t.TempDir (via testTempDir) it is
// still cleaned up automatically when the test completes.
return workdir.NewDir(dirPath)
}
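
// exampleFixtureCommandTest is a hypothetical usage sketch for
// tempWorkingDirFixture: copy a fixture into a temporary directory, then
// chdir into it so that the legacy behaviors in command.Meta see it as the
// process working directory. The fixture name "init" is illustrative only.
func exampleFixtureCommandTest(t *testing.T) {
	wd := tempWorkingDirFixture(t, "init")
	t.Chdir(wd.RootModuleDir())
	// ... construct a command.Meta rooted at wd and run the command under test ...
}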
func testFixturePath(name string) string {
return filepath.Join(fixtureDir, name)
}
func metaOverridesForProvider(p providers.Interface) *testingOverrides {
return &testingOverrides{
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): providers.FactoryFixed(p),
addrs.NewProvider(addrs.DefaultProviderRegistryHost, "hashicorp2", "test"): providers.FactoryFixed(p),
},
}
}
func testModuleWithSnapshot(t *testing.T, name string) (*configs.Config, *configload.Snapshot) {
t.Helper()
dir := filepath.Join(fixtureDir, name)
// FIXME: We're not dealing with the cleanup function here because
// this testModule function is used all over and so we don't want to
// change its interface at this late stage.
loader, _ := configload.NewLoaderForTests(t)
// Test modules usually do not refer to remote sources, and for local
// sources only this ultimately just records all of the module paths
// in a JSON file so that we can load them below.
inst := initwd.NewModuleInstaller(loader.ModulesDir(), loader, registry.NewClient(nil, nil))
_, instDiags := inst.InstallModules(context.Background(), dir, "tests", true, false, initwd.ModuleInstallHooksImpl{})
if instDiags.HasErrors() {
t.Fatal(instDiags.Err())
}
config, snap, diags := loader.LoadConfigWithSnapshot(dir)
if diags.HasErrors() {
t.Fatal(diags.Error())
}
return config, snap
}
// testPlan returns a non-nil noop plan.
func testPlan(t *testing.T) *plans.Plan {
t.Helper()
// This is what an empty configuration block would look like after being
// decoded with the schema of the "local" backend.
backendConfig := cty.ObjectVal(map[string]cty.Value{
"path": cty.NullVal(cty.String),
"workspace_dir": cty.NullVal(cty.String),
})
backendConfigRaw, err := plans.NewDynamicValue(backendConfig, backendConfig.Type())
if err != nil {
t.Fatal(err)
}
return &plans.Plan{
Backend: &plans.Backend{
// This is just a placeholder so that the plan file can be written
// out. Callers may wish to override it with something more "real"
// in tests where the plan will actually be applied afterwards.
Type: "local",
Config: backendConfigRaw,
Workspace: "default",
},
Changes: plans.NewChangesSrc(),
// We'll default to the fake plan being both applyable and complete,
// since that's what most tests expect. Tests can override these
// back to false again afterwards if they need to.
Applyable: true,
Complete: true,
}
}
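
// exampleIncompletePlan is a hypothetical sketch of the override pattern
// described above: start from testPlan's applyable-and-complete defaults and
// flip the flags back to false for tests that exercise incomplete plans.
func exampleIncompletePlan(t *testing.T) *plans.Plan {
	plan := testPlan(t)
	plan.Applyable = false
	plan.Complete = false
	return plan
}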
func testPlanFile(t *testing.T, configSnap *configload.Snapshot, state *states.State, plan *plans.Plan) string {
return testPlanFileMatchState(t, configSnap, state, plan, statemgr.SnapshotMeta{})
}
func testPlanFileMatchState(t *testing.T, configSnap *configload.Snapshot, state *states.State, plan *plans.Plan, stateMeta statemgr.SnapshotMeta) string {
t.Helper()
stateFile := &statefile.File{
Lineage: stateMeta.Lineage,
Serial: stateMeta.Serial,
State: state,
TerraformVersion: version.SemVer,
}
prevStateFile := &statefile.File{
Lineage: stateMeta.Lineage,
Serial: stateMeta.Serial,
State: state, // we just assume no changes detected during refresh
TerraformVersion: version.SemVer,
}
path := testTempFile(t)
err := planfile.Create(path, planfile.CreateArgs{
ConfigSnapshot: configSnap,
PreviousRunStateFile: prevStateFile,
StateFile: stateFile,
Plan: plan,
DependencyLocks: depsfile.NewLocks(),
})
if err != nil {
t.Fatalf("failed to create temporary plan file: %s", err)
}
return path
}
// testPlanFileNoop is a shortcut function that creates a plan file that
// represents no changes and returns its path. This is useful when a test
// just needs any plan file, and it doesn't matter what is inside it.
func testPlanFileNoop(t *testing.T) string {
snap := &configload.Snapshot{
Modules: map[string]*configload.SnapshotModule{
"": {
Dir: ".",
Files: map[string][]byte{
"main.tf": nil,
},
},
},
}
state := states.NewState()
plan := testPlan(t)
return testPlanFile(t, snap, state, plan)
}
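
// exampleNoopPlanRoundTrip is a hypothetical sketch combining testPlanFileNoop
// with testReadPlan (defined below): write a placeholder plan file to disk and
// load it back when a test needs a *plans.Plan value rather than just a path.
func exampleNoopPlanRoundTrip(t *testing.T) *plans.Plan {
	path := testPlanFileNoop(t)
	return testReadPlan(t, path)
}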
func testFileEquals(t *testing.T, got, want string) {
t.Helper()
actual, err := os.ReadFile(got)
if err != nil {
t.Fatalf("error reading %s: %s", got, err)
}
expected, err := os.ReadFile(want)
if err != nil {
t.Fatalf("error reading %s: %s", want, err)
}
if diff := cmp.Diff(string(actual), string(expected)); len(diff) > 0 {
t.Fatalf("got:\n%s\nwant:\n%s\ndiff:\n%s", actual, expected, diff)
}
}
func testReadPlan(t *testing.T, path string) *plans.Plan {
t.Helper()
f, err := planfile.Open(path)
if err != nil {
t.Fatalf("error opening plan file %q: %s", path, err)
}
defer f.Close()
p, err := f.ReadPlan()
if err != nil {
t.Fatalf("error reading plan from plan file %q: %s", path, err)
}
return p
}
// testState returns a test State structure that we use for a lot of tests.
func testState() *states.State {
return states.BuildState(func(s *states.SyncState) {
s.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_instance",
Name: "foo",
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
&states.ResourceInstanceObjectSrc{
// The weird whitespace here is reflective of how this would
// get written out in a real state file, due to the indentation
// of all of the containing wrapping objects and arrays.
AttrsJSON: []byte("{\n \"id\": \"bar\"\n }"),
Status: states.ObjectReady,
Dependencies: []addrs.ConfigResource{},
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
// DeepCopy is used here to ensure our synthetic state matches exactly
// with a state that will have been copied during the command
// operation, and all fields have been copied correctly.
}).DeepCopy()
}
func testStateWithIdentity() *states.State {
return states.BuildState(func(s *states.SyncState) {
s.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_instance",
Name: "foo",
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
&states.ResourceInstanceObjectSrc{
// The weird whitespace here is reflective of how this would
// get written out in a real state file, due to the indentation
// of all of the containing wrapping objects and arrays.
AttrsJSON: []byte("{\n \"id\": \"foo\"\n }"),
Status: states.ObjectReady,
Dependencies: []addrs.ConfigResource{},
IdentitySchemaVersion: 0,
IdentityJSON: []byte("{\n \"id\": \"my-foo-id\"\n }"),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
s.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_instance",
Name: "bar",
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
&states.ResourceInstanceObjectSrc{
AttrsJSON: []byte("{\n \"id\": \"bar\"\n }"),
Status: states.ObjectReady,
Dependencies: []addrs.ConfigResource{},
IdentitySchemaVersion: 0,
IdentityJSON: []byte("{\n \"id\": \"my-bar-id\"\n }"),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
// DeepCopy is used here to ensure our synthetic state matches exactly
// with a state that will have been copied during the command
// operation, and all fields have been copied correctly.
}).DeepCopy()
}
// writeStateForTesting is a helper that writes the given naked state to the
// given writer, generating a stub *statefile.File wrapper which is then
// immediately discarded.
func writeStateForTesting(state *states.State, w io.Writer) error {
sf := &statefile.File{
Serial: 0,
Lineage: "fake-for-testing",
State: state,
}
return statefile.Write(sf, w)
}
// testStateMgrCurrentLineage returns the current lineage for the given state
// manager, or the empty string if it does not use lineage. This is primarily
// for testing against the local backend, which always supports lineage.
func testStateMgrCurrentLineage(mgr statemgr.Persistent) string {
if pm, ok := mgr.(statemgr.PersistentMeta); ok {
m := pm.StateSnapshotMeta()
return m.Lineage
}
return ""
}
// markStateForMatching is a helper that writes a specific marker value to
// a state so that it can be recognized later with getStateMatchingMarker.
//
// Internally this just sets a root module output value called "testing_mark"
// to the given string value. If the state is being checked in other ways,
// the test code may need to compensate for the addition or overwriting of this
// special output value name.
//
// The given mark string is returned verbatim, to allow the following pattern
// in tests:
//
// mark := markStateForMatching(state, "foo")
// // (do stuff to the state)
// assertStateHasMarker(t, state, mark)
func markStateForMatching(state *states.State, mark string) string {
state.SetOutputValue(
addrs.OutputValue{Name: "testing_mark"}.Absolute(addrs.RootModuleInstance),
cty.StringVal(mark), false,
)
return mark
}
// getStateMatchingMarker is used with markStateForMatching to retrieve the
// mark string previously added to the given state. If no such mark is present,
// the result is an empty string.
func getStateMatchingMarker(state *states.State) string {
os := state.RootOutputValues["testing_mark"]
if os == nil {
return ""
}
v := os.Value
if v.Type() == cty.String && v.IsKnown() && !v.IsNull() {
return v.AsString()
}
return ""
}
// stateHasMarker is a helper around getStateMatchingMarker that also includes
// the equality test, for more convenient use in test assertion branches.
func stateHasMarker(state *states.State, want string) bool {
return getStateMatchingMarker(state) == want
}
// assertStateHasMarker wraps stateHasMarker to automatically generate a
// fatal test result (i.e. t.Fatal) if the marker doesn't match.
func assertStateHasMarker(t *testing.T, state *states.State, want string) {
if !stateHasMarker(state, want) {
t.Fatalf("wrong state marker\ngot: %q\nwant: %q", getStateMatchingMarker(state), want)
}
}
func testStateFile(t *testing.T, s *states.State) string {
t.Helper()
path := testTempFile(t)
f, err := os.Create(path)
if err != nil {
t.Fatalf("failed to create temporary state file %s: %s", path, err)
}
defer f.Close()
err = writeStateForTesting(s, f)
if err != nil {
t.Fatalf("failed to write state to temporary file %s: %s", path, err)
}
return path
}
// testStateFileDefault writes the state out to the default statefile
// in the cwd.
//
// Before calling this, use:
//
// tmp := t.TempDir()
// t.Chdir(tmp)
//
// to change into a temp working directory
func testStateFileDefault(t *testing.T, s *states.State) {
t.Helper()
f, err := os.Create(DefaultStateFilename)
if err != nil {
t.Fatalf("err: %s", err)
}
defer f.Close()
if err := writeStateForTesting(s, f); err != nil {
t.Fatalf("err: %s", err)
}
}
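
// exampleDefaultStateRoundTrip is a hypothetical sketch of the pattern in the
// comment above: switch into a temporary working directory, write the state
// to the default statefile location, and read it back with testStateRead.
func exampleDefaultStateRoundTrip(t *testing.T, s *states.State) *states.State {
	t.Chdir(t.TempDir())
	testStateFileDefault(t, s)
	return testStateRead(t, DefaultStateFilename)
}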
// testStateFileWorkspaceDefault writes the state out to the default statefile
// for the given workspace in the cwd.
//
// Before calling this, use:
//
// tmp := t.TempDir()
// t.Chdir(tmp)
//
// to change into a temp working directory
func testStateFileWorkspaceDefault(t *testing.T, workspace string, s *states.State) string {
t.Helper()
workspaceDir := filepath.Join(backendLocal.DefaultWorkspaceDir, workspace)
err := os.MkdirAll(workspaceDir, os.ModePerm)
if err != nil {
t.Fatalf("err: %s", err)
}
path := filepath.Join(workspaceDir, DefaultStateFilename)
f, err := os.Create(path)
if err != nil {
t.Fatalf("err: %s", err)
}
defer f.Close()
if err := writeStateForTesting(s, f); err != nil {
t.Fatalf("err: %s", err)
}
return path
}
// testStateFileRemote writes the state out to the remote statefile
// in the cwd.
//
// Before calling this, use:
//
// tmp := t.TempDir()
// t.Chdir(tmp)
//
// to change into a temp working directory
func testStateFileRemote(t *testing.T, s *workdir.BackendStateFile) string {
t.Helper()
path := filepath.Join(DefaultDataDir, DefaultStateFilename)
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
t.Fatalf("err: %s", err)
}
raw, err := workdir.EncodeBackendStateFile(s)
if err != nil {
t.Fatalf("encoding backend state file: %s", err)
}
if err := os.WriteFile(path, raw, os.ModePerm); err != nil {
t.Fatalf("writing backend state file: %s", err)
}
return path
}
// testStateRead reads the state from a file
func testStateRead(t *testing.T, path string) *states.State {
t.Helper()
f, err := os.Open(path)
if err != nil {
t.Fatalf("err: %s", err)
}
defer f.Close()
sf, err := statefile.Read(f)
if err != nil {
t.Fatalf("err: %s", err)
}
return sf.State
}
// testDataStateRead reads a backend state, which is a file format resembling
// our state format v3 that is used only to track current backend settings.
func testDataStateRead(t *testing.T, path string) *workdir.BackendStateFile {
t.Helper()
f, err := os.Open(path)
if err != nil {
t.Fatalf("err: %s", err)
}
defer f.Close()
raw, err := io.ReadAll(f)
if err != nil {
t.Fatalf("err: %s", err)
}
s, err := workdir.ParseBackendStateFile(raw)
if err != nil {
t.Fatalf("err: %s", err)
}
return s
}
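
// exampleBackendStateRoundTrip is a hypothetical sketch pairing
// testStateFileRemote with testDataStateRead: write a backend state file under
// the default data dir and parse it back. It assumes the caller has already
// changed into a temporary working directory, as noted above testStateFileRemote.
func exampleBackendStateRoundTrip(t *testing.T, s *workdir.BackendStateFile) *workdir.BackendStateFile {
	path := testStateFileRemote(t, s)
	return testDataStateRead(t, path)
}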
// testStateOutput tests that the state at the given path contains
// the expected state string.
func testStateOutput(t *testing.T, path string, expected string) {
t.Helper()
newState := testStateRead(t, path)
actual := strings.TrimSpace(newState.String())
expected = strings.TrimSpace(expected)
if actual != expected {
t.Fatalf("expected:\n%s\nactual:\n%s", expected, actual)
}
}
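// testProvider returns a MockProvider whose PlanResourceChange echoes the
// proposed new state back as the planned state and whose ReadResource
// returns the prior state unchanged.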
func testProvider() *testing_provider.MockProvider {
p := new(testing_provider.MockProvider)
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
resp.PlannedState = req.ProposedNewState
return resp
}
p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
return providers.ReadResourceResponse{
NewState: req.PriorState,
}
}
return p
}
func testTempFile(t *testing.T) string {
t.Helper()
return filepath.Join(testTempDir(t), "state.tfstate")
}
func testVarsFile(t *testing.T) string {
t.Helper()
return filepath.Join(testTempDir(t), "variables.tfvars")
}
func testTempDir(t *testing.T) string {
t.Helper()
d, err := filepath.EvalSymlinks(t.TempDir())
if err != nil {
t.Fatal(err)
}
return d
}
// testStdinPipe changes os.Stdin to be a pipe that sends the data from
// the reader before closing the pipe.
//
// The returned function should be deferred to properly clean up and restore
// the original stdin.
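//
// A minimal usage sketch (the answer text is illustrative only):
//
//	defer testStdinPipe(t, strings.NewReader("yes\n"))()
//	// ... run a command that reads a confirmation from os.Stdin ...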
func testStdinPipe(t *testing.T, src io.Reader) func() {
t.Helper()
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("err: %s", err)
}
// Modify stdin to point to our new pipe
old := os.Stdin
os.Stdin = r
// Copy the data from the reader to the pipe
go func() {
defer w.Close()
_, err := io.Copy(w, src)
if err != nil {
t.Errorf("error when copying data from testStdinPipe reader argument to stdin: %s", err)
}
}()
return func() {
// Close our read end
r.Close()
// Reset stdin
os.Stdin = old
}
}
// testStdoutCapture modifies os.Stdout to write to the given buffer. Note
// that this is generally not useful since the commands are configured to
// write to a cli.Ui, not to stdout directly. Commands like `console`,
// however, use the raw stdout.
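//
// A minimal usage sketch (illustrative only):
//
//	var buf bytes.Buffer
//	restore := testStdoutCapture(t, &buf)
//	// ... run a command that writes to the raw os.Stdout ...
//	restore()
//	got := buf.String()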
func testStdoutCapture(t *testing.T, dst io.Writer) func() {
t.Helper()
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("err: %s", err)
}
// Modify stdout
old := os.Stdout
os.Stdout = w
// Copy
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
defer r.Close()
io.Copy(dst, r)
}()
return func() {
// Close the writer end of the pipe
w.Sync()
w.Close()
// Reset stdout
os.Stdout = old
// Wait for the data copy to complete to avoid a race reading data
<-doneCh
}
}
// testInteractiveInput configures tests so that the given answers are
// returned, in order, in response to interactive prompts. The returned
// function must be deferred to clean up.
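//
// A minimal usage sketch (the answers are illustrative only):
//
//	defer testInteractiveInput(t, []string{"foo", "yes"})()
//	// ... run a command that asks two interactive questions ...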
func testInteractiveInput(t *testing.T, answers []string) func() {
t.Helper()
// Disable test mode so input is called
test = false
// Set up reader/writers
testInputResponse = answers
defaultInputReader = bytes.NewBufferString("")
defaultInputWriter = new(bytes.Buffer)
// Return the cleanup
return func() {
test = true
testInputResponse = nil
}
}
// testInputMap configures tests so that the given answers are returned for
// calls to Input when the matching question is asked. Each map key is the
// question ID used by the prompt.
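//
// A minimal usage sketch (the question ID and answer are illustrative only):
//
//	defer testInputMap(t, map[string]string{
//		"backend-migrate-copy-to-empty": "yes",
//	})()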
func testInputMap(t *testing.T, answers map[string]string) func() {
t.Helper()
// Disable test mode so input is called
test = false
// Set up reader/writers
defaultInputReader = bytes.NewBufferString("")
defaultInputWriter = new(bytes.Buffer)
// Setup answers
testInputResponse = nil
testInputResponseMap = answers
// Return the cleanup
return func() {
var unusedAnswers = testInputResponseMap
// First, clean up!
test = true
testInputResponseMap = nil
if len(unusedAnswers) > 0 {
t.Fatalf("expected no unused answers provided to command.testInputMap, got: %v", unusedAnswers)
}
}
}
// testBackendState is used to make a test HTTP server to test a configured
// backend. This returns the complete state that can be saved. Use
// `testStateFileRemote` to write the returned state.
//
// When using this function, the configuration fixture for the test must
// include an empty configuration block for the HTTP backend, like this:
//
// terraform {
// backend "http" {
// }
// }
//
// If such a block isn't present, or if it isn't empty, then an error will
// be returned about the backend configuration having changed and that
// "terraform init" must be run, since the test backend config cache created
// by this function contains the hash for an empty configuration.
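//
// A minimal usage sketch (illustrative only; testStateFileRemote is assumed
// to accept the returned backend state, per the note above):
//
//	dataState, srv := testBackendState(t, nil, 200)
//	defer srv.Close()
//	testStateFileRemote(t, dataState)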
func testBackendState(t *testing.T, s *states.State, c int) (*workdir.BackendStateFile, *httptest.Server) {
t.Helper()
var b64md5 string
buf := bytes.NewBuffer(nil)
cb := func(resp http.ResponseWriter, req *http.Request) {
if req.Method == "PUT" {
resp.WriteHeader(c)
return
}
if s == nil {
resp.WriteHeader(404)
return
}
resp.Header().Set("Content-MD5", b64md5)
resp.Write(buf.Bytes())
}
// If a state was given, make sure we calculate the proper b64md5
if s != nil {
err := statefile.Write(&statefile.File{State: s}, buf)
if err != nil {
t.Fatalf("err: %v", err)
}
md5 := md5.Sum(buf.Bytes())
b64md5 = base64.StdEncoding.EncodeToString(md5[:16])
}
srv := httptest.NewServer(http.HandlerFunc(cb))
backendConfig := &configs.Backend{
Type: "http",
Config: configs.SynthBody("<testBackendState>", map[string]cty.Value{}),
}
b := backendInit.Backend("http")()
configSchema := b.ConfigSchema()
hash := backendConfig.Hash(configSchema)
state := workdir.NewBackendStateFile()
state.Backend = &workdir.BackendConfigState{
Type: "http",
ConfigRaw: json.RawMessage(fmt.Sprintf(`{"address":%q}`, srv.URL)),
Hash: uint64(hash),
}
return state, srv
}
// testRemoteState is used to make a test HTTP server to return a given
// state file that can be used for testing legacy remote state.
//
// The return values are a [workdir.BackendStateFile] instance that should be
// written as the backend state and the test server that the returned backend
// state refers to.
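//
// A minimal usage sketch (illustrative only):
//
//	backendState, srv := testRemoteState(t, states.NewState(), 200)
//	defer srv.Close()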
func testRemoteState(t *testing.T, s *states.State, c int) (*workdir.BackendStateFile, *httptest.Server) {
t.Helper()
var b64md5 string
buf := bytes.NewBuffer(nil)
cb := func(resp http.ResponseWriter, req *http.Request) {
if req.Method == "PUT" {
resp.WriteHeader(c)
return
}
if s == nil {
resp.WriteHeader(404)
return
}
resp.Header().Set("Content-MD5", b64md5)
resp.Write(buf.Bytes())
}
retState := workdir.NewBackendStateFile()
srv := httptest.NewServer(http.HandlerFunc(cb))
b := &workdir.BackendConfigState{
Type: "http",
}
b.SetConfig(cty.ObjectVal(map[string]cty.Value{
"address": cty.StringVal(srv.URL),
}), &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"address": {
Type: cty.String,
Required: true,
},
},
})
retState.Backend = b
if s != nil {
err := statefile.Write(&statefile.File{State: s}, buf)
if err != nil {
t.Fatalf("failed to write initial state: %v", err)
}
}
return retState, srv
}
// testLockState calls a separate process to lock the state file at the given
// path. The returned deferFunc should be called by the caller to properly
// unlock the file. Since many tests change the working directory, the
// sourceDir argument must be supplied to locate the statelocker.go source.
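//
// A minimal usage sketch (testDataDir and statePath are assumed to name the
// test data directory and the state file to lock):
//
//	unlock, err := testLockState(t, testDataDir, statePath)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer unlock()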
func testLockState(t *testing.T, sourceDir, path string) (func(), error) {
// build and run the binary ourselves so we can quickly terminate it for cleanup
buildDir := t.TempDir()
source := filepath.Join(sourceDir, "statelocker.go")
lockBin := filepath.Join(buildDir, "statelocker")
cmd := exec.Command("go", "build", "-o", lockBin, source)
cmd.Dir = filepath.Dir(sourceDir)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("%s %s", err, out)
}
locker := exec.Command(lockBin, path)
pr, pw, err := os.Pipe()
if err != nil {
return nil, err
}
defer pr.Close()
defer pw.Close()
locker.Stderr = pw
locker.Stdout = pw
if err := locker.Start(); err != nil {
return nil, err
}
deferFunc := func() {
locker.Process.Signal(syscall.SIGTERM)
locker.Wait()
}
// wait for the process to lock
buf := make([]byte, 1024)
n, err := pr.Read(buf)
if err != nil {
return deferFunc, fmt.Errorf("read from statelocker returned: %s", err)
}
output := string(buf[:n])
if !strings.HasPrefix(output, "LOCKID") {
return deferFunc, fmt.Errorf("statelocker wrote: %s", string(buf[:n]))
}
return deferFunc, nil
}
// testCopyDir recursively copies a directory tree, attempting to preserve
// permissions. Source directory must exist, destination directory may exist
// but will be created if not; it should typically be a temporary directory,
// and thus already created using os.MkdirTemp or similar.
// Symlinks are followed, so their target contents are copied rather than the
// links themselves.
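//
// A minimal usage sketch (the fixture name is illustrative only):
//
//	td := t.TempDir()
//	testCopyDir(t, testFixturePath("plan"), td)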
func testCopyDir(t *testing.T, src, dst string) {
t.Helper()
src = filepath.Clean(src)
dst = filepath.Clean(dst)
si, err := os.Stat(src)
if err != nil {
t.Fatal(err)
}
if !si.IsDir() {
t.Fatal("source is not a directory")
}
_, err = os.Stat(dst)
if err != nil && !os.IsNotExist(err) {
t.Fatal(err)
}
err = os.MkdirAll(dst, si.Mode())
if err != nil {
t.Fatal(err)
}
entries, err := ioutil.ReadDir(src)
if err != nil {
t.Fatal(err)
}
for _, entry := range entries {
srcPath := filepath.Join(src, entry.Name())
dstPath := filepath.Join(dst, entry.Name())
// If the entry is a symlink, we copy the contents
for entry.Mode()&os.ModeSymlink != 0 {
target, err := os.Readlink(srcPath)
if err != nil {
t.Fatal(err)
}
entry, err = os.Stat(target)
if err != nil {
t.Fatal(err)
}
}
if entry.IsDir() {
testCopyDir(t, srcPath, dstPath)
} else {
err = copy.CopyFile(srcPath, dstPath)
if err != nil {
t.Fatal(err)
}
}
}
}
// normalizeJSON removes all insignificant whitespace from the given JSON buffer
// and returns it as a string for easier comparison.
func normalizeJSON(t *testing.T, src []byte) string {
t.Helper()
var buf bytes.Buffer
err := json.Compact(&buf, src)
if err != nil {
t.Fatalf("error normalizing JSON: %s", err)
}
return buf.String()
}
func mustResourceAddr(s string) addrs.ConfigResource {
addr, diags := addrs.ParseAbsResourceStr(s)
if diags.HasErrors() {
panic(diags.Err())
}
return addr.Config()
}
// This map from provider type name to namespace is used by the fake registry
// when called via LookupLegacyProvider. Providers not in this map will return
// a 404 Not Found error.
var legacyProviderNamespaces = map[string]string{
"foo": "hashicorp",
"bar": "hashicorp",
"baz": "terraform-providers",
"qux": "hashicorp",
}
// This map is used to mock the provider redirect feature.
var movedProviderNamespaces = map[string]string{
"qux": "acme",
}
// testServices starts up a local HTTP server running a fake provider registry
// service which responds only to discovery requests and legacy provider lookup
// API calls.
//
// The final return value is a function to call at the end of a test function
// to shut down the test server. After you call that function, the discovery
// object becomes useless.
func testServices(t *testing.T) (services *disco.Disco, cleanup func()) {
server := httptest.NewServer(http.HandlerFunc(fakeRegistryHandler))
services = disco.New()
services.ForceHostServices(svchost.Hostname("registry.terraform.io"), map[string]interface{}{
"providers.v1": server.URL + "/providers/v1/",
})
return services, func() {
server.Close()
}
}
// testRegistrySource is a wrapper around testServices that uses the created
// discovery object to produce a Source instance that is ready to use with the
// fake registry services.
//
// As with testServices, the final return value is a function to call at the end
// of your test in order to shut down the test server.
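//
// A minimal usage sketch:
//
//	source, close := testRegistrySource(t)
//	defer close()
//	// ... use source wherever a provider source is needed ...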
func testRegistrySource(t *testing.T) (source *getproviders.RegistrySource, cleanup func()) {
services, close := testServices(t)
source = getproviders.NewRegistrySource(services)
return source, close
}
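// fakeRegistryHandler responds to requests under /providers/v1/. It supports
// legacy namespace lookups of the form /providers/v1/-/NAME/versions, using
// the maps above to decide the namespace and any "moved_to" redirect, and it
// also serves version listings for the redirect target namespace. For
// example, a request for /providers/v1/-/qux/versions reports that "qux" now
// lives in the "acme" namespace.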
func fakeRegistryHandler(resp http.ResponseWriter, req *http.Request) {
path := req.URL.EscapedPath()
if !strings.HasPrefix(path, "/providers/v1/") {
resp.WriteHeader(404)
resp.Write([]byte(`not a provider registry endpoint`))
return
}
pathParts := strings.Split(path, "/")[3:]
if len(pathParts) != 3 {
resp.WriteHeader(404)
resp.Write([]byte(`unrecognized path scheme`))
return
}
if pathParts[2] != "versions" {
resp.WriteHeader(404)
resp.Write([]byte(`this registry only supports legacy namespace lookup requests`))
return
}
name := pathParts[1]
// Legacy lookup
if pathParts[0] == "-" {
if namespace, ok := legacyProviderNamespaces[name]; ok {
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
if movedNamespace, ok := movedProviderNamespaces[name]; ok {
resp.Write([]byte(fmt.Sprintf(`{"id":"%s/%s","moved_to":"%s/%s","versions":[{"version":"1.0.0","protocols":["4"]}]}`, namespace, name, movedNamespace, name)))
} else {
resp.Write([]byte(fmt.Sprintf(`{"id":"%s/%s","versions":[{"version":"1.0.0","protocols":["4"]}]}`, namespace, name)))
}
} else {
resp.WriteHeader(404)
resp.Write([]byte(`provider not found`))
}
return
}
// Also return versions for redirect target
if namespace, ok := movedProviderNamespaces[name]; ok && pathParts[0] == namespace {
resp.Header().Set("Content-Type", "application/json")
resp.WriteHeader(200)
resp.Write([]byte(fmt.Sprintf(`{"id":"%s/%s","versions":[{"version":"1.0.0","protocols":["4"]}]}`, namespace, name)))
} else {
resp.WriteHeader(404)
resp.Write([]byte(`provider not found`))
}
}
func testView(t *testing.T) (*views.View, func(*testing.T) *terminal.TestOutput) {
streams, done := terminal.StreamsForTesting(t)
return views.NewView(streams), done
}
// checkGoldenReference compares the given test output with a known "golden" output log
// located under the specified fixture path.
//
// If any of these tests fail, please communicate with HCP Terraform folks before resolving,
// as changes to UI output may also affect the behavior of HCP Terraform's structured run output.
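//
// A minimal usage sketch (the fixture name is illustrative only):
//
//	view, done := testView(t)
//	// ... run the command under test with machine-readable (JSON) output ...
//	checkGoldenReference(t, done(t), "apply-json")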
func checkGoldenReference(t *testing.T, output *terminal.TestOutput, fixturePathName string) {
t.Helper()
// Load the golden reference fixture
wantFile, err := os.Open(path.Join(testFixturePath(fixturePathName), "output.jsonlog"))
if err != nil {
t.Fatalf("failed to open output file: %s", err)
}
defer wantFile.Close()
wantBytes, err := ioutil.ReadAll(wantFile)
if err != nil {
t.Fatalf("failed to read output file: %s", err)
}
want := string(wantBytes)
got := output.Stdout()
// Split the output and the reference into lines so that we can compare
// messages
got = strings.TrimSuffix(got, "\n")
gotLines := strings.Split(got, "\n")
want = strings.TrimSuffix(want, "\n")
wantLines := strings.Split(want, "\n")
if len(gotLines) != len(wantLines) {
t.Errorf("unexpected number of log lines: got %d, want %d\n"+
"NOTE: This failure may indicate a UI change affecting the behavior of structured run output on HCP Terraform.\n"+
"Please communicate with HCP Terraform team before resolving", len(gotLines), len(wantLines))
}
// Verify that the log starts with a version message
type versionMessage struct {
Level string `json:"@level"`
Message string `json:"@message"`
Type string `json:"type"`
Terraform string `json:"terraform"`
UI string `json:"ui"`
}
var gotVersion versionMessage
if err := json.Unmarshal([]byte(gotLines[0]), &gotVersion); err != nil {
t.Errorf("failed to unmarshal version line: %s\n%s", err, gotLines[0])
}
wantVersion := versionMessage{
"info",
fmt.Sprintf("Terraform %s", version.String()),
"version",
version.String(),
views.JSON_UI_VERSION,
}
if !cmp.Equal(wantVersion, gotVersion) {
t.Errorf("unexpected first message:\n%s", cmp.Diff(wantVersion, gotVersion))
}
// Compare the rest of the lines against the golden reference
var gotLineMaps []map[string]interface{}
for i, line := range gotLines[1:] {
index := i + 1
var gotMap map[string]interface{}
if err := json.Unmarshal([]byte(line), &gotMap); err != nil {
t.Errorf("failed to unmarshal got line %d: %s\n%s", index, err, gotLines[index])
}
if _, ok := gotMap["@timestamp"]; !ok {
t.Errorf("missing @timestamp field in log: %s", gotLines[index])
}
delete(gotMap, "@timestamp")
gotLineMaps = append(gotLineMaps, gotMap)
}
var wantLineMaps []map[string]interface{}
for i, line := range wantLines[1:] {
index := i + 1
var wantMap map[string]interface{}
if err := json.Unmarshal([]byte(line), &wantMap); err != nil {
t.Errorf("failed to unmarshal want line %d: %s\n%s", index, err, gotLines[index])
}
wantLineMaps = append(wantLineMaps, wantMap)
}
if diff := cmp.Diff(wantLineMaps, gotLineMaps); diff != "" {
t.Errorf("wrong output lines\n%s\n"+
"NOTE: This failure may indicate a UI change affecting the behavior of structured run output on TFC.\n"+
"Please communicate with HCP Terraform team before resolving", diff)
}
}