Mirror of https://github.com/hashicorp/vault.git (synced 2026-02-18 18:38:08 -05:00)
CE changes for recovery mode docker tests (#24567)

* CE changes for recovery mode docker tests
* more conflicts
* move vars from ent
parent b1d3f9618e
commit 1384aefc69

2 changed files with 89 additions and 38 deletions
@@ -229,19 +229,6 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
 		return nil, "", err
 	}
 
-	var wg sync.WaitGroup
-	consumeLogs := false
-	var logStdout, logStderr io.Writer
-	if d.RunOptions.LogStdout != nil && d.RunOptions.LogStderr != nil {
-		consumeLogs = true
-		logStdout = d.RunOptions.LogStdout
-		logStderr = d.RunOptions.LogStderr
-	} else if d.RunOptions.LogConsumer != nil {
-		consumeLogs = true
-		logStdout = &LogConsumerWriter{d.RunOptions.LogConsumer}
-		logStderr = &LogConsumerWriter{d.RunOptions.LogConsumer}
-	}
-
 	// The waitgroup wg is used here to support some stuff in NewDockerCluster.
 	// We can't generate the PKI cert for the https listener until we know the
 	// container's address, meaning we must first start the container, then
@@ -252,28 +239,12 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
 	// passes in (which does all that PKI cert stuff) waits to see output from
 	// Vault on stdout/stderr before it sends the signal, and we don't want to
 	// run the PostStart until we've hooked into the docker logs.
-	if consumeLogs {
+	var wg sync.WaitGroup
+	logConsumer := d.createLogConsumer(result.Container.ID, &wg)
+
+	if logConsumer != nil {
 		wg.Add(1)
-		go func() {
-			// We must run inside a goroutine because we're using Follow:true,
-			// and StdCopy will block until the log stream is closed.
-			stream, err := d.DockerAPI.ContainerLogs(context.Background(), result.Container.ID, types.ContainerLogsOptions{
-				ShowStdout: true,
-				ShowStderr: true,
-				Timestamps: !d.RunOptions.OmitLogTimestamps,
-				Details:    true,
-				Follow:     true,
-			})
-			wg.Done()
-			if err != nil {
-				d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs: %v", err))
-			} else {
-				_, err := stdcopy.StdCopy(logStdout, logStderr, stream)
-				if err != nil {
-					d.RunOptions.LogConsumer(fmt.Sprintf("error demultiplexing docker logs: %v", err))
-				}
-			}
-		}()
+		go logConsumer()
 	}
 	wg.Wait()
@@ -336,6 +307,46 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
 	}, result.Container.ID, nil
 }
 
+// createLogConsumer returns a function to consume the logs of the container with the given ID.
+// If a wait group is given, `WaitGroup.Done()` will be called as soon as the
+// ContainerLogs Docker API call is done.
+// The returned function will block, so it should be run on a goroutine.
+func (d *Runner) createLogConsumer(containerId string, wg *sync.WaitGroup) func() {
+	if d.RunOptions.LogStdout != nil && d.RunOptions.LogStderr != nil {
+		return func() {
+			d.consumeLogs(containerId, wg, d.RunOptions.LogStdout, d.RunOptions.LogStderr)
+		}
+	}
+	if d.RunOptions.LogConsumer != nil {
+		return func() {
+			d.consumeLogs(containerId, wg, &LogConsumerWriter{d.RunOptions.LogConsumer}, &LogConsumerWriter{d.RunOptions.LogConsumer})
+		}
+	}
+	return nil
+}
+
+// consumeLogs is the function called by the function returned by createLogConsumer.
+func (d *Runner) consumeLogs(containerId string, wg *sync.WaitGroup, logStdout, logStderr io.Writer) {
+	// We must run inside a goroutine because we're using Follow:true,
+	// and StdCopy will block until the log stream is closed.
+	stream, err := d.DockerAPI.ContainerLogs(context.Background(), containerId, types.ContainerLogsOptions{
+		ShowStdout: true,
+		ShowStderr: true,
+		Timestamps: !d.RunOptions.OmitLogTimestamps,
+		Details:    true,
+		Follow:     true,
+	})
+	wg.Done()
+	if err != nil {
+		d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs: %v", err))
+	} else {
+		_, err := stdcopy.StdCopy(logStdout, logStderr, stream)
+		if err != nil {
+			d.RunOptions.LogConsumer(fmt.Sprintf("error demultiplexing docker logs: %v", err))
+		}
+	}
+}
+
 type Service struct {
 	Config ServiceConfig
 	Cleanup func()
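The doc comment above spells out the helper's contract: the returned closure blocks while following the log stream, and wg.Done() fires once the ContainerLogs request has been issued. A minimal sketch of the calling pattern, mirroring how this diff uses it in StartNewService and RestartContainerWithTimeout (the *Runner `d` and container ID `cid` are placeholders, not part of this diff):

	var wg sync.WaitGroup
	logConsumer := d.createLogConsumer(cid, &wg)
	if logConsumer != nil {
		// Count the consumer before starting it; consumeLogs calls wg.Done()
		// as soon as the ContainerLogs request has been made.
		wg.Add(1)
		go logConsumer()
	}
	// Block until the log stream is hooked up, so callers (e.g. PostStart in
	// StartNewService) only run after logs are being consumed.
	wg.Wait()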
@@ -508,6 +519,21 @@ func (d *Runner) Stop(ctx context.Context, containerID string) error {
 	return nil
 }
 
+func (d *Runner) RestartContainerWithTimeout(ctx context.Context, containerID string, timeout int) error {
+	err := d.DockerAPI.ContainerRestart(ctx, containerID, container.StopOptions{Timeout: &timeout})
+	if err != nil {
+		return fmt.Errorf("failed to restart container: %s", err)
+	}
+	var wg sync.WaitGroup
+	logConsumer := d.createLogConsumer(containerID, &wg)
+	if logConsumer != nil {
+		wg.Add(1)
+		go logConsumer()
+	}
+	// we don't really care about waiting for logs to start showing up, do we?
+	return nil
+}
+
 func (d *Runner) Restart(ctx context.Context, containerID string) error {
 	if err := d.DockerAPI.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil {
 		return err
@@ -63,6 +63,11 @@ const (
 	}
 }
 `
+	// recoveryModeFileName serves as a signal for the softhsmSetupScript to add the `-recovery` flag
+	// when launching Vault.
+	recoveryModeFileName     = "start-in-recovery-mode"
+	recoveryModeFileDir      = "/root/"
+	recoveryModeFileContents = "Script setup-softhsm.sh looks for this file and starts vault in recovery mode if it sees it"
 )
 
 type transitContainerConfig struct {
@@ -102,7 +107,8 @@ func createDockerImage(imageRepo, imageTag, containerFile string, bCtx dockhelpe
 	}
 
 	_, err = runner.BuildImage(context.Background(), containerFile, bCtx,
-		dockhelper.BuildRemove(true), dockhelper.BuildForceRemove(true),
+		dockhelper.BuildRemove(true),
+		dockhelper.BuildForceRemove(true),
+		dockhelper.BuildPullParent(true),
 		dockhelper.BuildTags([]string{fmt.Sprintf("%s:%s", imageRepo, imageTag)}))
 	if err != nil {
@@ -122,9 +128,10 @@ func createContainerWithConfig(config string, imageRepo, imageTag string, logCon
 		Cmd: []string{
 			"server", "-log-level=trace",
 		},
-		Ports:       []string{"8200/tcp"},
-		Env:         []string{fmt.Sprintf("VAULT_LICENSE=%s", os.Getenv("VAULT_LICENSE")), fmt.Sprintf("VAULT_LOCAL_CONFIG=%s", config)},
-		LogConsumer: logConsumer,
+		Ports:           []string{"8200/tcp"},
+		Env:             []string{fmt.Sprintf("VAULT_LICENSE=%s", os.Getenv("VAULT_LICENSE")), fmt.Sprintf("VAULT_LOCAL_CONFIG=%s", config)},
+		LogConsumer:     logConsumer,
+		DoNotAutoRemove: true,
 	})
 	if err != nil {
 		return nil, nil, fmt.Errorf("error creating runner: %w", err)
@@ -311,3 +318,21 @@ func copyConfigToContainer(containerID string, bCtx dockhelper.BuildContext, run
 
 	return nil
 }
+
+func copyRecoveryModeTriggerToContainer(containerID string, runner *dockhelper.Runner) error {
+	bCtx := dockhelper.NewBuildContext()
+	bCtx[recoveryModeFileName] = &dockhelper.FileContents{
+		Data: []byte(recoveryModeFileContents),
+		Mode: 0o644,
+	}
+	tar, err := bCtx.ToTarball()
+	if err != nil {
+		return fmt.Errorf("error creating config tarball: %w", err)
+	}
+
+	err = runner.DockerAPI.CopyToContainer(context.Background(), containerID, recoveryModeFileDir, tar, types.CopyToContainerOptions{})
+	if err != nil {
+		return fmt.Errorf("error copying recovery mode trigger file to container: %w", err)
+	}
+	return nil
+}
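Together with the runner changes above, this helper is the recovery-mode switch: the test drops the start-in-recovery-mode marker into /root/, and setup-softhsm.sh launches Vault with the -recovery flag on the next start. A sketch of a plausible call sequence, assuming `runner`, `containerID`, and a `t *testing.T` are in scope (the actual test wiring is not part of this diff):

	// Drop the marker file that setup-softhsm.sh checks for.
	if err := copyRecoveryModeTriggerToContainer(containerID, runner); err != nil {
		t.Fatal(err)
	}
	// Restart the container so the setup script re-runs and adds -recovery.
	if err := runner.RestartContainerWithTimeout(context.Background(), containerID, 10); err != nil {
		t.Fatal(err)
	}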