package provider

import (
	"context"
	"encoding/json"
	"log"
	"strings"
	"time"

	"github.com/docker/docker/api/types/volume"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

const (
	volumeReadRefreshTimeout             = 30 * time.Second
	volumeReadRefreshWaitBeforeRefreshes = 5 * time.Second
	volumeReadRefreshDelay               = 2 * time.Second
)

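// resourceDockerVolume defines the schema and CRUD entry points for the
// docker_volume resource. A minimal configuration might look like this
// (illustrative only; see the generated website docs for the full reference):
//
//	resource "docker_volume" "shared" {
//	  name = "shared_volume"
//	}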
func resourceDockerVolume() *schema.Resource {
	return &schema.Resource{
		Description: "Creates and destroys a volume in Docker. This can be used alongside [docker_container](container.md) to prepare volumes that can be shared across containers.",

		CreateContext: resourceDockerVolumeCreate,
		ReadContext:   resourceDockerVolumeRead,
		DeleteContext: resourceDockerVolumeDelete,
		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},

		Schema: map[string]*schema.Schema{
			"name": {
				Type:        schema.TypeString,
				Description: "The name of the Docker volume (will be generated if not provided).",
				Optional:    true,
				Computed:    true,
				ForceNew:    true,
			},
			"labels": {
				Type:        schema.TypeSet,
				Description: "User-defined key/value metadata",
				Optional:    true,
				ForceNew:    true,
				Elem:        labelSchema,
			},
			"driver": {
				Type:        schema.TypeString,
				Description: "Driver type for the volume. Defaults to `local`.",
				Optional:    true,
				Computed:    true,
				ForceNew:    true,
			},
			"driver_opts": {
				Type:        schema.TypeMap,
				Description: "Options specific to the driver.",
				Optional:    true,
				ForceNew:    true,
			},
			"mountpoint": {
				Type:        schema.TypeString,
				Description: "The mountpoint of the volume.",
				Computed:    true,
			},
		},

		SchemaVersion: 1,
		StateUpgraders: []schema.StateUpgrader{
			{
				Version: 0,
				Type:    resourceDockerVolumeV0().CoreConfigSchema().ImpliedType(),
				Upgrade: func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
					return replaceLabelsMapFieldWithSetField(rawState), nil
				},
			},
		},
	}
}

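// resourceDockerVolumeCreate maps the Terraform configuration onto a Docker
// VolumeCreateBody, creates the volume via the Docker API, and stores the
// volume name as the resource ID.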
func resourceDockerVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*ProviderConfig).DockerClient

	createOpts := volume.VolumeCreateBody{}

	if v, ok := d.GetOk("name"); ok {
		createOpts.Name = v.(string)
	}
	if v, ok := d.GetOk("labels"); ok {
		createOpts.Labels = labelSetToMap(v.(*schema.Set))
	}
	if v, ok := d.GetOk("driver"); ok {
		createOpts.Driver = v.(string)
	}
	if v, ok := d.GetOk("driver_opts"); ok {
		createOpts.DriverOpts = mapTypeMapValsToString(v.(map[string]interface{}))
	}

	retVolume, err := client.VolumeCreate(ctx, createOpts)
	if err != nil {
		return diag.Errorf("Unable to create volume: %s", err)
	}

	d.SetId(retVolume.Name)
	return resourceDockerVolumeRead(ctx, d, meta)
}

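// resourceDockerVolumeRead refreshes the Terraform state from the Docker
// daemon by inspecting the volume identified by the resource ID.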
func resourceDockerVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*ProviderConfig).DockerClient

	vol, err := client.VolumeInspect(ctx, d.Id())
	if err != nil {
		return diag.Errorf("Unable to inspect volume: %s", err)
	}

	jsonObj, _ := json.MarshalIndent(vol, "", "\t")
	log.Printf("[DEBUG] Docker volume inspect from readFunc: %s", jsonObj)

	d.Set("name", vol.Name)
	d.Set("labels", mapToLabelSet(vol.Labels))
	d.Set("driver", vol.Driver)
	d.Set("driver_opts", vol.Options)
	d.Set("mountpoint", vol.Mountpoint)

	return nil
}

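// resourceDockerVolumeDelete force-removes the volume, polling until Docker
// reports it gone or volumeReadRefreshTimeout elapses. A volume still mounted
// by a container stays "in_use" until it is released.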
func resourceDockerVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	log.Printf("[INFO] Waiting for volume '%s' to be removed (timeout: %v)", d.Id(), volumeReadRefreshTimeout)

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"in_use"},
		Target:     []string{"removed"},
		Refresh:    resourceDockerVolumeRemoveRefreshFunc(d.Id(), meta),
		Timeout:    volumeReadRefreshTimeout,
		MinTimeout: volumeReadRefreshWaitBeforeRefreshes,
		Delay:      volumeReadRefreshDelay,
	}

	// Wait, catching any errors
	if _, err := stateConf.WaitForStateContext(ctx); err != nil {
		return diag.FromErr(err)
	}

	d.SetId("")
	return nil
}

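// resourceDockerVolumeRemoveRefreshFunc returns a StateRefreshFunc that
// attempts a force removal of the volume on every poll, reporting "in_use"
// while Docker still refuses the removal and "removed" once it succeeds.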
func resourceDockerVolumeRemoveRefreshFunc(
	volumeID string, meta interface{}) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		client := meta.(*ProviderConfig).DockerClient
		forceDelete := true

		if err := client.VolumeRemove(context.Background(), volumeID, forceDelete); err != nil {
			if strings.Contains(err.Error(), "volume is in use") { // store.IsInUse(err)
				log.Printf("[INFO] Volume with id '%v' is still in use", volumeID)
				return volumeID, "in_use", nil
			}
			log.Printf("[INFO] Removing volume with id '%v' caused an error: %v", volumeID, err)
			return nil, "", err
		}
		log.Printf("[INFO] Volume with id '%v' got removed", volumeID)
		return volumeID, "removed", nil
	}
}