fix/move helpers (#170)

* chore: rename structures file

* fix: create volume migrators

* fix: move service migrators

* fix: move network migrators

* fix: rename container migrators

* chore: move label migrators and helpers

* chore: move container structures

* chore: move network structures

* fix: move container extrahosts flattener

* fix: move container ulimits flattener

* fix: move container devices flattener

* chore: move service mappers to structures file

* chore: move image helper funcs

* chore: add constants for network refresher funcs

* chore: move plugin crud funcs to the top

* chore: move registry image funcs to the top

* chore: add refresh func constants for volume

* chore: extract ipam config flatten func
This commit is contained in:
Manuel Vogel 2021-04-19 15:33:13 +02:00 committed by GitHub
parent 9f1bca1178
commit 2845519dce
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
23 changed files with 4157 additions and 4087 deletions

View file

@ -2,8 +2,10 @@ package provider
import (
"context"
"log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/network"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
@ -101,8 +103,16 @@ func dataSourceDockerNetworkRead(ctx context.Context, d *schema.ResourceData, me
d.Set("driver", network.Driver)
d.Set("options", network.Options)
d.Set("internal", network.Internal)
ipam := make([]ipamMap, len(network.IPAM.Config))
for i, config := range network.IPAM.Config {
if err = d.Set("ipam_config", flattenIpamConfig(network.IPAM.Config)); err != nil {
log.Printf("[WARN] failed to set ipam config from API: %s", err)
}
return nil
}
func flattenIpamConfig(in []network.IPAMConfig) []ipamMap {
ipam := make([]ipamMap, len(in))
for i, config := range in {
ipam[i] = ipamMap{
"subnet": config.Subnet,
"gateway": config.Gateway,
@ -110,7 +120,6 @@ func dataSourceDockerNetworkRead(ctx context.Context, d *schema.ResourceData, me
"ip_range": config.IPRange,
}
}
d.Set("ipam_config", ipam)
return nil
return ipam
}

View file

@ -1,49 +0,0 @@
package provider
func replaceLabelsMapFieldWithSetField(rawState map[string]interface{}) map[string]interface{} {
labelMapIFace := rawState["labels"]
if labelMapIFace != nil {
labelMap := labelMapIFace.(map[string]interface{})
rawState["labels"] = mapStringInterfaceToLabelList(labelMap)
} else {
rawState["labels"] = []interface{}{}
}
return rawState
}
func migrateContainerLabels(rawState map[string]interface{}) map[string]interface{} {
replaceLabelsMapFieldWithSetField(rawState)
m, ok := rawState["mounts"]
if !ok || m == nil {
// https://github.com/terraform-providers/terraform-provider-docker/issues/264
rawState["mounts"] = []interface{}{}
return rawState
}
mounts := m.([]interface{})
newMounts := make([]interface{}, len(mounts))
for i, mountI := range mounts {
mount := mountI.(map[string]interface{})
volumeOptionsList := mount["volume_options"].([]interface{})
if len(volumeOptionsList) != 0 {
replaceLabelsMapFieldWithSetField(volumeOptionsList[0].(map[string]interface{}))
}
newMounts[i] = mount
}
rawState["mounts"] = newMounts
return rawState
}
func migrateServiceLabels(rawState map[string]interface{}) map[string]interface{} {
replaceLabelsMapFieldWithSetField(rawState)
taskSpec := rawState["task_spec"].([]interface{})[0].(map[string]interface{})
containerSpec := taskSpec["container_spec"].([]interface{})[0].(map[string]interface{})
migrateContainerLabels(containerSpec)
return rawState
}

View file

@ -1,102 +0,0 @@
package provider
import (
"fmt"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func TestMigrateServiceLabelState_empty_labels(t *testing.T) {
v0State := map[string]interface{}{
"name": "volume-name",
"task_spec": []interface{}{
map[string]interface{}{
"container_spec": []interface{}{
map[string]interface{}{
"image": "repo:tag",
"mounts": []interface{}{
map[string]interface{}{
"target": "path/to/target",
"type": "bind",
"volume_options": []interface{}{
map[string]interface{}{},
},
},
},
},
},
},
},
}
// first validate that we build that correctly
v0Config := terraform.NewResourceConfigRaw(v0State)
diags := resourceDockerServiceV0().Validate(v0Config)
if diags.HasError() {
t.Error("test precondition failed - attempt to migrate an invalid v0 config")
return
}
v1State := migrateServiceLabels(v0State)
v1Config := terraform.NewResourceConfigRaw(v1State)
diags = resourceDockerService().Validate(v1Config)
if diags.HasError() {
fmt.Println(diags)
t.Error("migrated service config is invalid")
return
}
}
func TestMigrateServiceLabelState_with_labels(t *testing.T) {
v0State := map[string]interface{}{
"name": "volume-name",
"task_spec": []interface{}{
map[string]interface{}{
"container_spec": []interface{}{
map[string]interface{}{
"image": "repo:tag",
"labels": map[string]interface{}{
"type": "container",
"env": "dev",
},
"mounts": []interface{}{
map[string]interface{}{
"target": "path/to/target",
"type": "bind",
"volume_options": []interface{}{
map[string]interface{}{
"labels": map[string]interface{}{
"type": "mount",
},
},
},
},
},
},
},
},
},
"labels": map[string]interface{}{
"foo": "bar",
"env": "dev",
},
}
// first validate that we build that correctly
v0Config := terraform.NewResourceConfigRaw(v0State)
diags := resourceDockerServiceV0().Validate(v0Config)
if diags.HasError() {
t.Error("test precondition failed - attempt to migrate an invalid v0 config")
return
}
v1State := migrateServiceLabels(v0State)
v1Config := terraform.NewResourceConfigRaw(v1State)
diags = resourceDockerService().Validate(v1Config)
if diags.HasError() {
fmt.Println(diags)
t.Error("migrated service config is invalid")
return
}
}

View file

@ -7,13 +7,10 @@ import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"sort"
"strconv"
"strings"
"time"
@ -653,25 +650,12 @@ func resourceDockerContainerRead(ctx context.Context, d *schema.ResourceData, me
d.Set("mounts", getDockerContainerMounts(container))
// volumes
d.Set("tmpfs", container.HostConfig.Tmpfs)
// TODO mavogel: move all flatteners to structures_container.go
extraHosts := make([]interface{}, len(container.HostConfig.ExtraHosts))
for i, extraHost := range container.HostConfig.ExtraHosts {
extraHostSplit := strings.Split(extraHost, ":")
extraHosts[i] = map[string]interface{}{
"host": extraHostSplit[0],
"ip": extraHostSplit[1],
}
if err := d.Set("host", flattenExtraHosts(container.HostConfig.ExtraHosts)); err != nil {
log.Printf("[WARN] failed to set container hostconfig extrahosts from API: %s", err)
}
d.Set("host", extraHosts)
ulimits := make([]interface{}, len(container.HostConfig.Ulimits))
for i, ul := range container.HostConfig.Ulimits {
ulimits[i] = map[string]interface{}{
"name": ul.Name,
"soft": ul.Soft,
"hard": ul.Hard,
}
if err = d.Set("ulimit", flattenUlimits(container.HostConfig.Ulimits)); err != nil {
log.Printf("[WARN] failed to set container hostconfig ulimits from API: %s", err)
}
d.Set("ulimit", ulimits)
// We decided not to set the environment variables and labels
// because they are taken over from the Docker image and aren't scalar
@ -682,15 +666,9 @@ func resourceDockerContainerRead(ctx context.Context, d *schema.ResourceData, me
d.Set("links", container.HostConfig.Links)
d.Set("privileged", container.HostConfig.Privileged)
devices := make([]interface{}, len(container.HostConfig.Devices))
for i, device := range container.HostConfig.Devices {
devices[i] = map[string]interface{}{
"host_path": device.PathOnHost,
"container_path": device.PathInContainer,
"permissions": device.CgroupPermissions,
}
if err = d.Set("devices", flattenDevices(container.HostConfig.Devices)); err != nil {
log.Printf("[WARN] failed to set container hostconfig devices from API: %s", err)
}
d.Set("devices", devices)
// "destroy_grace_seconds" can't be imported
d.Set("memory", container.HostConfig.Memory/1024/1024)
if container.HostConfig.MemorySwap > 0 {
@ -818,116 +796,6 @@ func resourceDockerContainerDelete(ctx context.Context, d *schema.ResourceData,
return nil
}
// TODO extract to structures_container.go
type byPortAndProtocol []string
func (s byPortAndProtocol) Len() int {
return len(s)
}
func (s byPortAndProtocol) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byPortAndProtocol) Less(i, j int) bool {
iSplit := strings.Split(string(s[i]), "/")
iPort, _ := strconv.Atoi(iSplit[0])
jSplit := strings.Split(string(s[j]), "/")
jPort, _ := strconv.Atoi(jSplit[0])
return iPort < jPort
}
func flattenContainerPorts(in nat.PortMap) []interface{} {
out := make([]interface{}, 0)
var internalPortKeys []string
for portAndProtocolKeys := range in {
internalPortKeys = append(internalPortKeys, string(portAndProtocolKeys))
}
sort.Sort(byPortAndProtocol(internalPortKeys))
for _, portKey := range internalPortKeys {
m := make(map[string]interface{})
portBindings := in[nat.Port(portKey)]
for _, portBinding := range portBindings {
portProtocolSplit := strings.Split(string(portKey), "/")
convertedInternal, _ := strconv.Atoi(portProtocolSplit[0])
convertedExternal, _ := strconv.Atoi(portBinding.HostPort)
m["internal"] = convertedInternal
m["external"] = convertedExternal
m["ip"] = portBinding.HostIP
m["protocol"] = portProtocolSplit[1]
out = append(out, m)
}
}
return out
}
func flattenContainerNetworks(in *types.NetworkSettings) []interface{} {
out := make([]interface{}, 0)
if in == nil || in.Networks == nil || len(in.Networks) == 0 {
return out
}
networks := in.Networks
for networkName, networkData := range networks {
m := make(map[string]interface{})
m["network_name"] = networkName
m["ip_address"] = networkData.IPAddress
m["ip_prefix_length"] = networkData.IPPrefixLen
m["gateway"] = networkData.Gateway
m["global_ipv6_address"] = networkData.GlobalIPv6Address
m["global_ipv6_prefix_length"] = networkData.GlobalIPv6PrefixLen
m["ipv6_gateway"] = networkData.IPv6Gateway
out = append(out, m)
}
return out
}
// TODO move to separate flattener file
func stringListToStringSlice(stringList []interface{}) []string {
ret := []string{}
for _, v := range stringList {
if v == nil {
ret = append(ret, "")
continue
}
ret = append(ret, v.(string))
}
return ret
}
func stringSetToStringSlice(stringSet *schema.Set) []string {
ret := []string{}
if stringSet == nil {
return ret
}
for _, envVal := range stringSet.List() {
ret = append(ret, envVal.(string))
}
return ret
}
func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
mapped := make(map[string]string, len(typeMap))
for k, v := range typeMap {
mapped[k] = v.(string)
}
return mapped
}
// mapTypeMapValsToStringSlice maps a map to a slice with '=': e.g. foo = "bar" -> 'foo=bar'
func mapTypeMapValsToStringSlice(typeMap map[string]interface{}) []string {
mapped := make([]string, 0)
for k, v := range typeMap {
if len(k) > 0 {
mapped = append(mapped, k+"="+v.(string))
}
}
return mapped
}
func fetchDockerContainer(ctx context.Context, ID string, client *client.Client) (*types.Container, error) {
apiContainers, err := client.ContainerList(ctx, types.ContainerListOptions{All: true})
if err != nil {
@ -942,174 +810,3 @@ func fetchDockerContainer(ctx context.Context, ID string, client *client.Client)
return nil, nil
}
func portSetToDockerPorts(ports []interface{}) (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding) {
retExposedPorts := map[nat.Port]struct{}{}
retPortBindings := map[nat.Port][]nat.PortBinding{}
for _, portInt := range ports {
port := portInt.(map[string]interface{})
internal := port["internal"].(int)
protocol := port["protocol"].(string)
exposedPort := nat.Port(strconv.Itoa(internal) + "/" + protocol)
retExposedPorts[exposedPort] = struct{}{}
portBinding := nat.PortBinding{}
external, extOk := port["external"].(int)
if extOk {
portBinding.HostPort = strconv.Itoa(external)
}
ip, ipOk := port["ip"].(string)
if ipOk {
portBinding.HostIP = ip
}
if extOk || ipOk {
retPortBindings[exposedPort] = append(retPortBindings[exposedPort], portBinding)
}
}
return retExposedPorts, retPortBindings
}
func ulimitsToDockerUlimits(extraUlimits *schema.Set) []*units.Ulimit {
retExtraUlimits := []*units.Ulimit{}
for _, ulimitInt := range extraUlimits.List() {
ulimits := ulimitInt.(map[string]interface{})
u := &units.Ulimit{
Name: ulimits["name"].(string),
Soft: int64(ulimits["soft"].(int)),
Hard: int64(ulimits["hard"].(int)),
}
retExtraUlimits = append(retExtraUlimits, u)
}
return retExtraUlimits
}
func extraHostsSetToDockerExtraHosts(extraHosts *schema.Set) []string {
retExtraHosts := []string{}
for _, hostInt := range extraHosts.List() {
host := hostInt.(map[string]interface{})
ip := host["ip"].(string)
hostname := host["host"].(string)
retExtraHosts = append(retExtraHosts, hostname+":"+ip)
}
return retExtraHosts
}
func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []string, []string, error) {
retVolumeMap := map[string]struct{}{}
retHostConfigBinds := []string{}
retVolumeFromContainers := []string{}
for _, volumeInt := range volumes.List() {
volume := volumeInt.(map[string]interface{})
fromContainer := volume["from_container"].(string)
containerPath := volume["container_path"].(string)
volumeName := volume["volume_name"].(string)
if len(volumeName) == 0 {
volumeName = volume["host_path"].(string)
}
readOnly := volume["read_only"].(bool)
switch {
case len(fromContainer) == 0 && len(containerPath) == 0:
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Volume entry without container path or source container")
case len(fromContainer) != 0 && len(containerPath) != 0:
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Both a container and a path specified in a volume entry")
case len(fromContainer) != 0:
retVolumeFromContainers = append(retVolumeFromContainers, fromContainer)
case len(volumeName) != 0:
readWrite := "rw"
if readOnly {
readWrite = "ro"
}
retVolumeMap[containerPath] = struct{}{}
retHostConfigBinds = append(retHostConfigBinds, volumeName+":"+containerPath+":"+readWrite)
default:
retVolumeMap[containerPath] = struct{}{}
}
}
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, nil
}
func deviceSetToDockerDevices(devices *schema.Set) []container.DeviceMapping {
retDevices := []container.DeviceMapping{}
for _, deviceInt := range devices.List() {
deviceMap := deviceInt.(map[string]interface{})
hostPath := deviceMap["host_path"].(string)
containerPath := deviceMap["container_path"].(string)
permissions := deviceMap["permissions"].(string)
switch {
case len(containerPath) == 0:
containerPath = hostPath
fallthrough
case len(permissions) == 0:
permissions = "rwm"
}
device := container.DeviceMapping{
PathOnHost: hostPath,
PathInContainer: containerPath,
CgroupPermissions: permissions,
}
retDevices = append(retDevices, device)
}
return retDevices
}
func getDockerContainerMounts(container types.ContainerJSON) []map[string]interface{} {
mounts := []map[string]interface{}{}
for _, mount := range container.HostConfig.Mounts {
m := map[string]interface{}{
"target": mount.Target,
"source": mount.Source,
"type": mount.Type,
"read_only": mount.ReadOnly,
}
if mount.BindOptions != nil {
m["bind_options"] = []map[string]interface{}{
{
"propagation": mount.BindOptions.Propagation,
},
}
}
if mount.VolumeOptions != nil {
labels := []map[string]string{}
for k, v := range mount.VolumeOptions.Labels {
labels = append(labels, map[string]string{
"label": k,
"volume": v,
})
}
m["volume_options"] = []map[string]interface{}{
{
"no_copy": mount.VolumeOptions.NoCopy,
"labels": labels,
"driver_name": mount.VolumeOptions.DriverConfig.Name,
"driver_options": mount.VolumeOptions.DriverConfig.Options,
},
}
}
if mount.TmpfsOptions != nil {
m["tmpfs_options"] = []map[string]interface{}{
{
"size_bytes": mount.TmpfsOptions.SizeBytes,
"mode": mount.TmpfsOptions.Mode,
},
}
}
mounts = append(mounts, m)
}
return mounts
}

View file

@ -1,117 +0,0 @@
package provider
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func resourceDockerContainerMigrateState(
v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
switch v {
case 0:
log.Println("[INFO] Found Docker Container State v0; migrating to v1")
return migrateDockerContainerMigrateStateV0toV1(is, meta)
default:
return is, fmt.Errorf("Unexpected schema version: %d", v)
}
}
func migrateDockerContainerMigrateStateV0toV1(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
if is.Empty() {
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
return is, nil
}
log.Printf("[DEBUG] Docker Container Attributes before Migration: %#v", is.Attributes)
err := updateV0ToV1PortsOrder(is, meta)
log.Printf("[DEBUG] Docker Container Attributes after State Migration: %#v", is.Attributes)
return is, err
}
type mappedPort struct {
internal int
external int
ip string
protocol string
}
type byPort []mappedPort
func (s byPort) Len() int {
return len(s)
}
func (s byPort) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byPort) Less(i, j int) bool {
return s[i].internal < s[j].internal
}
func updateV0ToV1PortsOrder(is *terraform.InstanceState, meta interface{}) error {
reader := &schema.MapFieldReader{
Schema: resourceDockerContainer().Schema,
Map: schema.BasicMapReader(is.Attributes),
}
writer := &schema.MapFieldWriter{
Schema: resourceDockerContainer().Schema,
}
result, err := reader.ReadField([]string{"ports"})
if err != nil {
return err
}
if result.Value == nil {
return nil
}
// map the ports into a struct, so they can be sorted easily
portsMapped := make([]mappedPort, 0)
portsRaw := result.Value.([]interface{})
for _, portRaw := range portsRaw {
if portRaw == nil {
continue
}
portTyped := portRaw.(map[string]interface{})
portMapped := mappedPort{
internal: portTyped["internal"].(int),
external: portTyped["external"].(int),
ip: portTyped["ip"].(string),
protocol: portTyped["protocol"].(string),
}
portsMapped = append(portsMapped, portMapped)
}
sort.Sort(byPort(portsMapped))
// map the sorted ports to an output structure tf can write
outputPorts := make([]interface{}, 0)
for _, mappedPort := range portsMapped {
outputPort := make(map[string]interface{})
outputPort["internal"] = mappedPort.internal
outputPort["external"] = mappedPort.external
outputPort["ip"] = mappedPort.ip
outputPort["protocol"] = mappedPort.protocol
outputPorts = append(outputPorts, outputPort)
}
// store them back to state
if err := writer.WriteField([]string{"ports"}, outputPorts); err != nil {
return err
}
for k, v := range writer.Map() {
is.Attributes[k] = v
}
return nil
}

View file

@ -1,6 +1,13 @@
package provider
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func resourceDockerContainerV1() *schema.Resource {
return &schema.Resource{
@ -792,3 +799,148 @@ func resourceDockerContainerV1() *schema.Resource {
},
}
}
func resourceDockerContainerMigrateState(
v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
switch v {
case 0:
log.Println("[INFO] Found Docker Container State v0; migrating to v1")
return migrateDockerContainerMigrateStateV0toV1(is, meta)
default:
return is, fmt.Errorf("Unexpected schema version: %d", v)
}
}
func migrateDockerContainerMigrateStateV0toV1(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
if is.Empty() {
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
return is, nil
}
log.Printf("[DEBUG] Docker Container Attributes before Migration: %#v", is.Attributes)
err := updateV0ToV1PortsOrder(is, meta)
log.Printf("[DEBUG] Docker Container Attributes after State Migration: %#v", is.Attributes)
return is, err
}
type mappedPort struct {
internal int
external int
ip string
protocol string
}
type byPort []mappedPort
func (s byPort) Len() int {
return len(s)
}
func (s byPort) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byPort) Less(i, j int) bool {
return s[i].internal < s[j].internal
}
func updateV0ToV1PortsOrder(is *terraform.InstanceState, meta interface{}) error {
reader := &schema.MapFieldReader{
Schema: resourceDockerContainer().Schema,
Map: schema.BasicMapReader(is.Attributes),
}
writer := &schema.MapFieldWriter{
Schema: resourceDockerContainer().Schema,
}
result, err := reader.ReadField([]string{"ports"})
if err != nil {
return err
}
if result.Value == nil {
return nil
}
// map the ports into a struct, so they can be sorted easily
portsMapped := make([]mappedPort, 0)
portsRaw := result.Value.([]interface{})
for _, portRaw := range portsRaw {
if portRaw == nil {
continue
}
portTyped := portRaw.(map[string]interface{})
portMapped := mappedPort{
internal: portTyped["internal"].(int),
external: portTyped["external"].(int),
ip: portTyped["ip"].(string),
protocol: portTyped["protocol"].(string),
}
portsMapped = append(portsMapped, portMapped)
}
sort.Sort(byPort(portsMapped))
// map the sorted ports to an output structure tf can write
outputPorts := make([]interface{}, 0)
for _, mappedPort := range portsMapped {
outputPort := make(map[string]interface{})
outputPort["internal"] = mappedPort.internal
outputPort["external"] = mappedPort.external
outputPort["ip"] = mappedPort.ip
outputPort["protocol"] = mappedPort.protocol
outputPorts = append(outputPorts, outputPort)
}
// store them back to state
if err := writer.WriteField([]string{"ports"}, outputPorts); err != nil {
return err
}
for k, v := range writer.Map() {
is.Attributes[k] = v
}
return nil
}
func replaceLabelsMapFieldWithSetField(rawState map[string]interface{}) map[string]interface{} {
labelMapIFace := rawState["labels"]
if labelMapIFace != nil {
labelMap := labelMapIFace.(map[string]interface{})
rawState["labels"] = mapStringInterfaceToLabelList(labelMap)
} else {
rawState["labels"] = []interface{}{}
}
return rawState
}
func migrateContainerLabels(rawState map[string]interface{}) map[string]interface{} {
replaceLabelsMapFieldWithSetField(rawState)
m, ok := rawState["mounts"]
if !ok || m == nil {
// https://github.com/terraform-providers/terraform-provider-docker/issues/264
rawState["mounts"] = []interface{}{}
return rawState
}
mounts := m.([]interface{})
newMounts := make([]interface{}, len(mounts))
for i, mountI := range mounts {
mount := mountI.(map[string]interface{})
volumeOptionsList := mount["volume_options"].([]interface{})
if len(volumeOptionsList) != 0 {
replaceLabelsMapFieldWithSetField(volumeOptionsList[0].(map[string]interface{}))
}
newMounts[i] = mount
}
rawState["mounts"] = newMounts
return rawState
}

View file

@ -0,0 +1,332 @@
package provider
import (
"errors"
"sort"
"strconv"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
type byPortAndProtocol []string
func (s byPortAndProtocol) Len() int {
return len(s)
}
func (s byPortAndProtocol) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byPortAndProtocol) Less(i, j int) bool {
iSplit := strings.Split(string(s[i]), "/")
iPort, _ := strconv.Atoi(iSplit[0])
jSplit := strings.Split(string(s[j]), "/")
jPort, _ := strconv.Atoi(jSplit[0])
return iPort < jPort
}
func flattenContainerPorts(in nat.PortMap) []interface{} {
out := make([]interface{}, 0)
var internalPortKeys []string
for portAndProtocolKeys := range in {
internalPortKeys = append(internalPortKeys, string(portAndProtocolKeys))
}
sort.Sort(byPortAndProtocol(internalPortKeys))
for _, portKey := range internalPortKeys {
m := make(map[string]interface{})
portBindings := in[nat.Port(portKey)]
for _, portBinding := range portBindings {
portProtocolSplit := strings.Split(string(portKey), "/")
convertedInternal, _ := strconv.Atoi(portProtocolSplit[0])
convertedExternal, _ := strconv.Atoi(portBinding.HostPort)
m["internal"] = convertedInternal
m["external"] = convertedExternal
m["ip"] = portBinding.HostIP
m["protocol"] = portProtocolSplit[1]
out = append(out, m)
}
}
return out
}
func flattenContainerNetworks(in *types.NetworkSettings) []interface{} {
out := make([]interface{}, 0)
if in == nil || in.Networks == nil || len(in.Networks) == 0 {
return out
}
networks := in.Networks
for networkName, networkData := range networks {
m := make(map[string]interface{})
m["network_name"] = networkName
m["ip_address"] = networkData.IPAddress
m["ip_prefix_length"] = networkData.IPPrefixLen
m["gateway"] = networkData.Gateway
m["global_ipv6_address"] = networkData.GlobalIPv6Address
m["global_ipv6_prefix_length"] = networkData.GlobalIPv6PrefixLen
m["ipv6_gateway"] = networkData.IPv6Gateway
out = append(out, m)
}
return out
}
func stringListToStringSlice(stringList []interface{}) []string {
ret := []string{}
for _, v := range stringList {
if v == nil {
ret = append(ret, "")
continue
}
ret = append(ret, v.(string))
}
return ret
}
func stringSetToStringSlice(stringSet *schema.Set) []string {
ret := []string{}
if stringSet == nil {
return ret
}
for _, envVal := range stringSet.List() {
ret = append(ret, envVal.(string))
}
return ret
}
func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
mapped := make(map[string]string, len(typeMap))
for k, v := range typeMap {
mapped[k] = v.(string)
}
return mapped
}
// mapTypeMapValsToStringSlice maps a map to a slice with '=': e.g. foo = "bar" -> 'foo=bar'
func mapTypeMapValsToStringSlice(typeMap map[string]interface{}) []string {
mapped := make([]string, 0)
for k, v := range typeMap {
if len(k) > 0 {
mapped = append(mapped, k+"="+v.(string))
}
}
return mapped
}
func portSetToDockerPorts(ports []interface{}) (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding) {
retExposedPorts := map[nat.Port]struct{}{}
retPortBindings := map[nat.Port][]nat.PortBinding{}
for _, portInt := range ports {
port := portInt.(map[string]interface{})
internal := port["internal"].(int)
protocol := port["protocol"].(string)
exposedPort := nat.Port(strconv.Itoa(internal) + "/" + protocol)
retExposedPorts[exposedPort] = struct{}{}
portBinding := nat.PortBinding{}
external, extOk := port["external"].(int)
if extOk {
portBinding.HostPort = strconv.Itoa(external)
}
ip, ipOk := port["ip"].(string)
if ipOk {
portBinding.HostIP = ip
}
if extOk || ipOk {
retPortBindings[exposedPort] = append(retPortBindings[exposedPort], portBinding)
}
}
return retExposedPorts, retPortBindings
}
func ulimitsToDockerUlimits(extraUlimits *schema.Set) []*units.Ulimit {
retExtraUlimits := []*units.Ulimit{}
for _, ulimitInt := range extraUlimits.List() {
ulimits := ulimitInt.(map[string]interface{})
u := &units.Ulimit{
Name: ulimits["name"].(string),
Soft: int64(ulimits["soft"].(int)),
Hard: int64(ulimits["hard"].(int)),
}
retExtraUlimits = append(retExtraUlimits, u)
}
return retExtraUlimits
}
func extraHostsSetToDockerExtraHosts(extraHosts *schema.Set) []string {
retExtraHosts := []string{}
for _, hostInt := range extraHosts.List() {
host := hostInt.(map[string]interface{})
ip := host["ip"].(string)
hostname := host["host"].(string)
retExtraHosts = append(retExtraHosts, hostname+":"+ip)
}
return retExtraHosts
}
func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []string, []string, error) {
retVolumeMap := map[string]struct{}{}
retHostConfigBinds := []string{}
retVolumeFromContainers := []string{}
for _, volumeInt := range volumes.List() {
volume := volumeInt.(map[string]interface{})
fromContainer := volume["from_container"].(string)
containerPath := volume["container_path"].(string)
volumeName := volume["volume_name"].(string)
if len(volumeName) == 0 {
volumeName = volume["host_path"].(string)
}
readOnly := volume["read_only"].(bool)
switch {
case len(fromContainer) == 0 && len(containerPath) == 0:
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Volume entry without container path or source container")
case len(fromContainer) != 0 && len(containerPath) != 0:
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Both a container and a path specified in a volume entry")
case len(fromContainer) != 0:
retVolumeFromContainers = append(retVolumeFromContainers, fromContainer)
case len(volumeName) != 0:
readWrite := "rw"
if readOnly {
readWrite = "ro"
}
retVolumeMap[containerPath] = struct{}{}
retHostConfigBinds = append(retHostConfigBinds, volumeName+":"+containerPath+":"+readWrite)
default:
retVolumeMap[containerPath] = struct{}{}
}
}
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, nil
}
func deviceSetToDockerDevices(devices *schema.Set) []container.DeviceMapping {
retDevices := []container.DeviceMapping{}
for _, deviceInt := range devices.List() {
deviceMap := deviceInt.(map[string]interface{})
hostPath := deviceMap["host_path"].(string)
containerPath := deviceMap["container_path"].(string)
permissions := deviceMap["permissions"].(string)
switch {
case len(containerPath) == 0:
containerPath = hostPath
fallthrough
case len(permissions) == 0:
permissions = "rwm"
}
device := container.DeviceMapping{
PathOnHost: hostPath,
PathInContainer: containerPath,
CgroupPermissions: permissions,
}
retDevices = append(retDevices, device)
}
return retDevices
}
func getDockerContainerMounts(container types.ContainerJSON) []map[string]interface{} {
mounts := []map[string]interface{}{}
for _, mount := range container.HostConfig.Mounts {
m := map[string]interface{}{
"target": mount.Target,
"source": mount.Source,
"type": mount.Type,
"read_only": mount.ReadOnly,
}
if mount.BindOptions != nil {
m["bind_options"] = []map[string]interface{}{
{
"propagation": mount.BindOptions.Propagation,
},
}
}
if mount.VolumeOptions != nil {
labels := []map[string]string{}
for k, v := range mount.VolumeOptions.Labels {
labels = append(labels, map[string]string{
"label": k,
"volume": v,
})
}
m["volume_options"] = []map[string]interface{}{
{
"no_copy": mount.VolumeOptions.NoCopy,
"labels": labels,
"driver_name": mount.VolumeOptions.DriverConfig.Name,
"driver_options": mount.VolumeOptions.DriverConfig.Options,
},
}
}
if mount.TmpfsOptions != nil {
m["tmpfs_options"] = []map[string]interface{}{
{
"size_bytes": mount.TmpfsOptions.SizeBytes,
"mode": mount.TmpfsOptions.Mode,
},
}
}
mounts = append(mounts, m)
}
return mounts
}
func flattenExtraHosts(in []string) []interface{} {
extraHosts := make([]interface{}, len(in))
for i, extraHost := range in {
extraHostSplit := strings.Split(extraHost, ":")
extraHosts[i] = map[string]interface{}{
"host": extraHostSplit[0],
"ip": extraHostSplit[1],
}
}
return extraHosts
}
func flattenUlimits(in []*units.Ulimit) []interface{} {
ulimits := make([]interface{}, len(in))
for i, ul := range in {
ulimits[i] = map[string]interface{}{
"name": ul.Name,
"soft": ul.Soft,
"hard": ul.Hard,
}
}
return ulimits
}
func flattenDevices(in []container.DeviceMapping) []interface{} {
devices := make([]interface{}, len(in))
for i, device := range in {
devices[i] = map[string]interface{}{
"host_path": device.PathOnHost,
"container_path": device.PathInContainer,
"permissions": device.CgroupPermissions,
}
}
return devices
}

View file

@ -20,39 +20,6 @@ import (
"github.com/mitchellh/go-homedir"
)
func getBuildContext(filePath string, excludes []string) io.Reader {
filePath, _ = homedir.Expand(filePath)
ctx, _ := archive.TarWithOptions(filePath, &archive.TarOptions{
ExcludePatterns: excludes,
})
return ctx
}
func decodeBuildMessages(response types.ImageBuildResponse) (string, error) {
buf := new(bytes.Buffer)
buildErr := error(nil)
dec := json.NewDecoder(response.Body)
for dec.More() {
var m jsonmessage.JSONMessage
err := dec.Decode(&m)
if err != nil {
return buf.String(), fmt.Errorf("Problem decoding message from docker daemon: %s", err)
}
if err := m.Display(buf, false); err != nil {
return "", err
}
if m.Error != nil {
buildErr = fmt.Errorf("Unable to build image")
}
}
log.Printf("[DEBUG] %s", buf.String())
return buf.String(), buildErr
}
func resourceDockerImageCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
client := meta.(*ProviderConfig).DockerClient
imageName := d.Get("name").(string)
@ -124,6 +91,7 @@ func resourceDockerImageDelete(ctx context.Context, d *schema.ResourceData, meta
return nil
}
// Helpers
func searchLocalImages(ctx context.Context, client *client.Client, data Data, imageName string) *types.ImageSummary {
imageInspect, _, err := client.ImageInspectWithRaw(ctx, imageName)
if err != nil {
@ -360,3 +328,36 @@ func buildDockerImage(ctx context.Context, rawBuild map[string]interface{}, imag
}
return nil
}
// getBuildContext tars the given directory into an in-memory build context
// for the Docker daemon, honoring the given .dockerignore-style exclude
// patterns. The path may start with "~", which is expanded to the home dir.
//
// NOTE(review): errors from homedir.Expand and archive.TarWithOptions are
// discarded here; a bad path only surfaces later when the daemon reads the
// context — consider propagating them instead.
func getBuildContext(filePath string, excludes []string) io.Reader {
	filePath, _ = homedir.Expand(filePath)
	ctx, _ := archive.TarWithOptions(filePath, &archive.TarOptions{
		ExcludePatterns: excludes,
	})
	return ctx
}
// decodeBuildMessages consumes the JSON message stream the daemon returns
// for an image build, rendering each message into a buffer. It returns the
// accumulated human-readable build log and a non-nil error if the stream
// could not be decoded or any message reported a build error.
func decodeBuildMessages(response types.ImageBuildResponse) (string, error) {
	buf := new(bytes.Buffer)
	buildErr := error(nil)
	dec := json.NewDecoder(response.Body)
	for dec.More() {
		var m jsonmessage.JSONMessage
		err := dec.Decode(&m)
		if err != nil {
			return buf.String(), fmt.Errorf("Problem decoding message from docker daemon: %s", err)
		}
		// Render the message into buf; the second argument presumably
		// toggles terminal control sequences — confirm against the
		// jsonmessage API before relying on it.
		if err := m.Display(buf, false); err != nil {
			return "", err
		}
		if m.Error != nil {
			// Remember that the build failed but keep draining the
			// stream so the full log is captured for the caller.
			buildErr = fmt.Errorf("Unable to build image")
		}
	}
	log.Printf("[DEBUG] %s", buf.String())
	return buf.String(), buildErr
}

View file

@ -138,118 +138,6 @@ func resourceDockerNetwork() *schema.Resource {
}
}
func resourceDockerNetworkV0() *schema.Resource {
return &schema.Resource{
// This is only used for state migration, so the CRUD
// callbacks are no longer relevant
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"labels": {
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
},
"check_duplicate": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
},
"driver": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: "bridge",
},
"options": {
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
Computed: true,
},
"internal": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
ForceNew: true,
},
"attachable": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
},
"ingress": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
},
"ipv6": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
},
"ipam_driver": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: "default",
},
"ipam_config": {
Type: schema.TypeSet,
Optional: true,
Computed: true,
ForceNew: true,
// DiffSuppressFunc: suppressIfIPAMConfigWithIpv6Changes(),
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"subnet": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"ip_range": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"gateway": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"aux_address": {
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
},
},
},
},
"scope": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func suppressIfIPAMConfigWithIpv6Changes() schema.SchemaDiffSuppressFunc { //nolint:deadcode,unused
return func(k, old, new string, d *schema.ResourceData) bool {
// the initial case when the resource is created

View file

@ -14,6 +14,15 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (
networkReadRefreshTimeout = 30 * time.Second
networkReadRefreshWaitBeforeRefreshes = 5 * time.Second
networkReadRefreshDelay = 2 * time.Second
networkRemoveRefreshTimeout = 30 * time.Second
networkRemoveRefreshWaitBeforeRefreshes = 5 * time.Second
networkRemoveRefreshDelay = 2 * time.Second
)
func resourceDockerNetworkCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
client := meta.(*ProviderConfig).DockerClient
@ -69,15 +78,15 @@ func resourceDockerNetworkCreate(ctx context.Context, d *schema.ResourceData, me
}
func resourceDockerNetworkRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
log.Printf("[INFO] Waiting for network: '%s' to expose all fields: max '%v seconds'", d.Id(), 30)
log.Printf("[INFO] Waiting for network: '%s' to expose all fields: max '%v seconds'", d.Id(), networkReadRefreshTimeout)
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
Target: []string{"all_fields", "removed"},
Refresh: resourceDockerNetworkReadRefreshFunc(ctx, d, meta),
Timeout: 30 * time.Second,
MinTimeout: 5 * time.Second,
Delay: 2 * time.Second,
Timeout: networkReadRefreshTimeout,
MinTimeout: networkReadRefreshWaitBeforeRefreshes,
Delay: networkReadRefreshDelay,
}
// Wait, catching any errors
@ -90,15 +99,15 @@ func resourceDockerNetworkRead(ctx context.Context, d *schema.ResourceData, meta
}
func resourceDockerNetworkDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
log.Printf("[INFO] Waiting for network: '%s' to be removed: max '%v seconds'", d.Id(), 30)
log.Printf("[INFO] Waiting for network: '%s' to be removed: max '%v seconds'", d.Id(), networkRemoveRefreshTimeout)
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
Target: []string{"removed"},
Refresh: resourceDockerNetworkRemoveRefreshFunc(ctx, d, meta),
Timeout: 30 * time.Second,
MinTimeout: 5 * time.Second,
Delay: 2 * time.Second,
Timeout: networkRemoveRefreshTimeout,
MinTimeout: networkRemoveRefreshWaitBeforeRefreshes,
Delay: networkRemoveRefreshDelay,
}
// Wait, catching any errors
@ -201,35 +210,3 @@ func resourceDockerNetworkRemoveRefreshFunc(ctx context.Context,
return networkID, "removed", nil
}
}
// TODO mavogel: separate structure file
// TODO 2: seems like we can replace the set hash generation with plain lists -> #219
func flattenIpamConfigSpec(in []network.IPAMConfig) *schema.Set { // []interface{} {
out := make([]interface{}, len(in))
for i, v := range in {
log.Printf("[DEBUG] flatten ipam %d: %#v", i, v)
m := make(map[string]interface{})
if len(v.Subnet) > 0 {
m["subnet"] = v.Subnet
}
if len(v.IPRange) > 0 {
m["ip_range"] = v.IPRange
}
if len(v.Gateway) > 0 {
m["gateway"] = v.Gateway
}
if len(v.AuxAddress) > 0 {
aux := make(map[string]interface{}, len(v.AuxAddress))
for ka, va := range v.AuxAddress {
aux[ka] = va
}
m["aux_address"] = aux
}
out[i] = m
}
// log.Printf("[INFO] flatten ipam out: %#v", out)
imapConfigsResource := resourceDockerNetwork().Schema["ipam_config"].Elem.(*schema.Resource)
f := schema.HashResource(imapConfigsResource)
return schema.NewSet(f, out)
// return out
}

View file

@ -0,0 +1,115 @@
package provider
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
// resourceDockerNetworkV0 returns version 0 of the docker_network resource
// schema. It exists solely so the state migrator can decode old states;
// keep it frozen — changes belong in the current schema, not here.
func resourceDockerNetworkV0() *schema.Resource {
	return &schema.Resource{
		// This is only used for state migration, so the CRUD
		// callbacks are no longer relevant
		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"labels": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
			"check_duplicate": {
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},
			"driver": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Default:  "bridge",
			},
			"options": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},
			"internal": {
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"attachable": {
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},
			"ingress": {
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},
			"ipv6": {
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},
			"ipam_driver": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Default:  "default",
			},
			// A set of IPAM blocks; hashed by schema.HashResource in the
			// flatteners, see flattenIpamConfigSpec.
			"ipam_config": {
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				ForceNew: true,
				// DiffSuppressFunc: suppressIfIPAMConfigWithIpv6Changes(),
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"subnet": {
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
						"ip_range": {
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
						"gateway": {
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
						"aux_address": {
							Type:     schema.TypeMap,
							Optional: true,
							ForceNew: true,
						},
					},
				},
			},
			"scope": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}

View file

@ -0,0 +1,39 @@
package provider
import (
"log"
"github.com/docker/docker/api/types/network"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// TODO 2: seems like we can replace the set hash generation with plain lists -> #219
// flattenIpamConfigSpec converts the network's IPAM configuration blocks
// into a schema.Set matching the "ipam_config" attribute. Fields that are
// empty on the API side are omitted from the resulting maps.
func flattenIpamConfigSpec(in []network.IPAMConfig) *schema.Set {
	configs := make([]interface{}, len(in))
	for idx, cfg := range in {
		log.Printf("[DEBUG] flatten ipam %d: %#v", idx, cfg)
		entry := map[string]interface{}{}
		if cfg.Subnet != "" {
			entry["subnet"] = cfg.Subnet
		}
		if cfg.IPRange != "" {
			entry["ip_range"] = cfg.IPRange
		}
		if cfg.Gateway != "" {
			entry["gateway"] = cfg.Gateway
		}
		if len(cfg.AuxAddress) > 0 {
			aux := make(map[string]interface{}, len(cfg.AuxAddress))
			for name, addr := range cfg.AuxAddress {
				aux[name] = addr
			}
			entry["aux_address"] = aux
		}
		configs[idx] = entry
	}
	// Hash each entry exactly like the resource schema does so set
	// membership matches what Terraform computed from configuration.
	ipamResource := resourceDockerNetwork().Schema["ipam_config"].Elem.(*schema.Resource)
	return schema.NewSet(schema.HashResource(ipamResource), configs)
}

View file

@ -13,6 +13,67 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// resourceDockerPluginCreate installs a Docker plugin (optionally under an
// alias), applying the configured permission grants and env settings, then
// inspects the installed plugin to populate the state.
func resourceDockerPluginCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*ProviderConfig).DockerClient
	ctx := context.Background()
	pluginName := d.Get("name").(string)
	alias := d.Get("alias").(string)
	// Use a format verb: concatenating user input into the format string
	// misrenders names containing '%'.
	log.Printf("[DEBUG] Install a Docker plugin %s", pluginName)
	opts := types.PluginInstallOptions{
		RemoteRef:            pluginName,
		AcceptAllPermissions: d.Get("grant_all_permissions").(bool),
		Disabled:             !d.Get("enabled").(bool),
		// TODO support other settings
		Args: getDockerPluginEnv(d.Get("env")),
	}
	if v, ok := d.GetOk("grant_permissions"); ok {
		opts.AcceptPermissionsFunc = getDockerPluginGrantPermissions(v)
	}
	body, err := client.PluginInstall(ctx, alias, opts)
	if err != nil {
		return fmt.Errorf("install a Docker plugin "+pluginName+": %w", err)
	}
	// Drain and close the install stream: the content is not needed, but
	// leaving the ReadCloser open leaks the underlying connection.
	defer body.Close()
	_, _ = ioutil.ReadAll(body)
	key := pluginName
	if alias != "" {
		key = alias
	}
	plugin, _, err := client.PluginInspectWithRaw(ctx, key)
	if err != nil {
		return fmt.Errorf("inspect a Docker plugin "+key+": %w", err)
	}
	setDockerPlugin(d, plugin)
	return nil
}
// resourceDockerPluginRead refreshes the plugin state from the daemon. If
// the plugin no longer exists, the resource is removed from state (ID set
// to "") instead of returning an error, so Terraform plans a re-create.
func resourceDockerPluginRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*ProviderConfig).DockerClient
	ctx := context.Background()
	pluginID := d.Id()
	plugin, _, err := client.PluginInspectWithRaw(ctx, pluginID)
	if err != nil {
		// %w is only valid in fmt.Errorf; log.Printf would print
		// "%!w(...)" — use %v for logging.
		log.Printf("[DEBUG] Inspect a Docker plugin "+pluginID+": %v", err)
		d.SetId("")
		return nil
	}
	setDockerPlugin(d, plugin)
	return nil
}
// resourceDockerPluginDelete removes the plugin from the daemon, forcing
// the removal when force_destroy is set.
func resourceDockerPluginDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*ProviderConfig).DockerClient
	ctx := context.Background()
	pluginID := d.Id()
	log.Printf("[DEBUG] Remove a Docker plugin " + pluginID)
	removeOpts := types.PluginRemoveOptions{
		Force: d.Get("force_destroy").(bool),
	}
	err := client.PluginRemove(ctx, pluginID, removeOpts)
	if err != nil {
		return fmt.Errorf("remove the Docker plugin "+pluginID+": %w", err)
	}
	return nil
}
// Helpers
func getDockerPluginEnv(src interface{}) []string {
if src == nil {
return nil
@ -94,39 +155,6 @@ func getDockerPluginGrantPermissions(src interface{}) func(types.PluginPrivilege
}
}
func resourceDockerPluginCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
ctx := context.Background()
pluginName := d.Get("name").(string)
alias := d.Get("alias").(string)
log.Printf("[DEBUG] Install a Docker plugin " + pluginName)
opts := types.PluginInstallOptions{
RemoteRef: pluginName,
AcceptAllPermissions: d.Get("grant_all_permissions").(bool),
Disabled: !d.Get("enabled").(bool),
// TODO support other settings
Args: getDockerPluginEnv(d.Get("env")),
}
if v, ok := d.GetOk("grant_permissions"); ok {
opts.AcceptPermissionsFunc = getDockerPluginGrantPermissions(v)
}
body, err := client.PluginInstall(ctx, alias, opts)
if err != nil {
return fmt.Errorf("install a Docker plugin "+pluginName+": %w", err)
}
_, _ = ioutil.ReadAll(body)
key := pluginName
if alias != "" {
key = alias
}
plugin, _, err := client.PluginInspectWithRaw(ctx, key)
if err != nil {
return fmt.Errorf("inspect a Docker plugin "+key+": %w", err)
}
setDockerPlugin(d, plugin)
return nil
}
func setDockerPlugin(d *schema.ResourceData, plugin *types.Plugin) {
d.SetId(plugin.ID)
d.Set("plugin_reference", plugin.PluginReference)
@ -141,20 +169,6 @@ func setDockerPlugin(d *schema.ResourceData, plugin *types.Plugin) {
d.Set("env", plugin.Settings.Env)
}
func resourceDockerPluginRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
ctx := context.Background()
pluginID := d.Id()
plugin, _, err := client.PluginInspectWithRaw(ctx, pluginID)
if err != nil {
log.Printf("[DEBUG] Inspect a Docker plugin "+pluginID+": %w", err)
d.SetId("")
return nil
}
setDockerPlugin(d, plugin)
return nil
}
func disablePlugin(ctx context.Context, d *schema.ResourceData, cl *client.Client) error {
pluginID := d.Id()
log.Printf("[DEBUG] Disable a Docker plugin " + pluginID)
@ -198,7 +212,7 @@ func pluginUpdate(ctx context.Context, d *schema.ResourceData, meta interface{})
oldEnabled, newEnabled := o.(bool), n.(bool)
if d.HasChange("env") {
if oldEnabled {
// To update the plugin setttings, the plugin must be disabled
// To update the plugin settings, the plugin must be disabled
if err := disablePlugin(ctx, d, cl); err != nil {
return err
}
@ -247,16 +261,3 @@ func resourceDockerPluginUpdate(d *schema.ResourceData, meta interface{}) error
// https://learn.hashicorp.com/tutorials/terraform/provider-update?in=terraform/providers#implement-update
return resourceDockerPluginRead(d, meta)
}
func resourceDockerPluginDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
ctx := context.Background()
pluginID := d.Id()
log.Printf("[DEBUG] Remove a Docker plugin " + pluginID)
if err := client.PluginRemove(ctx, pluginID, types.PluginRemoveOptions{
Force: d.Get("force_destroy").(bool),
}); err != nil {
return fmt.Errorf("remove the Docker plugin "+pluginID+": %w", err)
}
return nil
}

View file

@ -28,6 +28,75 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// resourceDockerRegistryImageCreate optionally builds the image from the
// "build" block, pushes it to its registry, and stores the remote sha256
// digest as both the resource ID and the sha256_digest attribute.
func resourceDockerRegistryImageCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*ProviderConfig).DockerClient
	providerConfig := meta.(*ProviderConfig)
	name := d.Get("name").(string)
	log.Printf("[DEBUG] Creating docker image %s", name)
	pushOpts := createPushImageOptions(name)
	if buildOptions, ok := d.GetOk("build"); ok {
		// "build" is a single-item list block; only element 0 exists.
		buildOptionsMap := buildOptions.([]interface{})[0].(map[string]interface{})
		err := buildDockerRegistryImage(ctx, client, buildOptionsMap, pushOpts.FqName)
		if err != nil {
			return diag.Errorf("Error building docker image: %s", err)
		}
	}
	username, password := getDockerRegistryImageRegistryUserNameAndPassword(pushOpts, providerConfig)
	if err := pushDockerRegistryImage(ctx, client, pushOpts, username, password); err != nil {
		return diag.Errorf("Error pushing docker image: %s", err)
	}
	// The remote digest doubles as the resource ID so a changed remote
	// image surfaces as drift on the next read.
	digest, err := getImageDigestWithFallback(pushOpts, username, password)
	if err != nil {
		return diag.Errorf("Unable to create image, image not found: %s", err)
	}
	d.SetId(digest)
	d.Set("sha256_digest", digest)
	return nil
}
// resourceDockerRegistryImageRead refreshes the remote image digest. When
// the digest can no longer be resolved, the resource is dropped from state
// so Terraform plans a re-create.
func resourceDockerRegistryImageRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	providerConfig := meta.(*ProviderConfig)
	pushOpts := createPushImageOptions(d.Get("name").(string))
	user, pass := getDockerRegistryImageRegistryUserNameAndPassword(pushOpts, providerConfig)
	digest, err := getImageDigestWithFallback(pushOpts, user, pass)
	if err != nil {
		log.Printf("Got error getting registry image digest: %s", err)
		d.SetId("")
		return nil
	}
	d.Set("sha256_digest", digest)
	return nil
}
// resourceDockerRegistryImageDelete removes the image from the remote
// registry unless keep_remotely is set. Deletion is attempted by digest
// first; if the registry rejects that, it falls back to deleting by tag.
func resourceDockerRegistryImageDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	if d.Get("keep_remotely").(bool) {
		return nil
	}
	providerConfig := meta.(*ProviderConfig)
	name := d.Get("name").(string)
	pushOpts := createPushImageOptions(name)
	username, password := getDockerRegistryImageRegistryUserNameAndPassword(pushOpts, providerConfig)
	digest := d.Get("sha256_digest").(string)
	err := deleteDockerRegistryImage(pushOpts, digest, username, password, false)
	if err != nil {
		err = deleteDockerRegistryImage(pushOpts, pushOpts.Tag, username, password, true)
		if err != nil {
			// Message previously said "getting registry image digest",
			// a copy-paste from the read path; report the actual failure.
			return diag.Errorf("Got error deleting registry image: %s", err)
		}
	}
	return nil
}
// resourceDockerRegistryImageUpdate re-reads the remote digest; presumably
// the remaining attributes are ForceNew so a read suffices here — confirm
// against the resource schema.
// See https://learn.hashicorp.com/tutorials/terraform/provider-update?in=terraform/providers#implement-update
func resourceDockerRegistryImageUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	return resourceDockerRegistryImageRead(ctx, d, meta)
}
// Helpers
type internalPushImageOptions struct {
Name string
FqName string
@ -459,71 +528,3 @@ func createPushImageOptions(image string) internalPushImageOptions {
}
return pushOpts
}
func resourceDockerRegistryImageCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
client := meta.(*ProviderConfig).DockerClient
providerConfig := meta.(*ProviderConfig)
name := d.Get("name").(string)
log.Printf("[DEBUG] Creating docker image %s", name)
pushOpts := createPushImageOptions(name)
if buildOptions, ok := d.GetOk("build"); ok {
buildOptionsMap := buildOptions.([]interface{})[0].(map[string]interface{})
err := buildDockerRegistryImage(ctx, client, buildOptionsMap, pushOpts.FqName)
if err != nil {
return diag.Errorf("Error building docker image: %s", err)
}
}
username, password := getDockerRegistryImageRegistryUserNameAndPassword(pushOpts, providerConfig)
if err := pushDockerRegistryImage(ctx, client, pushOpts, username, password); err != nil {
return diag.Errorf("Error pushing docker image: %s", err)
}
digest, err := getImageDigestWithFallback(pushOpts, username, password)
if err != nil {
return diag.Errorf("Unable to create image, image not found: %s", err)
}
d.SetId(digest)
d.Set("sha256_digest", digest)
return nil
}
func resourceDockerRegistryImageRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
providerConfig := meta.(*ProviderConfig)
name := d.Get("name").(string)
pushOpts := createPushImageOptions(name)
username, password := getDockerRegistryImageRegistryUserNameAndPassword(pushOpts, providerConfig)
digest, err := getImageDigestWithFallback(pushOpts, username, password)
if err != nil {
log.Printf("Got error getting registry image digest: %s", err)
d.SetId("")
return nil
}
d.Set("sha256_digest", digest)
return nil
}
func resourceDockerRegistryImageDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
if d.Get("keep_remotely").(bool) {
return nil
}
providerConfig := meta.(*ProviderConfig)
name := d.Get("name").(string)
pushOpts := createPushImageOptions(name)
username, password := getDockerRegistryImageRegistryUserNameAndPassword(pushOpts, providerConfig)
digest := d.Get("sha256_digest").(string)
err := deleteDockerRegistryImage(pushOpts, digest, username, password, false)
if err != nil {
err = deleteDockerRegistryImage(pushOpts, pushOpts.Tag, username, password, true)
if err != nil {
return diag.Errorf("Got error getting registry image digest: %s", err)
}
}
return nil
}
func resourceDockerRegistryImageUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
return resourceDockerRegistryImageRead(ctx, d, meta)
}

File diff suppressed because it is too large Load diff

View file

@ -6,15 +6,12 @@ import (
"encoding/json"
"fmt"
"log"
"os"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@ -576,714 +573,6 @@ func terminalState(state swarm.TaskState) bool {
return numberedStates[state] > numberedStates[swarm.TaskStateRunning]
}
//////// Mappers
// createServiceSpec creates the service spec: https://docs.docker.com/engine/api/v1.32/#operation/ServiceCreate
// It assembles annotations, labels, task template, mode, update-config,
// rollback-config and endpoint spec from the resource data. The first
// helper that fails aborts assembly; the partially filled spec is returned
// alongside the error and must not be used by the caller.
func createServiceSpec(d *schema.ResourceData) (swarm.ServiceSpec, error) {
	serviceSpec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{
			Name: d.Get("name").(string),
		},
	}
	labels, err := createServiceLabels(d)
	if err != nil {
		return serviceSpec, err
	}
	serviceSpec.Labels = labels
	taskTemplate, err := createServiceTaskSpec(d)
	if err != nil {
		return serviceSpec, err
	}
	serviceSpec.TaskTemplate = taskTemplate
	mode, err := createServiceMode(d)
	if err != nil {
		return serviceSpec, err
	}
	serviceSpec.Mode = mode
	updateConfig, err := createServiceUpdateConfig(d)
	if err != nil {
		return serviceSpec, err
	}
	serviceSpec.UpdateConfig = updateConfig
	rollbackConfig, err := createServiceRollbackConfig(d)
	if err != nil {
		return serviceSpec, err
	}
	serviceSpec.RollbackConfig = rollbackConfig
	endpointSpec, err := createServiceEndpointSpec(d)
	if err != nil {
		return serviceSpec, err
	}
	serviceSpec.EndpointSpec = endpointSpec
	return serviceSpec, nil
}
// createServiceLabels reads the "labels" set from the resource data and
// converts it into the map form the swarm API expects. It returns nil when
// no labels are configured.
func createServiceLabels(d *schema.ResourceData) (map[string]string, error) {
	v, ok := d.GetOk("labels")
	if !ok {
		return nil, nil
	}
	return labelSetToMap(v.(*schema.Set)), nil
}
// == start taskSpec
// createServiceTaskSpec creates the task template for the service.
// Each sub-block of the single "task_spec" item is delegated to its own
// create* helper; the first failing helper aborts and its error is
// returned together with the partially filled spec.
func createServiceTaskSpec(d *schema.ResourceData) (swarm.TaskSpec, error) {
	taskSpec := swarm.TaskSpec{}
	if v, ok := d.GetOk("task_spec"); ok {
		if len(v.([]interface{})) > 0 {
			// task_spec is a single-item list block, so this loop runs
			// at most once in practice.
			for _, rawTaskSpec := range v.([]interface{}) {
				rawTaskSpec := rawTaskSpec.(map[string]interface{})
				if rawContainerSpec, ok := rawTaskSpec["container_spec"]; ok {
					containerSpec, err := createContainerSpec(rawContainerSpec)
					if err != nil {
						return taskSpec, err
					}
					taskSpec.ContainerSpec = containerSpec
				}
				if rawResourcesSpec, ok := rawTaskSpec["resources"]; ok {
					resources, err := createResources(rawResourcesSpec)
					if err != nil {
						return taskSpec, err
					}
					taskSpec.Resources = resources
				}
				if rawRestartPolicySpec, ok := rawTaskSpec["restart_policy"]; ok {
					restartPolicy, err := createRestartPolicy(rawRestartPolicySpec)
					if err != nil {
						return taskSpec, err
					}
					taskSpec.RestartPolicy = restartPolicy
				}
				if rawPlacementSpec, ok := rawTaskSpec["placement"]; ok {
					placement, err := createPlacement(rawPlacementSpec)
					if err != nil {
						return taskSpec, err
					}
					taskSpec.Placement = placement
				}
				if rawForceUpdate, ok := rawTaskSpec["force_update"]; ok {
					taskSpec.ForceUpdate = uint64(rawForceUpdate.(int))
				}
				if rawRuntimeSpec, ok := rawTaskSpec["runtime"]; ok {
					taskSpec.Runtime = swarm.RuntimeType(rawRuntimeSpec.(string))
				}
				if rawNetworksSpec, ok := rawTaskSpec["networks"]; ok {
					networks, err := createServiceNetworks(rawNetworksSpec)
					if err != nil {
						return taskSpec, err
					}
					taskSpec.Networks = networks
				}
				if rawLogDriverSpec, ok := rawTaskSpec["log_driver"]; ok {
					logDriver, err := createLogDriver(rawLogDriverSpec)
					if err != nil {
						return taskSpec, err
					}
					taskSpec.LogDriver = logDriver
				}
			}
		}
	}
	return taskSpec, nil
}
// createContainerSpec creates the container spec of a service task from
// the single-item "container_spec" list block. Nested blocks (privileges,
// mounts, healthcheck, dns_config, secrets, configs) are decoded in place.
// Duration and retry fields parse best-effort; parse errors are ignored.
func createContainerSpec(v interface{}) (*swarm.ContainerSpec, error) {
	containerSpec := swarm.ContainerSpec{}
	if len(v.([]interface{})) > 0 {
		for _, rawContainerSpec := range v.([]interface{}) {
			rawContainerSpec := rawContainerSpec.(map[string]interface{})
			if value, ok := rawContainerSpec["image"]; ok {
				containerSpec.Image = value.(string)
			}
			if value, ok := rawContainerSpec["labels"]; ok {
				containerSpec.Labels = labelSetToMap(value.(*schema.Set))
			}
			if value, ok := rawContainerSpec["command"]; ok {
				containerSpec.Command = stringListToStringSlice(value.([]interface{}))
			}
			if value, ok := rawContainerSpec["args"]; ok {
				containerSpec.Args = stringListToStringSlice(value.([]interface{}))
			}
			if value, ok := rawContainerSpec["hostname"]; ok {
				containerSpec.Hostname = value.(string)
			}
			if value, ok := rawContainerSpec["env"]; ok {
				containerSpec.Env = mapTypeMapValsToStringSlice(value.(map[string]interface{}))
			}
			if value, ok := rawContainerSpec["dir"]; ok {
				containerSpec.Dir = value.(string)
			}
			if value, ok := rawContainerSpec["user"]; ok {
				containerSpec.User = value.(string)
			}
			if value, ok := rawContainerSpec["groups"]; ok {
				containerSpec.Groups = stringListToStringSlice(value.([]interface{}))
			}
			if value, ok := rawContainerSpec["privileges"]; ok {
				if len(value.([]interface{})) > 0 {
					containerSpec.Privileges = &swarm.Privileges{}
					for _, rawPrivilegesSpec := range value.([]interface{}) {
						rawPrivilegesSpec := rawPrivilegesSpec.(map[string]interface{})
						if value, ok := rawPrivilegesSpec["credential_spec"]; ok {
							if len(value.([]interface{})) > 0 {
								containerSpec.Privileges.CredentialSpec = &swarm.CredentialSpec{}
								for _, rawCredentialSpec := range value.([]interface{}) {
									rawCredentialSpec := rawCredentialSpec.(map[string]interface{})
									if value, ok := rawCredentialSpec["file"]; ok {
										containerSpec.Privileges.CredentialSpec.File = value.(string)
									}
									if value, ok := rawCredentialSpec["registry"]; ok {
										// BUG FIX: previously assigned to .File, which
										// clobbered the "file" setting and left
										// Registry unset.
										containerSpec.Privileges.CredentialSpec.Registry = value.(string)
									}
								}
							}
						}
						if value, ok := rawPrivilegesSpec["se_linux_context"]; ok {
							if len(value.([]interface{})) > 0 {
								containerSpec.Privileges.SELinuxContext = &swarm.SELinuxContext{}
								for _, rawSELinuxContext := range value.([]interface{}) {
									rawSELinuxContext := rawSELinuxContext.(map[string]interface{})
									if value, ok := rawSELinuxContext["disable"]; ok {
										containerSpec.Privileges.SELinuxContext.Disable = value.(bool)
									}
									if value, ok := rawSELinuxContext["user"]; ok {
										containerSpec.Privileges.SELinuxContext.User = value.(string)
									}
									if value, ok := rawSELinuxContext["role"]; ok {
										containerSpec.Privileges.SELinuxContext.Role = value.(string)
									}
									if value, ok := rawSELinuxContext["type"]; ok {
										containerSpec.Privileges.SELinuxContext.Type = value.(string)
									}
									if value, ok := rawSELinuxContext["level"]; ok {
										containerSpec.Privileges.SELinuxContext.Level = value.(string)
									}
								}
							}
						}
					}
				}
			}
			if value, ok := rawContainerSpec["read_only"]; ok {
				containerSpec.ReadOnly = value.(bool)
			}
			if value, ok := rawContainerSpec["mounts"]; ok {
				mounts := []mount.Mount{}
				for _, rawMount := range value.(*schema.Set).List() {
					rawMount := rawMount.(map[string]interface{})
					mountType := mount.Type(rawMount["type"].(string))
					mountInstance := mount.Mount{
						Type:   mountType,
						Target: rawMount["target"].(string),
						Source: rawMount["source"].(string),
					}
					if value, ok := rawMount["read_only"]; ok {
						mountInstance.ReadOnly = value.(bool)
					}
					// Only the options block matching the mount type is honored.
					if mountType == mount.TypeBind {
						if value, ok := rawMount["bind_options"]; ok {
							if len(value.([]interface{})) > 0 {
								mountInstance.BindOptions = &mount.BindOptions{}
								for _, rawBindOptions := range value.([]interface{}) {
									rawBindOptions := rawBindOptions.(map[string]interface{})
									if value, ok := rawBindOptions["propagation"]; ok {
										mountInstance.BindOptions.Propagation = mount.Propagation(value.(string))
									}
								}
							}
						}
					} else if mountType == mount.TypeVolume {
						if value, ok := rawMount["volume_options"]; ok {
							if len(value.([]interface{})) > 0 {
								mountInstance.VolumeOptions = &mount.VolumeOptions{}
								for _, rawVolumeOptions := range value.([]interface{}) {
									rawVolumeOptions := rawVolumeOptions.(map[string]interface{})
									if value, ok := rawVolumeOptions["no_copy"]; ok {
										mountInstance.VolumeOptions.NoCopy = value.(bool)
									}
									if value, ok := rawVolumeOptions["labels"]; ok {
										mountInstance.VolumeOptions.Labels = labelSetToMap(value.(*schema.Set))
									}
									// because it is not possible to nest maps
									if value, ok := rawVolumeOptions["driver_name"]; ok {
										if mountInstance.VolumeOptions.DriverConfig == nil {
											mountInstance.VolumeOptions.DriverConfig = &mount.Driver{}
										}
										mountInstance.VolumeOptions.DriverConfig.Name = value.(string)
									}
									if value, ok := rawVolumeOptions["driver_options"]; ok {
										if mountInstance.VolumeOptions.DriverConfig == nil {
											mountInstance.VolumeOptions.DriverConfig = &mount.Driver{}
										}
										mountInstance.VolumeOptions.DriverConfig.Options = mapTypeMapValsToString(value.(map[string]interface{}))
									}
								}
							}
						}
					} else if mountType == mount.TypeTmpfs {
						if value, ok := rawMount["tmpfs_options"]; ok {
							if len(value.([]interface{})) > 0 {
								mountInstance.TmpfsOptions = &mount.TmpfsOptions{}
								for _, rawTmpfsOptions := range value.([]interface{}) {
									rawTmpfsOptions := rawTmpfsOptions.(map[string]interface{})
									if value, ok := rawTmpfsOptions["size_bytes"]; ok {
										mountInstance.TmpfsOptions.SizeBytes = value.(int64)
									}
									if value, ok := rawTmpfsOptions["mode"]; ok {
										mountInstance.TmpfsOptions.Mode = os.FileMode(value.(int))
									}
								}
							}
						}
					}
					mounts = append(mounts, mountInstance)
				}
				containerSpec.Mounts = mounts
			}
			if value, ok := rawContainerSpec["stop_signal"]; ok {
				containerSpec.StopSignal = value.(string)
			}
			if value, ok := rawContainerSpec["stop_grace_period"]; ok {
				parsed, _ := time.ParseDuration(value.(string))
				containerSpec.StopGracePeriod = &parsed
			}
			if value, ok := rawContainerSpec["healthcheck"]; ok {
				containerSpec.Healthcheck = &container.HealthConfig{}
				if len(value.([]interface{})) > 0 {
					for _, rawHealthCheck := range value.([]interface{}) {
						rawHealthCheck := rawHealthCheck.(map[string]interface{})
						if testCommand, ok := rawHealthCheck["test"]; ok {
							containerSpec.Healthcheck.Test = stringListToStringSlice(testCommand.([]interface{}))
						}
						if rawInterval, ok := rawHealthCheck["interval"]; ok {
							containerSpec.Healthcheck.Interval, _ = time.ParseDuration(rawInterval.(string))
						}
						if rawTimeout, ok := rawHealthCheck["timeout"]; ok {
							containerSpec.Healthcheck.Timeout, _ = time.ParseDuration(rawTimeout.(string))
						}
						if rawStartPeriod, ok := rawHealthCheck["start_period"]; ok {
							containerSpec.Healthcheck.StartPeriod, _ = time.ParseDuration(rawStartPeriod.(string))
						}
						if rawRetries, ok := rawHealthCheck["retries"]; ok {
							containerSpec.Healthcheck.Retries, _ = rawRetries.(int)
						}
					}
				}
			}
			if value, ok := rawContainerSpec["hosts"]; ok {
				containerSpec.Hosts = extraHostsSetToDockerExtraHosts(value.(*schema.Set))
			}
			if value, ok := rawContainerSpec["dns_config"]; ok {
				containerSpec.DNSConfig = &swarm.DNSConfig{}
				// BUG FIX: guarded on len(v) (the outer container_spec list)
				// instead of the dns_config value itself.
				if len(value.([]interface{})) > 0 {
					for _, rawDNSConfig := range value.([]interface{}) {
						if rawDNSConfig != nil {
							rawDNSConfig := rawDNSConfig.(map[string]interface{})
							if nameservers, ok := rawDNSConfig["nameservers"]; ok {
								containerSpec.DNSConfig.Nameservers = stringListToStringSlice(nameservers.([]interface{}))
							}
							if search, ok := rawDNSConfig["search"]; ok {
								containerSpec.DNSConfig.Search = stringListToStringSlice(search.([]interface{}))
							}
							if options, ok := rawDNSConfig["options"]; ok {
								containerSpec.DNSConfig.Options = stringListToStringSlice(options.([]interface{}))
							}
						}
					}
				}
			}
			if value, ok := rawContainerSpec["secrets"]; ok {
				secrets := []*swarm.SecretReference{}
				for _, rawSecret := range value.(*schema.Set).List() {
					rawSecret := rawSecret.(map[string]interface{})
					rawFilemode := rawSecret["file_mode"].(int)
					secret := swarm.SecretReference{
						SecretID: rawSecret["secret_id"].(string),
						File: &swarm.SecretReferenceFileTarget{
							Name: rawSecret["file_name"].(string),
							UID:  rawSecret["file_uid"].(string),
							GID:  rawSecret["file_gid"].(string),
							Mode: os.FileMode(uint32(rawFilemode)),
						},
					}
					if value, ok := rawSecret["secret_name"]; ok {
						secret.SecretName = value.(string)
					}
					secrets = append(secrets, &secret)
				}
				containerSpec.Secrets = secrets
			}
			if value, ok := rawContainerSpec["configs"]; ok {
				configs := []*swarm.ConfigReference{}
				for _, rawConfig := range value.(*schema.Set).List() {
					rawConfig := rawConfig.(map[string]interface{})
					rawFilemode := rawConfig["file_mode"].(int)
					config := swarm.ConfigReference{
						ConfigID: rawConfig["config_id"].(string),
						File: &swarm.ConfigReferenceFileTarget{
							Name: rawConfig["file_name"].(string),
							UID:  rawConfig["file_uid"].(string),
							GID:  rawConfig["file_gid"].(string),
							Mode: os.FileMode(uint32(rawFilemode)),
						},
					}
					if value, ok := rawConfig["config_name"]; ok {
						config.ConfigName = value.(string)
					}
					configs = append(configs, &config)
				}
				containerSpec.Configs = configs
			}
			if value, ok := rawContainerSpec["isolation"]; ok {
				containerSpec.Isolation = container.Isolation(value.(string))
			}
		}
	}
	return &containerSpec, nil
}
// createResources creates the resource requirements (CPU/memory limits
// and reservations, including generic resources) for the service from
// the raw Terraform config value. "limits" and "reservation" are
// MaxItems=1 list blocks, hence the single-iteration loops.
func createResources(v interface{}) (*swarm.ResourceRequirements, error) {
	resources := swarm.ResourceRequirements{}
	if len(v.([]interface{})) > 0 {
		for _, rawResourcesSpec := range v.([]interface{}) {
			if rawResourcesSpec != nil {
				rawResourcesSpec := rawResourcesSpec.(map[string]interface{})
				if value, ok := rawResourcesSpec["limits"]; ok {
					if len(value.([]interface{})) > 0 {
						resources.Limits = &swarm.Limit{}
						for _, rawLimitsSpec := range value.([]interface{}) {
							rawLimitsSpec := rawLimitsSpec.(map[string]interface{})
							if value, ok := rawLimitsSpec["nano_cpus"]; ok {
								resources.Limits.NanoCPUs = int64(value.(int))
							}
							if value, ok := rawLimitsSpec["memory_bytes"]; ok {
								resources.Limits.MemoryBytes = int64(value.(int))
							}
						}
					}
				}
				if value, ok := rawResourcesSpec["reservation"]; ok {
					if len(value.([]interface{})) > 0 {
						resources.Reservations = &swarm.Resources{}
						for _, rawReservationSpec := range value.([]interface{}) {
							rawReservationSpec := rawReservationSpec.(map[string]interface{})
							if value, ok := rawReservationSpec["nano_cpus"]; ok {
								resources.Reservations.NanoCPUs = int64(value.(int))
							}
							if value, ok := rawReservationSpec["memory_bytes"]; ok {
								resources.Reservations.MemoryBytes = int64(value.(int))
							}
							if value, ok := rawReservationSpec["generic_resources"]; ok {
								// NOTE(review): the error from createGenericResources is
								// discarded here, so malformed generic resources are ignored.
								resources.Reservations.GenericResources, _ = createGenericResources(value)
							}
						}
					}
				}
			}
		}
	}
	return &resources, nil
}
// createGenericResources creates generic resources (e.g. GPUs) for a
// container from the raw Terraform config value.
//
// Named resources arrive as "kind=value" strings and discrete resources
// as "kind=<int64>" strings. Entries are split on the FIRST '=' only,
// so a value may itself contain '='. Previously an entry without '='
// caused an index-out-of-range panic, and a non-numeric discrete value
// was silently mapped to 0; the panic is avoided and the parse error is
// now reported via the (previously always-nil) error return.
func createGenericResources(value interface{}) ([]swarm.GenericResource, error) {
	genericResources := make([]swarm.GenericResource, 0)
	for _, rawGenericResource := range value.([]interface{}) {
		rawGenericResource := rawGenericResource.(map[string]interface{})
		if rawNamedResources, ok := rawGenericResource["named_resources_spec"]; ok {
			for _, rawNamedResource := range rawNamedResources.(*schema.Set).List() {
				splitted := strings.SplitN(rawNamedResource.(string), "=", 2)
				namedGenericResource := &swarm.NamedGenericResource{Kind: splitted[0]}
				if len(splitted) == 2 {
					namedGenericResource.Value = splitted[1]
				}
				genericResources = append(genericResources, swarm.GenericResource{
					NamedResourceSpec: namedGenericResource,
				})
			}
		}
		if rawDiscreteResources, ok := rawGenericResource["discrete_resources_spec"]; ok {
			for _, rawDiscreteResource := range rawDiscreteResources.(*schema.Set).List() {
				splitted := strings.SplitN(rawDiscreteResource.(string), "=", 2)
				if len(splitted) != 2 {
					// No value part: nothing to parse. Skip the entry instead
					// of panicking as the old unchecked index access did.
					continue
				}
				parsed, err := strconv.ParseInt(splitted[1], 10, 64)
				if err != nil {
					return nil, err
				}
				genericResources = append(genericResources, swarm.GenericResource{
					DiscreteResourceSpec: &swarm.DiscreteGenericResource{
						Kind:  splitted[0],
						Value: parsed,
					},
				})
			}
		}
	}
	return genericResources, nil
}
// createRestartPolicy creates the restart policy of the service from the
// raw "restart_policy" config value (a MaxItems=1 list).
//
// NOTE(review): the time.ParseDuration errors for "delay" and "window"
// are discarded, so an unparsable duration silently becomes 0 —
// presumably the schema validates the duration format upstream; confirm
// before relying on that.
func createRestartPolicy(v interface{}) (*swarm.RestartPolicy, error) {
	restartPolicy := swarm.RestartPolicy{}
	rawRestartPolicySingleItem := v.([]interface{})
	if len(rawRestartPolicySingleItem) == 0 {
		return &restartPolicy, nil
	}
	// because it's a list with MaxItems=1
	rawRestartPolicy := rawRestartPolicySingleItem[0].(map[string]interface{})
	if v, ok := rawRestartPolicy["condition"]; ok {
		restartPolicy.Condition = swarm.RestartPolicyCondition(v.(string))
	}
	if v, ok := rawRestartPolicy["delay"]; ok {
		parsed, _ := time.ParseDuration(v.(string))
		restartPolicy.Delay = &parsed
	}
	if v, ok := rawRestartPolicy["max_attempts"]; ok {
		parsed := uint64(v.(int))
		restartPolicy.MaxAttempts = &parsed
	}
	if v, ok := rawRestartPolicy["window"]; ok {
		parsed, _ := time.ParseDuration(v.(string))
		restartPolicy.Window = &parsed
	}
	return &restartPolicy, nil
}
// createPlacement builds the swarm placement strategy (constraints,
// preferences, platforms and max replicas) for the service from the raw
// "placement" config value (a MaxItems=1 list).
func createPlacement(v interface{}) (*swarm.Placement, error) {
	placement := swarm.Placement{}
	for _, raw := range v.([]interface{}) {
		if raw == nil {
			continue
		}
		spec := raw.(map[string]interface{})
		if constraints, ok := spec["constraints"]; ok {
			placement.Constraints = stringSetToStringSlice(constraints.(*schema.Set))
		}
		if prefs, ok := spec["prefs"]; ok {
			placement.Preferences = stringSetToPlacementPrefs(prefs.(*schema.Set))
		}
		if platforms, ok := spec["platforms"]; ok {
			placement.Platforms = mapSetToPlacementPlatforms(platforms.(*schema.Set))
		}
		if maxReplicas, ok := spec["max_replicas"]; ok {
			placement.MaxReplicas = uint64(maxReplicas.(int))
		}
	}
	return &placement, nil
}
// createServiceNetworks creates the network attachment configs for the
// networks the service will be attached to.
func createServiceNetworks(v interface{}) ([]swarm.NetworkAttachmentConfig, error) {
	networks := []swarm.NetworkAttachmentConfig{}
	for _, rawTarget := range v.(*schema.Set).List() {
		attachment := swarm.NetworkAttachmentConfig{Target: rawTarget.(string)}
		networks = append(networks, attachment)
	}
	return networks, nil
}
// createLogDriver creates the log driver for the service from the raw
// "log_driver" config value. The block has MaxItems=1, so only the
// first element is relevant; nil is returned when the block is absent.
//
// The previous implementation iterated over the list but returned
// unconditionally inside the first iteration (staticcheck SA4004); this
// version makes the single-element semantics explicit and drops the
// nolint directive.
func createLogDriver(v interface{}) (*swarm.Driver, error) {
	rawList := v.([]interface{})
	if len(rawList) == 0 {
		return nil, nil
	}
	rawLogging := rawList[0].(map[string]interface{})
	logDriver := swarm.Driver{}
	if rawName, ok := rawLogging["name"]; ok {
		logDriver.Name = rawName.(string)
	}
	if rawOptions, ok := rawLogging["options"]; ok {
		logDriver.Options = mapTypeMapValsToString(rawOptions.(map[string]interface{}))
	}
	return &logDriver, nil
}
// == end taskSpec
// createServiceMode creates the mode the service will run in: either
// "replicated" (with an optional replica count) or "global". Both are
// modeled as nested MaxItems=1 list blocks in the schema, hence the
// nested single-iteration loops.
func createServiceMode(d *schema.ResourceData) (swarm.ServiceMode, error) {
	serviceMode := swarm.ServiceMode{}
	if v, ok := d.GetOk("mode"); ok {
		// because it's a list
		if len(v.([]interface{})) > 0 {
			for _, rawMode := range v.([]interface{}) {
				// with a map
				rawMode := rawMode.(map[string]interface{})
				if rawReplicatedMode, replModeOk := rawMode["replicated"]; replModeOk {
					// with a list
					if len(rawReplicatedMode.([]interface{})) > 0 {
						for _, rawReplicatedModeInt := range rawReplicatedMode.([]interface{}) {
							// which is a map
							rawReplicatedModeMap := rawReplicatedModeInt.(map[string]interface{})
							log.Printf("[INFO] Setting service mode to 'replicated'")
							serviceMode.Replicated = &swarm.ReplicatedService{}
							if testReplicas, testReplicasOk := rawReplicatedModeMap["replicas"]; testReplicasOk {
								log.Printf("[INFO] Setting %v replicas", testReplicas)
								replicas := uint64(testReplicas.(int))
								serviceMode.Replicated.Replicas = &replicas
							}
						}
					}
				}
				// "global" is a plain bool, not a nested block
				if rawGlobalMode, globalModeOk := rawMode["global"]; globalModeOk && rawGlobalMode.(bool) {
					log.Printf("[INFO] Setting service mode to 'global' is %v", rawGlobalMode)
					serviceMode.Global = &swarm.GlobalService{}
				}
			}
		}
	}
	return serviceMode, nil
}
// createServiceUpdateConfig creates the service update config, or nil
// when no "update_config" block is present.
func createServiceUpdateConfig(d *schema.ResourceData) (*swarm.UpdateConfig, error) {
	v, ok := d.GetOk("update_config")
	if !ok {
		return nil, nil
	}
	return createUpdateOrRollbackConfig(v.([]interface{}))
}
// createServiceRollbackConfig creates the service rollback config, or
// nil when no "rollback_config" block is present.
func createServiceRollbackConfig(d *schema.ResourceData) (*swarm.UpdateConfig, error) {
	v, ok := d.GetOk("rollback_config")
	if !ok {
		return nil, nil
	}
	return createUpdateOrRollbackConfig(v.([]interface{}))
}
// == start endpointSpec
// createServiceEndpointSpec creates the spec for the service endpoint
// (resolution mode and published ports) from the "endpoint_spec" block.
func createServiceEndpointSpec(d *schema.ResourceData) (*swarm.EndpointSpec, error) {
	endpointSpec := swarm.EndpointSpec{}
	v, ok := d.GetOk("endpoint_spec")
	if !ok {
		return &endpointSpec, nil
	}
	for _, raw := range v.([]interface{}) {
		if raw == nil {
			continue
		}
		spec := raw.(map[string]interface{})
		if mode, ok := spec["mode"]; ok {
			endpointSpec.Mode = swarm.ResolutionMode(mode.(string))
		}
		if ports, ok := spec["ports"]; ok {
			endpointSpec.Ports = portSetToServicePorts(ports)
		}
	}
	return &endpointSpec, nil
}
// portSetToServicePorts maps the raw "ports" list to swarm port configs.
func portSetToServicePorts(v interface{}) []swarm.PortConfig {
	retPortConfigs := []swarm.PortConfig{}
	for _, portInt := range v.([]interface{}) {
		rawPort := portInt.(map[string]interface{})
		portConfig := swarm.PortConfig{}
		if name, ok := rawPort["name"]; ok {
			portConfig.Name = name.(string)
		}
		if protocol, ok := rawPort["protocol"]; ok {
			portConfig.Protocol = swarm.PortConfigProtocol(protocol.(string))
		}
		if targetPort, ok := rawPort["target_port"]; ok {
			portConfig.TargetPort = uint32(targetPort.(int))
		}
		if externalPort, ok := rawPort["published_port"]; ok {
			portConfig.PublishedPort = uint32(externalPort.(int))
		}
		if publishMode, ok := rawPort["publish_mode"]; ok {
			portConfig.PublishMode = swarm.PortConfigPublishMode(publishMode.(string))
		}
		retPortConfigs = append(retPortConfigs, portConfig)
	}
	return retPortConfigs
}
// == end endpointSpec
// createUpdateOrRollbackConfig creates the configuration for an update
// or a rollback from the raw MaxItems=1 config list.
//
// NOTE(review): the time.ParseDuration and strconv.ParseFloat errors
// are discarded, so unparsable values silently become their zero value —
// presumably the schema validates the formats upstream; confirm before
// relying on that.
func createUpdateOrRollbackConfig(config []interface{}) (*swarm.UpdateConfig, error) {
	updateConfig := swarm.UpdateConfig{}
	if len(config) > 0 {
		sc := config[0].(map[string]interface{})
		if v, ok := sc["parallelism"]; ok {
			updateConfig.Parallelism = uint64(v.(int))
		}
		if v, ok := sc["delay"]; ok {
			updateConfig.Delay, _ = time.ParseDuration(v.(string))
		}
		if v, ok := sc["failure_action"]; ok {
			updateConfig.FailureAction = v.(string)
		}
		if v, ok := sc["monitor"]; ok {
			updateConfig.Monitor, _ = time.ParseDuration(v.(string))
		}
		if v, ok := sc["max_failure_ratio"]; ok {
			value, _ := strconv.ParseFloat(v.(string), 64)
			updateConfig.MaxFailureRatio = float32(value)
		}
		if v, ok := sc["order"]; ok {
			updateConfig.Order = v.(string)
		}
	}
	return &updateConfig, nil
}
// createConvergeConfig creates the plain converge configuration (delay
// and timeout used while waiting for the service to converge).
func createConvergeConfig(config []interface{}) *convergeConfig {
	out := &convergeConfig{}
	for _, raw := range config {
		cfg := raw.(map[string]interface{})
		if delay, ok := cfg["delay"]; ok {
			out.delay, _ = time.ParseDuration(delay.(string))
		}
		if timeout, ok := cfg["timeout"]; ok {
			out.timeoutRaw, _ = timeout.(string)
			out.timeout, _ = time.ParseDuration(timeout.(string))
		}
	}
	return out
}
// authToServiceAuth maps the auth to AuthConfiguration
func authToServiceAuth(auths []interface{}) types.AuthConfig {
if len(auths) == 0 {
@ -1342,40 +631,6 @@ func retrieveAndMarshalAuth(d *schema.ResourceData, meta interface{}, stageType
return marshalledAuth
}
// stringSetToPlacementPrefs maps a set of spread descriptors to swarm
// placement preferences.
func stringSetToPlacementPrefs(stringSet *schema.Set) []swarm.PlacementPreference {
	prefs := []swarm.PlacementPreference{}
	if stringSet == nil {
		return prefs
	}
	for _, descriptor := range stringSet.List() {
		spread := &swarm.SpreadOver{SpreadDescriptor: descriptor.(string)}
		prefs = append(prefs, swarm.PlacementPreference{Spread: spread})
	}
	return prefs
}
// mapSetToPlacementPlatforms maps a set of architecture/os maps to
// swarm platforms.
func mapSetToPlacementPlatforms(stringSet *schema.Set) []swarm.Platform {
	platforms := []swarm.Platform{}
	if stringSet == nil {
		return platforms
	}
	for _, raw := range stringSet.List() {
		platform := raw.(map[string]interface{})
		platforms = append(platforms, swarm.Platform{
			Architecture: platform["architecture"].(string),
			OS:           platform["os"].(string),
		})
	}
	return platforms
}
//////// States
// numberedStates are ascending sorted states for docker tasks

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -130,6 +130,100 @@ func TestMigrateServiceV1ToV2_with_auth(t *testing.T) {
}
}
// TestMigrateServiceLabelState_empty_labels verifies that a v0 service
// state without any labels validates against the v0 schema and, after
// running migrateServiceLabels, still validates against the current
// schema.
func TestMigrateServiceLabelState_empty_labels(t *testing.T) {
	v0State := map[string]interface{}{
		"name": "volume-name",
		"task_spec": []interface{}{
			map[string]interface{}{
				"container_spec": []interface{}{
					map[string]interface{}{
						"image": "repo:tag",
						"mounts": []interface{}{
							map[string]interface{}{
								"target": "path/to/target",
								"type":   "bind",
								"volume_options": []interface{}{
									map[string]interface{}{},
								},
							},
						},
					},
				},
			},
		},
	}
	// first validate that we build that correctly
	v0Config := terraform.NewResourceConfigRaw(v0State)
	diags := resourceDockerServiceV0().Validate(v0Config)
	if diags.HasError() {
		t.Error("test precondition failed - attempt to migrate an invalid v0 config")
		return
	}
	// migrate and validate against the current schema
	v1State := migrateServiceLabels(v0State)
	v1Config := terraform.NewResourceConfigRaw(v1State)
	diags = resourceDockerService().Validate(v1Config)
	if diags.HasError() {
		fmt.Println(diags)
		t.Error("migrated service config is invalid")
		return
	}
}
// TestMigrateServiceLabelState_with_labels verifies that a v0 service
// state carrying labels at the service, container-spec and
// volume-options level validates against the v0 schema and, after
// running migrateServiceLabels, still validates against the current
// schema.
func TestMigrateServiceLabelState_with_labels(t *testing.T) {
	v0State := map[string]interface{}{
		"name": "volume-name",
		"task_spec": []interface{}{
			map[string]interface{}{
				"container_spec": []interface{}{
					map[string]interface{}{
						"image": "repo:tag",
						"labels": map[string]interface{}{
							"type": "container",
							"env":  "dev",
						},
						"mounts": []interface{}{
							map[string]interface{}{
								"target": "path/to/target",
								"type":   "bind",
								"volume_options": []interface{}{
									map[string]interface{}{
										"labels": map[string]interface{}{
											"type": "mount",
										},
									},
								},
							},
						},
					},
				},
			},
		},
		"labels": map[string]interface{}{
			"foo": "bar",
			"env": "dev",
		},
	}
	// first validate that we build that correctly
	v0Config := terraform.NewResourceConfigRaw(v0State)
	diags := resourceDockerServiceV0().Validate(v0Config)
	if diags.HasError() {
		t.Error("test precondition failed - attempt to migrate an invalid v0 config")
		return
	}
	// migrate and validate against the current schema
	v1State := migrateServiceLabels(v0State)
	v1Config := terraform.NewResourceConfigRaw(v1State)
	diags = resourceDockerService().Validate(v1Config)
	if diags.HasError() {
		fmt.Println(diags)
		t.Error("migrated service config is invalid")
		return
	}
}
func TestDockerSecretFromRegistryAuth_basic(t *testing.T) {
authConfigs := make(map[string]types.AuthConfig)
authConfigs["https://repo.my-company.com:8787"] = types.AuthConfig{

View file

@ -13,6 +13,12 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (
volumeReadRefreshTimeout = 30 * time.Second
volumeReadRefreshWaitBeforeRefreshes = 5 * time.Second
volumeReadRefreshDelay = 2 * time.Second
)
func resourceDockerVolume() *schema.Resource {
return &schema.Resource{
CreateContext: resourceDockerVolumeCreate,
@ -64,41 +70,6 @@ func resourceDockerVolume() *schema.Resource {
}
}
// resourceDockerVolumeV0 returns the v0 schema of the docker_volume
// resource. It exists solely so state written by older provider
// versions can be migrated to the current schema version.
func resourceDockerVolumeV0() *schema.Resource {
	return &schema.Resource{
		// This is only used for state migration, so the CRUD
		// callbacks are no longer relevant
		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"labels": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
			"driver": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"driver_opts": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
			"mountpoint": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
func resourceDockerVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
client := meta.(*ProviderConfig).DockerClient
@ -150,15 +121,15 @@ func resourceDockerVolumeRead(ctx context.Context, d *schema.ResourceData, meta
}
func resourceDockerVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
log.Printf("[INFO] Waiting for volume: '%s' to get removed: max '%v seconds'", d.Id(), 30)
log.Printf("[INFO] Waiting for volume: '%s' to get removed: max '%v seconds'", d.Id(), volumeReadRefreshTimeout)
stateConf := &resource.StateChangeConf{
Pending: []string{"in_use"},
Target: []string{"removed"},
Refresh: resourceDockerVolumeRemoveRefreshFunc(d.Id(), meta),
Timeout: 30 * time.Second,
MinTimeout: 5 * time.Second,
Delay: 2 * time.Second,
Timeout: volumeReadRefreshTimeout,
MinTimeout: volumeReadRefreshWaitBeforeRefreshes,
Delay: volumeReadRefreshDelay,
}
// Wait, catching any errors

View file

@ -0,0 +1,38 @@
package provider
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
// resourceDockerVolumeV0 returns the v0 schema of the docker_volume
// resource. It exists solely so state written by older provider
// versions can be migrated to the current schema version.
func resourceDockerVolumeV0() *schema.Resource {
	return &schema.Resource{
		// This is only used for state migration, so the CRUD
		// callbacks are no longer relevant
		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"labels": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
			"driver": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"driver_opts": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
			"mountpoint": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}

View file

@ -1,585 +0,0 @@
package provider
import (
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/swarm"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// flattenTaskSpec maps a swarm task spec from the API into the
// single-element list used by the "task_spec" attribute. Optional
// sub-specs are only set when present in the API response.
func flattenTaskSpec(in swarm.TaskSpec) []interface{} {
	spec := map[string]interface{}{
		"force_update": in.ForceUpdate,
	}
	if in.ContainerSpec != nil {
		spec["container_spec"] = flattenContainerSpec(in.ContainerSpec)
	}
	if in.Resources != nil {
		spec["resources"] = flattenTaskResources(in.Resources)
	}
	if in.RestartPolicy != nil {
		spec["restart_policy"] = flattenTaskRestartPolicy(in.RestartPolicy)
	}
	if in.Placement != nil {
		spec["placement"] = flattenTaskPlacement(in.Placement)
	}
	if len(in.Runtime) > 0 {
		spec["runtime"] = in.Runtime
	}
	if len(in.Networks) > 0 {
		spec["networks"] = flattenTaskNetworks(in.Networks)
	}
	if in.LogDriver != nil {
		spec["log_driver"] = flattenTaskLogDriver(in.LogDriver)
	}
	return []interface{}{spec}
}
// flattenServiceMode maps the swarm service mode into the
// single-element list for the "mode" attribute.
func flattenServiceMode(in swarm.ServiceMode) []interface{} {
	mode := map[string]interface{}{
		"global": in.Global != nil,
	}
	if in.Replicated != nil {
		mode["replicated"] = flattenReplicated(in.Replicated)
	}
	return []interface{}{mode}
}
// flattenReplicated maps the replicated service mode into the
// single-element list for the "replicated" attribute.
func flattenReplicated(in *swarm.ReplicatedService) []interface{} {
	entry := make(map[string]interface{})
	if in != nil && in.Replicas != nil {
		entry["replicas"] = int(*in.Replicas)
	}
	return []interface{}{entry}
}
// flattenServiceUpdateOrRollbackConfig maps an update/rollback config
// from the API into the single-element list used by the corresponding
// attribute; a nil config yields an empty list.
func flattenServiceUpdateOrRollbackConfig(in *swarm.UpdateConfig) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	entry := map[string]interface{}{
		"parallelism":       in.Parallelism,
		"delay":             shortDur(in.Delay),
		"failure_action":    in.FailureAction,
		"monitor":           shortDur(in.Monitor),
		"max_failure_ratio": strconv.FormatFloat(float64(in.MaxFailureRatio), 'f', 1, 64),
		"order":             in.Order,
	}
	return []interface{}{entry}
}
// flattenServiceEndpoint maps the service endpoint from the API into
// the single-element list for the "endpoint_spec" attribute.
func flattenServiceEndpoint(in swarm.Endpoint) []interface{} {
	entry := map[string]interface{}{
		"mode":  string(in.Spec.Mode),
		"ports": flattenServicePorts(in.Ports),
	}
	return []interface{}{entry}
}
// flattenServiceEndpointSpec maps the service endpoint spec into the
// single-element list for the "endpoint_spec" attribute.
//
// A nil spec now yields an empty list — consistent with the other
// pointer-taking flatteners (e.g. flattenServiceDNSConfig) — instead of
// panicking on the in.Mode dereference.
func flattenServiceEndpointSpec(in *swarm.EndpointSpec) []interface{} {
	out := make([]interface{}, 0)
	if in == nil {
		return out
	}
	m := make(map[string]interface{})
	m["mode"] = string(in.Mode)
	m["ports"] = flattenServicePorts(in.Ports)
	out = append(out, m)
	return out
}
///// start TaskSpec
// flattenContainerSpec maps a swarm container spec from the API into
// the single-element list used by the "container_spec" attribute.
// Empty or zero-valued fields are omitted so the flattened state
// matches a config that never set them.
func flattenContainerSpec(in *swarm.ContainerSpec) []interface{} {
	out := make([]interface{}, 0)
	m := make(map[string]interface{})
	if len(in.Image) > 0 {
		m["image"] = in.Image
	}
	if len(in.Labels) > 0 {
		m["labels"] = mapToLabelSet(in.Labels)
	}
	if len(in.Command) > 0 {
		m["command"] = in.Command
	}
	if len(in.Args) > 0 {
		m["args"] = in.Args
	}
	if len(in.Hostname) > 0 {
		m["hostname"] = in.Hostname
	}
	if len(in.Env) > 0 {
		// env arrives as "KEY=VALUE" strings; convert back to a map
		m["env"] = mapStringSliceToMap(in.Env)
	}
	if len(in.User) > 0 {
		m["user"] = in.User
	}
	if len(in.Dir) > 0 {
		m["dir"] = in.Dir
	}
	if len(in.Groups) > 0 {
		m["groups"] = in.Groups
	}
	if in.Privileges != nil {
		m["privileges"] = flattenPrivileges(in.Privileges)
	}
	if in.ReadOnly {
		m["read_only"] = in.ReadOnly
	}
	if len(in.Mounts) > 0 {
		m["mounts"] = flattenServiceMounts(in.Mounts)
	}
	if len(in.StopSignal) > 0 {
		m["stop_signal"] = in.StopSignal
	}
	if in.StopGracePeriod != nil {
		m["stop_grace_period"] = shortDur(*in.StopGracePeriod)
	}
	if in.Healthcheck != nil {
		m["healthcheck"] = flattenServiceHealthcheck(in.Healthcheck)
	}
	if len(in.Hosts) > 0 {
		m["hosts"] = flattenServiceHosts(in.Hosts)
	}
	if in.DNSConfig != nil {
		m["dns_config"] = flattenServiceDNSConfig(in.DNSConfig)
	}
	if len(in.Secrets) > 0 {
		m["secrets"] = flattenServiceSecrets(in.Secrets)
	}
	if len(in.Configs) > 0 {
		m["configs"] = flattenServiceConfigs(in.Configs)
	}
	if len(in.Isolation) > 0 {
		m["isolation"] = string(in.Isolation)
	}
	out = append(out, m)
	return out
}
// flattenPrivileges maps the container privileges (credential spec and
// SELinux context) into the single-element list for the "privileges"
// attribute; a nil input yields an empty list.
func flattenPrivileges(in *swarm.Privileges) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	m := make(map[string]interface{})
	if credSpec := in.CredentialSpec; credSpec != nil {
		m["credential_spec"] = []interface{}{
			map[string]interface{}{
				"file":     credSpec.File,
				"registry": credSpec.Registry,
			},
		}
	}
	if seLinux := in.SELinuxContext; seLinux != nil {
		m["se_linux_context"] = []interface{}{
			map[string]interface{}{
				"disable": seLinux.Disable,
				"user":    seLinux.User,
				"role":    seLinux.Role,
				"type":    seLinux.Type,
				"level":   seLinux.Level,
			},
		}
	}
	return []interface{}{m}
}
// flattenServiceMounts maps the API mount definitions into the schema
// set for the "mounts" attribute. The nested bind/volume/tmpfs option
// blocks are only populated when present in the API response.
func flattenServiceMounts(in []mount.Mount) *schema.Set {
	out := make([]interface{}, len(in))
	for i, v := range in {
		m := make(map[string]interface{})
		m["target"] = v.Target
		m["source"] = v.Source
		m["type"] = string(v.Type)
		m["read_only"] = v.ReadOnly
		if v.BindOptions != nil {
			bindOptions := make([]interface{}, 0)
			bindOptionsItem := make(map[string]interface{})
			if len(v.BindOptions.Propagation) > 0 {
				bindOptionsItem["propagation"] = string(v.BindOptions.Propagation)
			}
			bindOptions = append(bindOptions, bindOptionsItem)
			m["bind_options"] = bindOptions
		}
		if v.VolumeOptions != nil {
			volumeOptions := make([]interface{}, 0)
			volumeOptionsItem := make(map[string]interface{})
			volumeOptionsItem["no_copy"] = v.VolumeOptions.NoCopy
			volumeOptionsItem["labels"] = mapToLabelSet(v.VolumeOptions.Labels)
			if v.VolumeOptions.DriverConfig != nil {
				if len(v.VolumeOptions.DriverConfig.Name) > 0 {
					volumeOptionsItem["driver_name"] = v.VolumeOptions.DriverConfig.Name
				}
				volumeOptionsItem["driver_options"] = mapStringStringToMapStringInterface(v.VolumeOptions.DriverConfig.Options)
			}
			volumeOptions = append(volumeOptions, volumeOptionsItem)
			m["volume_options"] = volumeOptions
		}
		if v.TmpfsOptions != nil {
			tmpfsOptions := make([]interface{}, 0)
			tmpfsOptionsItem := make(map[string]interface{})
			tmpfsOptionsItem["size_bytes"] = int(v.TmpfsOptions.SizeBytes)
			// NOTE(review): "Mode.Perm" without parentheses stores the method
			// value, not the permission bits — looks like a missing call
			// (.Perm()); confirm against the upstream source before changing.
			tmpfsOptionsItem["mode"] = v.TmpfsOptions.Mode.Perm
			tmpfsOptions = append(tmpfsOptions, tmpfsOptionsItem)
			m["tmpfs_options"] = tmpfsOptions
		}
		out[i] = m
	}
	// hash with the nested "mounts" element resource so set membership
	// matches what Terraform computes for config-derived values
	taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
	containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource)
	mountsResource := containerSpecResource.Schema["mounts"].Elem.(*schema.Resource)
	f := schema.HashResource(mountsResource)
	return schema.NewSet(f, out)
}
// flattenServiceHealthcheck maps the container health check config into
// the single-element list for the "healthcheck" attribute; a nil input
// yields an empty list.
func flattenServiceHealthcheck(in *container.HealthConfig) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	entry := map[string]interface{}{
		"interval":     shortDur(in.Interval),
		"timeout":      shortDur(in.Timeout),
		"start_period": shortDur(in.StartPeriod),
		"retries":      in.Retries,
	}
	if len(in.Test) > 0 {
		entry["test"] = in.Test
	}
	return []interface{}{entry}
}
// flattenServiceHosts maps the API "host:ip" entries into the schema
// set for the "hosts" attribute.
//
// Each entry is split on the FIRST colon only (strings.SplitN), so an
// IPv6 address — which itself contains colons — keeps the complete
// address in "ip"; the previous strings.Split truncated it at the
// address's first colon. An entry without any colon keeps an empty "ip"
// instead of panicking with an index out of range.
func flattenServiceHosts(in []string) *schema.Set {
	out := make([]interface{}, len(in))
	for i, v := range in {
		m := make(map[string]interface{})
		split := strings.SplitN(v, ":", 2)
		m["host"] = split[0]
		m["ip"] = ""
		if len(split) == 2 {
			m["ip"] = split[1]
		}
		out[i] = m
	}
	taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
	containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource)
	hostsResource := containerSpecResource.Schema["hosts"].Elem.(*schema.Resource)
	f := schema.HashResource(hostsResource)
	return schema.NewSet(f, out)
}
// flattenServiceDNSConfig maps the swarm DNS config into the
// single-element list for the "dns_config" attribute; a nil input
// yields an empty list.
func flattenServiceDNSConfig(in *swarm.DNSConfig) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	entry := make(map[string]interface{})
	if len(in.Nameservers) > 0 {
		entry["nameservers"] = in.Nameservers
	}
	if len(in.Search) > 0 {
		entry["search"] = in.Search
	}
	if len(in.Options) > 0 {
		entry["options"] = in.Options
	}
	return []interface{}{entry}
}
// flattenServiceSecrets maps the API secret references into the schema
// set for the "secrets" attribute, hashed with the nested "secrets"
// element resource.
func flattenServiceSecrets(in []*swarm.SecretReference) *schema.Set {
	out := make([]interface{}, len(in))
	for i, ref := range in {
		entry := map[string]interface{}{
			"secret_id": ref.SecretID,
		}
		if ref.SecretName != "" {
			entry["secret_name"] = ref.SecretName
		}
		if ref.File != nil {
			entry["file_name"] = ref.File.Name
			if ref.File.UID != "" {
				entry["file_uid"] = ref.File.UID
			}
			if ref.File.GID != "" {
				entry["file_gid"] = ref.File.GID
			}
			entry["file_mode"] = int(ref.File.Mode)
		}
		out[i] = entry
	}
	taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
	containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource)
	secretsResource := containerSpecResource.Schema["secrets"].Elem.(*schema.Resource)
	return schema.NewSet(schema.HashResource(secretsResource), out)
}
// flattenServiceConfigs maps the API config references into the schema
// set for the "configs" attribute, hashed with the nested "configs"
// element resource.
func flattenServiceConfigs(in []*swarm.ConfigReference) *schema.Set {
	out := make([]interface{}, len(in))
	for i, ref := range in {
		entry := map[string]interface{}{
			"config_id": ref.ConfigID,
		}
		if ref.ConfigName != "" {
			entry["config_name"] = ref.ConfigName
		}
		if ref.File != nil {
			entry["file_name"] = ref.File.Name
			if ref.File.UID != "" {
				entry["file_uid"] = ref.File.UID
			}
			if ref.File.GID != "" {
				entry["file_gid"] = ref.File.GID
			}
			entry["file_mode"] = int(ref.File.Mode)
		}
		out[i] = entry
	}
	taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
	containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource)
	configsResource := containerSpecResource.Schema["configs"].Elem.(*schema.Resource)
	return schema.NewSet(schema.HashResource(configsResource), out)
}
// flattenTaskResources maps the task resource requirements into the
// single-element list for the "resources" attribute; a nil input yields
// an empty list.
func flattenTaskResources(in *swarm.ResourceRequirements) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	entry := map[string]interface{}{
		"limits": flattenResourceLimits(in.Limits),
		// TODO mvogel: name reservations
		"reservation": flattenResourceReservations(in.Reservations),
	}
	return []interface{}{entry}
}
// flattenResourceLimits maps the resource limits into the
// single-element list for the "limits" attribute; a nil input yields an
// empty list.
func flattenResourceLimits(in *swarm.Limit) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	entry := map[string]interface{}{
		"nano_cpus":    in.NanoCPUs,
		"memory_bytes": in.MemoryBytes,
		// TODO mavogel add pids
		// "pids": in.Pids,
	}
	return []interface{}{entry}
}
// flattenResourceReservations maps the resource reservations into the
// single-element list for the "reservation" attribute; a nil input
// yields an empty list.
func flattenResourceReservations(in *swarm.Resources) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	entry := map[string]interface{}{
		"nano_cpus":         in.NanoCPUs,
		"memory_bytes":      in.MemoryBytes,
		"generic_resources": flattenResourceGenericResource(in.GenericResources),
	}
	return []interface{}{entry}
}
// flattenResourceGenericResource maps the API generic resources back to
// the "kind=value" string sets used by the "named_resources_spec" and
// "discrete_resources_spec" attributes; an empty input yields an empty
// list.
func flattenResourceGenericResource(in []swarm.GenericResource) []interface{} {
	if len(in) == 0 {
		return make([]interface{}, 0)
	}
	named := make([]string, 0)
	discrete := make([]string, 0)
	for _, res := range in {
		if spec := res.NamedResourceSpec; spec != nil {
			named = append(named, spec.Kind+"="+spec.Value)
		}
		if spec := res.DiscreteResourceSpec; spec != nil {
			discrete = append(discrete, spec.Kind+"="+strconv.Itoa(int(spec.Value)))
		}
	}
	entry := map[string]interface{}{
		"named_resources_spec":    newStringSet(schema.HashString, named),
		"discrete_resources_spec": newStringSet(schema.HashString, discrete),
	}
	return []interface{}{entry}
}
// flattenTaskRestartPolicy maps the task restart policy into the
// single-element list for the "restart_policy" attribute; a nil input
// yields an empty list.
func flattenTaskRestartPolicy(in *swarm.RestartPolicy) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	entry := make(map[string]interface{})
	if in.Condition != "" {
		entry["condition"] = string(in.Condition)
	}
	if in.Delay != nil {
		entry["delay"] = shortDur(*in.Delay)
	}
	if in.MaxAttempts != nil {
		entry["max_attempts"] = int(*in.MaxAttempts)
	}
	if in.Window != nil {
		entry["window"] = shortDur(*in.Window)
	}
	return []interface{}{entry}
}
// flattenTaskPlacement maps the task placement into the single-element
// list for the "placement" attribute; a nil input yields an empty list.
func flattenTaskPlacement(in *swarm.Placement) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	entry := map[string]interface{}{
		"max_replicas": in.MaxReplicas,
	}
	if len(in.Constraints) > 0 {
		entry["constraints"] = newStringSet(schema.HashString, in.Constraints)
	}
	if len(in.Preferences) > 0 {
		entry["prefs"] = flattenPlacementPrefs(in.Preferences)
	}
	if len(in.Platforms) > 0 {
		entry["platforms"] = flattenPlacementPlatforms(in.Platforms)
	}
	return []interface{}{entry}
}
// flattenPlacementPrefs maps the placement preferences back to the
// string set of spread descriptors used by the "prefs" attribute.
func flattenPlacementPrefs(in []swarm.PlacementPreference) *schema.Set {
	descriptors := make([]interface{}, 0, len(in))
	for _, pref := range in {
		descriptors = append(descriptors, pref.Spread.SpreadDescriptor)
	}
	return schema.NewSet(schema.HashString, descriptors)
}
// flattenPlacementPlatforms maps the API placement platforms into the
// schema set for the "platforms" attribute.
//
// NOTE(review): the hash function is built from the "placement" element
// resource rather than its nested "platforms" element resource — the
// other flatteners (e.g. flattenServiceMounts) hash with the element's
// own resource. Looks inconsistent; confirm the intended hash schema
// before changing.
func flattenPlacementPlatforms(in []swarm.Platform) *schema.Set {
	out := make([]interface{}, len(in))
	for i, v := range in {
		m := make(map[string]interface{})
		m["architecture"] = v.Architecture
		m["os"] = v.OS
		out[i] = m
	}
	taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
	placementResource := taskSpecResource.Schema["placement"].Elem.(*schema.Resource)
	f := schema.HashResource(placementResource)
	return schema.NewSet(f, out)
}
// flattenTaskNetworks maps the network attachment configs back to the
// string set of network targets used by the "networks" attribute.
func flattenTaskNetworks(in []swarm.NetworkAttachmentConfig) *schema.Set {
	targets := make([]interface{}, 0, len(in))
	for _, attachment := range in {
		targets = append(targets, attachment.Target)
	}
	return schema.NewSet(schema.HashString, targets)
}
// flattenTaskLogDriver maps the task log driver into the single-element
// list for the "log_driver" attribute; a nil input yields an empty
// list.
func flattenTaskLogDriver(in *swarm.Driver) []interface{} {
	if in == nil {
		return make([]interface{}, 0)
	}
	entry := map[string]interface{}{
		"name": in.Name,
	}
	if len(in.Options) > 0 {
		entry["options"] = in.Options
	}
	return []interface{}{entry}
}
///// end TaskSpec
///// start EndpointSpec
// flattenServicePorts maps the API port configs into the list of maps
// used by the "ports" attribute.
func flattenServicePorts(in []swarm.PortConfig) []interface{} {
	out := make([]interface{}, 0, len(in))
	for _, port := range in {
		out = append(out, map[string]interface{}{
			"name":           port.Name,
			"protocol":       string(port.Protocol),
			"target_port":    int(port.TargetPort),
			"published_port": int(port.PublishedPort),
			"publish_mode":   string(port.PublishMode),
		})
	}
	return out
}
///// end EndpointSpec
// HELPERS
// shortDur renders a duration in Go's standard notation with redundant
// trailing zero components removed, e.g. "1m0s" -> "1m" and
// "1h0m0s" -> "1h" (both suffix trims apply in sequence).
func shortDur(d time.Duration) string {
	s := d.String()
	if strings.HasSuffix(s, "m0s") {
		s = strings.TrimSuffix(s, "0s")
	}
	if strings.HasSuffix(s, "h0m") {
		s = strings.TrimSuffix(s, "0m")
	}
	return s
}
// newStringSet builds a schema set from a string slice using the given
// hash function.
func newStringSet(f schema.SchemaSetFunc, in []string) *schema.Set {
	items := make([]interface{}, len(in))
	for i, s := range in {
		items[i] = s
	}
	return schema.NewSet(f, items)
}
// mapStringSliceToMap maps a slice with '=' delimiter to a map; only
// the first '=' separates key and value, so values may themselves
// contain '=': e.g.
// - 'foo=bar'       -> foo = "bar"
// - 'foo=bar?p=baz' -> foo = "bar?p=baz"
// Empty entries are skipped; an entry without '=' maps to "".
func mapStringSliceToMap(in []string) map[string]string {
	mapped := make(map[string]string, len(in))
	for _, entry := range in {
		if entry == "" {
			continue
		}
		parts := strings.SplitN(entry, "=", 2)
		value := ""
		if len(parts) == 2 {
			value = parts[1]
		}
		mapped[parts[0]] = value
	}
	return mapped
}
// mapStringStringToMapStringInterface maps a string/string map to a
// string/interface{} map; nil or empty input yields a non-nil empty
// map.
func mapStringStringToMapStringInterface(in map[string]string) map[string]interface{} {
	mapped := make(map[string]interface{}, len(in))
	for key, value := range in {
		mapped[key] = value
	}
	return mapped
}