mirror of
https://github.com/kreuzwerker/terraform-provider-docker.git
synced 2025-12-20 22:59:42 -05:00
Adds docker swarm features to the provider for the Docker Engine 17.09.1 and API Version 1.32. The spec is close to the API. By default, the swarm services are fire-and-forget. A converging config implements the features of the docker cli to ensure a service and all its replicas are up and running. Furthermore, services can have configs, secrets, networks, and mounts, and can be added to a network.
535 lines
14 KiB
Go
535 lines
14 KiB
Go
package docker
|
|
|
|
import (
|
|
"archive/tar"
|
|
"bytes"
|
|
"errors"
|
|
"fmt"
|
|
"strconv"
|
|
"time"
|
|
|
|
dc "github.com/fsouza/go-dockerclient"
|
|
"github.com/hashicorp/terraform/helper/schema"
|
|
)
|
|
|
|
var (
	// creationTime records when resourceDockerContainerCreate last started a
	// container; resourceDockerContainerRead uses it to decide whether to
	// poll for the container to reach a running state.
	// NOTE(review): package-level mutable state shared by all container
	// resources — concurrent creates would race on it; confirm Terraform
	// serializes these operations.
	creationTime time.Time
)
|
|
|
|
// resourceDockerContainerCreate creates a Docker container from the resource
// configuration, connects it to any requested networks, uploads any declared
// files into it, and finally starts it. On success it delegates state
// population to resourceDockerContainerRead.
func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) error {
	var err error
	client := meta.(*ProviderConfig).DockerClient

	// Resolve the configured image against the images present on the host.
	var data Data
	if err := fetchLocalImages(&data, client); err != nil {
		return err
	}

	image := d.Get("image").(string)
	if _, ok := data.DockerImages[image]; !ok {
		// Fall back to the ":latest" tag when the bare image name is absent.
		if _, ok := data.DockerImages[image+":latest"]; !ok {
			return fmt.Errorf("Unable to find image %s", image)
		}
		image = image + ":latest"
	}

	// The awesome, wonderful, splendiferous, sensical
	// Docker API now lets you specify a HostConfig in
	// CreateContainerOptions, but in my testing it still only
	// actually applies HostConfig options set in StartContainer.
	// How cool is that?
	createOpts := dc.CreateContainerOptions{
		Name: d.Get("name").(string),
		Config: &dc.Config{
			Image:      image,
			Hostname:   d.Get("hostname").(string),
			Domainname: d.Get("domainname").(string),
		},
	}

	if v, ok := d.GetOk("env"); ok {
		createOpts.Config.Env = stringSetToStringSlice(v.(*schema.Set))
	}

	if v, ok := d.GetOk("command"); ok {
		createOpts.Config.Cmd = stringListToStringSlice(v.([]interface{}))
		// Reject empty command elements up front with a clear error instead
		// of passing them to Docker verbatim.
		for _, v := range createOpts.Config.Cmd {
			if v == "" {
				return fmt.Errorf("values for command may not be empty")
			}
		}
	}

	if v, ok := d.GetOk("entrypoint"); ok {
		createOpts.Config.Entrypoint = stringListToStringSlice(v.([]interface{}))
	}

	if v, ok := d.GetOk("user"); ok {
		createOpts.Config.User = v.(string)
	}

	exposedPorts := map[dc.Port]struct{}{}
	portBindings := map[dc.Port][]dc.PortBinding{}

	if v, ok := d.GetOk("ports"); ok {
		exposedPorts, portBindings = portSetToDockerPorts(v.(*schema.Set))
	}
	if len(exposedPorts) != 0 {
		createOpts.Config.ExposedPorts = exposedPorts
	}

	extraHosts := []string{}
	if v, ok := d.GetOk("host"); ok {
		extraHosts = extraHostsSetToDockerExtraHosts(v.(*schema.Set))
	}

	extraUlimits := []dc.ULimit{}
	if v, ok := d.GetOk("ulimit"); ok {
		extraUlimits = ulimitsToDockerUlimits(v.(*schema.Set))
	}
	volumes := map[string]struct{}{}
	binds := []string{}
	volumesFrom := []string{}

	if v, ok := d.GetOk("volumes"); ok {
		volumes, binds, volumesFrom, err = volumeSetToDockerVolumes(v.(*schema.Set))
		if err != nil {
			return fmt.Errorf("Unable to parse volumes: %s", err)
		}
	}
	if len(volumes) != 0 {
		createOpts.Config.Volumes = volumes
	}

	if v, ok := d.GetOk("labels"); ok {
		createOpts.Config.Labels = mapTypeMapValsToString(v.(map[string]interface{}))
	}

	// HostConfig carries the runtime settings; it is attached to createOpts
	// below (see the comment above about the API applying it at start time).
	hostConfig := &dc.HostConfig{
		Privileged:      d.Get("privileged").(bool),
		PublishAllPorts: d.Get("publish_all_ports").(bool),
		RestartPolicy: dc.RestartPolicy{
			Name:              d.Get("restart").(string),
			MaximumRetryCount: d.Get("max_retry_count").(int),
		},
		LogConfig: dc.LogConfig{
			Type: d.Get("log_driver").(string),
		},
	}

	if len(portBindings) != 0 {
		hostConfig.PortBindings = portBindings
	}
	if len(extraHosts) != 0 {
		hostConfig.ExtraHosts = extraHosts
	}
	if len(binds) != 0 {
		hostConfig.Binds = binds
	}
	if len(volumesFrom) != 0 {
		hostConfig.VolumesFrom = volumesFrom
	}
	if len(extraUlimits) != 0 {
		hostConfig.Ulimits = extraUlimits
	}

	if v, ok := d.GetOk("capabilities"); ok {
		// Only the first "capabilities" entry is applied — hence the
		// unconditional break after one iteration.
		for _, capInt := range v.(*schema.Set).List() {
			capa := capInt.(map[string]interface{})
			hostConfig.CapAdd = stringSetToStringSlice(capa["add"].(*schema.Set))
			hostConfig.CapDrop = stringSetToStringSlice(capa["drop"].(*schema.Set))
			break
		}
	}

	if v, ok := d.GetOk("devices"); ok {
		hostConfig.Devices = deviceSetToDockerDevices(v.(*schema.Set))
	}

	if v, ok := d.GetOk("dns"); ok {
		hostConfig.DNS = stringSetToStringSlice(v.(*schema.Set))
	}

	if v, ok := d.GetOk("dns_opts"); ok {
		hostConfig.DNSOptions = stringSetToStringSlice(v.(*schema.Set))
	}

	if v, ok := d.GetOk("dns_search"); ok {
		hostConfig.DNSSearch = stringSetToStringSlice(v.(*schema.Set))
	}

	if v, ok := d.GetOk("links"); ok {
		hostConfig.Links = stringSetToStringSlice(v.(*schema.Set))
	}

	// "memory" is configured in MB; Docker expects bytes.
	if v, ok := d.GetOk("memory"); ok {
		hostConfig.Memory = int64(v.(int)) * 1024 * 1024
	}

	if v, ok := d.GetOk("memory_swap"); ok {
		swap := int64(v.(int))
		// Non-positive values (e.g. -1) are passed through unchanged;
		// positive values are MB and must be converted to bytes.
		if swap > 0 {
			swap = swap * 1024 * 1024
		}
		hostConfig.MemorySwap = swap
	}

	if v, ok := d.GetOk("cpu_shares"); ok {
		hostConfig.CPUShares = int64(v.(int))
	}

	if v, ok := d.GetOk("log_opts"); ok {
		hostConfig.LogConfig.Config = mapTypeMapValsToString(v.(map[string]interface{}))
	}

	if v, ok := d.GetOk("network_mode"); ok {
		hostConfig.NetworkMode = v.(string)
	}

	createOpts.HostConfig = hostConfig

	var retContainer *dc.Container
	if retContainer, err = client.CreateContainer(createOpts); err != nil {
		return fmt.Errorf("Unable to create container: %s", err)
	}
	if retContainer == nil {
		return fmt.Errorf("Returned container is nil")
	}

	d.SetId(retContainer.ID)

	// Connect the container to each requested network, using the same
	// aliases (if any were configured) for every connection.
	if v, ok := d.GetOk("networks"); ok {
		var connectionOpts dc.NetworkConnectionOptions
		if v, ok := d.GetOk("network_alias"); ok {
			endpointConfig := &dc.EndpointConfig{}
			endpointConfig.Aliases = stringSetToStringSlice(v.(*schema.Set))
			connectionOpts = dc.NetworkConnectionOptions{Container: retContainer.ID, EndpointConfig: endpointConfig}
		} else {
			connectionOpts = dc.NetworkConnectionOptions{Container: retContainer.ID}
		}

		for _, rawNetwork := range v.(*schema.Set).List() {
			network := rawNetwork.(string)
			if err := client.ConnectNetwork(network, connectionOpts); err != nil {
				return fmt.Errorf("Unable to connect to network '%s': %s", network, err)
			}
		}
	}

	// Upload each declared file into the container as a one-entry tar
	// archive extracted at "/".
	if v, ok := d.GetOk("upload"); ok {

		var mode int64
		for _, upload := range v.(*schema.Set).List() {
			content := upload.(map[string]interface{})["content"].(string)
			file := upload.(map[string]interface{})["file"].(string)
			executable := upload.(map[string]interface{})["executable"].(bool)

			buf := new(bytes.Buffer)
			tw := tar.NewWriter(buf)
			// 0744 for executables, 0644 otherwise.
			if executable {
				mode = 0744
			} else {
				mode = 0644
			}
			hdr := &tar.Header{
				Name: file,
				Mode: mode,
				Size: int64(len(content)),
			}
			if err := tw.WriteHeader(hdr); err != nil {
				return fmt.Errorf("Error creating tar archive: %s", err)
			}
			if _, err := tw.Write([]byte(content)); err != nil {
				return fmt.Errorf("Error creating tar archive: %s", err)
			}
			if err := tw.Close(); err != nil {
				return fmt.Errorf("Error creating tar archive: %s", err)
			}

			uploadOpts := dc.UploadToContainerOptions{
				InputStream: bytes.NewReader(buf.Bytes()),
				Path:        "/",
			}

			if err := client.UploadToContainer(retContainer.ID, uploadOpts); err != nil {
				return fmt.Errorf("Unable to upload volume content: %s", err)
			}
		}
	}

	// Record the creation instant so the following Read knows to wait for
	// the container to come up.
	creationTime = time.Now()
	if err := client.StartContainer(retContainer.ID, nil); err != nil {
		return fmt.Errorf("Unable to start container: %s", err)
	}

	return resourceDockerContainerRead(d, meta)
}
|
|
|
|
// resourceDockerContainerRead refreshes Terraform state from the live
// container. When the container was just created (creationTime is set) it
// polls InspectContainer for up to ~15 seconds waiting for a running state;
// if the container exited immediately, the resource is deleted and an error
// is returned so dependent resources are not started.
func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*ProviderConfig).DockerClient

	apiContainer, err := fetchDockerContainer(d.Id(), client)
	if err != nil {
		return err
	}
	if apiContainer == nil {
		// This container doesn't exist anymore
		d.SetId("")
		return nil
	}

	var container *dc.Container

	// TODO fix this with statefunc
	loops := 1 // if it hasn't just been created, don't delay
	if !creationTime.IsZero() {
		loops = 30 // with 500ms spacing, 15 seconds; ought to be plenty
	}
	sleepTime := 500 * time.Millisecond

	for i := loops; i > 0; i-- {
		container, err = client.InspectContainer(apiContainer.ID)
		if err != nil {
			return fmt.Errorf("Error inspecting container %s: %s", apiContainer.ID, err)
		}

		// Stop polling once the container runs, or once it has stopped but
		// the configuration tolerates a non-running container.
		if container.State.Running ||
			!container.State.Running && !d.Get("must_run").(bool) {
			break
		}

		if creationTime.IsZero() { // We didn't just create it, so don't wait around
			return resourceDockerContainerDelete(d, meta)
		}

		if container.State.FinishedAt.After(creationTime) {
			// It exited immediately, so error out so dependent containers
			// aren't started
			resourceDockerContainerDelete(d, meta)
			return fmt.Errorf("Container %s exited after creation, error was: %s", apiContainer.ID, container.State.Error)
		}

		time.Sleep(sleepTime)
	}

	// Handle the case of the for loop above running its course
	if !container.State.Running && d.Get("must_run").(bool) {
		resourceDockerContainerDelete(d, meta)
		return fmt.Errorf("Container %s failed to be in running state", apiContainer.ID)
	}

	// Read Network Settings
	if container.NetworkSettings != nil {
		d.Set("ip_address", container.NetworkSettings.IPAddress)
		d.Set("ip_prefix_length", container.NetworkSettings.IPPrefixLen)
		d.Set("gateway", container.NetworkSettings.Gateway)
		d.Set("bridge", container.NetworkSettings.Bridge)
	}

	return nil
}
|
|
|
|
// resourceDockerContainerUpdate is intentionally a no-op: nothing is modified
// in place here. NOTE(review): presumably the schema marks attributes as
// ForceNew so changes recreate the container — confirm against the resource
// schema definition.
func resourceDockerContainerUpdate(d *schema.ResourceData, meta interface{}) error {
	return nil
}
|
|
|
|
func resourceDockerContainerDelete(d *schema.ResourceData, meta interface{}) error {
|
|
client := meta.(*ProviderConfig).DockerClient
|
|
|
|
// Stop the container before removing if destroy_grace_seconds is defined
|
|
if d.Get("destroy_grace_seconds").(int) > 0 {
|
|
var timeout = uint(d.Get("destroy_grace_seconds").(int))
|
|
if err := client.StopContainer(d.Id(), timeout); err != nil {
|
|
return fmt.Errorf("Error stopping container %s: %s", d.Id(), err)
|
|
}
|
|
}
|
|
|
|
removeOpts := dc.RemoveContainerOptions{
|
|
ID: d.Id(),
|
|
RemoveVolumes: true,
|
|
Force: true,
|
|
}
|
|
|
|
if err := client.RemoveContainer(removeOpts); err != nil {
|
|
return fmt.Errorf("Error deleting container %s: %s", d.Id(), err)
|
|
}
|
|
|
|
d.SetId("")
|
|
return nil
|
|
}
|
|
|
|
// stringListToStringSlice converts a Terraform list value into a []string,
// mapping nil entries to the empty string. The result is always non-nil.
func stringListToStringSlice(stringList []interface{}) []string {
	result := make([]string, 0, len(stringList))
	for _, item := range stringList {
		if item == nil {
			result = append(result, "")
		} else {
			result = append(result, item.(string))
		}
	}
	return result
}
|
|
|
|
func stringSetToStringSlice(stringSet *schema.Set) []string {
|
|
ret := []string{}
|
|
if stringSet == nil {
|
|
return ret
|
|
}
|
|
for _, envVal := range stringSet.List() {
|
|
ret = append(ret, envVal.(string))
|
|
}
|
|
return ret
|
|
}
|
|
|
|
// mapTypeMapValsToString converts a Terraform TypeMap value (string keys,
// interface{} values) into a plain map[string]string.
func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
	result := make(map[string]string, len(typeMap))
	for key, val := range typeMap {
		result[key] = val.(string)
	}
	return result
}
|
|
|
|
// mapTypeMapValsToStringSlice maps a map to a slice with '=': e.g. foo = "bar" -> 'foo=bar'
//
// Bug fix: the slice was previously created with make([]string, len(typeMap))
// and then appended to, which left len(typeMap) empty strings at the front of
// the result. It is now created with zero length and the right capacity.
func mapTypeMapValsToStringSlice(typeMap map[string]interface{}) []string {
	mapped := make([]string, 0, len(typeMap))
	for k, v := range typeMap {
		mapped = append(mapped, k+"="+v.(string))
	}
	return mapped
}
|
|
|
|
func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) {
|
|
apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true})
|
|
|
|
if err != nil {
|
|
return nil, fmt.Errorf("Error fetching container information from Docker: %s\n", err)
|
|
}
|
|
|
|
for _, apiContainer := range apiContainers {
|
|
if apiContainer.ID == ID {
|
|
return &apiContainer, nil
|
|
}
|
|
}
|
|
|
|
return nil, nil
|
|
}
|
|
|
|
func portSetToDockerPorts(ports *schema.Set) (map[dc.Port]struct{}, map[dc.Port][]dc.PortBinding) {
|
|
retExposedPorts := map[dc.Port]struct{}{}
|
|
retPortBindings := map[dc.Port][]dc.PortBinding{}
|
|
|
|
for _, portInt := range ports.List() {
|
|
port := portInt.(map[string]interface{})
|
|
internal := port["internal"].(int)
|
|
protocol := port["protocol"].(string)
|
|
|
|
exposedPort := dc.Port(strconv.Itoa(internal) + "/" + protocol)
|
|
retExposedPorts[exposedPort] = struct{}{}
|
|
|
|
external, extOk := port["external"].(int)
|
|
ip, ipOk := port["ip"].(string)
|
|
|
|
if extOk {
|
|
portBinding := dc.PortBinding{
|
|
HostPort: strconv.Itoa(external),
|
|
}
|
|
if ipOk {
|
|
portBinding.HostIP = ip
|
|
}
|
|
retPortBindings[exposedPort] = append(retPortBindings[exposedPort], portBinding)
|
|
}
|
|
}
|
|
|
|
return retExposedPorts, retPortBindings
|
|
}
|
|
|
|
func ulimitsToDockerUlimits(extraUlimits *schema.Set) []dc.ULimit {
|
|
retExtraUlimits := []dc.ULimit{}
|
|
|
|
for _, ulimitInt := range extraUlimits.List() {
|
|
ulimits := ulimitInt.(map[string]interface{})
|
|
u := dc.ULimit{
|
|
Name: ulimits["name"].(string),
|
|
Soft: int64(ulimits["soft"].(int)),
|
|
Hard: int64(ulimits["hard"].(int)),
|
|
}
|
|
retExtraUlimits = append(retExtraUlimits, u)
|
|
}
|
|
|
|
return retExtraUlimits
|
|
}
|
|
func extraHostsSetToDockerExtraHosts(extraHosts *schema.Set) []string {
|
|
retExtraHosts := []string{}
|
|
|
|
for _, hostInt := range extraHosts.List() {
|
|
host := hostInt.(map[string]interface{})
|
|
ip := host["ip"].(string)
|
|
hostname := host["host"].(string)
|
|
retExtraHosts = append(retExtraHosts, hostname+":"+ip)
|
|
}
|
|
|
|
return retExtraHosts
|
|
}
|
|
|
|
// volumeSetToDockerVolumes translates the "volumes" schema set into the three
// pieces Docker needs: the container-side volume map, the host-config bind
// strings ("name:path:rw|ro"), and the list of containers to inherit volumes
// from. On a validation error the partially-built results are returned
// alongside the error.
func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []string, []string, error) {
	retVolumeMap := map[string]struct{}{}
	retHostConfigBinds := []string{}
	retVolumeFromContainers := []string{}

	for _, volumeInt := range volumes.List() {
		volume := volumeInt.(map[string]interface{})
		fromContainer := volume["from_container"].(string)
		containerPath := volume["container_path"].(string)
		volumeName := volume["volume_name"].(string)
		// "host_path" doubles as the bind source when no explicit volume
		// name was configured.
		if len(volumeName) == 0 {
			volumeName = volume["host_path"].(string)
		}
		readOnly := volume["read_only"].(bool)

		// Case order matters: validation first, then source-container
		// entries, then named/host-path binds, with anonymous volumes as
		// the fallback.
		switch {
		case len(fromContainer) == 0 && len(containerPath) == 0:
			return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Volume entry without container path or source container")
		case len(fromContainer) != 0 && len(containerPath) != 0:
			return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Both a container and a path specified in a volume entry")
		case len(fromContainer) != 0:
			retVolumeFromContainers = append(retVolumeFromContainers, fromContainer)
		case len(volumeName) != 0:
			// Named (or host-path) volume: emit a bind with the requested
			// access mode.
			readWrite := "rw"
			if readOnly {
				readWrite = "ro"
			}
			retVolumeMap[containerPath] = struct{}{}
			retHostConfigBinds = append(retHostConfigBinds, volumeName+":"+containerPath+":"+readWrite)
		default:
			// Anonymous volume: only the container path is declared.
			retVolumeMap[containerPath] = struct{}{}
		}
	}

	return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, nil
}
|
|
|
|
// deviceSetToDockerDevices converts the "devices" schema set into Docker
// device mappings, defaulting the container path to the host path and the
// cgroup permissions to "rwm".
func deviceSetToDockerDevices(devices *schema.Set) []dc.Device {
	retDevices := []dc.Device{}
	for _, deviceInt := range devices.List() {
		deviceMap := deviceInt.(map[string]interface{})
		hostPath := deviceMap["host_path"].(string)
		containerPath := deviceMap["container_path"].(string)
		permissions := deviceMap["permissions"].(string)

		switch {
		case len(containerPath) == 0:
			containerPath = hostPath
			// NOTE(review): this fallthrough runs the next case body
			// unconditionally, so an explicitly configured "permissions"
			// value is overwritten with "rwm" whenever "container_path" is
			// omitted — confirm this is the intended behavior.
			fallthrough
		case len(permissions) == 0:
			permissions = "rwm"
		}

		device := dc.Device{
			PathOnHost:        hostPath,
			PathInContainer:   containerPath,
			CgroupPermissions: permissions,
		}
		retDevices = append(retDevices, device)
	}
	return retDevices
}
|