Feat/swarm 4 new resources (#40)

Adds Docker Swarm features to the provider for Docker Engine 17.09.1 and API version 1.32.

The schema closely follows the API. By default, swarm services are fire-and-forget; an optional converging config implements the behavior of the Docker CLI to ensure that a service and all of its replicas are up and running. Furthermore, a service can reference configs, secrets, and mounts, and can be attached to networks.
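For illustration, a minimal service using the converging config might look like the sketch below (the resource name, image, and values are placeholders for this example, not taken from the PR):

```hcl
# Hypothetical example: a replicated service that waits until all replicas are running
resource "docker_service" "example" {
  name = "example-service"

  task_spec {
    container_spec {
      image = "nginx:latest"
    }
  }

  mode {
    replicated {
      replicas = 2
    }
  }

  # Optional: poll until the desired state is reached instead of fire-and-forget
  converge_config {
    delay   = "7s"
    timeout = "3m"
  }
}
```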
This commit is contained in:
Manuel Vogel 2018-05-16 18:00:04 +02:00 committed by GitHub
parent d7f124ca4e
commit dc824c1030
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
24 changed files with 7536 additions and 20 deletions

View file

@ -10,9 +10,8 @@ before_install:
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
- sudo apt-get update
- sudo apt-get -y install docker-ce=18.03.0~ce-0~ubuntu
- sudo apt-get -y install docker-ce=17.09.1~ce-0~ubuntu # latest stable without the bug: https://github.com/moby/moby/issues/36661
- docker version
- export TRAVIS="true"
install:
# This script is used by the Travis build to install a cookie for

View file

@ -11,7 +11,7 @@ Requirements
------------
- [Terraform](https://www.terraform.io/downloads.html) 0.10.x
- [Go](https://golang.org/doc/install) 1.8 (to build the provider plugin)
- [Go](https://golang.org/doc/install) 1.9.1 (to build the provider plugin)
Building The Provider
---------------------
@ -56,12 +56,10 @@ $ make test
In order to run the full suite of Acceptance tests, run `make testacc`.
*Note:* Acceptance tests create real resources, and often cost money to run.
*Note:* Acceptance tests create a local registry which will be deleted afterwards.
```sh
$ make testacc
# e.g. run a single acceptance test, such as 'TestAccDockerRegistryImage_private' in 'data_source_docker_registry_image_test.go'
go test -v -timeout 30s github.com/terraform-providers/terraform-provider-docker/docker -run ^TestAccDockerRegistryImage_private$
```
In order to extend the provider and test it with `terraform`, build the provider as mentioned above with

View file

@ -11,6 +11,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
// Provider creates the Docker provider
func Provider() terraform.ResourceProvider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
@ -69,6 +70,7 @@ func Provider() terraform.ResourceProvider {
"password": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Sensitive: true,
ConflictsWith: []string{"registry_auth.config_file"},
DefaultFunc: schema.EnvDefaultFunc("DOCKER_REGISTRY_PASS", ""),
Description: "Password for the registry",
@ -91,6 +93,9 @@ func Provider() terraform.ResourceProvider {
"docker_image": resourceDockerImage(),
"docker_network": resourceDockerNetwork(),
"docker_volume": resourceDockerVolume(),
"docker_config": resourceDockerConfig(),
"docker_secret": resourceDockerSecret(),
"docker_service": resourceDockerService(),
},
DataSourcesMap: map[string]*schema.Resource{

View file

@ -0,0 +1,87 @@
package docker
import (
"encoding/base64"
"log"
"github.com/docker/docker/api/types/swarm"
dc "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceDockerConfig() *schema.Resource {
return &schema.Resource{
Create: resourceDockerConfigCreate,
Read: resourceDockerConfigRead,
Delete: resourceDockerConfigDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Description: "User-defined name of the config",
Required: true,
ForceNew: true,
},
"data": &schema.Schema{
Type: schema.TypeString,
Description: "Base64-url-safe-encoded config data",
Required: true,
Sensitive: true,
ForceNew: true,
ValidateFunc: validateStringIsBase64Encoded(),
},
},
}
}
func resourceDockerConfigCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
data, _ := base64.StdEncoding.DecodeString(d.Get("data").(string))
createConfigOpts := dc.CreateConfigOptions{
ConfigSpec: swarm.ConfigSpec{
Annotations: swarm.Annotations{
Name: d.Get("name").(string),
},
Data: data,
},
}
config, err := client.CreateConfig(createConfigOpts)
if err != nil {
return err
}
d.SetId(config.ID)
return resourceDockerConfigRead(d, meta)
}
func resourceDockerConfigRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
config, err := client.InspectConfig(d.Id())
if err != nil {
if _, ok := err.(*dc.NoSuchConfig); ok {
log.Printf("[WARN] Config (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
return err
}
d.SetId(config.ID)
return nil
}
func resourceDockerConfigDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
err := client.RemoveConfig(dc.RemoveConfigOptions{
ID: d.Id(),
})
if err != nil {
return err
}
d.SetId("")
return nil
}

View file

@ -0,0 +1,93 @@
package docker
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDockerConfig_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckDockerConfigDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: `
resource "docker_config" "foo" {
name = "foo-config"
data = "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="
}
`,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("docker_config.foo", "name", "foo-config"),
resource.TestCheckResourceAttr("docker_config.foo", "data", "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="),
),
},
},
})
}
func TestAccDockerConfig_basicUpdatable(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckDockerConfigDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: `
resource "docker_config" "foo" {
name = "tftest-myconfig-${replace(timestamp(),":", ".")}"
data = "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="
lifecycle {
ignore_changes = ["name"]
create_before_destroy = true
}
}
`,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("docker_config.foo", "data", "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="),
),
},
resource.TestStep{
Config: `
resource "docker_config" "foo" {
name = "tftest-myconfig2-${replace(timestamp(),":", ".")}"
data = "U3VuIDI1IE1hciAyMDE4IDE0OjQ2OjE5IENFU1QK"
lifecycle {
ignore_changes = ["name"]
create_before_destroy = true
}
}
`,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("docker_config.foo", "data", "U3VuIDI1IE1hciAyMDE4IDE0OjQ2OjE5IENFU1QK"),
),
},
},
})
}
/////////////
// Helpers
/////////////
func testCheckDockerConfigDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
for _, rs := range s.RootModule().Resources {
if rs.Type != "configs" {
continue
}
id := rs.Primary.Attributes["id"]
config, err := client.InspectConfig(id)
if err == nil || config != nil {
return fmt.Errorf("Config with id '%s' still exists", id)
}
}
return nil
}

View file

@ -280,6 +280,7 @@ func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error
var container *dc.Container
// TODO fix this with statefunc
loops := 1 // if it hasn't just been created, don't delay
if !creationTime.IsZero() {
loops = 30 // with 500ms spacing, 15 seconds; ought to be plenty
@ -388,6 +389,15 @@ func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
return mapped
}
// mapTypeMapValsToStringSlice maps a map to a slice with '=': e.g. foo = "bar" -> 'foo=bar'
func mapTypeMapValsToStringSlice(typeMap map[string]interface{}) []string {
mapped := make([]string, 0, len(typeMap))
for k, v := range typeMap {
mapped = append(mapped, k+"="+v.(string))
}
return mapped
}
func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) {
apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true})

View file

@ -126,9 +126,6 @@ func fetchLocalImages(data *Data, client *dc.Client) error {
}
func pullImage(data *Data, client *dc.Client, authConfig *dc.AuthConfigurations, image string) error {
// TODO: Test local registry handling. It should be working
// based on the code that was ported over
pullOpts := parseImageOptions(image)
// If a registry was specified in the image name, try to find auth for it

View file

@ -0,0 +1,89 @@
package docker
import (
"encoding/base64"
"log"
"github.com/docker/docker/api/types/swarm"
dc "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceDockerSecret() *schema.Resource {
return &schema.Resource{
Create: resourceDockerSecretCreate,
Read: resourceDockerSecretRead,
Delete: resourceDockerSecretDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Description: "User-defined name of the secret",
Required: true,
ForceNew: true,
},
"data": &schema.Schema{
Type: schema.TypeString,
Description: "User-defined name of the secret",
Required: true,
Sensitive: true,
ForceNew: true,
ValidateFunc: validateStringIsBase64Encoded(),
},
},
}
}
func resourceDockerSecretCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
data, _ := base64.StdEncoding.DecodeString(d.Get("data").(string))
createSecretOpts := dc.CreateSecretOptions{
SecretSpec: swarm.SecretSpec{
Annotations: swarm.Annotations{
Name: d.Get("name").(string),
},
Data: data,
},
}
secret, err := client.CreateSecret(createSecretOpts)
if err != nil {
return err
}
d.SetId(secret.ID)
return resourceDockerSecretRead(d, meta)
}
func resourceDockerSecretRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
secret, err := client.InspectSecret(d.Id())
if err != nil {
if _, ok := err.(*dc.NoSuchSecret); ok {
log.Printf("[WARN] Secret (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
return err
}
d.SetId(secret.ID)
return nil
}
func resourceDockerSecretDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
err := client.RemoveSecret(dc.RemoveSecretOptions{
ID: d.Id(),
})
if err != nil {
return err
}
d.SetId("")
return nil
}

View file

@ -0,0 +1,93 @@
package docker
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDockerSecret_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckDockerSecretDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: `
resource "docker_secret" "foo" {
name = "foo-secret"
data = "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="
}
`,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("docker_secret.foo", "name", "foo-secret"),
resource.TestCheckResourceAttr("docker_secret.foo", "data", "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="),
),
},
},
})
}
func TestAccDockerSecret_basicUpdatable(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckDockerSecretDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: `
resource "docker_secret" "foo" {
name = "tftest-mysecret-${replace(timestamp(),":", ".")}"
data = "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="
lifecycle {
ignore_changes = ["name"]
create_before_destroy = true
}
}
`,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("docker_secret.foo", "data", "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="),
),
},
resource.TestStep{
Config: `
resource "docker_secret" "foo" {
name = "tftest-mysecret2-${replace(timestamp(),":", ".")}"
data = "U3VuIDI1IE1hciAyMDE4IDE0OjUzOjIxIENFU1QK"
lifecycle {
ignore_changes = ["name"]
create_before_destroy = true
}
}
`,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("docker_secret.foo", "data", "U3VuIDI1IE1hciAyMDE4IDE0OjUzOjIxIENFU1QK"),
),
},
},
})
}
/////////////
// Helpers
/////////////
func testCheckDockerSecretDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
for _, rs := range s.RootModule().Resources {
if rs.Type != "secrets" {
continue
}
id := rs.Primary.Attributes["id"]
secret, err := client.InspectSecret(id)
if err == nil || secret != nil {
return fmt.Errorf("Secret with id '%s' still exists", id)
}
}
return nil
}

View file

@ -0,0 +1,894 @@
package docker
import (
"github.com/hashicorp/terraform/helper/schema"
)
// resourceDockerService creates a Docker service
// https://docs.docker.com/engine/api/v1.32/#operation/ServiceCreate
func resourceDockerService() *schema.Resource {
return &schema.Resource{
Create: resourceDockerServiceCreate,
Read: resourceDockerServiceRead,
Update: resourceDockerServiceUpdate,
Delete: resourceDockerServiceDelete,
Exists: resourceDockerServiceExists,
Schema: map[string]*schema.Schema{
"auth": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"server_address": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"username": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
DefaultFunc: schema.EnvDefaultFunc("DOCKER_REGISTRY_USER", ""),
},
"password": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
DefaultFunc: schema.EnvDefaultFunc("DOCKER_REGISTRY_PASS", ""),
Sensitive: true,
},
},
},
},
"name": &schema.Schema{
Type: schema.TypeString,
Description: "Name of the service",
Required: true,
ForceNew: true,
},
"labels": &schema.Schema{
Type: schema.TypeMap,
Description: "User-defined key/value metadata",
Optional: true,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"task_spec": &schema.Schema{
Type: schema.TypeList,
Description: "User modifiable task configuration",
MaxItems: 1,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"container_spec": &schema.Schema{
Type: schema.TypeList,
Description: "The spec for each container",
Required: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"image": &schema.Schema{
Type: schema.TypeString,
Description: "The image name to use for the containers of the service",
Required: true,
},
"labels": &schema.Schema{
Type: schema.TypeMap,
Description: "User-defined key/value metadata",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"command": &schema.Schema{
Type: schema.TypeList,
Description: "The command to be run in the image",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"args": &schema.Schema{
Type: schema.TypeList,
Description: "Arguments to the command",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"hostname": &schema.Schema{
Type: schema.TypeString,
Description: "The hostname to use for the container, as a valid RFC 1123 hostname",
Optional: true,
},
"env": &schema.Schema{
Type: schema.TypeMap,
Description: "A list of environment variables in the form VAR=\"value\"",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"dir": &schema.Schema{
Type: schema.TypeString,
Description: "The working directory for commands to run in",
Optional: true,
},
"user": &schema.Schema{
Type: schema.TypeString,
Description: "The user inside the container",
Optional: true,
},
"groups": &schema.Schema{
Type: schema.TypeList,
Description: "A list of additional groups that the container process will run as",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"privileges": &schema.Schema{
Type: schema.TypeList,
Description: "Security options for the container",
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"credential_spec": &schema.Schema{
Type: schema.TypeList,
Description: "CredentialSpec for managed service account (Windows only)",
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"file": &schema.Schema{
Type: schema.TypeString,
Description: "Load credential spec from this file",
Optional: true,
},
"registry": &schema.Schema{
Type: schema.TypeString,
Description: "Load credential spec from this value in the Windows registry",
Optional: true,
},
},
},
},
"se_linux_context": &schema.Schema{
Type: schema.TypeList,
Description: "SELinux labels of the container",
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disable": &schema.Schema{
Type: schema.TypeBool,
Description: "Disable SELinux",
Optional: true,
},
"user": &schema.Schema{
Type: schema.TypeString,
Description: "SELinux user label",
Optional: true,
},
"role": &schema.Schema{
Type: schema.TypeString,
Description: "SELinux role label",
Optional: true,
},
"type": &schema.Schema{
Type: schema.TypeString,
Description: "SELinux type label",
Optional: true,
},
"level": &schema.Schema{
Type: schema.TypeString,
Description: "SELinux level label",
Optional: true,
},
},
},
},
},
},
},
"read_only": &schema.Schema{
Type: schema.TypeBool,
Description: "Mount the container's root filesystem as read only",
Optional: true,
},
"mounts": &schema.Schema{
Type: schema.TypeSet,
Description: "Specification for mounts to be added to containers created as part of the service",
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": &schema.Schema{
Type: schema.TypeString,
Description: "Container path",
Required: true,
},
"source": &schema.Schema{
Type: schema.TypeString,
Description: "Mount source (e.g. a volume name, a host path)",
Required: true,
},
"type": &schema.Schema{
Type: schema.TypeString,
Description: "The mount type",
Required: true,
ValidateFunc: validateStringMatchesPattern(`^(bind|volume|tmpfs)$`),
},
"read_only": &schema.Schema{
Type: schema.TypeBool,
Description: "Whether the mount should be read-only",
Optional: true,
},
"bind_options": &schema.Schema{
Type: schema.TypeList,
Description: "Optional configuration for the bind type",
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"propagation": &schema.Schema{
Type: schema.TypeString,
Description: "A propagation mode with the value",
Optional: true,
ValidateFunc: validateStringMatchesPattern(`^(private|rprivate|shared|rshared|slave|rslave)$`),
},
},
},
},
"volume_options": &schema.Schema{
Type: schema.TypeList,
Description: "Optional configuration for the volume type",
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"no_copy": &schema.Schema{
Type: schema.TypeBool,
Description: "Populate volume with data from the target",
Optional: true,
},
"labels": &schema.Schema{
Type: schema.TypeMap,
Description: "User-defined key/value metadata",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"driver_name": &schema.Schema{
Type: schema.TypeString,
Description: "Name of the driver to use to create the volume.",
Optional: true,
},
"driver_options": &schema.Schema{
Type: schema.TypeMap,
Description: "key/value map of driver specific options",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
"tmpfs_options": &schema.Schema{
Type: schema.TypeList,
Description: "Optional configuration for the tmpfs type",
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"size_bytes": &schema.Schema{
Type: schema.TypeInt,
Description: "The size for the tmpfs mount in bytes",
Optional: true,
},
"mode": &schema.Schema{
Type: schema.TypeInt,
Description: "The permission mode for the tmpfs mount in an integer",
Optional: true,
},
},
},
},
},
},
},
"stop_signal": &schema.Schema{
Type: schema.TypeString,
Description: "Signal to stop the container",
Optional: true,
},
"stop_grace_period": &schema.Schema{
Type: schema.TypeString,
Description: "Amount of time to wait for the container to terminate before forcefully removing it (ms|s|m|h)",
Optional: true,
Computed: true,
ValidateFunc: validateDurationGeq0(),
},
"healthcheck": &schema.Schema{
Type: schema.TypeList,
Description: "A test to perform to check that the container is healthy",
MaxItems: 1,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"test": &schema.Schema{
Type: schema.TypeList,
Description: "The test to perform as list",
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"interval": &schema.Schema{
Type: schema.TypeString,
Description: "Time between running the check (ms|s|m|h)",
Optional: true,
Default: "0s",
ValidateFunc: validateDurationGeq0(),
},
"timeout": &schema.Schema{
Type: schema.TypeString,
Description: "Maximum time to allow one check to run (ms|s|m|h)",
Optional: true,
Default: "0s",
ValidateFunc: validateDurationGeq0(),
},
"start_period": &schema.Schema{
Type: schema.TypeString,
Description: "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)",
Optional: true,
Default: "0s",
ValidateFunc: validateDurationGeq0(),
},
"retries": &schema.Schema{
Type: schema.TypeInt,
Description: "Consecutive failures needed to report unhealthy",
Optional: true,
Default: 0,
ValidateFunc: validateIntegerGeqThan(0),
},
},
},
},
"hosts": &schema.Schema{
Type: schema.TypeSet,
Description: "A list of hostname/IP mappings to add to the container's hosts file.",
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ip": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"host": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
},
},
},
"dns_config": &schema.Schema{
Type: schema.TypeList,
Description: "Specification for DNS related configurations in resolver configuration file (resolv.conf)",
MaxItems: 1,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"nameservers": &schema.Schema{
Type: schema.TypeList,
Description: "The IP addresses of the name servers",
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"search": &schema.Schema{
Type: schema.TypeList,
Description: "A search list for host-name lookup",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"options": &schema.Schema{
Type: schema.TypeList,
Description: "A list of internal resolver variables to be modified (e.g., debug, ndots:3, etc.)",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
"secrets": &schema.Schema{
Type: schema.TypeSet,
Description: "References to zero or more secrets that will be exposed to the service",
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"secret_id": &schema.Schema{
Type: schema.TypeString,
Description: "ID of the specific secret that we're referencing",
Required: true,
},
"secret_name": &schema.Schema{
Type: schema.TypeString,
Description: "Name of the secret that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID",
Optional: true,
},
"file_name": &schema.Schema{
Type: schema.TypeString,
Description: "Represents the final filename in the filesystem",
Required: true,
},
},
},
},
"configs": &schema.Schema{
Type: schema.TypeSet,
Description: "References to zero or more configs that will be exposed to the service",
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"config_id": &schema.Schema{
Type: schema.TypeString,
Description: "ID of the specific config that we're referencing",
Required: true,
},
"config_name": &schema.Schema{
Type: schema.TypeString,
Description: "Name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID",
Optional: true,
},
"file_name": &schema.Schema{
Type: schema.TypeString,
Description: "Represents the final filename in the filesystem",
Required: true,
},
},
},
},
},
},
},
"resources": &schema.Schema{
Type: schema.TypeList,
Description: "Resource requirements which apply to each individual container created as part of the service",
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"limits": &schema.Schema{
Type: schema.TypeList,
Description: "Describes the resources which can be advertised by a node and requested by a task",
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"nano_cpus": &schema.Schema{
Type: schema.TypeInt,
Description: "CPU shares in units of 1/1e9 (or 10^-9) of the CPU. Should be at least 1000000",
Optional: true,
},
"memory_bytes": &schema.Schema{
Type: schema.TypeInt,
Description: "The amounf of memory in bytes the container allocates",
Optional: true,
},
"generic_resources": &schema.Schema{
Type: schema.TypeList,
Description: "User-defined resources can be either Integer resources (e.g, SSD=3) or String resources (e.g, GPU=UUID1)",
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"named_resources_spec": &schema.Schema{
Type: schema.TypeSet,
Description: "The String resources",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"discrete_resources_spec": &schema.Schema{
Type: schema.TypeSet,
Description: "The Integer resources",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
},
},
},
},
},
},
"reservation": &schema.Schema{
Type: schema.TypeList,
Description: "An object describing the resources which can be advertised by a node and requested by a task",
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"nano_cpus": &schema.Schema{
Description: "CPU shares in units of 1/1e9 (or 10^-9) of the CPU. Should be at least 1000000",
Type: schema.TypeInt,
Optional: true,
},
"memory_bytes": &schema.Schema{
Type: schema.TypeInt,
Description: "The amounf of memory in bytes the container allocates",
Optional: true,
},
"generic_resources": &schema.Schema{
Type: schema.TypeList,
Description: "User-defined resources can be either Integer resources (e.g, SSD=3) or String resources (e.g, GPU=UUID1)",
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"named_resources_spec": &schema.Schema{
Type: schema.TypeSet,
Description: "The String resources",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"discrete_resources_spec": &schema.Schema{
Type: schema.TypeSet,
Description: "The Integer resources",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
},
},
},
},
},
},
"restart_policy": &schema.Schema{
Type: schema.TypeMap,
Description: "Specification for the restart policy which applies to containers created as part of this service.",
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"condition": &schema.Schema{
Type: schema.TypeString,
Description: "Condition for restart",
Optional: true,
ValidateFunc: validateStringMatchesPattern(`^(none|on-failure|any)$`),
},
"delay": &schema.Schema{
Type: schema.TypeString,
Description: "Delay between restart attempts (ms|s|m|h)",
Optional: true,
ValidateFunc: validateDurationGeq0(),
},
"max_attempts": &schema.Schema{
Type: schema.TypeInt,
Description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)",
Optional: true,
ValidateFunc: validateIntegerGeqThan(0),
},
"window": &schema.Schema{
Type: schema.TypeString,
Description: "The time window used to evaluate the restart policy (default value is 0, which is unbounded) (ms|s|m|h)",
Optional: true,
ValidateFunc: validateDurationGeq0(),
},
},
},
},
"placement": &schema.Schema{
Type: schema.TypeList,
Description: "The placement preferences",
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"constraints": &schema.Schema{
Type: schema.TypeSet,
Description: "An array of constraints. e.g.: node.role==manager",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"prefs": &schema.Schema{
Type: schema.TypeSet,
Description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence, e.g.: spread=node.role.manager",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"platforms": &schema.Schema{
Type: schema.TypeSet,
Description: "Platforms stores all the platforms that the service's image can run on",
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"architecture": &schema.Schema{
Type: schema.TypeString,
Description: "The architecture, e.g. amd64",
Required: true,
},
"os": &schema.Schema{
Type: schema.TypeString,
Description: "The operation system, e.g. linux",
Required: true,
},
},
},
},
},
},
},
"force_update": &schema.Schema{
Type: schema.TypeInt,
Description: "A counter that triggers an update even if no relevant parameters have been changed. See https://github.com/docker/swarmkit/blob/master/api/specs.proto#L126",
Optional: true,
Computed: true,
ValidateFunc: validateIntegerGeqThan(0),
},
"runtime": &schema.Schema{
Type: schema.TypeString,
Description: "Runtime is the type of runtime specified for the task executor. See https://github.com/moby/moby/blob/master/api/types/swarm/runtime.go",
Optional: true,
Computed: true,
ValidateFunc: validateStringMatchesPattern("^(container|plugin)$"),
},
"networks": &schema.Schema{
Type: schema.TypeSet,
Description: "Ids of the networks in which the container will be put in.",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"log_driver": &schema.Schema{
Type: schema.TypeList,
Description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified",
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Description: "The logging driver to use: one of none|json-file|syslog|journald|gelf|fluentd|awslogs|splunk|etwlogs|gcplogs",
Required: true,
ValidateFunc: validateStringMatchesPattern("(none|json-file|syslog|journald|gelf|fluentd|awslogs|splunk|etwlogs|gcplogs)"),
},
"options": &schema.Schema{
Type: schema.TypeMap,
Description: "The options for the logging driver",
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
},
},
},
"mode": &schema.Schema{
Type: schema.TypeList,
Description: "Scheduling mode for the service",
MaxItems: 1,
Optional: true,
Computed: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"replicated": &schema.Schema{
Type: schema.TypeList,
Description: "The replicated service mode",
MaxItems: 1,
Optional: true,
Computed: true,
ConflictsWith: []string{"mode.0.global"},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"replicas": &schema.Schema{
Type: schema.TypeInt,
Description: "The amount of replicas of the service",
Optional: true,
Default: 1,
ValidateFunc: validateIntegerGeqThan(1),
},
},
},
},
"global": &schema.Schema{
Type: schema.TypeBool,
Description: "The global service mode",
Optional: true,
Default: false,
ConflictsWith: []string{"mode.0.replicated", "converge_config"},
},
},
},
},
"update_config": &schema.Schema{
Type: schema.TypeList,
Description: "Specification for the update strategy of the service",
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"parallelism": &schema.Schema{
Type: schema.TypeInt,
Description: "Maximum number of tasks to be updated in one iteration",
Optional: true,
Default: 1,
ValidateFunc: validateIntegerGeqThan(0),
},
"delay": &schema.Schema{
Type: schema.TypeString,
Description: "Delay between task updates (ns|us|ms|s|m|h)",
Optional: true,
Default: "0s",
ValidateFunc: validateDurationGeq0(),
},
"failure_action": &schema.Schema{
Type: schema.TypeString,
Description: "Action on update failure: pause | continue | rollback",
Optional: true,
Default: "pause",
ValidateFunc: validateStringMatchesPattern("^(pause|continue|rollback)$"),
},
"monitor": &schema.Schema{
Type: schema.TypeString,
Description: "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)",
Optional: true,
Default: "5s",
ValidateFunc: validateDurationGeq0(),
},
"max_failure_ratio": &schema.Schema{
Type: schema.TypeString,
Description: "Failure rate to tolerate during an update",
Optional: true,
Default: "0.0",
ValidateFunc: validateStringIsFloatRatio(),
},
"order": &schema.Schema{
Type: schema.TypeString,
Description: "Update order: either 'stop-first' or 'start-first'",
Optional: true,
Default: "stop-first",
ValidateFunc: validateStringMatchesPattern("^(stop-first|start-first)$"),
},
},
},
},
"rollback_config": &schema.Schema{
Type: schema.TypeList,
Description: "Specification for the rollback strategy of the service",
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"parallelism": &schema.Schema{
Type: schema.TypeInt,
Description: "Maximum number of tasks to be rollbacked in one iteration",
Optional: true,
Default: 1,
ValidateFunc: validateIntegerGeqThan(0),
},
"delay": &schema.Schema{
Type: schema.TypeString,
Description: "Delay between task rollbacks (ns|us|ms|s|m|h)",
Optional: true,
Default: "0s",
ValidateFunc: validateDurationGeq0(),
},
"failure_action": &schema.Schema{
Type: schema.TypeString,
Description: "Action on rollback failure: pause | continue",
Optional: true,
Default: "pause",
ValidateFunc: validateStringMatchesPattern("(pause|continue)"),
},
"monitor": &schema.Schema{
Type: schema.TypeString,
Description: "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)",
Optional: true,
Default: "5s",
ValidateFunc: validateDurationGeq0(),
},
"max_failure_ratio": &schema.Schema{
Type: schema.TypeString,
Description: "Failure rate to tolerate during a rollback",
Optional: true,
Default: "0.0",
ValidateFunc: validateStringIsFloatRatio(),
},
"order": &schema.Schema{
Type: schema.TypeString,
Description: "Rollback order: either 'stop-first' or 'start-first'",
Optional: true,
Default: "stop-first",
ValidateFunc: validateStringMatchesPattern("(stop-first|start-first)"),
},
},
},
},
"endpoint_spec": &schema.Schema{
Type: schema.TypeList,
Description: "Properties that can be configured to access and load balance a service",
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"mode": &schema.Schema{
Type: schema.TypeString,
Description: "The mode of resolution to use for internal load balancing between tasks",
Optional: true,
Default: "vip",
ValidateFunc: validateStringMatchesPattern(`^(vip|dnsrr)$`),
},
"ports": &schema.Schema{
Type: schema.TypeSet,
Description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if 'vip' resolution mode is used.",
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Description: "A random name for the port",
Optional: true,
},
"protocol": &schema.Schema{
Type: schema.TypeString,
Description: "Rrepresents the protocol of a port: 'tcp' or 'udp'",
Optional: true,
Default: "tcp",
ValidateFunc: validateStringMatchesPattern(`^(tcp|udp)$`),
},
"target_port": &schema.Schema{
Type: schema.TypeInt,
Description: "The port inside the container",
Required: true,
},
"published_port": &schema.Schema{
Type: schema.TypeInt,
Description: "The port on the swarm hosts. If not set the value of 'target_port' will be used",
Optional: true,
},
"publish_mode": &schema.Schema{
Type: schema.TypeString,
Description: "Represents the mode in which the port is to be published: 'ingress' or 'host'",
Optional: true,
Default: "ingress",
ValidateFunc: validateStringMatchesPattern(`^(host|ingress)$`),
},
},
},
},
},
},
},
"converge_config": &schema.Schema{
Type: schema.TypeList,
Description: "A configuration to ensure that a service converges aka reaches the desired that of all task up and running",
MaxItems: 1,
Optional: true,
ConflictsWith: []string{"mode.0.global"},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"delay": &schema.Schema{
Type: schema.TypeString,
Description: "The interval to check if the desired state is reached (ms|s). Default: 7s",
Optional: true,
Default: "7s",
ValidateFunc: validateDurationGeq0(),
},
"timeout": &schema.Schema{
Type: schema.TypeString,
Description: "The timeout of the service to reach the desired state (s|m). Default: 3m",
Optional: true,
Default: "3m",
ValidateFunc: validateDurationGeq0(),
},
},
},
},
},
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -3,6 +3,7 @@ package docker
import (
"fmt"
"log"
"strings"
"time"
dc "github.com/fsouza/go-dockerclient"
@ -95,24 +96,30 @@ func resourceDockerVolumeRead(d *schema.ResourceData, meta interface{}) error {
func resourceDockerVolumeDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
// TODO catch error if removal is already in progress + fix with statefunc
if err := client.RemoveVolume(d.Id()); err != nil && err != dc.ErrNoSuchVolume {
if err == dc.ErrVolumeInUse {
loops := 50
loops := 20
sleepTime := 1000 * time.Millisecond
for i := loops; i > 0; i-- {
if err = client.RemoveVolume(d.Id()); err != nil {
log.Printf("[INFO] Volume remove loop: %d of %d due to error: %s", loops-i+1, loops, err)
if err == dc.ErrVolumeInUse {
log.Printf("[INFO] Volume remove loop: %d of %d due to error: %s", loops-i+1, loops, err)
time.Sleep(sleepTime)
continue
}
if err == dc.ErrNoSuchVolume {
break // it's removed
log.Printf("[INFO] Volume successfully removed")
d.SetId("")
return nil
}
if !strings.Contains(err.Error(), "is already in progress") {
// if it's not in use any more (so it's deleted successfully) and another error occurred
return fmt.Errorf("Error deleting volume %s: %s", d.Id(), err)
}
// if it's not in use any more (so it's deleted successfully) and another error occurred
return fmt.Errorf("Error deleting volume %s: %s", d.Id(), err)
}
}
return fmt.Errorf("Error deleting volume %s: %s after %d tries", d.Id(), err, loops)
}
}

View file

@ -0,0 +1,549 @@
package docker
import (
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/swarm"
"github.com/hashicorp/terraform/helper/schema"
)
func flattenTaskSpec(in swarm.TaskSpec) []interface{} {
m := make(map[string]interface{})
if in.ContainerSpec != nil {
m["container_spec"] = flattenContainerSpec(in.ContainerSpec)
}
if in.Resources != nil {
m["resources"] = flattenTaskResources(in.Resources)
}
if in.RestartPolicy != nil {
m["restart_policy"] = flattenTaskRestartPolicy(in.RestartPolicy)
}
if in.Placement != nil {
m["placement"] = flattenTaskPlacement(in.Placement)
}
if in.ForceUpdate >= 0 {
m["force_update"] = in.ForceUpdate
}
if len(in.Runtime) > 0 {
m["runtime"] = in.Runtime
}
if len(in.Networks) > 0 {
m["networks"] = flattenTaskNetworks(in.Networks)
}
if in.LogDriver != nil {
m["log_driver"] = flattenTaskLogDriver(in.LogDriver)
}
return []interface{}{m}
}
func flattenServiceMode(in swarm.ServiceMode) []interface{} {
m := make(map[string]interface{})
if in.Replicated != nil {
m["replicated"] = flattenReplicated(in.Replicated)
}
if in.Global != nil {
m["global"] = true
} else {
m["global"] = false
}
return []interface{}{m}
}
func flattenReplicated(in *swarm.ReplicatedService) []interface{} {
var out = make([]interface{}, 0, 0)
m := make(map[string]interface{})
if in != nil {
if in.Replicas != nil {
replicas := int(*in.Replicas)
m["replicas"] = replicas
}
}
out = append(out, m)
return out
}
func flattenServiceUpdateOrRollbackConfig(in *swarm.UpdateConfig) []interface{} {
var out = make([]interface{}, 0, 0)
if in == nil {
return out
}
m := make(map[string]interface{})
m["parallelism"] = in.Parallelism
m["delay"] = shortDur(in.Delay)
m["failure_action"] = in.FailureAction
m["monitor"] = shortDur(in.Monitor)
m["max_failure_ratio"] = strconv.FormatFloat(float64(in.MaxFailureRatio), 'f', 1, 64)
m["order"] = in.Order
out = append(out, m)
return out
}
func flattenServiceEndpointSpec(in swarm.EndpointSpec) []interface{} {
var out = make([]interface{}, 0, 0)
m := make(map[string]interface{})
if len(in.Mode) > 0 {
m["mode"] = in.Mode
}
if len(in.Ports) > 0 {
m["ports"] = flattenServicePorts(in.Ports)
}
out = append(out, m)
return out
}
///// start TaskSpec
func flattenContainerSpec(in *swarm.ContainerSpec) []interface{} {
var out = make([]interface{}, 0, 0)
m := make(map[string]interface{})
if len(in.Image) > 0 {
m["image"] = in.Image
}
if len(in.Labels) > 0 {
m["labels"] = in.Labels
}
if len(in.Command) > 0 {
m["command"] = in.Command
}
if len(in.Args) > 0 {
m["args"] = in.Args
}
if len(in.Hostname) > 0 {
m["hostname"] = in.Hostname
}
if len(in.Env) > 0 {
m["env"] = mapStringSliceToMap(in.Env)
}
if len(in.User) > 0 {
m["user"] = in.User
}
if len(in.Dir) > 0 {
m["dir"] = in.Dir
}
if len(in.Groups) > 0 {
m["groups"] = in.Groups
}
if in.Privileges != nil {
m["privileges"] = flattenPrivileges(in.Privileges)
}
if in.ReadOnly {
m["read_only"] = in.ReadOnly
}
if len(in.Mounts) > 0 {
m["mounts"] = flattenServiceMounts(in.Mounts)
}
if len(in.StopSignal) > 0 {
m["stop_signal"] = in.StopSignal
}
if in.StopGracePeriod != nil {
m["stop_grace_period"] = shortDur(*in.StopGracePeriod)
}
if in.Healthcheck != nil {
m["healthcheck"] = flattenServiceHealthcheck(in.Healthcheck)
}
if len(in.Hosts) > 0 {
m["hosts"] = flattenServiceHosts(in.Hosts)
}
if in.DNSConfig != nil {
m["dns_config"] = flattenServiceDNSConfig(in.DNSConfig)
}
if len(in.Secrets) > 0 {
m["secrets"] = flattenServiceSecrets(in.Secrets)
}
if len(in.Configs) > 0 {
m["configs"] = flattenServiceConfigs(in.Configs)
}
out = append(out, m)
return out
}
func flattenPrivileges(in *swarm.Privileges) []interface{} {
if in == nil {
return make([]interface{}, 0, 0)
}
var out = make([]interface{}, 1, 1)
m := make(map[string]interface{})
if in.CredentialSpec != nil {
credSpec := make([]interface{}, 1, 1)
internal := make(map[string]interface{})
internal["file"] = in.CredentialSpec.File
internal["registry"] = in.CredentialSpec.Registry
credSpec[0] = internal
m["credential_spec"] = credSpec
}
if in.SELinuxContext != nil {
seLinuxContext := make([]interface{}, 1, 1)
internal := make(map[string]interface{})
internal["disable"] = in.SELinuxContext.Disable
internal["user"] = in.SELinuxContext.User
internal["role"] = in.SELinuxContext.Role
internal["type"] = in.SELinuxContext.Type
internal["level"] = in.SELinuxContext.Level
seLinuxContext[0] = internal
m["se_linux_context"] = seLinuxContext
}
out[0] = m
return out
}
func flattenServiceMounts(in []mount.Mount) *schema.Set {
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
m := make(map[string]interface{})
m["target"] = v.Target
m["source"] = v.Source
m["type"] = string(v.Type)
m["read_only"] = v.ReadOnly
if v.BindOptions != nil {
bindOptions := make([]interface{}, 0, 0)
bindOptionsItem := make(map[string]interface{}, 0)
if len(v.BindOptions.Propagation) > 0 {
bindOptionsItem["propagation"] = string(v.BindOptions.Propagation)
}
bindOptions = append(bindOptions, bindOptionsItem)
m["bind_options"] = bindOptions
}
if v.VolumeOptions != nil {
volumeOptions := make([]interface{}, 0, 0)
volumeOptionsItem := make(map[string]interface{}, 0)
volumeOptionsItem["no_copy"] = v.VolumeOptions.NoCopy
volumeOptionsItem["labels"] = mapStringStringToMapStringInterface(v.VolumeOptions.Labels)
if v.VolumeOptions.DriverConfig != nil {
if len(v.VolumeOptions.DriverConfig.Name) > 0 {
volumeOptionsItem["driver_name"] = v.VolumeOptions.DriverConfig.Name
}
volumeOptionsItem["driver_options"] = mapStringStringToMapStringInterface(v.VolumeOptions.DriverConfig.Options)
}
volumeOptions = append(volumeOptions, volumeOptionsItem)
m["volume_options"] = volumeOptions
}
if v.TmpfsOptions != nil {
tmpfsOptions := make([]interface{}, 0, 0)
tmpfsOptionsItem := make(map[string]interface{}, 0)
tmpfsOptionsItem["size_bytes"] = int(v.TmpfsOptions.SizeBytes)
tmpfsOptionsItem["mode"] = v.TmpfsOptions.Mode.Perm
tmpfsOptions = append(tmpfsOptions, tmpfsOptionsItem)
m["tmpfs_options"] = tmpfsOptions
}
out[i] = m
}
taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource)
mountsResource := containerSpecResource.Schema["mounts"].Elem.(*schema.Resource)
f := schema.HashResource(mountsResource)
return schema.NewSet(f, out)
}
func flattenServiceHealthcheck(in *container.HealthConfig) []interface{} {
if in == nil {
return make([]interface{}, 0, 0)
}
var out = make([]interface{}, 1, 1)
m := make(map[string]interface{})
if len(in.Test) > 0 {
m["test"] = in.Test
}
m["interval"] = shortDur(in.Interval)
m["timeout"] = shortDur(in.Timeout)
m["start_period"] = shortDur(in.StartPeriod)
m["retries"] = in.Retries
out[0] = m
return out
}
func flattenServiceHosts(in []string) *schema.Set {
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
m := make(map[string]interface{})
split := strings.Split(v, ":")
m["host"] = split[0]
m["ip"] = split[1]
out[i] = m
}
taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource)
hostsResource := containerSpecResource.Schema["hosts"].Elem.(*schema.Resource)
f := schema.HashResource(hostsResource)
return schema.NewSet(f, out)
}
func flattenServiceDNSConfig(in *swarm.DNSConfig) []interface{} {
if in == nil {
return make([]interface{}, 0, 0)
}
var out = make([]interface{}, 1, 1)
m := make(map[string]interface{})
if len(in.Nameservers) > 0 {
m["nameservers"] = in.Nameservers
}
if len(in.Search) > 0 {
m["search"] = in.Search
}
if len(in.Options) > 0 {
m["options"] = in.Options
}
out[0] = m
return out
}
func flattenServiceSecrets(in []*swarm.SecretReference) *schema.Set {
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
m := make(map[string]interface{})
m["secret_id"] = v.SecretID
if len(v.SecretName) > 0 {
m["secret_name"] = v.SecretName
}
if v.File != nil {
m["file_name"] = v.File.Name
}
out[i] = m
}
taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource)
secretsResource := containerSpecResource.Schema["secrets"].Elem.(*schema.Resource)
f := schema.HashResource(secretsResource)
return schema.NewSet(f, out)
}
func flattenServiceConfigs(in []*swarm.ConfigReference) *schema.Set {
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
m := make(map[string]interface{})
m["config_id"] = v.ConfigID
if len(v.ConfigName) > 0 {
m["config_name"] = v.ConfigName
}
if v.File != nil {
m["file_name"] = v.File.Name
}
out[i] = m
}
taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource)
configsResource := containerSpecResource.Schema["configs"].Elem.(*schema.Resource)
f := schema.HashResource(configsResource)
return schema.NewSet(f, out)
}
func flattenTaskResources(in *swarm.ResourceRequirements) []interface{} {
var out = make([]interface{}, 0, 0)
if in != nil {
m := make(map[string]interface{})
m["limits"] = flattenResourceLimitsOrReservations(in.Limits)
m["reservation"] = flattenResourceLimitsOrReservations(in.Reservations)
out = append(out, m)
}
return out
}
func flattenResourceLimitsOrReservations(in *swarm.Resources) []interface{} {
var out = make([]interface{}, 0, 0)
if in != nil {
m := make(map[string]interface{})
m["nano_cpus"] = in.NanoCPUs
m["memory_bytes"] = in.MemoryBytes
m["generic_resources"] = flattenResourceGenericResource(in.GenericResources)
out = append(out, m)
}
return out
}
func flattenResourceGenericResource(in []swarm.GenericResource) []interface{} {
var out = make([]interface{}, 0, 0)
if in != nil && len(in) > 0 {
m := make(map[string]interface{})
named := make([]string, 0)
discrete := make([]string, 0)
for _, genericResource := range in {
if genericResource.NamedResourceSpec != nil {
named = append(named, genericResource.NamedResourceSpec.Kind+"="+genericResource.NamedResourceSpec.Value)
}
if genericResource.DiscreteResourceSpec != nil {
discrete = append(discrete, genericResource.DiscreteResourceSpec.Kind+"="+strconv.Itoa(int(genericResource.DiscreteResourceSpec.Value)))
}
}
m["named_resources_spec"] = newStringSet(schema.HashString, named)
m["discrete_resources_spec"] = newStringSet(schema.HashString, discrete)
out = append(out, m)
}
return out
}
func flattenTaskRestartPolicy(in *swarm.RestartPolicy) map[string]interface{} {
m := make(map[string]interface{})
if len(in.Condition) > 0 {
m["condition"] = string(in.Condition)
}
if in.Delay != nil {
m["delay"] = shortDur(*in.Delay)
}
if in.MaxAttempts != nil {
mapped := *in.MaxAttempts
m["max_attempts"] = strconv.Itoa(int(mapped))
}
if in.Window != nil {
m["window"] = shortDur(*in.Window)
}
return m
}
func flattenTaskPlacement(in *swarm.Placement) []interface{} {
if in == nil {
return make([]interface{}, 0, 0)
}
var out = make([]interface{}, 1, 1)
m := make(map[string]interface{})
if len(in.Constraints) > 0 {
m["constraints"] = newStringSet(schema.HashString, in.Constraints)
}
if len(in.Preferences) > 0 {
m["prefs"] = flattenPlacementPrefs(in.Preferences)
}
if len(in.Platforms) > 0 {
m["platforms"] = flattenPlacementPlatforms(in.Platforms)
}
out[0] = m
return out
}
func flattenPlacementPrefs(in []swarm.PlacementPreference) *schema.Set {
if in == nil || len(in) == 0 {
return schema.NewSet(schema.HashString, make([]interface{}, 0, 0))
}
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
out[i] = v.Spread.SpreadDescriptor
}
return schema.NewSet(schema.HashString, out)
}
func flattenPlacementPlatforms(in []swarm.Platform) *schema.Set {
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
m := make(map[string]interface{})
m["architecture"] = v.Architecture
m["os"] = v.OS
out[i] = m
}
taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
placementResource := taskSpecResource.Schema["placement"].Elem.(*schema.Resource)
f := schema.HashResource(placementResource)
return schema.NewSet(f, out)
}
func flattenTaskNetworks(in []swarm.NetworkAttachmentConfig) *schema.Set {
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
out[i] = v.Target
}
return schema.NewSet(schema.HashString, out)
}
func flattenTaskLogDriver(in *swarm.Driver) []interface{} {
if in == nil {
return make([]interface{}, 0, 0)
}
var out = make([]interface{}, 1, 1)
m := make(map[string]interface{})
m["name"] = in.Name
if len(in.Options) > 0 {
m["options"] = in.Options
}
out[0] = m
return out
}
///// end TaskSpec
///// start EndpointSpec
func flattenServicePorts(in []swarm.PortConfig) *schema.Set {
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
m := make(map[string]interface{})
if len(v.Name) > 0 {
m["name"] = v.Name
}
m["protocol"] = string(v.Protocol)
m["target_port"] = int(v.TargetPort)
if v.PublishedPort > 0 {
m["published_port"] = int(v.PublishedPort)
}
m["publish_mode"] = string(v.PublishMode)
out[i] = m
}
endpointSpecResource := resourceDockerService().Schema["endpoint_spec"].Elem.(*schema.Resource)
portsResource := endpointSpecResource.Schema["ports"].Elem.(*schema.Resource)
f := schema.HashResource(portsResource)
return schema.NewSet(f, out)
}
///// end EndpointSpec
// HELPERS
func shortDur(d time.Duration) string {
s := d.String()
if strings.HasSuffix(s, "m0s") {
s = s[:len(s)-2]
}
if strings.HasSuffix(s, "h0m") {
s = s[:len(s)-2]
}
return s
}
func newStringSet(f schema.SchemaSetFunc, in []string) *schema.Set {
var out = make([]interface{}, len(in), len(in))
for i, v := range in {
out[i] = v
}
return schema.NewSet(f, out)
}
// mapStringSliceToMap maps a slice with '=' delimiter to as map: e.g. 'foo=bar' -> foo = "bar"
func mapStringSliceToMap(in []string) map[string]string {
mapped := make(map[string]string, len(in))
for _, v := range in {
if len(v) > 0 {
splitted := strings.Split(v, "=")
key := splitted[0]
value := splitted[1]
mapped[key] = value
}
}
return mapped
}
// mapStringStringToMapStringInterface maps a string string map to a string interface map
func mapStringStringToMapStringInterface(in map[string]string) map[string]interface{} {
if in == nil || len(in) == 0 {
return make(map[string]interface{}, 0)
}
mapped := make(map[string]interface{}, len(in))
for k, v := range in {
mapped[k] = v
}
return mapped
}

View file

@ -4,6 +4,7 @@ import (
"encoding/base64"
"fmt"
"regexp"
"strconv"
"time"
"github.com/hashicorp/terraform/helper/schema"
@ -46,6 +47,34 @@ func validateFloatRatio() schema.SchemaValidateFunc {
}
}
func validateStringIsFloatRatio() schema.SchemaValidateFunc {
return func(v interface{}, k string) (ws []string, errors []error) {
switch v.(type) {
case string:
stringValue := v.(string)
value, err := strconv.ParseFloat(stringValue, 64)
if err != nil {
errors = append(errors, fmt.Errorf(
"%q is not a float", k))
}
if value < 0.0 || value > 1.0 {
errors = append(errors, fmt.Errorf(
"%q has to be between 0.0 and 1.0", k))
}
case int:
value := float64(v.(int))
if value < 0.0 || value > 1.0 {
errors = append(errors, fmt.Errorf(
"%q has to be between 0.0 and 1.0", k))
}
default:
errors = append(errors, fmt.Errorf(
"%q is not a string", k))
}
return
}
}
func validateDurationGeq0() schema.SchemaValidateFunc {
return func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)

View file

@ -50,6 +50,42 @@ func TestValidateFloatRatio(t *testing.T) {
t.Fatalf("%v should be an invalid float greater than 1.0", v)
}
}
func TestValidateStringIsFloatRatio(t *testing.T) {
v := "0.9"
if _, error := validateStringIsFloatRatio()(v, "name"); error != nil {
t.Fatalf("%v should be a float between 0.0 and 1.0", v)
}
v = "-4.5"
if _, error := validateStringIsFloatRatio()(v, "name"); error == nil {
t.Fatalf("%v should be an invalid float smaller than 0.0", v)
}
v = "1.1"
if _, error := validateStringIsFloatRatio()(v, "name"); error == nil {
t.Fatalf("%v should be an invalid float greater than 1.0", v)
}
v = "false"
if _, error := validateStringIsFloatRatio()(v, "name"); error == nil {
t.Fatalf("%v should be an invalid float because it is a bool in a string", v)
}
w := false
if _, error := validateStringIsFloatRatio()(w, "name"); error == nil {
t.Fatalf("%v should be an invalid float because it is a bool", w)
}
i := 0
if _, error := validateStringIsFloatRatio()(i, "name"); error != nil {
t.Fatalf("%v should be a valid float because an int can be cast", i)
}
i = 1
if _, error := validateStringIsFloatRatio()(i, "name"); error != nil {
t.Fatalf("%v should be a valid float because an int can be cast", i)
}
i = 4
if _, error := validateStringIsFloatRatio()(i, "name"); error == nil {
t.Fatalf("%v should be an invalid float because it is an int out of range", i)
}
}
func TestValidateDurationGeq0(t *testing.T) {
v := "1ms"
if _, error := validateDurationGeq0()(v, "name"); error != nil {

View file

@ -17,13 +17,12 @@ setup() {
}
run() {
# Run the acc test suite
TF_ACC=1 go test ./docker -v -timeout 120m
# for a single test
# TF_LOG=INFO TF_ACC=1 go test -v github.com/terraform-providers/terraform-provider-docker/docker -run ^TestAccDockerContainer_basic$ -timeout 360s
# for a single test comment the previous line and uncomment the next line
#TF_LOG=INFO TF_ACC=1 go test -v github.com/terraform-providers/terraform-provider-docker/docker -run ^TestAccDockerService_full$ -timeout 360s
# keep the return for the scripts to fail and clean properly
# keep the return value for the scripts to fail and clean properly
return $?
}
@ -43,7 +42,7 @@ cleanup() {
for r in $(docker $resource ls -f 'name=tftest-' -q); do docker $resource rm "$r"; done
echo "### removed $resource ###"
done
for i in $(docker images -aq 127.0.0.1:5000/tftest-service); do docker rmi -f "$i"; done
for i in $(docker images -aq 127.0.0.1:15000/tftest-service); do docker rmi -f "$i"; done
echo "### removed service images ###"
}

View file

@ -8,4 +8,4 @@ var handleRequest = function (request, response) {
response.end(configs.prefix + ' - Hello World!');
};
var www = http.createServer(handleRequest);
www.listen(8085); // changed here on purpose
www.listen(8085); // changed here on purpose

vendor/golang.org/x/sys/LICENSE~HEAD generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -39,6 +39,22 @@
</li>
</ul>
</li>
<li<%= sidebar_current("docs-docker-resource-swarm") %>>
<a href="#">Swarm Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-docker-resource-config") %>>
<a href="/docs/providers/docker/r/config.html">docker_config</a>
</li>
<li<%= sidebar_current("docs-docker-resource-secret") %>>
<a href="/docs/providers/docker/r/secret.html">docker_secret</a>
</li>
<li<%= sidebar_current("docs-docker-resource-service") %>>
<a href="/docs/providers/docker/r/service.html">docker_service</a>
</li>
</ul>
</li>
</ul>
</div>
<% end %>

View file

@ -0,0 +1,99 @@
---
layout: "docker"
page_title: "Docker: docker_config"
sidebar_current: "docs-docker-resource-config"
description: |-
Manages the configs of a Docker service in a swarm.
---
# docker\_config
Manages the configuration of a Docker service in a swarm.
## Example Usage
### Basic
```hcl
# Creates a config
resource "docker_config" "foo_config" {
name = "foo_config"
data = "ewogICJzZXJIfQo="
}
```
### Advanced
#### Dynamically set config with a template
In this example, the `${var.foo_port}` variable is used to dynamically
set the `${port}` variable in the `foo.config.json.tpl` template, and the data of the
`foo_config` is created from the rendered template with the `base64encode` interpolation
function.
File `foo.config.json.tpl`
```json
{
"server": {
"public_port": ${port}
}
}
```
File `main.tf`
```hcl
# Renders the template with the given variables
data "template_file" "foo_config_tpl" {
template = "${file("foo.config.json.tpl")}"
vars {
port = "${var.foo_port}"
}
}
# Creates the config
resource "docker_config" "foo_config" {
name = "foo_config"
data = "${base64encode(data.template_file.foo_config_tpl.rendered)}"
}
```
#### Update config with no downtime
To update a `config`, Terraform will destroy the existing resource and create a replacement. To effectively use a `docker_config` resource with a `docker_service` resource, it's recommended to specify `create_before_destroy` in a `lifecycle` block. Provide a unique `name` attribute, for example
with one of the interpolation functions `uuid` or `timestamp` as shown
in the example below. The reason is [moby-35803](https://github.com/moby/moby/issues/35803).
```hcl
resource "docker_config" "service_config" {
name = "${var.service_name}-config-${replace(timestamp(),":", ".")}"
data = "${base64encode(data.template_file.service_config_tpl.rendered)}"
lifecycle {
ignore_changes = ["name"]
create_before_destroy = true
}
}
resource "docker_service" "service" {
# ...
configs = [
{
config_id = "${docker_config.service_config.id}"
config_name = "${docker_config.service_config.name}"
file_name = "/root/configs/configs.json"
},
]
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required, string) The name of the Docker config.
* `data` - (Required, string) The base64 encoded data of the config.
## Attributes Reference
The following attributes are exported in addition to the above configuration:
* `id` (string)

View file

@ -0,0 +1,64 @@
---
layout: "docker"
page_title: "Docker: docker_secret"
sidebar_current: "docs-docker-resource-secret"
description: |-
Manages the secrets of a Docker service in a swarm.
---
# docker\_secret
Manages the secrets of a Docker service in a swarm.
## Example Usage
### Basic
```hcl
# Creates a secret
resource "docker_secret" "foo_secret" {
name = "foo_secret"
data = "ewogICJzZXJsaasIfQo="
}
```
#### Update secret with no downtime
To update a `secret`, Terraform will destroy the existing resource and create a replacement. To effectively use a `docker_secret` resource with a `docker_service` resource, it's recommended to specify `create_before_destroy` in a `lifecycle` block. Provide a unique `name` attribute, for example
with one of the interpolation functions `uuid` or `timestamp` as shown
in the example below. The reason is [moby-35803](https://github.com/moby/moby/issues/35803).
```hcl
resource "docker_secret" "service_secret" {
name = "${var.service_name}-secret-${replace(timestamp(),":", ".")}"
data = "${base64encode(data.template_file.service_secret_tpl.rendered)}"
lifecycle {
ignore_changes = ["name"]
create_before_destroy = true
}
}
resource "docker_service" "service" {
# ...
secrets = [
{
secret_id = "${docker_secret.service_secret.id}"
secret_name = "${docker_secret.service_secret.name}"
file_name = "/root/configs/configs.json"
},
]
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required, string) The name of the Docker secret.
* `data` - (Required, string) The base64 encoded data of the secret.
## Attributes Reference
The following attributes are exported in addition to the above configuration:
* `id` (string)

View file

@ -0,0 +1,558 @@
---
layout: "docker"
page_title: "Docker: docker_service"
sidebar_current: "docs-docker-resource-service"
description: |-
Manages the lifecycle of a Docker service.
---
# docker\_service
This resource manages the lifecycle of a Docker service. By default, the creation, update and deletion of services are detached.
With the [Converge Config](#converge-config) the behavior of the `docker` CLI is imitated to guarantee that,
for example, all tasks of a service are running or were successfully updated, or to inform `terraform` that a service could not
be updated and was successfully rolled back.
## Example Usage
The following examples show the basic and advanced usage of the
Docker Service resource assuming the host machine is already part of a Swarm.
### Basic
The following configuration starts a Docker service with
- the given image,
- 1 replica,
- port `8080` exposed in `vip` mode to the host machine,
- the `container` runtime.
```hcl
resource "docker_service" "foo" {
name = "foo-service"
task_spec {
container_spec {
image = "repo.mycompany.com:8080/foo-service:v1"
}
}
endpoint_spec {
ports {
target_port = "8080"
}
}
}
```
The following command is the equivalent:
```bash
$ docker service create -d -p 8080 --name foo-service repo.mycompany.com:8080/foo-service:v1
```
### Advanced
The following configuration shows the full capabilities of a Docker Service. Currently, the [Docker API 1.32](https://docs.docker.com/engine/api/v1.32) is implemented.
```hcl
resource "docker_volume" "test_volume" {
name = "tftest-volume"
}
resource "docker_config" "service_config" {
name = "tftest-full-myconfig"
data = "ewogICJwcmVmaXgiOiAiMTIzIgp9"
}
resource "docker_secret" "service_secret" {
name = "tftest-mysecret"
data = "ewogICJrZXkiOiAiUVdFUlRZIgp9"
}
resource "docker_network" "test_network" {
name = "tftest-network"
driver = "overlay"
}
resource "docker_service" "foo" {
name = "tftest-service-basic"
task_spec {
container_spec {
image = "repo.mycompany.com:8080/foo-service:v1"
labels {
foo = "bar"
}
command = ["ls"]
args = ["-las"]
hostname = "my-fancy-service"
env {
MYFOO = "BAR"
}
dir = "/root"
user = "root"
groups = ["docker", "foogroup"]
privileges {
se_linux_context {
disable = true
user = "user-label"
role = "role-label"
type = "type-label"
level = "level-label"
}
}
read_only = true
mounts = [
{
target = "/mount/test"
source = "${docker_volume.test_volume.name}"
type = "volume"
read_only = true
bind_options {
propagation = "private"
}
},
]
stop_signal = "SIGTERM"
stop_grace_period = "10s"
healthcheck {
test = ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval = "5s"
timeout = "2s"
retries = 4
}
hosts {
host = "testhost"
ip = "10.0.1.0"
}
dns_config {
nameservers = ["8.8.8.8"]
search = ["example.org"]
options = ["timeout:3"]
}
secrets = [
{
secret_id = "${docker_secret.service_secret.id}"
secret_name = "${docker_secret.service_secret.name}"
file_name = "/secrets.json"
},
]
configs = [
{
config_id = "${docker_config.service_config.id}"
config_name = "${docker_config.service_config.name}"
file_name = "/configs.json"
},
]
}
resources {
limits {
nano_cpus = 1000000
memory_bytes = 536870912
generic_resources {
named_resources_spec = [
"GPU=UUID1"
]
discrete_resources_spec = [
"SSD=3"
]
}
}
reservation {
nano_cpus = 1000000
memory_bytes = 536870912
generic_resources {
named_resources_spec = [
"GPU=UUID1"
]
discrete_resources_spec = [
"SSD=3"
]
}
}
}
restart_policy {
condition = "on-failure"
delay = "3s"
max_attempts = 4
window = "10s"
}
placement {
constraints = [
"node.role==manager",
]
prefs = [
"spread=node.role.manager",
]
}
force_update = 0
runtime = "container"
networks = ["${docker_network.test_network.id}"]
log_driver {
name = "json-file"
options {
max-size = "10m"
max-file = "3"
}
}
}
mode {
replicated {
replicas = 2
}
}
update_config {
parallelism = 2
delay = "10s"
failure_action = "pause"
monitor = "5s"
max_failure_ratio = "0.1"
order = "start-first"
}
rollback_config {
parallelism = 2
delay = "5ms"
failure_action = "pause"
monitor = "10h"
max_failure_ratio = "0.9"
order = "stop-first"
}
endpoint_spec {
mode = "vip"
ports {
name = "random"
protocol = "tcp"
target_port = "8080"
published_port = "8080"
publish_mode = "ingress"
}
}
}
```
See also the `TestAccDockerService_full` acceptance test and the other tests for a complete overview.
## Argument Reference
The following arguments are supported:
* `auth` - (Optional, block) See [Auth](#auth) below for details.
* `name` - (Required, string) The name of the Docker service.
* `task_spec` - (Required, block) See [TaskSpec](#task-spec) below for details.
* `mode` - (Optional, block) See [Mode](#mode) below for details.
* `update_config` - (Optional, block) See [UpdateConfig](#update-rollback-config) below for details.
* `rollback_config` - (Optional, block) See [RollbackConfig](#update-rollback-config) below for details.
* `endpoint_spec` - (Optional, block) See [EndpointSpec](#endpoint-spec) below for details.
* `converge_config` - (Optional, block) See [Converge Config](#converge-config) below for details.
<a id="auth"></a>
### Auth
`auth` can be used in addition to the provider's `registry_auth`. If both are given, `auth` takes precedence and overrides the registry credentials of the provider. A minimal example is shown below the list.
* `server_address` - (Required, string) The address of the registry server
* `username` - (Optional, string) The username to use for authenticating to the registry. If this is blank, the `DOCKER_REGISTRY_USER` environment variable is used instead.
* `password` - (Optional, string) The password to use for authenticating to the registry. If this is blank, the `DOCKER_REGISTRY_PASS` environment variable is used instead.
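For illustration, a minimal sketch that authenticates against a private registry when pulling the service image; the registry address mirrors the advanced example above and `var.registry_password` is a placeholder variable:

```hcl
resource "docker_service" "foo" {
  name = "foo-service"

  auth {
    server_address = "repo.mycompany.com:8080"
    username       = "repo-user"
    password       = "${var.registry_password}"
  }

  task_spec {
    container_spec {
      image = "repo.mycompany.com:8080/foo-service:v1"
    }
  }
}
```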
<!-- start task-spec -->
<a id="task-spec"></a>
### TaskSpec
`task_spec` is a block within the configuration that can be repeated only **once** to specify the task configuration for the service. The `task_spec` block is the user-modifiable task configuration and supports the following:
* `container_spec` (Required, block) See [ContainerSpec](#container-spec) below for details.
* `resources` (Optional, block) See [Resources](#resources) below for details.
* `restart_policy` (Optional, block) See [Restart Policy](#restart-policy) below for details.
* `placement` (Optional, block) See [Placement](#placement) below for details.
* `force_update` (Optional, int) A counter that triggers an update even if no relevant parameters have been changed. See [Docker Spec](https://github.com/docker/swarmkit/blob/master/api/specs.proto#L126).
* `runtime` (Optional, string) Runtime is the type of runtime specified for the task executor. See [Docker Runtime](https://github.com/moby/moby/blob/master/api/types/swarm/runtime.go).
* `networks` - (Optional, set of strings) IDs of the networks the container is attached to.
* `log_driver` - (Optional, block) See [Log Driver](#log-driver) below for details.
<!-- start task-container-spec -->
<a id="container-spec"></a>
#### ContainerSpec
`container_spec` is a block within the configuration that can be repeated only **once** to specify the container configuration for the service. The `container_spec` block is the spec for each container created for the service and supports the following:
* `image` - (Required, string) The image used to create the Docker service.
* `labels` - (Optional, map of string/string key/value pairs) User-defined key/value metadata.
* `command` - (Optional, list of strings) The command to be run in the image.
* `args` - (Optional, list of strings) Arguments to the command.
* `hostname` - (Optional, string) The hostname to use for the container, as a valid RFC 1123 hostname.
* `env` - (Optional, map of string/string) A map of environment variables (key/value pairs).
* `dir` - (Optional, string) The working directory for commands to run in.
* `user` - (Optional, string) The user inside the container.
* `groups` - (Optional, list of strings) A list of additional groups that the container process will run as.
* `privileges` (Optional, block) See [Privileges](#privileges) below for details.
* `read_only` - (Optional, bool) Mount the container's root filesystem as read only.
* `mounts` - (Optional, set of blocks) See [Mounts](#mounts) below for details.
* `stop_signal` - (Optional, string) Signal to stop the container.
* `stop_grace_period` - (Optional, string) Amount of time to wait for the container to terminate before forcefully removing it `(ms|s|m|h)`.
* `healthcheck` - (Optional, block) See [Healthcheck](#healthcheck) below for details.
* `hosts` - (Optional, set of blocks) A list of hostname/IP mappings to add to the container's hosts file.
* `ip` - (Required, string) The IP address.
* `host` - (Required, string) The hostname.
* `dns_config` - (Optional, block) See [DNS Config](#dnsconfig) below for details.
* `secrets` - (Optional, set of blocks) See [Secrets](#secrets) below for details.
* `configs` - (Optional, set of blocks) See [Configs](#configs) below for details.
<a id="privileges"></a>
#### Privileges
`privileges` is a block within the configuration that can be repeated only **once** to specify the security options for the container. A sketch of the Windows-only `credential_spec` is shown after the list. The `privileges` block supports the following:
* `credential_spec` - (Optional, block) For managed service account (Windows only)
* `file` - (Optional, string) Load credential spec from this file.
* `registry` - (Optional, string) Load credential spec from this value in the Windows registry.
* `se_linux_context` - (Optional, block) SELinux labels of the container
* `disable` - (Optional, bool) Disable SELinux
* `user` - (Optional, string) SELinux user label
* `role` - (Optional, string) SELinux role label
* `type` - (Optional, string) SELinux type label
* `level` - (Optional, string) SELinux level label
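As a sketch, the Windows-only `credential_spec` could be set as follows; `spec.json` is a placeholder and has to exist in the credential spec directory of the Windows host:

```hcl
container_spec {
  # ...

  privileges {
    credential_spec {
      file = "spec.json"
    }
  }
}
```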
<a id="mounts"></a>
#### Mounts
`mounts` is a set of blocks within the configuration that can be repeated to specify
the mount mappings for the containers. Each block is the specification for a mount to be added to containers created as part of the service and supports
the following (a `bind` mount sketch follows the list):
* `target` - (Required, string) The container path.
* `source` - (Required, string) The mount source (e.g., a volume name, a host path)
* `type` - (Required, string) The mount type: valid values are `bind|volume|tmpfs`.
* `read_only` - (Optional, bool) Whether the mount should be read-only.
* `bind_options` - (Optional, map) Optional configuration for the `bind` type.
* `propagation` - (Optional, string) A propagation mode with the value.
* `volume_options` - (Optional, map) Optional configuration for the `volume` type.
* `no_copy` - (Optional, bool) Whether to populate the volume with data from the target.
* `labels` - (Optional, map of key/value pairs) Adding labels.
* `driver_config` - (Optional, map) The name of the driver to create the volume.
* `name` - (Optional, string) The name of the driver to create the volume.
* `options` - (Optional, map of key/value pairs) Options for the driver.
* `tmpf_options` - (Optional, map) Optional configuration for the `tmpfs` type.
* `size_bytes` - (Optional, int) The size for the tmpfs mount in bytes.
* `mode` - (Optional, int) The permission mode for the tmpfs mount in an integer.
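For example, a `bind` mount (in contrast to the volume mount of the advanced example above) could be sketched like this; the host path is a placeholder:

```hcl
container_spec {
  # ...

  mounts = [
    {
      target    = "/mount/logs"
      source    = "/var/log/foo-service"
      type      = "bind"
      read_only = true

      bind_options {
        propagation = "rprivate"
      }
    },
  ]
}
```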
<a id="healthcheck"></a>
#### Healthcheck
`healthcheck` is a block within the configuration that can be repeated only **once** to specify the extra healthcheck configuration for the containers of the service. The `healthcheck` block is a test to perform to check that the container is healthy and supports the following:
* `test` - (Required, list of strings) Command to run to check health. For example, to run `curl -f http://localhost/health` set the
command to be `["CMD", "curl", "-f", "http://localhost/health"]`.
* `interval` - (Optional, string) Time between running the check `(ms|s|m|h)`. Default: `0s`.
* `timeout` - (Optional, string) Maximum time to allow one check to run `(ms|s|m|h)`. Default: `0s`.
* `start_period` - (Optional, string) Start period for the container to initialize before counting retries towards unstable `(ms|s|m|h)`. Default: `0s`.
* `retries` - (Optional, int) Consecutive failures needed to report unhealthy. Default: `0`.
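For example, a healthcheck that gives the container 15 seconds to initialize before failed checks count towards the retry limit, assuming the container serves a `/health` endpoint on port `8080`:

```hcl
healthcheck {
  test         = ["CMD", "curl", "-f", "http://localhost:8080/health"]
  interval     = "10s"
  timeout      = "3s"
  start_period = "15s"
  retries      = 3
}
```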
<a id="dnsconfig"></a>
### DNS Config
`dns_config` is a block within the configuration that can be repeated only **once** to specify the extra DNS configuration for the containers of the service. The `dns_config` block supports the following:
* `nameservers` - (Required, list of strings) The IP addresses of the name servers, for example, `8.8.8.8`
* `search` - (Optional, list of strings) A search list for host-name lookup.
* `options` - (Optional, list of strings) A list of internal resolver variables to be modified, for example, `debug`, `ndots:3`
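A minimal sketch with placeholder values:

```hcl
dns_config {
  nameservers = ["8.8.8.8", "8.8.4.4"]
  search      = ["example.org"]
  options     = ["ndots:3"]
}
```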
<a id="secrets"></a>
### Secrets
`secrets` is a set of blocks within the configuration that can be repeated to specify
the secrets to be exposed to the containers. Each `secrets` block is a reference to a secret that will be exposed to the service and supports the following:
* `secret_id` - (Required, string) The ID of the specific secret.
* `secret_name` - (Optional, string) The name of the secret that this references; it is provided for lookup/display purposes only.
* `file_name` - (Required, string) The final filename in the filesystem, i.e. the target file the secret data is written to inside the Docker container, e.g. `/root/secret/secret.json`.
<a id="configs"></a>
### Configs
`configs` is a set of blocks within the configuration that can be repeated to specify
the configs to be exposed to the containers. Each `configs` block is a reference to a config that is exposed to the service and supports the following:
* `config_id` - (Required, string) The ID of the specific config.
* `config_name` - (Optional, string) The name of the config that this references; it is provided for lookup/display purposes only.
* `file_name` - (Required, string) The final filename in the filesystem, i.e. the target file the config data is written to inside the Docker container, e.g. `/root/config/config.json`.
<!-- end task-container-spec -->
<!-- start task-resources-spec -->
<a id="resources"></a>
#### Resources
`resources` is a block within the configuration that can be repeated only **once** to specify the resource requirements for the service. The `resources` block represents the requirements which apply to each container created as part of the service and supports the following:
* `limits` - (Optional, block) Describes the resources which can be advertised by a node and requested by a task.
* `nano_cpus` (Optional, int) CPU shares in units of 1/1e9 (or 10^-9) of the CPU. Should be at least `1000000`.
* `memory_bytes` (Optional, int) The amount of memory in bytes the container allocates.
* `generic_resources` (Optional, map) User-defined resources can be either Integer resources (e.g., `SSD=3`) or String resources (e.g., `GPU=UUID1`).
* `named_resources_spec` (Optional, set of strings) The String resources, delimited by `=`.
* `discrete_resources_spec` (Optional, set of strings) The Integer resources, delimited by `=`.
* `reservation` - (Optional, block) An object describing the resources which can be advertised by a node and requested by a task.
* `nano_cpus` (Optional, int) CPU shares in units of 1/1e9 (or 10^-9) of the CPU. Should be at least `1000000`.
* `memory_bytes` (Optional, int) The amount of memory in bytes the container allocates.
* `generic_resources` (Optional, map) User-defined resources can be either Integer resources (e.g., `SSD=3`) or String resources (e.g., `GPU=UUID1`).
* `named_resources_spec` (Optional, set of strings) The String resources, delimited by `=`.
* `discrete_resources_spec` (Optional, set of strings) The Integer resources, delimited by `=`.
<!-- end task-resources-spec -->
<!-- start task-restart-policy-spec -->
<a id="restart_policy"></a>
#### Restart Policy
`restart_policy` is a block within the configuration that can be repeated only **once** to specify the restart policy which applies to containers created as part of this service. A sketch is shown after the list. The `restart_policy` block supports the following:
* `condition` (Optional, string) Condition for restart: `(none|on-failure|any)`
* `delay` (Optional, string) Delay between restart attempts `(ms|s|m|h)`
* `max_attempts` (Optional, int) Maximum attempts to restart a given container before giving up (default value is `0`, which is ignored)
* `window` (Optional, string) The time window used to evaluate the restart policy (default value is `0`, which is unbounded) `(ms|s|m|h)`
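A sketch with example values, restarting failed containers up to three times with a 5-second delay, evaluated over a 30-second window:

```hcl
restart_policy {
  condition    = "on-failure"
  delay        = "5s"
  max_attempts = 3
  window       = "30s"
}
```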
<!-- end task-restart-policy-spec -->
<!-- start task-placement-spec -->
<a id="placement"></a>
#### Placement
`placement` is a block within the configuration that can be repeated only **once** to specify the placement preferences for the tasks of the service. A sketch is shown after the list. The `placement` block supports the following:
* `constraints` (Optional, set of strings) An array of constraints. e.g.: `node.role==manager`
* `prefs` (Optional, set of string) Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence, e.g.: `spread=node.role.manager`
* `platforms` (Optional, set of blocks) The platforms the service's image can run on.
* `architecture` (Required, string) The architecture, e.g., `amd64`.
* `os` (Required, string) The operating system, e.g., `linux`.
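A sketch of the `platforms` sub-block, which is not part of the advanced example above; the values are examples only:

```hcl
placement {
  constraints = [
    "node.role==manager",
  ]

  platforms {
    architecture = "amd64"
    os           = "linux"
  }
}
```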
<!-- end task-placement-spec -->
<!-- start log-driver-spec -->
<a id="log-driver"></a>
### Log Driver
`log_driver` is a block within the configuration that can be repeated only **once** to specify the extra log_driver configuration for the containers of the service. The `log_driver` specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified. The block supports the following:
* `name` - (Required, string) The logging driver to use. Either `(none|json-file|syslog|journald|gelf|fluentd|awslogs|splunk|etwlogs|gcplogs)`.
* `options` - (Optional, a map of strings and strings) The options for the logging driver, e.g.
```hcl
options {
awslogs-region = "us-west-2"
awslogs-group = "dev/foo-service"
}
```
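A complete `log_driver` block using these options could look like the following sketch; region and log group are placeholders:

```hcl
log_driver {
  name = "awslogs"

  options {
    awslogs-region = "us-west-2"
    awslogs-group  = "dev/foo-service"
  }
}
```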
<!-- end log-driver-spec -->
<!-- end task-spec -->
<a id="mode"></a>
### Mode
`mode` is a block within the configuration that can be repeated only **once** to specify the mode configuration for the service. The `mode` block supports the following:
* `global` - (Optional, bool) set it to `true` to run the service in the global mode
```hcl
resource "docker_service" "foo" {
...
mode {
global = true
}
...
}
```
* `replicated` - (Optional, map) Currently contains only the number of `replicas`.
```hcl
resource "docker_service" "foo" {
...
mode {
replicated {
replicas = 2
}
}
...
}
```
~> **NOTE on `mode`:** if neither `global` nor `replicated` is specified, the service
is started in `replicated` mode with 1 replica. A change of the service mode is not possible; the service has to be destroyed and recreated in the new mode.
<a id="update-rollback-config"></a>
### UpdateConfig and RollbackConfig
`update_config` or `rollback_config` is a block within the configuration that can be repeated only **once** to specify the update (or rollback) strategy for the tasks of the service. A sketch is shown after the list. Both the `update_config` and `rollback_config` blocks support the following:
* `parallelism` - (Optional, int) The maximum number of tasks to be updated in one iteration simultaneously (0 to update all at once).
* `delay` - (Optional, string) Delay between updates `(ns|us|ms|s|m|h)`, e.g. `5s`.
* `failure_action` - (Optional, string) Action on update failure: `pause|continue|rollback`.
* `monitor` - (Optional, string) Duration after each task update to monitor for failure `(ns|us|ms|s|m|h)`.
* `max_failure_ratio` - (Optional, string) The failure rate to tolerate during an update as a `float`. **Important:** the `float` needs to be wrapped in a `string` to avoid internal
casting and precision errors.
* `order` - (Optional, string) Update order: either `stop-first` or `start-first`.
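For example, a sketch of an update strategy that updates one task at a time, waits 10 seconds between tasks, monitors each updated task for 15 seconds and rolls back when more than 20% of the updates fail; note the ratio wrapped in a string:

```hcl
update_config {
  parallelism       = 1
  delay             = "10s"
  failure_action    = "rollback"
  monitor           = "15s"
  max_failure_ratio = "0.2"
  order             = "start-first"
}
```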
<a id="endpoint-spec"></a>
### EndpointSpec
`endpoint_spec` is a block within the configuration that can be repeated only **once** to specify properties that can be configured to access and load balance a service. The block supports the following:
* `mode` - (Optional, string) The mode of resolution to use for internal load balancing between tasks. `(vip|dnsrr)`. Default: `vip`.
* `ports` - (Optional, block) See [Ports](#ports) below for details.
<a id="ports"></a>
#### Ports
`ports` is a block within the configuration that can be repeated to specify
the port mappings of the container. Each `ports` block supports
the following:
* `name` - (Optional, string) A random name for the port.
* `protocol` - (Optional, string) Protocol that can be used over this port: `tcp|udp`. Default: `tcp`.
* `target_port` - (Required, int) Port inside the container.
* `published_port` - (Optional, int) The port on the swarm hosts. If not set, the value of `target_port` is used.
* `publish_mode` - (Optional, string) Represents the mode in which the port is to be published: `ingress|host`
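A sketch that publishes the container port directly on every swarm node via the `host` publish mode:

```hcl
endpoint_spec {
  mode = "vip"

  ports {
    name           = "web"
    protocol       = "tcp"
    target_port    = "8080"
    published_port = "8080"
    publish_mode   = "host"
  }
}
```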
<a id="converge-config"></a>
### Converge Config
`converge_config` is a block within the configuration that can be repeated only **once** to specify the converging configuration for the service. This imitates the behavior of the `docker` CLI: the Docker endpoint is polled at the
given interval until, for example, all tasks/replicas of the service are up and healthy, or until the timeout is reached. A sketch is shown after the list.
The `converge_config` block supports the following:
* `delay` - (Optional, string) Time between each check of the Docker endpoint `(ms|s|m|h)`, for example to check whether
all tasks are up when a service is created, or whether all tasks were successfully updated on an update. Default: `7s`.
* `timeout` - (Optional, string) The timeout of the service to reach the desired state `(s|m)`. Default: `3m`.
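For example, to make `terraform apply` block until all tasks of the service are running, or fail after 2 minutes, a sketch could look like this:

```hcl
resource "docker_service" "foo" {
  # ...

  converge_config {
    delay   = "7s"
    timeout = "2m"
  }
}
```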
## Attributes Reference
The following attributes are exported in addition to the above configuration:
* `id` (string)