diff --git a/.travis.yml b/.travis.yml
index 48ffb56f..e687ff83 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,9 +10,8 @@ before_install:
 - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
 - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
 - sudo apt-get update
-- sudo apt-get -y install docker-ce=18.03.0~ce-0~ubuntu
+- sudo apt-get -y install docker-ce=17.09.1~ce-0~ubuntu # latest stable without the bug: https://github.com/moby/moby/issues/36661
 - docker version
-- export TRAVIS="true"
 
 install:
 # This script is used by the Travis build to install a cookie for
diff --git a/README.md b/README.md
index e17bda39..c75123ed 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ Requirements
 ------------
 
 - [Terraform](https://www.terraform.io/downloads.html) 0.10.x
-- [Go](https://golang.org/doc/install) 1.8 (to build the provider plugin)
+- [Go](https://golang.org/doc/install) 1.9.1 (to build the provider plugin)
 
 Building The Provider
 ---------------------
@@ -56,12 +56,10 @@ $ make test
 
 In order to run the full suite of Acceptance tests, run `make testacc`.
 
-*Note:* Acceptance tests create real resources, and often cost money to run.
+*Note:* Acceptance tests create a local registry which will be deleted afterwards.
 
 ```sh
 $ make testacc
-# e.g. run a single acceptance test: e.g. 'TestAccDockerRegistryImage_private' in 'data_source_docker_registry_image_test.go'
-go test -v -timeout 30s github.com/terraform-providers/terraform-provider-docker/docker -run ^TestAccDockerRegistryImage_private$
 ```
 
 In order to extend the provider and test it with `terraform`, build the provider as mentioned above with
diff --git a/docker/provider.go b/docker/provider.go
index fab0fdbe..782cda31 100644
--- a/docker/provider.go
+++ b/docker/provider.go
@@ -11,6 +11,7 @@ import (
   "github.com/hashicorp/terraform/terraform"
 )
 
+// Provider creates the Docker provider
 func Provider() terraform.ResourceProvider {
   return &schema.Provider{
     Schema: map[string]*schema.Schema{
@@ -69,6 +70,7 @@ func Provider() terraform.ResourceProvider {
       "password": &schema.Schema{
         Type:          schema.TypeString,
         Optional:      true,
+        Sensitive:     true,
         ConflictsWith: []string{"registry_auth.config_file"},
         DefaultFunc:   schema.EnvDefaultFunc("DOCKER_REGISTRY_PASS", ""),
         Description:   "Password for the registry",
@@ -91,6 +93,9 @@ func Provider() terraform.ResourceProvider {
       "docker_image":   resourceDockerImage(),
       "docker_network": resourceDockerNetwork(),
       "docker_volume":  resourceDockerVolume(),
+      "docker_config":  resourceDockerConfig(),
+      "docker_secret":  resourceDockerSecret(),
+      "docker_service": resourceDockerService(),
     },
 
     DataSourcesMap: map[string]*schema.Resource{
diff --git a/docker/resource_docker_config.go b/docker/resource_docker_config.go
new file mode 100644
index 00000000..b4dd2f75
--- /dev/null
+++ b/docker/resource_docker_config.go
@@ -0,0 +1,87 @@
+package docker
+
+import (
+  "encoding/base64"
+  "log"
+
+  "github.com/docker/docker/api/types/swarm"
+  dc "github.com/fsouza/go-dockerclient"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceDockerConfig() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceDockerConfigCreate,
+    Read:   resourceDockerConfigRead,
+    Delete: resourceDockerConfigDelete,
+
+    Schema: map[string]*schema.Schema{
+      "name": &schema.Schema{
+        Type:        schema.TypeString,
+        Description: "User-defined name of the config",
+        Required:    true,
+        ForceNew:    true,
+      },
+
+      "data": &schema.Schema{
+        Type:         schema.TypeString,
+        Description:  "Base64-url-safe-encoded config data",
+        Required:     true,
+        Sensitive:    true,
+        ForceNew:     true,
+        ValidateFunc: validateStringIsBase64Encoded(),
+      },
+    },
+  }
+}
+
+func resourceDockerConfigCreate(d *schema.ResourceData, meta interface{}) error {
+  client := meta.(*ProviderConfig).DockerClient
+  data, _ := base64.StdEncoding.DecodeString(d.Get("data").(string))
+
+  createConfigOpts := dc.CreateConfigOptions{
+    ConfigSpec: swarm.ConfigSpec{
+      Annotations: swarm.Annotations{
+        Name: d.Get("name").(string),
+      },
+      Data: data,
+    },
+  }
+
+  config, err := client.CreateConfig(createConfigOpts)
+  if err != nil {
+    return err
+  }
+  d.SetId(config.ID)
+
+  return resourceDockerConfigRead(d, meta)
+}
+
+func resourceDockerConfigRead(d *schema.ResourceData, meta interface{}) error {
+  client := meta.(*ProviderConfig).DockerClient
+  config, err := client.InspectConfig(d.Id())
+
+  if err != nil {
+    if _, ok := err.(*dc.NoSuchConfig); ok {
+      log.Printf("[WARN] Config (%s) not found, removing from state", d.Id())
+      d.SetId("")
+      return nil
+    }
+    return err
+  }
+  d.SetId(config.ID)
+  return nil
+}
+
+func resourceDockerConfigDelete(d *schema.ResourceData, meta interface{}) error {
+  client := meta.(*ProviderConfig).DockerClient
+  err := client.RemoveConfig(dc.RemoveConfigOptions{
+    ID: d.Id(),
+  })
+  if err != nil {
+    return err
+  }
+
+  d.SetId("")
+  return nil
+}
diff --git a/docker/resource_docker_config_test.go b/docker/resource_docker_config_test.go
new file mode 100644
index 00000000..517c2bbe
--- /dev/null
+++ b/docker/resource_docker_config_test.go
@@ -0,0 +1,93 @@
+package docker
+
+import (
+  "fmt"
+  "testing"
+
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDockerConfig_basic(t *testing.T) {
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testCheckDockerConfigDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: `
+        resource "docker_config" "foo" {
+          name = "foo-config"
+          data = "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="
+        }
+        `,
+        Check: resource.ComposeTestCheckFunc(
+          resource.TestCheckResourceAttr("docker_config.foo", "name", "foo-config"),
+          resource.TestCheckResourceAttr("docker_config.foo", "data", "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccDockerConfig_basicUpdatable(t *testing.T) {
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testCheckDockerConfigDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: `
+        resource "docker_config" "foo" {
+          name = "tftest-myconfig-${replace(timestamp(),":", ".")}"
+          data = "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="
+
+          lifecycle {
+            ignore_changes = ["name"]
+            create_before_destroy = true
+          }
+        }
+        `,
+        Check: resource.ComposeTestCheckFunc(
+          resource.TestCheckResourceAttr("docker_config.foo", "data", "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="),
+        ),
+      },
+      resource.TestStep{
+        Config: `
+        resource "docker_config" "foo" {
+          name = "tftest-myconfig2-${replace(timestamp(),":", ".")}"
+          data = "U3VuIDI1IE1hciAyMDE4IDE0OjQ2OjE5IENFU1QK"
+
+          lifecycle {
+            ignore_changes = ["name"]
+            create_before_destroy = true
+          }
+        }
+        `,
+        Check: resource.ComposeTestCheckFunc(
+          resource.TestCheckResourceAttr("docker_config.foo", "data", "U3VuIDI1IE1hciAyMDE4IDE0OjQ2OjE5IENFU1QK"),
+        ),
+      },
+    },
+  })
+}
+
+/////////////
+// Helpers
+/////////////
+func testCheckDockerConfigDestroy(s *terraform.State) error {
+  client := testAccProvider.Meta().(*ProviderConfig).DockerClient
+  for _, rs := range s.RootModule().Resources {
+    if rs.Type != "docker_config" {
+      continue
+    }
+
+    id := rs.Primary.Attributes["id"]
+    config, err := client.InspectConfig(id)
+
+    if err == nil || config != nil {
+      return fmt.Errorf("Config with id '%s' still exists", id)
+    }
+  }
+  return nil
+}
diff --git a/docker/resource_docker_container_funcs.go b/docker/resource_docker_container_funcs.go
index f9abd674..fd3340df 100644
--- a/docker/resource_docker_container_funcs.go
+++ b/docker/resource_docker_container_funcs.go
@@ -280,6 +280,7 @@ func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error
 
   var container *dc.Container
 
+  // TODO fix this with statefunc
   loops := 1 // if it hasn't just been created, don't delay
   if !creationTime.IsZero() {
     loops = 30 // with 500ms spacing, 15 seconds; ought to be plenty
@@ -388,6 +389,15 @@ func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
   return mapped
 }
 
+// mapTypeMapValsToStringSlice maps a map to a slice with '=': e.g. foo = "bar" -> 'foo=bar'
+func mapTypeMapValsToStringSlice(typeMap map[string]interface{}) []string {
+  // preallocate capacity only; a pre-sized slice plus append would leave empty leading entries
+  mapped := make([]string, 0, len(typeMap))
+  for k, v := range typeMap {
+    mapped = append(mapped, k+"="+v.(string))
+  }
+  return mapped
+}
+
 func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) {
   apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true})
 
diff --git a/docker/resource_docker_image_funcs.go b/docker/resource_docker_image_funcs.go
index 5bffbf13..d38ad010 100644
--- a/docker/resource_docker_image_funcs.go
+++ b/docker/resource_docker_image_funcs.go
@@ -126,9 +126,6 @@ func fetchLocalImages(data *Data, client *dc.Client) error {
 }
 
 func pullImage(data *Data, client *dc.Client, authConfig *dc.AuthConfigurations, image string) error {
-  // TODO: Test local registry handling. It should be working
-  // based on the code that was ported over
-
   pullOpts := parseImageOptions(image)
 
   // If a registry was specified in the image name, try to find auth for it
diff --git a/docker/resource_docker_secret.go b/docker/resource_docker_secret.go
new file mode 100644
index 00000000..5ff17540
--- /dev/null
+++ b/docker/resource_docker_secret.go
@@ -0,0 +1,89 @@
+package docker
+
+import (
+  "encoding/base64"
+  "log"
+
+  "github.com/docker/docker/api/types/swarm"
+  dc "github.com/fsouza/go-dockerclient"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceDockerSecret() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceDockerSecretCreate,
+    Read:   resourceDockerSecretRead,
+    Delete: resourceDockerSecretDelete,
+
+    Schema: map[string]*schema.Schema{
+      "name": &schema.Schema{
+        Type:        schema.TypeString,
+        Description: "User-defined name of the secret",
+        Required:    true,
+        ForceNew:    true,
+      },
+
+      "data": &schema.Schema{
+        Type:         schema.TypeString,
+        Description:  "Base64-url-safe-encoded secret data",
+        Required:     true,
+        Sensitive:    true,
+        ForceNew:     true,
+        ValidateFunc: validateStringIsBase64Encoded(),
+      },
+    },
+  }
+}
+
+func resourceDockerSecretCreate(d *schema.ResourceData, meta interface{}) error {
+  client := meta.(*ProviderConfig).DockerClient
+  data, _ := base64.StdEncoding.DecodeString(d.Get("data").(string))
+
+  createSecretOpts := dc.CreateSecretOptions{
+    SecretSpec: swarm.SecretSpec{
+      Annotations: swarm.Annotations{
+        Name: d.Get("name").(string),
+      },
+      Data: data,
+    },
+  }
+
+  secret, err := client.CreateSecret(createSecretOpts)
+  if err != nil {
+    return err
+  }
+
+  d.SetId(secret.ID)
+
+  return resourceDockerSecretRead(d, meta)
+}
+
+func resourceDockerSecretRead(d *schema.ResourceData, meta interface{}) error {
+  client := meta.(*ProviderConfig).DockerClient
+  secret, err := client.InspectSecret(d.Id())
+
+  if err != nil {
+    if _, ok := err.(*dc.NoSuchSecret); ok {
+      log.Printf("[WARN] Secret (%s) not found, removing from state", d.Id())
+      d.SetId("")
+      return nil
+    }
+    return err
+  }
+  d.SetId(secret.ID)
+  return nil
+}
+
+func resourceDockerSecretDelete(d *schema.ResourceData, meta interface{}) error {
+  client := meta.(*ProviderConfig).DockerClient
+  err := client.RemoveSecret(dc.RemoveSecretOptions{
+    ID: d.Id(),
+  })
+
+  if err != nil {
+    return err
+  }
+
+  d.SetId("")
+  return nil
+}
diff --git a/docker/resource_docker_secret_test.go b/docker/resource_docker_secret_test.go
new file mode 100644
index 00000000..d8bf6ebd
--- /dev/null
+++ b/docker/resource_docker_secret_test.go
@@ -0,0 +1,93 @@
+package docker
+
+import (
+  "fmt"
+  "testing"
+
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDockerSecret_basic(t *testing.T) {
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testCheckDockerSecretDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: `
+        resource "docker_secret" "foo" {
+          name = "foo-secret"
+          data = "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="
+        }
+        `,
+        Check: resource.ComposeTestCheckFunc(
+          resource.TestCheckResourceAttr("docker_secret.foo", "name", "foo-secret"),
+          resource.TestCheckResourceAttr("docker_secret.foo", "data", "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccDockerSecret_basicUpdatable(t *testing.T) {
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testCheckDockerSecretDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: `
+        resource "docker_secret" "foo" {
+          name = "tftest-mysecret-${replace(timestamp(),":", ".")}"
+          data = "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="
+
+          lifecycle {
+            ignore_changes = ["name"]
+            create_before_destroy = true
+          }
+        }
+        `,
+        Check: resource.ComposeTestCheckFunc(
+          resource.TestCheckResourceAttr("docker_secret.foo", "data", "Ymxhc2RzYmxhYmxhMTI0ZHNkd2VzZA=="),
+        ),
+      },
+      resource.TestStep{
+        Config: `
+        resource "docker_secret" "foo" {
+          name = "tftest-mysecret2-${replace(timestamp(),":", ".")}"
+          data = "U3VuIDI1IE1hciAyMDE4IDE0OjUzOjIxIENFU1QK"
+
+          lifecycle {
+            ignore_changes = ["name"]
+            create_before_destroy = true
+          }
+        }
+        `,
+        Check: resource.ComposeTestCheckFunc(
+          resource.TestCheckResourceAttr("docker_secret.foo", "data", "U3VuIDI1IE1hciAyMDE4IDE0OjUzOjIxIENFU1QK"),
+        ),
+      },
+    },
+  })
+}
+
+/////////////
+// Helpers
+/////////////
+func testCheckDockerSecretDestroy(s *terraform.State) error {
+  client := testAccProvider.Meta().(*ProviderConfig).DockerClient
+  for _, rs := range s.RootModule().Resources {
+    if rs.Type != "docker_secret" {
+      continue
+    }
+
+    id := rs.Primary.Attributes["id"]
+    secret, err := client.InspectSecret(id)
+
+    if err == nil || secret != nil {
+      return fmt.Errorf("Secret with id '%s' still exists", id)
+    }
+  }
+  return nil
+}
diff --git a/docker/resource_docker_service.go b/docker/resource_docker_service.go
new file mode 100644
index 00000000..c0bb7540
--- /dev/null
+++ b/docker/resource_docker_service.go
@@ -0,0 +1,894 @@
+package docker
+
+import (
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+// resourceDockerService creates a docker service
+// https://docs.docker.com/engine/api/v1.32/#operation/ServiceCreate
+func resourceDockerService() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceDockerServiceCreate,
+    Read:   resourceDockerServiceRead,
+    Update: resourceDockerServiceUpdate,
+    Delete: resourceDockerServiceDelete,
+    Exists: resourceDockerServiceExists,
+
+    Schema: map[string]*schema.Schema{
+      "auth": &schema.Schema{
+        Type:     schema.TypeMap,
+        Optional: true,
+        ForceNew: true,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "server_address": &schema.Schema{
+              Type:     schema.TypeString,
+              Required: true,
+              ForceNew: true,
+            },
+            "username": &schema.Schema{
+              Type:        schema.TypeString,
+              Optional:    true,
+              ForceNew:    true,
+              DefaultFunc: schema.EnvDefaultFunc("DOCKER_REGISTRY_USER", ""),
+            },
+            "password": &schema.Schema{
+              Type:        schema.TypeString,
+              Optional:    true,
+              ForceNew:    true,
+              DefaultFunc: schema.EnvDefaultFunc("DOCKER_REGISTRY_PASS", ""),
+              Sensitive:   true,
+            },
+          },
+        },
+      },
+      "name": &schema.Schema{
+        Type:        schema.TypeString,
+        Description: "Name of the service",
+        Required:    true,
+        ForceNew:    true,
+      },
+      "labels": &schema.Schema{
+        Type:        schema.TypeMap,
+        Description: "User-defined key/value metadata",
+        Optional:    true,
+        Computed:    true,
+        Elem:        &schema.Schema{Type: schema.TypeString},
+      },
+      "task_spec": &schema.Schema{
+        Type:        schema.TypeList,
+        Description: "User modifiable task configuration",
+        MaxItems:    1,
+        Required:    true,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "container_spec": &schema.Schema{
+              Type:        schema.TypeList,
+              Description: "The spec for each container",
+              Required:    true,
+              MaxItems:    1,
+              Elem: &schema.Resource{
+                Schema: map[string]*schema.Schema{
+                  "image": &schema.Schema{
+                    Type:        schema.TypeString,
+                    Description: "The image name to use for the containers of the service",
+                    Required:    true,
+                  },
+                  "labels": &schema.Schema{
+                    Type:        schema.TypeMap,
+                    Description: "User-defined key/value metadata",
+                    Optional:    true,
+                    Elem:        &schema.Schema{Type: schema.TypeString},
+                  },
+                  "command": &schema.Schema{
+                    Type:        schema.TypeList,
+                    Description: "The command to be run in the image",
+                    Optional:    true,
+                    Elem:        &schema.Schema{Type: schema.TypeString},
+                  },
+                  "args": &schema.Schema{
+                    Type:        schema.TypeList,
+                    Description: "Arguments to the command",
+                    Optional:    true,
+                    Elem:        &schema.Schema{Type: schema.TypeString},
+                  },
+                  "hostname": &schema.Schema{
+                    Type:        schema.TypeString,
+                    Description: "The hostname to use for the container, as a valid RFC 1123 hostname",
+                    Optional:    true,
+                  },
+                  "env": &schema.Schema{
+                    Type:        schema.TypeMap,
+                    Description: "A map of environment variables in the form VAR=\"value\"",
+                    Optional:    true,
+                    Elem:        &schema.Schema{Type: schema.TypeString},
+                  },
+                  "dir": &schema.Schema{
+                    Type:        schema.TypeString,
+                    Description: "The working directory for commands to run in",
+                    Optional:    true,
+                  },
+                  "user": &schema.Schema{
+                    Type:        schema.TypeString,
+                    Description: "The user inside the container",
+                    Optional:    true,
+                  },
+                  "groups": &schema.Schema{
+                    Type:        schema.TypeList,
+                    Description: "A list of additional groups that the container process will run as",
+                    Optional:    true,
+                    Elem:        &schema.Schema{Type: schema.TypeString},
+                  },
+                  "privileges": &schema.Schema{
+                    Type:        schema.TypeList,
+                    Description: "Security options for the container",
+                    MaxItems:    1,
+                    Optional:    true,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "credential_spec": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "CredentialSpec for managed service account (Windows only)",
+                          MaxItems:    1,
+                          Optional:    true,
+                          Elem: &schema.Resource{
+                            Schema: map[string]*schema.Schema{
+                              "file": &schema.Schema{
+                                Type:        schema.TypeString,
+                                Description: "Load credential spec from this file",
+                                Optional:    true,
+                              },
+                              "registry": &schema.Schema{
+                                Type:        schema.TypeString,
+                                Description: "Load credential spec from this value in the Windows registry",
+                                Optional:    true,
+                              },
+                            },
+                          },
+                        },
+                        "se_linux_context": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "SELinux labels of the container",
+                          MaxItems:    1,
+                          Optional:    true,
+                          Elem: &schema.Resource{
+                            Schema: map[string]*schema.Schema{
+                              "disable": &schema.Schema{
+                                Type:        schema.TypeBool,
+                                Description: "Disable SELinux",
+                                Optional:    true,
+                              },
+                              "user": &schema.Schema{
+                                Type:        schema.TypeString,
+                                Description: "SELinux user label",
+                                Optional:    true,
+                              },
+                              "role": &schema.Schema{
+                                Type:        schema.TypeString,
+                                Description: "SELinux role label",
+                                Optional:    true,
+                              },
+                              "type": &schema.Schema{
+                                Type:        schema.TypeString,
+                                Description: "SELinux type label",
+                                Optional:    true,
+                              },
+                              "level": &schema.Schema{
+                                Type:        schema.TypeString,
+                                Description: "SELinux level label",
+                                Optional:    true,
+                              },
+                            },
+                          },
+                        },
+                      },
+                    },
+                  },
+                  "read_only": &schema.Schema{
+                    Type:        schema.TypeBool,
+                    Description: "Mount the container's root filesystem as read only",
+                    Optional:    true,
+                  },
+                  "mounts": &schema.Schema{
+                    Type:        schema.TypeSet,
+                    Description: "Specification for mounts to be added to containers created as part of the service",
+                    Optional:    true,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "target": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "Container path",
+                          Required:    true,
+                        },
+                        "source": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "Mount source (e.g. a volume name, a host path)",
+                          Required:    true,
+                        },
+                        "type": &schema.Schema{
+                          Type:         schema.TypeString,
+                          Description:  "The mount type",
+                          Required:     true,
+                          ValidateFunc: validateStringMatchesPattern(`^(bind|volume|tmpfs)$`),
+                        },
+                        "read_only": &schema.Schema{
+                          Type:        schema.TypeBool,
+                          Description: "Whether the mount should be read-only",
+                          Optional:    true,
+                        },
+                        "bind_options": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "Optional configuration for the bind type",
+                          Optional:    true,
+                          MaxItems:    1,
+                          Elem: &schema.Resource{
+                            Schema: map[string]*schema.Schema{
+                              "propagation": &schema.Schema{
+                                Type:         schema.TypeString,
+                                Description:  "The propagation mode: one of 'private', 'rprivate', 'shared', 'rshared', 'slave' or 'rslave'",
+                                Optional:     true,
+                                ValidateFunc: validateStringMatchesPattern(`^(private|rprivate|shared|rshared|slave|rslave)$`),
+                              },
+                            },
+                          },
+                        },
+                        "volume_options": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "Optional configuration for the volume type",
+                          Optional:    true,
+                          MaxItems:    1,
+                          Elem: &schema.Resource{
+                            Schema: map[string]*schema.Schema{
+                              "no_copy": &schema.Schema{
+                                Type:        schema.TypeBool,
+                                Description: "Populate volume with data from the target",
+                                Optional:    true,
+                              },
+                              "labels": &schema.Schema{
+                                Type:        schema.TypeMap,
+                                Description: "User-defined key/value metadata",
+                                Optional:    true,
+                                Elem:        &schema.Schema{Type: schema.TypeString},
+                              },
+                              "driver_name": &schema.Schema{
+                                Type:        schema.TypeString,
+                                Description: "Name of the driver to use to create the volume.",
+                                Optional:    true,
+                              },
+                              "driver_options": &schema.Schema{
+                                Type:        schema.TypeMap,
+                                Description: "key/value map of driver specific options",
+                                Optional:    true,
+                                Elem:        &schema.Schema{Type: schema.TypeString},
+                              },
+                            },
+                          },
+                        },
+                        "tmpfs_options": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "Optional configuration for the tmpfs type",
+                          Optional:    true,
+                          MaxItems:    1,
+                          Elem: &schema.Resource{
+                            Schema: map[string]*schema.Schema{
+                              "size_bytes": &schema.Schema{
+                                Type:        schema.TypeInt,
+                                Description: "The size for the tmpfs mount in bytes",
+                                Optional:    true,
+                              },
+                              "mode": &schema.Schema{
+                                Type:        schema.TypeInt,
+                                Description: "The permission mode for the tmpfs mount as an integer",
+                                Optional:    true,
+                              },
+                            },
+                          },
+                        },
+                      },
+                    },
+                  },
+                  "stop_signal": &schema.Schema{
+                    Type:        schema.TypeString,
+                    Description: "Signal to stop the container",
+                    Optional:    true,
+                  },
+                  "stop_grace_period": &schema.Schema{
+                    Type:         schema.TypeString,
+                    Description:  "Amount of time to wait for the container to terminate before forcefully removing it (ms|s|m|h)",
+                    Optional:     true,
+                    Computed:     true,
+                    ValidateFunc: validateDurationGeq0(),
+                  },
+                  "healthcheck": &schema.Schema{
+                    Type:        schema.TypeList,
+                    Description: "A test to perform to check that the container is healthy",
+                    MaxItems:    1,
+                    Optional:    true,
+                    Computed:    true,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "test": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "The test to perform as list",
+                          Required:    true,
+                          Elem:        &schema.Schema{Type: schema.TypeString},
+                        },
+                        "interval": &schema.Schema{
+                          Type:         schema.TypeString,
+                          Description:  "Time between running the check (ms|s|m|h)",
+                          Optional:     true,
+                          Default:      "0s",
+                          ValidateFunc: validateDurationGeq0(),
+                        },
+                        "timeout": &schema.Schema{
+                          Type:         schema.TypeString,
+                          Description:  "Maximum time to allow one check to run (ms|s|m|h)",
+                          Optional:     true,
+                          Default:      "0s",
+                          ValidateFunc: validateDurationGeq0(),
+                        },
+                        "start_period": &schema.Schema{
+                          Type:         schema.TypeString,
+                          Description:  "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)",
+                          Optional:     true,
+                          Default:      "0s",
+                          ValidateFunc: validateDurationGeq0(),
+                        },
+                        "retries": &schema.Schema{
+                          Type:         schema.TypeInt,
+                          Description:  "Consecutive failures needed to report unhealthy",
+                          Optional:     true,
+                          Default:      0,
+                          ValidateFunc: validateIntegerGeqThan(0),
+                        },
+                      },
+                    },
+                  },
+                  "hosts": &schema.Schema{
+                    Type:        schema.TypeSet,
+                    Description: "A list of hostname/IP mappings to add to the container's hosts file.",
+                    Optional:    true,
+                    ForceNew:    true,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "ip": &schema.Schema{
+                          Type:     schema.TypeString,
+                          Required: true,
+                          ForceNew: true,
+                        },
+
+                        "host": &schema.Schema{
+                          Type:     schema.TypeString,
+                          Required: true,
+                          ForceNew: true,
+                        },
+                      },
+                    },
+                  },
+                  "dns_config": &schema.Schema{
+                    Type:        schema.TypeList,
+                    Description: "Specification for DNS related configurations in resolver configuration file (resolv.conf)",
+                    MaxItems:    1,
+                    Optional:    true,
+                    Computed:    true,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "nameservers": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "The IP addresses of the name servers",
+                          Required:    true,
+                          Elem:        &schema.Schema{Type: schema.TypeString},
+                        },
+                        "search": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "A search list for host-name lookup",
+                          Optional:    true,
+                          Elem:        &schema.Schema{Type: schema.TypeString},
+                        },
+                        "options": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "A list of internal resolver variables to be modified (e.g., debug, ndots:3, etc.)",
+                          Optional:    true,
+                          Elem:        &schema.Schema{Type: schema.TypeString},
+                        },
+                      },
+                    },
+                  },
+                  "secrets": &schema.Schema{
+                    Type:        schema.TypeSet,
+                    Description: "References to zero or more secrets that will be exposed to the service",
+                    Optional:    true,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "secret_id": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "ID of the specific secret that we're referencing",
+                          Required:    true,
+                        },
+                        "secret_name": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "Name of the secret that this references, but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID",
+                          Optional:    true,
+                        },
+                        "file_name": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "Represents the final filename in the filesystem",
+                          Required:    true,
+                        },
+                      },
+                    },
+                  },
+                  "configs": &schema.Schema{
+                    Type:        schema.TypeSet,
+                    Description: "References to zero or more configs that will be exposed to the service",
+                    Optional:    true,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "config_id": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "ID of the specific config that we're referencing",
+                          Required:    true,
+                        },
+                        "config_name": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "Name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID",
+                          Optional:    true,
+                        },
+                        "file_name": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "Represents the final filename in the filesystem",
+                          Required:    true,
+                        },
+                      },
+                    },
+                  },
+                },
+              },
+            },
+            "resources": &schema.Schema{
+              Type:        schema.TypeList,
+              Description: "Resource requirements which apply to each individual container created as part of the service",
+              Optional:    true,
+              Computed:    true,
+              MaxItems:    1,
+              Elem: &schema.Resource{
+                Schema: map[string]*schema.Schema{
+                  "limits": &schema.Schema{
+                    Type:        schema.TypeList,
+                    Description: "Describes the resources which can be advertised by a node and requested by a task",
+                    Optional:    true,
+                    MaxItems:    1,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "nano_cpus": &schema.Schema{
+                          Type:        schema.TypeInt,
+                          Description: "CPU shares in units of 1/1e9 (or 10^-9) of the CPU. Should be at least 1000000",
+                          Optional:    true,
+                        },
+                        "memory_bytes": &schema.Schema{
+                          Type:        schema.TypeInt,
+                          Description: "The amount of memory in bytes the container allocates",
+                          Optional:    true,
+                        },
+                        "generic_resources": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "User-defined resources can be either Integer resources (e.g., SSD=3) or String resources (e.g., GPU=UUID1)",
+                          MaxItems:    1,
+                          Optional:    true,
+                          Elem: &schema.Resource{
+                            Schema: map[string]*schema.Schema{
+                              "named_resources_spec": &schema.Schema{
+                                Type:        schema.TypeSet,
+                                Description: "The String resources",
+                                Optional:    true,
+                                Elem:        &schema.Schema{Type: schema.TypeString},
+                                Set:         schema.HashString,
+                              },
+                              "discrete_resources_spec": &schema.Schema{
+                                Type:        schema.TypeSet,
+                                Description: "The Integer resources",
+                                Optional:    true,
+                                Elem:        &schema.Schema{Type: schema.TypeString},
+                                Set:         schema.HashString,
+                              },
+                            },
+                          },
+                        },
+                      },
+                    },
+                  },
+                  "reservation": &schema.Schema{
+                    Type:        schema.TypeList,
+                    Description: "An object describing the resources which can be advertised by a node and requested by a task",
+                    Optional:    true,
+                    MaxItems:    1,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "nano_cpus": &schema.Schema{
+                          Description: "CPU shares in units of 1/1e9 (or 10^-9) of the CPU. Should be at least 1000000",
+                          Type:        schema.TypeInt,
+                          Optional:    true,
+                        },
+                        "memory_bytes": &schema.Schema{
+                          Type:        schema.TypeInt,
+                          Description: "The amount of memory in bytes the container allocates",
+                          Optional:    true,
+                        },
+                        "generic_resources": &schema.Schema{
+                          Type:        schema.TypeList,
+                          Description: "User-defined resources can be either Integer resources (e.g., SSD=3) or String resources (e.g., GPU=UUID1)",
+                          MaxItems:    1,
+                          Optional:    true,
+                          Elem: &schema.Resource{
+                            Schema: map[string]*schema.Schema{
+                              "named_resources_spec": &schema.Schema{
+                                Type:        schema.TypeSet,
+                                Description: "The String resources",
+                                Optional:    true,
+                                Elem:        &schema.Schema{Type: schema.TypeString},
+                              },
+                              "discrete_resources_spec": &schema.Schema{
+                                Type:        schema.TypeSet,
+                                Description: "The Integer resources",
+                                Optional:    true,
+                                Elem:        &schema.Schema{Type: schema.TypeString},
+                              },
+                            },
+                          },
+                        },
+                      },
+                    },
+                  },
+                },
+              },
+            },
+            "restart_policy": &schema.Schema{
+              Type:        schema.TypeMap,
+              Description: "Specification for the restart policy which applies to containers created as part of this service.",
+              Optional:    true,
+              Computed:    true,
+              Elem: &schema.Resource{
+                Schema: map[string]*schema.Schema{
+                  "condition": &schema.Schema{
+                    Type:         schema.TypeString,
+                    Description:  "Condition for restart",
+                    Optional:     true,
+                    ValidateFunc: validateStringMatchesPattern(`^(none|on-failure|any)$`),
+                  },
+                  "delay": &schema.Schema{
+                    Type:         schema.TypeString,
+                    Description:  "Delay between restart attempts (ms|s|m|h)",
+                    Optional:     true,
+                    ValidateFunc: validateDurationGeq0(),
+                  },
+                  "max_attempts": &schema.Schema{
+                    Type:         schema.TypeInt,
+                    Description:  "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)",
+                    Optional:     true,
+                    ValidateFunc: validateIntegerGeqThan(0),
+                  },
+                  "window": &schema.Schema{
+                    Type:         schema.TypeString,
+                    Description:  "The time window used to evaluate the restart policy (default value is 0, which is unbounded) (ms|s|m|h)",
+                    Optional:     true,
+                    ValidateFunc: validateDurationGeq0(),
+                  },
+                },
+              },
+            },
+            "placement": &schema.Schema{
+              Type:        schema.TypeList,
+              Description: "The placement preferences",
+              Optional:    true,
+              Computed:    true,
+              MaxItems:    1,
+              Elem: &schema.Resource{
+                Schema: map[string]*schema.Schema{
+                  "constraints": &schema.Schema{
+                    Type:        schema.TypeSet,
+                    Description: "An array of constraints. e.g.: node.role==manager",
+                    Optional:    true,
+                    Elem:        &schema.Schema{Type: schema.TypeString},
+                    Set:         schema.HashString,
+                  },
+                  "prefs": &schema.Schema{
+                    Type:        schema.TypeSet,
+                    Description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence, e.g.: spread=node.role.manager",
+                    Optional:    true,
+                    Elem:        &schema.Schema{Type: schema.TypeString},
+                    Set:         schema.HashString,
+                  },
+                  "platforms": &schema.Schema{
+                    Type:        schema.TypeSet,
+                    Description: "Platforms stores all the platforms that the service's image can run on",
+                    Optional:    true,
+                    Elem: &schema.Resource{
+                      Schema: map[string]*schema.Schema{
+                        "architecture": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "The architecture, e.g. amd64",
+                          Required:    true,
+                        },
+                        "os": &schema.Schema{
+                          Type:        schema.TypeString,
+                          Description: "The operating system, e.g. linux",
+                          Required:    true,
+                        },
+                      },
+                    },
+                  },
+                },
+              },
+            },
+            "force_update": &schema.Schema{
+              Type:        schema.TypeInt,
+              Description: "A counter that triggers an update even if no relevant parameters have been changed. See https://github.com/docker/swarmkit/blob/master/api/specs.proto#L126",
+              Optional:     true,
+              Computed:     true,
+              ValidateFunc: validateIntegerGeqThan(0),
+            },
+            "runtime": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Runtime is the type of runtime specified for the task executor. See https://github.com/moby/moby/blob/master/api/types/swarm/runtime.go",
+              Optional:     true,
+              Computed:     true,
+              ValidateFunc: validateStringMatchesPattern("^(container|plugin)$"),
+            },
+            "networks": &schema.Schema{
+              Type:        schema.TypeSet,
+              Description: "Ids of the networks in which the container will be put.",
+              Optional:    true,
+              Elem:        &schema.Schema{Type: schema.TypeString},
+              Set:         schema.HashString,
+            },
+            "log_driver": &schema.Schema{
+              Type:        schema.TypeList,
+              Description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified",
+              MaxItems:    1,
+              Optional:    true,
+              Elem: &schema.Resource{
+                Schema: map[string]*schema.Schema{
+                  "name": &schema.Schema{
+                    Type:         schema.TypeString,
+                    Description:  "The logging driver to use: one of none|json-file|syslog|journald|gelf|fluentd|awslogs|splunk|etwlogs|gcplogs",
+                    Required:     true,
+                    ValidateFunc: validateStringMatchesPattern("(none|json-file|syslog|journald|gelf|fluentd|awslogs|splunk|etwlogs|gcplogs)"),
+                  },
+                  "options": &schema.Schema{
+                    Type:        schema.TypeMap,
+                    Description: "The options for the logging driver",
+                    Optional:    true,
+                    Elem:        &schema.Schema{Type: schema.TypeString},
+                  },
+                },
+              },
+            },
+          },
+        },
+      },
+      "mode": &schema.Schema{
+        Type:        schema.TypeList,
+        Description: "Scheduling mode for the service",
+        MaxItems:    1,
+        Optional:    true,
+        Computed:    true,
+        ForceNew:    true,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "replicated": &schema.Schema{
+              Type:          schema.TypeList,
+              Description:   "The replicated service mode",
+              MaxItems:      1,
+              Optional:      true,
+              Computed:      true,
+              ConflictsWith: []string{"mode.0.global"},
+              Elem: &schema.Resource{
+                Schema: map[string]*schema.Schema{
+                  "replicas": &schema.Schema{
+                    Type:         schema.TypeInt,
+                    Description:  "The number of replicas of the service",
+                    Optional:     true,
+                    Default:      1,
+                    ValidateFunc: validateIntegerGeqThan(1),
+                  },
+                },
+              },
+            },
+            "global": &schema.Schema{
+              Type:          schema.TypeBool,
+              Description:   "The global service mode",
+              Optional:      true,
+              Default:       false,
+              ConflictsWith: []string{"mode.0.replicated", "converge_config"},
+            },
+          },
+        },
+      },
+      "update_config": &schema.Schema{
+        Type:        schema.TypeList,
+        Description: "Specification for the update strategy of the service",
+        MaxItems:    1,
+        Optional:    true,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "parallelism": &schema.Schema{
+              Type:         schema.TypeInt,
+              Description:  "Maximum number of tasks to be updated in one iteration",
+              Optional:     true,
+              Default:      1,
+              ValidateFunc: validateIntegerGeqThan(0),
+            },
+            "delay": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Delay between task updates (ns|us|ms|s|m|h)",
+              Optional:     true,
+              Default:      "0s",
+              ValidateFunc: validateDurationGeq0(),
+            },
+            "failure_action": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Action on update failure: pause | continue | rollback",
+              Optional:     true,
+              Default:      "pause",
+              ValidateFunc: validateStringMatchesPattern("^(pause|continue|rollback)$"),
+            },
+            "monitor": &schema.Schema{
+              Type:        schema.TypeString,
+              Description: "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)",
+              Optional:     true,
+              Default:      "5s",
+              ValidateFunc: validateDurationGeq0(),
+            },
+            "max_failure_ratio": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Failure rate to tolerate during an update",
+              Optional:     true,
+              Default:      "0.0",
+              ValidateFunc: validateStringIsFloatRatio(),
+            },
+            "order": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Update order: either 'stop-first' or 'start-first'",
+              Optional:     true,
+              Default:      "stop-first",
+              ValidateFunc: validateStringMatchesPattern("^(stop-first|start-first)$"),
+            },
+          },
+        },
+      },
+      "rollback_config": &schema.Schema{
+        Type:        schema.TypeList,
+        Description: "Specification for the rollback strategy of the service",
+        Optional:    true,
+        MaxItems:    1,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "parallelism": &schema.Schema{
+              Type:         schema.TypeInt,
+              Description:  "Maximum number of tasks to be rolled back in one iteration",
+              Optional:     true,
+              Default:      1,
+              ValidateFunc: validateIntegerGeqThan(0),
+            },
+            "delay": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Delay between task rollbacks (ns|us|ms|s|m|h)",
+              Optional:     true,
+              Default:      "0s",
+              ValidateFunc: validateDurationGeq0(),
+            },
+            "failure_action": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Action on rollback failure: pause | continue",
+              Optional:     true,
+              Default:      "pause",
+              ValidateFunc: validateStringMatchesPattern("(pause|continue)"),
+            },
+            "monitor": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)",
+              Optional:     true,
+              Default:      "5s",
+              ValidateFunc: validateDurationGeq0(),
+            },
+            "max_failure_ratio": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Failure rate to tolerate during a rollback",
+              Optional:     true,
+              Default:      "0.0",
+              ValidateFunc: validateStringIsFloatRatio(),
+            },
+            "order": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "Rollback order: either 'stop-first' or 'start-first'",
+              Optional:     true,
+              Default:      "stop-first",
+              ValidateFunc: validateStringMatchesPattern("(stop-first|start-first)"),
+            },
+          },
+        },
+      },
+      "endpoint_spec": &schema.Schema{
+        Type:        schema.TypeList,
+        Description: "Properties that can be configured to access and load balance a service",
+        Optional:    true,
+        Computed:    true,
+        MaxItems:    1,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "mode": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "The mode of resolution to use for internal load balancing between tasks",
+              Optional:     true,
+              Default:      "vip",
+              ValidateFunc: validateStringMatchesPattern(`^(vip|dnsrr)$`),
+            },
+            "ports": &schema.Schema{
+              Type:        schema.TypeSet,
+              Description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if 'vip' resolution mode is used.",
+              Optional:    true,
+              Elem: &schema.Resource{
+                Schema: map[string]*schema.Schema{
+                  "name": &schema.Schema{
+                    Type:        schema.TypeString,
+                    Description: "An arbitrary name for the port",
+                    Optional:    true,
+                  },
+                  "protocol": &schema.Schema{
+                    Type:         schema.TypeString,
+                    Description:  "Represents the protocol of a port: 'tcp' or 'udp'",
+                    Optional:     true,
+                    Default:      "tcp",
+                    ValidateFunc: validateStringMatchesPattern(`^(tcp|udp)$`),
+                  },
+                  "target_port": &schema.Schema{
+                    Type:        schema.TypeInt,
+                    Description: "The port inside the container",
+                    Required:    true,
+                  },
+                  "published_port": &schema.Schema{
+                    Type:        schema.TypeInt,
+                    Description: "The port on the swarm hosts. If not set, the value of 'target_port' will be used",
+                    Optional:    true,
+                  },
+                  "publish_mode": &schema.Schema{
+                    Type:         schema.TypeString,
+                    Description:  "Represents the mode in which the port is to be published: 'ingress' or 'host'",
+                    Optional:     true,
+                    Default:      "ingress",
+                    ValidateFunc: validateStringMatchesPattern(`^(host|ingress)$`),
+                  },
+                },
+              },
+            },
+          },
+        },
+      },
+      "converge_config": &schema.Schema{
+        Type:          schema.TypeList,
+        Description:   "A configuration to ensure that a service converges, i.e. reaches the desired state of all tasks being up and running",
+        MaxItems:      1,
+        Optional:      true,
+        ConflictsWith: []string{"mode.0.global"},
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "delay": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "The interval to check if the desired state is reached (ms|s). Default: 7s",
+              Optional:     true,
+              Default:      "7s",
+              ValidateFunc: validateDurationGeq0(),
+            },
+            "timeout": &schema.Schema{
+              Type:         schema.TypeString,
+              Description:  "The timeout of the service to reach the desired state (s|m). Default: 3m",
+              Optional:     true,
+              Default:      "3m",
+              ValidateFunc: validateDurationGeq0(),
+            },
+          },
+        },
+      },
+    },
+  }
+}
diff --git a/docker/resource_docker_service_funcs.go b/docker/resource_docker_service_funcs.go
new file mode 100644
index 00000000..5830d03a
--- /dev/null
+++ b/docker/resource_docker_service_funcs.go
@@ -0,0 +1,1379 @@
+package docker
+
+import (
+  "context"
+  "encoding/json"
+  "fmt"
+  "log"
+  "os"
+  "strconv"
+  "strings"
+  "time"
+
+  "github.com/docker/docker/api/types/container"
+  "github.com/docker/docker/api/types/mount"
+  "github.com/docker/docker/api/types/swarm"
+  dc "github.com/fsouza/go-dockerclient"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+type convergeConfig struct {
+  timeout    time.Duration
+  timeoutRaw string
+  delay      time.Duration
+}
+
+/////////////////
+// TF CRUD funcs
+/////////////////
+func resourceDockerServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+  client := meta.(*ProviderConfig).DockerClient
+  if client == nil {
+    return false, nil
+  }
+
+  apiService, err := fetchDockerService(d.Id(), d.Get("name").(string), client)
+  if err != nil {
+    return false, err
+  }
+  if apiService == nil {
+    return false, nil
+  }
+
+  return true, nil
+}
+
+func resourceDockerServiceCreate(d *schema.ResourceData, meta interface{}) error {
+  var err error
+  client := meta.(*ProviderConfig).DockerClient
+
+  serviceSpec, err := createServiceSpec(d)
+  if err != nil {
+    return err
+  }
+
+  createOpts := dc.CreateServiceOptions{
+    ServiceSpec: serviceSpec,
+  }
+
+  if v, ok := d.GetOk("auth"); ok {
+    createOpts.Auth = authToServiceAuth(v.(map[string]interface{}))
+  } else {
+    createOpts.Auth = fromRegistryAuth(d.Get("task_spec.0.container_spec.0.image").(string), meta.(*ProviderConfig).AuthConfigs.Configs)
+  }
+
+  service, err := client.CreateService(createOpts)
+  if err != nil {
+    return err
+  }
+  if v, ok := d.GetOk("converge_config"); ok {
+    convergeConfig := createConvergeConfig(v.([]interface{}))
+    log.Printf("[INFO] Waiting for Service '%s' to be created with timeout: %v", service.ID, convergeConfig.timeoutRaw)
+    timeout, _ := time.ParseDuration(convergeConfig.timeoutRaw)
+    stateConf := &resource.StateChangeConf{
+      Pending:    serviceCreatePendingStates,
+      Target:     []string{"running", "complete"},
+      Refresh:    resourceDockerServiceCreateRefreshFunc(service.ID, meta),
+      Timeout:    timeout,
+      MinTimeout: 5 * time.Second,
+      Delay:      convergeConfig.delay,
+    }
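+    // The StateChangeConf polls resourceDockerServiceCreateRefreshFunc (defined
+    // further down) until it reports "running" or "complete": 'Delay' postpones the
+    // first check, 'MinTimeout' spaces out the retries, and the converge_config
+    // timeout bounds the whole wait.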
+
+    // Wait, catching any errors
+    _, err := stateConf.WaitForState()
+    if err != nil {
+      // the service will be deleted in case it cannot be converged
+      if deleteErr := deleteService(service.ID, d, client); deleteErr != nil {
+        return deleteErr
+      }
+      if strings.Contains(err.Error(), "timeout while waiting for state") {
+        return &DidNotConvergeError{ServiceID: service.ID, Timeout: convergeConfig.timeout}
+      }
+      return err
+    }
+  }
+
+  return resourceDockerServiceRead(d, meta)
+}
+
+func resourceDockerServiceRead(d *schema.ResourceData, meta interface{}) error {
+  client := meta.(*ProviderConfig).DockerClient
+
+  apiService, err := fetchDockerService(d.Id(), d.Get("name").(string), client)
+  if err != nil {
+    return err
+  }
+  if apiService == nil {
+    d.SetId("")
+    return nil
+  }
+
+  service, err := client.InspectService(apiService.ID)
+  if err != nil {
+    return fmt.Errorf("Error inspecting service %s: %s", apiService.ID, err)
+  }
+
+  jsonObj, _ := json.Marshal(service)
+  log.Printf("[DEBUG] Docker service inspect: %s", jsonObj)
+
+  d.SetId(service.ID)
+  d.Set("name", service.Spec.Name)
+  d.Set("labels", service.Spec.Labels)
+
+  if err = d.Set("task_spec", flattenTaskSpec(service.Spec.TaskTemplate)); err != nil {
+    log.Printf("[WARN] failed to set task spec from API: %s", err)
+  }
+  if err = d.Set("mode", flattenServiceMode(service.Spec.Mode)); err != nil {
+    log.Printf("[WARN] failed to set mode from API: %s", err)
+  }
+  if err := d.Set("update_config", flattenServiceUpdateOrRollbackConfig(service.Spec.UpdateConfig)); err != nil {
+    log.Printf("[WARN] failed to set update_config from API: %s", err)
+  }
+  if err = d.Set("rollback_config", flattenServiceUpdateOrRollbackConfig(service.Spec.RollbackConfig)); err != nil {
+    log.Printf("[WARN] failed to set rollback_config from API: %s", err)
+  }
+  if err = d.Set("endpoint_spec", flattenServiceEndpointSpec(service.Endpoint.Spec)); err != nil {
+    log.Printf("[WARN] failed to set endpoint spec from API: %s", err)
+  }
+
+  return nil
+}
+
+func resourceDockerServiceUpdate(d *schema.ResourceData, meta interface{}) error {
+  client := meta.(*ProviderConfig).DockerClient
+
+  service, err := client.InspectService(d.Id())
+  if err != nil {
+    return err
+  }
+
+  serviceSpec, err := createServiceSpec(d)
+  if err != nil {
+    return err
+  }
+
+  updateOpts := dc.UpdateServiceOptions{
+    ServiceSpec: serviceSpec,
+    Version:     service.Version.Index,
+  }
+
+  if v, ok := d.GetOk("auth"); ok {
+    updateOpts.Auth = authToServiceAuth(v.(map[string]interface{}))
+  } else {
+    updateOpts.Auth = fromRegistryAuth(d.Get("task_spec.0.container_spec.0.image").(string), meta.(*ProviderConfig).AuthConfigs.Configs)
+  }
+
+  if err = client.UpdateService(d.Id(), updateOpts); err != nil {
+    return err
+  }
+
+  if v, ok := d.GetOk("converge_config"); ok {
+    convergeConfig := createConvergeConfig(v.([]interface{}))
+    log.Printf("[INFO] Waiting for Service '%s' to be updated with timeout: %v", service.ID, convergeConfig.timeoutRaw)
+    timeout, _ := time.ParseDuration(convergeConfig.timeoutRaw)
+    stateConf := &resource.StateChangeConf{
+      Pending:    serviceUpdatePendingStates,
+      Target:     []string{"completed"},
+      Refresh:    resourceDockerServiceUpdateRefreshFunc(service.ID, meta),
+      Timeout:    timeout,
+      MinTimeout: 5 * time.Second,
+      Delay:      7 * time.Second,
+    }
+
+    // Wait, catching any errors
+    state, err := stateConf.WaitForState()
+    log.Printf("[DEBUG] State awaited: %v with error: %v", state, err)
+    if err != nil {
+      if strings.Contains(err.Error(), "timeout while waiting for state") {
log.Printf("######## did not converge error...") + return &DidNotConvergeError{ServiceID: service.ID, Timeout: convergeConfig.timeout} + } + log.Printf("######## OTHER converge error...") + return err + } + } + + return resourceDockerServiceRead(d, meta) +} + +func resourceDockerServiceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ProviderConfig).DockerClient + + if err := deleteService(d.Id(), d, client); err != nil { + return err + } + + d.SetId("") + return nil +} + +///////////////// +// Helpers +///////////////// +// fetchDockerService fetches a service by its name or id +func fetchDockerService(ID string, name string, client *dc.Client) (*swarm.Service, error) { + apiServices, err := client.ListServices(dc.ListServicesOptions{}) + + if err != nil { + return nil, fmt.Errorf("Error fetching service information from Docker: %s", err) + } + + for _, apiService := range apiServices { + if apiService.ID == ID || apiService.Spec.Name == name { + return &apiService, nil + } + } + + return nil, nil +} + +// deleteService deletes the service with the given id +func deleteService(serviceID string, d *schema.ResourceData, client *dc.Client) error { + // get containerIDs of the running service because they do not exist after the service is deleted + serviceContainerIds := make([]string, 0) + if _, ok := d.GetOk("task_spec.0.container_spec.0.stop_grace_period"); ok { + filter := make(map[string][]string) + filter["service"] = []string{d.Get("name").(string)} + tasks, err := client.ListTasks(dc.ListTasksOptions{ + Filters: filter, + }) + if err != nil { + return err + } + for _, t := range tasks { + task, _ := client.InspectTask(t.ID) + log.Printf("[INFO] Found container ['%s'] for destroying: '%s'", task.Status.State, task.Status.ContainerStatus.ContainerID) + if strings.TrimSpace(task.Status.ContainerStatus.ContainerID) != "" && task.Status.State != swarm.TaskStateShutdown { + serviceContainerIds = append(serviceContainerIds, task.Status.ContainerStatus.ContainerID) + } + } + } + + // delete the service + log.Printf("[INFO] Deleting service: '%s'", serviceID) + removeOpts := dc.RemoveServiceOptions{ + ID: serviceID, + } + + if err := client.RemoveService(removeOpts); err != nil { + if _, ok := err.(*dc.NoSuchService); ok { + log.Printf("[WARN] Service (%s) not found, removing from state", serviceID) + d.SetId("") + return nil + } + return fmt.Errorf("Error deleting service %s: %s", serviceID, err) + } + + // destroy each container after a grace period if specified + if v, ok := d.GetOk("task_spec.0.container_spec.0.stop_grace_period"); ok { + for _, containerID := range serviceContainerIds { + destroyGraceSeconds, _ := time.ParseDuration(v.(string)) + log.Printf("[INFO] Waiting for container: '%s' to exit: max %v", containerID, destroyGraceSeconds) + ctx, cancel := context.WithTimeout(context.Background(), destroyGraceSeconds) + defer cancel() + exitCode, _ := client.WaitContainerWithContext(containerID, ctx) + log.Printf("[INFO] Container exited with code [%v]: '%s'", exitCode, containerID) + + removeOpts := dc.RemoveContainerOptions{ + ID: containerID, + RemoveVolumes: true, + Force: true, + } + + log.Printf("[INFO] Removing container: '%s'", containerID) + if err := client.RemoveContainer(removeOpts); err != nil { + if !(strings.Contains(err.Error(), "No such container") || strings.Contains(err.Error(), "is already in progress")) { + return fmt.Errorf("Error deleting container %s: %s", containerID, err) + } + } + } + } + + return nil +} + +//////// Convergers + 
+
+// DidNotConvergeError is the error returned when a service does not converge in
+// the defined time
+type DidNotConvergeError struct {
+  ServiceID string
+  Timeout   time.Duration
+  Err       error
+}
+
+// Error returns the custom error message if a service does not converge
+func (err *DidNotConvergeError) Error() string {
+  if err.Err != nil {
+    return err.Err.Error()
+  }
+  return "Service with ID (" + err.ServiceID + ") did not converge after " + err.Timeout.String()
+}
+
+// resourceDockerServiceCreateRefreshFunc refreshes the state of a service when it is created and needs to converge
+func resourceDockerServiceCreateRefreshFunc(
+  serviceID string, meta interface{}) resource.StateRefreshFunc {
+  return func() (interface{}, string, error) {
+    client := meta.(*ProviderConfig).DockerClient
+    ctx := context.Background()
+
+    var updater progressUpdater
+
+    if updater == nil {
+      updater = &replicatedConsoleLogUpdater{}
+    }
+
+    filter := make(map[string][]string)
+    filter["service"] = []string{serviceID}
+    filter["desired-state"] = []string{"running"}
+
+    getUpToDateTasks := func() ([]swarm.Task, error) {
+      return client.ListTasks(dc.ListTasksOptions{
+        Filters: filter,
+        Context: ctx,
+      })
+    }
+    var service *swarm.Service
+    service, err := client.InspectService(serviceID)
+    if err != nil {
+      return nil, "", err
+    }
+
+    tasks, err := getUpToDateTasks()
+    if err != nil {
+      return nil, "", err
+    }
+
+    activeNodes, err := getActiveNodes(ctx, client)
+    if err != nil {
+      return nil, "", err
+    }
+
+    serviceCreateStatus, err := updater.update(service, tasks, activeNodes, false)
+    if err != nil {
+      return nil, "", err
+    }
+
+    if serviceCreateStatus {
+      return service.ID, "running", nil
+    }
+
+    return service.ID, "creating", nil
+  }
+}
+
+// resourceDockerServiceUpdateRefreshFunc refreshes the state of a service when it is updated and needs to converge
+func resourceDockerServiceUpdateRefreshFunc(
+  serviceID string, meta interface{}) resource.StateRefreshFunc {
+  return func() (interface{}, string, error) {
+    client := meta.(*ProviderConfig).DockerClient
+    ctx := context.Background()
+
+    var (
+      updater  progressUpdater
+      rollback bool
+    )
+
+    if updater == nil {
+      updater = &replicatedConsoleLogUpdater{}
+    }
+    rollback = false
+
+    filter := make(map[string][]string)
+    filter["service"] = []string{serviceID}
+    filter["desired-state"] = []string{"running"}
+
+    getUpToDateTasks := func() ([]swarm.Task, error) {
+      return client.ListTasks(dc.ListTasksOptions{
+        Filters: filter,
+        Context: ctx,
+      })
+    }
+    var service *swarm.Service
+    service, err := client.InspectService(serviceID)
+    if err != nil {
+      return nil, "", err
+    }
+
+    if service.UpdateStatus != nil {
+      log.Printf("[DEBUG] update status: %v", service.UpdateStatus.State)
+      switch service.UpdateStatus.State {
+      case swarm.UpdateStateUpdating:
+        rollback = false
+      case swarm.UpdateStateCompleted:
+        return service.ID, "completed", nil
+      case swarm.UpdateStateRollbackStarted:
+        rollback = true
+      case swarm.UpdateStateRollbackCompleted:
+        return nil, "", fmt.Errorf("service rollback completed: %s", service.UpdateStatus.Message)
+      case swarm.UpdateStatePaused:
+        return nil, "", fmt.Errorf("service update paused: %s", service.UpdateStatus.Message)
+      case swarm.UpdateStateRollbackPaused:
+        return nil, "", fmt.Errorf("service rollback paused: %s", service.UpdateStatus.Message)
+      }
+    }
+
+    tasks, err := getUpToDateTasks()
+    if err != nil {
+      return nil, "", err
+    }
+
+    activeNodes, err := getActiveNodes(ctx, client)
+    if err != nil {
+      return nil, "", err
+    }
+
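+    // Ask the updater whether all tasks for the new spec are running; 'rollback'
+    // means the swarm reverted to the previous spec, so a completed update is
+    // reported as an error below.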
+    isUpdateCompleted, err := updater.update(service, tasks, activeNodes, rollback)
+    if err != nil {
+      return nil, "", err
+    }
+
+    if isUpdateCompleted {
+      if rollback {
+        return nil, "", fmt.Errorf("service rollback completed: %s", service.UpdateStatus.Message)
+      }
+      return service.ID, "completed", nil
+    }
+
+    return service.ID, "updating", nil
+  }
+}
+
+// getActiveNodes gets the active nodes within a swarm
+func getActiveNodes(ctx context.Context, client *dc.Client) (map[string]struct{}, error) {
+  nodes, err := client.ListNodes(dc.ListNodesOptions{Context: ctx})
+  if err != nil {
+    return nil, err
+  }
+
+  activeNodes := make(map[string]struct{})
+  for _, n := range nodes {
+    if n.Status.State != swarm.NodeStateDown {
+      activeNodes[n.ID] = struct{}{}
+    }
+  }
+  return activeNodes, nil
+}
+
+// progressUpdater interface for progressive task updates
+type progressUpdater interface {
+  update(service *swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error)
+}
+
+// replicatedConsoleLogUpdater is the console log updater for replicated services
+type replicatedConsoleLogUpdater struct {
+  // used for mapping slots to a contiguous space
+  // this also causes progress bars to appear in order
+  slotMap map[int]int
+
+  initialized bool
+  done        bool
+}
+
+// update is the concrete implementation of updating replicated services
+func (u *replicatedConsoleLogUpdater) update(service *swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) {
+  if service.Spec.Mode.Replicated == nil || service.Spec.Mode.Replicated.Replicas == nil {
+    return false, fmt.Errorf("no replica count")
+  }
+  replicas := *service.Spec.Mode.Replicated.Replicas
+
+  if !u.initialized {
+    u.slotMap = make(map[int]int)
+    u.initialized = true
+  }
+
+  // get the task for each slot. there can be multiple slots on one node
+  tasksBySlot := u.tasksBySlot(tasks, activeNodes)
+
+  // if a converged state is reached, check if it is still converged.
+  if u.done {
+    for _, task := range tasksBySlot {
+      if task.Status.State != swarm.TaskStateRunning {
+        u.done = false
+        break
+      }
+    }
+  }
+
+  running := uint64(0)
+
+  // map the slots to keep track of their state individually
+  for _, task := range tasksBySlot {
+    mappedSlot := u.slotMap[task.Slot]
+    if mappedSlot == 0 {
+      mappedSlot = len(u.slotMap) + 1
+      u.slotMap[task.Slot] = mappedSlot
+    }
+
+    // if a task is in the desired state count it as running
+    if !terminalState(task.DesiredState) && task.Status.State == swarm.TaskStateRunning {
+      running++
+    }
+  }
+
+  // check whether the number of running tasks matches the number of replicas defined
+  if !u.done {
+    log.Printf("[INFO] ... progress: [%v/%v] - rollback: %v", running, replicas, rollback)
+    if running == replicas {
+      log.Printf("[INFO] DONE: all %v replicas running", running)
+      u.done = true
+    }
+  }
+
+  return running == replicas, nil
+}
+
+// tasksBySlot maps the tasks to slots on active nodes. There can be multiple slots on active nodes.
+// A task is analogous to a "slot" where (on a node) the scheduler places a container.
+func (u *replicatedConsoleLogUpdater) tasksBySlot(tasks []swarm.Task, activeNodes map[string]struct{}) map[int]swarm.Task {
+  // if there are multiple tasks with the same slot number, favor the one
+  // with the *lowest* desired state. This can happen in restart
+  // scenarios.
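+  // numberedStates (defined elsewhere in this package) is assumed to map each
+  // swarm.TaskState to a monotonically increasing integer, so comparing the mapped
+  // values orders states; a zero value marks a state that is ignored here.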
+  tasksBySlot := make(map[int]swarm.Task)
+  for _, task := range tasks {
+    if numberedStates[task.DesiredState] == 0 || numberedStates[task.Status.State] == 0 {
+      continue
+    }
+    if existingTask, ok := tasksBySlot[task.Slot]; ok {
+      if numberedStates[existingTask.DesiredState] < numberedStates[task.DesiredState] {
+        continue
+      }
+      // if the desired states match, observed state breaks
+      // ties. This can happen with the "start first" service
+      // update mode.
+      if numberedStates[existingTask.DesiredState] == numberedStates[task.DesiredState] &&
+        numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] {
+        continue
+      }
+    }
+    // if the task is on a node and this node is active, then map this task to a slot
+    if task.NodeID != "" {
+      if _, nodeActive := activeNodes[task.NodeID]; !nodeActive {
+        continue
+      }
+    }
+    tasksBySlot[task.Slot] = task
+  }
+
+  return tasksBySlot
+}
+
+// terminalState determines if the given state is a terminal state,
+// meaning 'higher' than running (see numberedStates)
+func terminalState(state swarm.TaskState) bool {
+  return numberedStates[state] > numberedStates[swarm.TaskStateRunning]
+}
+
+//////// Mappers
+// createServiceSpec creates the service spec: https://docs.docker.com/engine/api/v1.32/#operation/ServiceCreate
+func createServiceSpec(d *schema.ResourceData) (swarm.ServiceSpec, error) {
+  serviceSpec := swarm.ServiceSpec{
+    Annotations: swarm.Annotations{
+      Name: d.Get("name").(string),
+    },
+  }
+
+  labels, err := createServiceLabels(d)
+  if err != nil {
+    return serviceSpec, err
+  }
+  serviceSpec.Labels = labels
+
+  taskTemplate, err := createServiceTaskSpec(d)
+  if err != nil {
+    return serviceSpec, err
+  }
+  serviceSpec.TaskTemplate = taskTemplate
+
+  mode, err := createServiceMode(d)
+  if err != nil {
+    return serviceSpec, err
+  }
+  serviceSpec.Mode = mode
+
+  updateConfig, err := createServiceUpdateConfig(d)
+  if err != nil {
+    return serviceSpec, err
+  }
+  serviceSpec.UpdateConfig = updateConfig
+
+  rollbackConfig, err := createServiceRollbackConfig(d)
+  if err != nil {
+    return serviceSpec, err
+  }
+  serviceSpec.RollbackConfig = rollbackConfig
+
+  endpointSpec, err := createServiceEndpointSpec(d)
+  if err != nil {
+    return serviceSpec, err
+  }
+  serviceSpec.EndpointSpec = endpointSpec
+
+  return serviceSpec, nil
+}
+
+// createServiceLabels creates the labels for the service
+func createServiceLabels(d *schema.ResourceData) (map[string]string, error) {
+  if v, ok := d.GetOk("labels"); ok {
+    return mapTypeMapValsToString(v.(map[string]interface{})), nil
+  }
+  return nil, nil
+}
+
+// == start taskSpec
+// createServiceTaskSpec creates the task template for the service
+func createServiceTaskSpec(d *schema.ResourceData) (swarm.TaskSpec, error) {
+  taskSpec := swarm.TaskSpec{}
+  if v, ok := d.GetOk("task_spec"); ok {
+    if len(v.([]interface{})) > 0 {
+      for _, rawTaskSpec := range v.([]interface{}) {
+        rawTaskSpec := rawTaskSpec.(map[string]interface{})
+
+        if rawContainerSpec, ok := rawTaskSpec["container_spec"]; ok {
+          containerSpec, err := createContainerSpec(rawContainerSpec)
+          if err != nil {
+            return taskSpec, err
+          }
+          taskSpec.ContainerSpec = containerSpec
+        }
+
+        if rawResourcesSpec, ok := rawTaskSpec["resources"]; ok {
+          resources, err := createResources(rawResourcesSpec)
+          if err != nil {
+            return taskSpec, err
+          }
+          taskSpec.Resources = resources
+        }
+        if rawRestartPolicySpec, ok := rawTaskSpec["restart_policy"]; ok {
+          restartPolicy, err := createRestartPolicy(rawRestartPolicySpec)
+          if err != nil {
+            return taskSpec, err
taskSpec, err + } + taskSpec.RestartPolicy = restartPolicy + } + if rawPlacementSpec, ok := rawTaskSpec["placement"]; ok { + placement, err := createPlacement(rawPlacementSpec) + if err != nil { + return taskSpec, err + } + taskSpec.Placement = placement + } + if rawForceUpdate, ok := rawTaskSpec["force_update"]; ok { + taskSpec.ForceUpdate = uint64(rawForceUpdate.(int)) + } + if rawRuntimeSpec, ok := rawTaskSpec["runtime"]; ok { + taskSpec.Runtime = swarm.RuntimeType(rawRuntimeSpec.(string)) + } + if rawNetworksSpec, ok := rawTaskSpec["networks"]; ok { + networks, err := createServiceNetworks(rawNetworksSpec) + if err != nil { + return taskSpec, err + } + taskSpec.Networks = networks + } + if rawLogDriverSpec, ok := rawTaskSpec["log_driver"]; ok { + logDriver, err := createLogDriver(rawLogDriverSpec) + if err != nil { + return taskSpec, err + } + taskSpec.LogDriver = logDriver + } + } + } + } + return taskSpec, nil +} + +// createContainerSpec creates the container spec +func createContainerSpec(v interface{}) (*swarm.ContainerSpec, error) { + containerSpec := swarm.ContainerSpec{} + if len(v.([]interface{})) > 0 { + for _, rawContainerSpec := range v.([]interface{}) { + rawContainerSpec := rawContainerSpec.(map[string]interface{}) + if value, ok := rawContainerSpec["image"]; ok { + containerSpec.Image = value.(string) + } + if value, ok := rawContainerSpec["labels"]; ok { + containerSpec.Labels = mapTypeMapValsToString(value.(map[string]interface{})) + } + if value, ok := rawContainerSpec["command"]; ok { + containerSpec.Command = stringListToStringSlice(value.([]interface{})) + } + if value, ok := rawContainerSpec["args"]; ok { + containerSpec.Args = stringListToStringSlice(value.([]interface{})) + } + if value, ok := rawContainerSpec["hostname"]; ok { + containerSpec.Hostname = value.(string) + } + if value, ok := rawContainerSpec["env"]; ok { + containerSpec.Env = mapTypeMapValsToStringSlice(value.(map[string]interface{})) + } + if value, ok := rawContainerSpec["dir"]; ok { + containerSpec.Dir = value.(string) + } + if value, ok := rawContainerSpec["user"]; ok { + containerSpec.User = value.(string) + } + if value, ok := rawContainerSpec["groups"]; ok { + containerSpec.Groups = stringListToStringSlice(value.([]interface{})) + } + if value, ok := rawContainerSpec["privileges"]; ok { + if len(value.([]interface{})) > 0 { + containerSpec.Privileges = &swarm.Privileges{} + + for _, rawPrivilegesSpec := range value.([]interface{}) { + rawPrivilegesSpec := rawPrivilegesSpec.(map[string]interface{}) + + if value, ok := rawPrivilegesSpec["credential_spec"]; ok { + if len(value.([]interface{})) > 0 { + containerSpec.Privileges.CredentialSpec = &swarm.CredentialSpec{} + for _, rawCredentialSpec := range value.([]interface{}) { + rawCredentialSpec := rawCredentialSpec.(map[string]interface{}) + if value, ok := rawCredentialSpec["file"]; ok { + containerSpec.Privileges.CredentialSpec.File = value.(string) + } + if value, ok := rawCredentialSpec["registry"]; ok { + containerSpec.Privileges.CredentialSpec.Registry = value.(string) + } + } + } + } + if value, ok := rawPrivilegesSpec["se_linux_context"]; ok { + if len(value.([]interface{})) > 0 { + containerSpec.Privileges.SELinuxContext = &swarm.SELinuxContext{} + for _, rawSELinuxContext := range value.([]interface{}) { + rawSELinuxContext := rawSELinuxContext.(map[string]interface{}) + if value, ok := rawSELinuxContext["disable"]; ok { + containerSpec.Privileges.SELinuxContext.Disable = value.(bool) + } + if value, ok := rawSELinuxContext["user"]; ok
{ + containerSpec.Privileges.SELinuxContext.User = value.(string) + } + if value, ok := rawSELinuxContext["role"]; ok { + containerSpec.Privileges.SELinuxContext.Role = value.(string) + } + if value, ok := rawSELinuxContext["type"]; ok { + containerSpec.Privileges.SELinuxContext.Type = value.(string) + } + if value, ok := rawSELinuxContext["level"]; ok { + containerSpec.Privileges.SELinuxContext.Level = value.(string) + } + } + } + } + } + } + if value, ok := rawContainerSpec["read_only"]; ok { + containerSpec.ReadOnly = value.(bool) + } + if value, ok := rawContainerSpec["mounts"]; ok { + mounts := []mount.Mount{} + + for _, rawMount := range value.(*schema.Set).List() { + rawMount := rawMount.(map[string]interface{}) + mountType := mount.Type(rawMount["type"].(string)) + mountInstance := mount.Mount{ + Type: mountType, + Target: rawMount["target"].(string), + Source: rawMount["source"].(string), + } + if value, ok := rawMount["read_only"]; ok { + mountInstance.ReadOnly = value.(bool) + } + + if mountType == mount.TypeBind { + if value, ok := rawMount["bind_options"]; ok { + if len(value.([]interface{})) > 0 { + mountInstance.BindOptions = &mount.BindOptions{} + for _, rawBindOptions := range value.([]interface{}) { + rawBindOptions := rawBindOptions.(map[string]interface{}) + if value, ok := rawBindOptions["propagation"]; ok { + mountInstance.BindOptions.Propagation = mount.Propagation(value.(string)) + } + } + } + } + } else if mountType == mount.TypeVolume { + if value, ok := rawMount["volume_options"]; ok { + if len(value.([]interface{})) > 0 { + mountInstance.VolumeOptions = &mount.VolumeOptions{} + for _, rawVolumeOptions := range value.([]interface{}) { + rawVolumeOptions := rawVolumeOptions.(map[string]interface{}) + if value, ok := rawVolumeOptions["no_copy"]; ok { + mountInstance.VolumeOptions.NoCopy = value.(bool) + } + if value, ok := rawVolumeOptions["labels"]; ok { + mountInstance.VolumeOptions.Labels = mapTypeMapValsToString(value.(map[string]interface{})) + } + // driver_name and driver_options are flattened here because it is not possible to nest maps + if value, ok := rawVolumeOptions["driver_name"]; ok { + if mountInstance.VolumeOptions.DriverConfig == nil { + mountInstance.VolumeOptions.DriverConfig = &mount.Driver{} + } + mountInstance.VolumeOptions.DriverConfig.Name = value.(string) + } + if value, ok := rawVolumeOptions["driver_options"]; ok { + if mountInstance.VolumeOptions.DriverConfig == nil { + mountInstance.VolumeOptions.DriverConfig = &mount.Driver{} + } + mountInstance.VolumeOptions.DriverConfig.Options = mapTypeMapValsToString(value.(map[string]interface{})) + } + } + } + } + } else if mountType == mount.TypeTmpfs { + if value, ok := rawMount["tmpfs_options"]; ok { + if len(value.([]interface{})) > 0 { + mountInstance.TmpfsOptions = &mount.TmpfsOptions{} + for _, rawTmpfsOptions := range value.([]interface{}) { + rawTmpfsOptions := rawTmpfsOptions.(map[string]interface{}) + if value, ok := rawTmpfsOptions["size_bytes"]; ok { + mountInstance.TmpfsOptions.SizeBytes = int64(value.(int)) + } + if value, ok := rawTmpfsOptions["mode"]; ok { + mountInstance.TmpfsOptions.Mode = os.FileMode(value.(int)) + } + } + } + } + } + + mounts = append(mounts, mountInstance) + } + + containerSpec.Mounts = mounts + } + if value, ok := rawContainerSpec["stop_signal"]; ok { + containerSpec.StopSignal = value.(string) + } + if value, ok := rawContainerSpec["stop_grace_period"]; ok { + parsed, _ := time.ParseDuration(value.(string)) + containerSpec.StopGracePeriod = &parsed + } + if value, ok := rawContainerSpec["healthcheck"]; ok
{ + containerSpec.Healthcheck = &container.HealthConfig{} + if len(value.([]interface{})) > 0 { + for _, rawHealthCheck := range value.([]interface{}) { + rawHealthCheck := rawHealthCheck.(map[string]interface{}) + if testCommand, ok := rawHealthCheck["test"]; ok { + containerSpec.Healthcheck.Test = stringListToStringSlice(testCommand.([]interface{})) + } + if rawInterval, ok := rawHealthCheck["interval"]; ok { + containerSpec.Healthcheck.Interval, _ = time.ParseDuration(rawInterval.(string)) + } + if rawTimeout, ok := rawHealthCheck["timeout"]; ok { + containerSpec.Healthcheck.Timeout, _ = time.ParseDuration(rawTimeout.(string)) + } + if rawStartPeriod, ok := rawHealthCheck["start_period"]; ok { + containerSpec.Healthcheck.StartPeriod, _ = time.ParseDuration(rawStartPeriod.(string)) + } + if rawRetries, ok := rawHealthCheck["retries"]; ok { + containerSpec.Healthcheck.Retries, _ = rawRetries.(int) + } + } + } + } + if value, ok := rawContainerSpec["hosts"]; ok { + containerSpec.Hosts = extraHostsSetToDockerExtraHosts(value.(*schema.Set)) + } + if value, ok := rawContainerSpec["dns_config"]; ok { + containerSpec.DNSConfig = &swarm.DNSConfig{} + if len(value.([]interface{})) > 0 { + for _, rawDNSConfig := range value.([]interface{}) { + if rawDNSConfig != nil { + rawDNSConfig := rawDNSConfig.(map[string]interface{}) + if nameservers, ok := rawDNSConfig["nameservers"]; ok { + containerSpec.DNSConfig.Nameservers = stringListToStringSlice(nameservers.([]interface{})) + } + if search, ok := rawDNSConfig["search"]; ok { + containerSpec.DNSConfig.Search = stringListToStringSlice(search.([]interface{})) + } + if options, ok := rawDNSConfig["options"]; ok { + containerSpec.DNSConfig.Options = stringListToStringSlice(options.([]interface{})) + } + } + } + } + } + if value, ok := rawContainerSpec["secrets"]; ok { + secrets := []*swarm.SecretReference{} + + for _, rawSecret := range value.(*schema.Set).List() { + rawSecret := rawSecret.(map[string]interface{}) + secret := swarm.SecretReference{ + SecretID: rawSecret["secret_id"].(string), + File: &swarm.SecretReferenceFileTarget{ + Name: rawSecret["file_name"].(string), + UID: "0", + GID: "0", + Mode: os.FileMode(0444), + }, + } + if value, ok := rawSecret["secret_name"]; ok { + secret.SecretName = value.(string) + } + secrets = append(secrets, &secret) + } + containerSpec.Secrets = secrets + } + if value, ok := rawContainerSpec["configs"]; ok { + configs := []*swarm.ConfigReference{} + + for _, rawConfig := range value.(*schema.Set).List() { + rawConfig := rawConfig.(map[string]interface{}) + config := swarm.ConfigReference{ + ConfigID: rawConfig["config_id"].(string), + File: &swarm.ConfigReferenceFileTarget{ + Name: rawConfig["file_name"].(string), + UID: "0", + GID: "0", + Mode: os.FileMode(0444), + }, + } + if value, ok := rawConfig["config_name"]; ok { + config.ConfigName = value.(string) + } + configs = append(configs, &config) + } + containerSpec.Configs = configs + } + } + } + + return &containerSpec, nil +} + +// createResources creates the resource requirements for the service +func createResources(v interface{}) (*swarm.ResourceRequirements, error) { + resources := swarm.ResourceRequirements{} + if len(v.([]interface{})) > 0 { + for _, rawResourcesSpec := range v.([]interface{}) { + if rawResourcesSpec != nil { + rawResourcesSpec := rawResourcesSpec.(map[string]interface{}) + if value, ok := rawResourcesSpec["limits"]; ok { + if len(value.([]interface{})) > 0 { + resources.Limits = &swarm.Resources{} + for _, rawLimitsSpec := range
value.([]interface{}) { + rawLimitsSpec := rawLimitsSpec.(map[string]interface{}) + if value, ok := rawLimitsSpec["nano_cpus"]; ok { + resources.Limits.NanoCPUs = int64(value.(int)) + } + if value, ok := rawLimitsSpec["memory_bytes"]; ok { + resources.Limits.MemoryBytes = int64(value.(int)) + } + if value, ok := rawLimitsSpec["generic_resources"]; ok { + resources.Limits.GenericResources, _ = createGenericResources(value) + } + } + } + } + if value, ok := rawResourcesSpec["reservation"]; ok { + if len(value.([]interface{})) > 0 { + resources.Reservations = &swarm.Resources{} + for _, rawReservationSpec := range value.([]interface{}) { + rawReservationSpec := rawReservationSpec.(map[string]interface{}) + if value, ok := rawReservationSpec["nano_cpus"]; ok { + resources.Reservations.NanoCPUs = int64(value.(int)) + } + if value, ok := rawReservationSpec["memory_bytes"]; ok { + resources.Reservations.MemoryBytes = int64(value.(int)) + } + if value, ok := rawReservationSpec["generic_resources"]; ok { + resources.Reservations.GenericResources, _ = createGenericResources(value) + } + } + } + } + } + } + } + return &resources, nil +} + +// createGenericResources creates generic resources for a container +func createGenericResources(value interface{}) ([]swarm.GenericResource, error) { + genericResources := make([]swarm.GenericResource, 0) + if len(value.([]interface{})) > 0 { + for _, rawGenericResource := range value.([]interface{}) { + rawGenericResource := rawGenericResource.(map[string]interface{}) + + if rawNamedResources, ok := rawGenericResource["named_resources_spec"]; ok { + for _, rawNamedResource := range rawNamedResources.(*schema.Set).List() { + namedGenericResource := &swarm.NamedGenericResource{} + parts := strings.Split(rawNamedResource.(string), "=") + namedGenericResource.Kind = parts[0] + namedGenericResource.Value = parts[1] + + genericResource := swarm.GenericResource{} + genericResource.NamedResourceSpec = namedGenericResource + genericResources = append(genericResources, genericResource) + } + } + if rawDiscreteResources, ok := rawGenericResource["discrete_resources_spec"]; ok { + for _, rawDiscreteResource := range rawDiscreteResources.(*schema.Set).List() { + discreteGenericResource := &swarm.DiscreteGenericResource{} + parts := strings.Split(rawDiscreteResource.(string), "=") + discreteGenericResource.Kind = parts[0] + discreteGenericResource.Value, _ = strconv.ParseInt(parts[1], 10, 64) + + genericResource := swarm.GenericResource{} + genericResource.DiscreteResourceSpec = discreteGenericResource + genericResources = append(genericResources, genericResource) + } + } + } + } + return genericResources, nil +} + +// createRestartPolicy creates the restart policy of the service +func createRestartPolicy(v interface{}) (*swarm.RestartPolicy, error) { + restartPolicy := swarm.RestartPolicy{} + rawRestartPolicy := v.(map[string]interface{}) + + if v, ok := rawRestartPolicy["condition"]; ok { + restartPolicy.Condition = swarm.RestartPolicyCondition(v.(string)) + } + if v, ok := rawRestartPolicy["delay"]; ok { + parsed, _ := time.ParseDuration(v.(string)) + restartPolicy.Delay = &parsed + } + if v, ok := rawRestartPolicy["max_attempts"]; ok { + parsed, _ := strconv.ParseInt(v.(string), 10, 64) + mapped := uint64(parsed) + restartPolicy.MaxAttempts = &mapped + } + if v, ok := rawRestartPolicy["window"]; ok { + parsed, _ := time.ParseDuration(v.(string)) + restartPolicy.Window = &parsed + } + return &restartPolicy, nil +} + +// createPlacement creates the
placement strategy for the service +func createPlacement(v interface{}) (*swarm.Placement, error) { + placement := swarm.Placement{} + if len(v.([]interface{})) > 0 { + for _, rawPlacement := range v.([]interface{}) { + if rawPlacement != nil { + rawPlacement := rawPlacement.(map[string]interface{}) + if v, ok := rawPlacement["constraints"]; ok { + placement.Constraints = stringSetToStringSlice(v.(*schema.Set)) + } + if v, ok := rawPlacement["prefs"]; ok { + placement.Preferences = stringSetToPlacementPrefs(v.(*schema.Set)) + } + if v, ok := rawPlacement["platforms"]; ok { + placement.Platforms = mapSetToPlacementPlatforms(v.(*schema.Set)) + } + } + } + } + + return &placement, nil +} + +// createServiceNetworks creates the networks the service will be attached to +func createServiceNetworks(v interface{}) ([]swarm.NetworkAttachmentConfig, error) { + networks := []swarm.NetworkAttachmentConfig{} + if len(v.(*schema.Set).List()) > 0 { + for _, rawNetwork := range v.(*schema.Set).List() { + network := swarm.NetworkAttachmentConfig{ + Target: rawNetwork.(string), + } + networks = append(networks, network) + } + } + return networks, nil +} + +// createLogDriver creates the log driver for the service +func createLogDriver(v interface{}) (*swarm.Driver, error) { + logDriver := swarm.Driver{} + if len(v.([]interface{})) > 0 { + for _, rawLogging := range v.([]interface{}) { + rawLogging := rawLogging.(map[string]interface{}) + if rawName, ok := rawLogging["name"]; ok { + logDriver.Name = rawName.(string) + } + if rawOptions, ok := rawLogging["options"]; ok { + logDriver.Options = mapTypeMapValsToString(rawOptions.(map[string]interface{})) + } + return &logDriver, nil + } + } + return nil, nil +} + +// == end taskSpec + +// createServiceMode creates the mode the service will run in +func createServiceMode(d *schema.ResourceData) (swarm.ServiceMode, error) { + serviceMode := swarm.ServiceMode{} + if v, ok := d.GetOk("mode"); ok { + // because it's a list + if len(v.([]interface{})) > 0 { + for _, rawMode := range v.([]interface{}) { + // with a map + rawMode := rawMode.(map[string]interface{}) + + if rawReplicatedMode, replModeOk := rawMode["replicated"]; replModeOk { + // with a list + if len(rawReplicatedMode.([]interface{})) > 0 { + for _, rawReplicatedModeInt := range rawReplicatedMode.([]interface{}) { + // which is a map + rawReplicatedModeMap := rawReplicatedModeInt.(map[string]interface{}) + log.Printf("[INFO] Setting service mode to 'replicated'") + serviceMode.Replicated = &swarm.ReplicatedService{} + if testReplicas, testReplicasOk := rawReplicatedModeMap["replicas"]; testReplicasOk { + log.Printf("[INFO] Setting %v replicas", testReplicas) + replicas := uint64(testReplicas.(int)) + serviceMode.Replicated.Replicas = &replicas + } + } + } + } + if rawGlobalMode, globalModeOk := rawMode["global"]; globalModeOk && rawGlobalMode.(bool) { + log.Printf("[INFO] Setting service mode to 'global'") + serviceMode.Global = &swarm.GlobalService{} + } + } + } + } + return serviceMode, nil +} + +// createServiceUpdateConfig creates the service update config +func createServiceUpdateConfig(d *schema.ResourceData) (*swarm.UpdateConfig, error) { + if v, ok := d.GetOk("update_config"); ok { + return createUpdateOrRollbackConfig(v.([]interface{})) + } + return nil, nil +} + +// createServiceRollbackConfig creates the service rollback config +func createServiceRollbackConfig(d *schema.ResourceData) (*swarm.UpdateConfig, error) { + if v, ok := d.GetOk("rollback_config"); ok { + return
createUpdateOrRollbackConfig(v.([]interface{})) + } + return nil, nil +} + +// == start endpointSpec +// createServiceEndpointSpec creates the spec for the endpoint +func createServiceEndpointSpec(d *schema.ResourceData) (*swarm.EndpointSpec, error) { + endpointSpec := swarm.EndpointSpec{} + if v, ok := d.GetOk("endpoint_spec"); ok { + if len(v.([]interface{})) > 0 { + for _, rawEndpointSpec := range v.([]interface{}) { + if rawEndpointSpec != nil { + rawEndpointSpec := rawEndpointSpec.(map[string]interface{}) + if value, ok := rawEndpointSpec["mode"]; ok { + endpointSpec.Mode = swarm.ResolutionMode(value.(string)) + } + if value, ok := rawEndpointSpec["ports"]; ok { + endpointSpec.Ports = portSetToServicePorts(value) + } + } + } + } + } + + return &endpointSpec, nil +} + +// portSetToServicePorts maps a set of ports to portConfig +func portSetToServicePorts(v interface{}) []swarm.PortConfig { + retPortConfigs := []swarm.PortConfig{} + if len(v.(*schema.Set).List()) > 0 { + for _, portInt := range v.(*schema.Set).List() { + portConfig := swarm.PortConfig{} + rawPort := portInt.(map[string]interface{}) + if value, ok := rawPort["name"]; ok { + portConfig.Name = value.(string) + } + if value, ok := rawPort["protocol"]; ok { + portConfig.Protocol = swarm.PortConfigProtocol(value.(string)) + } + if value, ok := rawPort["target_port"]; ok { + portConfig.TargetPort = uint32(value.(int)) + } + if externalPort, ok := rawPort["published_port"]; ok { + portConfig.PublishedPort = uint32(externalPort.(int)) + } else { + // if the published port is not specified, fall back to the target port + portConfig.PublishedPort = portConfig.TargetPort + } + if value, ok := rawPort["publish_mode"]; ok { + portConfig.PublishMode = swarm.PortConfigPublishMode(value.(string)) + } + + retPortConfigs = append(retPortConfigs, portConfig) + } + } + + return retPortConfigs +} + +// == end endpointSpec + +// createUpdateOrRollbackConfig creates the configuration for an update or rollback +func createUpdateOrRollbackConfig(config []interface{}) (*swarm.UpdateConfig, error) { + updateConfig := swarm.UpdateConfig{} + if len(config) > 0 { + sc := config[0].(map[string]interface{}) + if v, ok := sc["parallelism"]; ok { + updateConfig.Parallelism = uint64(v.(int)) + } + if v, ok := sc["delay"]; ok { + updateConfig.Delay, _ = time.ParseDuration(v.(string)) + } + if v, ok := sc["failure_action"]; ok { + updateConfig.FailureAction = v.(string) + } + if v, ok := sc["monitor"]; ok { + updateConfig.Monitor, _ = time.ParseDuration(v.(string)) + } + if v, ok := sc["max_failure_ratio"]; ok { + value, _ := strconv.ParseFloat(v.(string), 64) + updateConfig.MaxFailureRatio = float32(value) + } + if v, ok := sc["order"]; ok { + updateConfig.Order = v.(string) + } + } + + return &updateConfig, nil +} + +// createConvergeConfig creates the configuration for converging +func createConvergeConfig(config []interface{}) *convergeConfig { + plainConvergeConfig := &convergeConfig{} + if len(config) > 0 { + for _, rawConvergeConfig := range config { + rawConvergeConfig := rawConvergeConfig.(map[string]interface{}) + if delay, ok := rawConvergeConfig["delay"]; ok { + plainConvergeConfig.delay, _ = time.ParseDuration(delay.(string)) + } + if timeout, ok := rawConvergeConfig["timeout"]; ok { + plainConvergeConfig.timeoutRaw, _ = timeout.(string) + plainConvergeConfig.timeout, _ = time.ParseDuration(timeout.(string)) + } + } + } + return plainConvergeConfig +} + +// authToServiceAuth maps the auth to AuthConfiguration +func authToServiceAuth(auth
map[string]interface{}) dc.AuthConfiguration { + if auth["username"] != nil && len(auth["username"].(string)) > 0 && auth["password"] != nil && len(auth["password"].(string)) > 0 { + return dc.AuthConfiguration{ + Username: auth["username"].(string), + Password: auth["password"].(string), + ServerAddress: auth["server_address"].(string), + } + } + + return dc.AuthConfiguration{} +} + +// fromRegistryAuth extracts the desired AuthConfiguration for the given image +func fromRegistryAuth(image string, configs map[string]dc.AuthConfiguration) dc.AuthConfiguration { + // Remove normalized prefixes to simplify substring matching + image = strings.Replace(strings.Replace(image, "http://", "", 1), "https://", "", 1) + // Get the registry with optional port + lastBin := strings.Index(image, "/") + // Image names without a slash, like 'alpine:3.1', carry no registry part, + // so only names with a registry prefix are looked up + if lastBin != -1 { + serverAddress := image[0:lastBin] + if fromRegistryAuth, ok := configs[normalizeRegistryAddress(serverAddress)]; ok { + return fromRegistryAuth + } + } + + return dc.AuthConfiguration{} +} + +// stringSetToPlacementPrefs maps a string set to PlacementPreference +func stringSetToPlacementPrefs(stringSet *schema.Set) []swarm.PlacementPreference { + ret := []swarm.PlacementPreference{} + if stringSet == nil { + return ret + } + for _, envVal := range stringSet.List() { + ret = append(ret, swarm.PlacementPreference{ + Spread: &swarm.SpreadOver{ + SpreadDescriptor: envVal.(string), + }, + }) + } + return ret +} + +// mapSetToPlacementPlatforms maps a string set to Platform +func mapSetToPlacementPlatforms(stringSet *schema.Set) []swarm.Platform { + ret := []swarm.Platform{} + if stringSet == nil { + return ret + } + + for _, rawPlatform := range stringSet.List() { + rawPlatform := rawPlatform.(map[string]interface{}) + ret = append(ret, swarm.Platform{ + Architecture: rawPlatform["architecture"].(string), + OS: rawPlatform["os"].(string), + }) + } + + return ret +} + +//////// States + +// numberedStates are the task states sorted in ascending order, +// meaning they appear internally in this order in the state machine +var ( + numberedStates = map[swarm.TaskState]int64{ + swarm.TaskStateNew: 1, + swarm.TaskStateAllocated: 2, + swarm.TaskStatePending: 3, + swarm.TaskStateAssigned: 4, + swarm.TaskStateAccepted: 5, + swarm.TaskStatePreparing: 6, + swarm.TaskStateReady: 7, + swarm.TaskStateStarting: 8, + swarm.TaskStateRunning: 9, + + // The following states are not actually shown in progress + // output, but are used internally for ordering.
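+ // terminalState relies on this ordering: every state from here on maps to a + // number greater than running (9), so e.g. terminalState(swarm.TaskStateFailed) + // returns true because 12 > 9.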
+ swarm.TaskStateComplete: 10, + swarm.TaskStateShutdown: 11, + swarm.TaskStateFailed: 12, + swarm.TaskStateRejected: 13, + } + + longestState int +) + +// serviceCreatePendingStates are the pending states for the creation of a service +var serviceCreatePendingStates = []string{ + "new", + "allocated", + "pending", + "assigned", + "accepted", + "preparing", + "ready", + "starting", + "creating", + "paused", +} + +// serviceUpdatePendingStates are the pending states for the update of a service +var serviceUpdatePendingStates = []string{ + "creating", + "updating", +} diff --git a/docker/resource_docker_service_test.go b/docker/resource_docker_service_test.go new file mode 100644 index 00000000..8ba14c35 --- /dev/null +++ b/docker/resource_docker_service_test.go @@ -0,0 +1,3488 @@ +package docker + +import ( + "fmt" + "os" + "regexp" + "testing" + + dc "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +// ---------------------------------------- +// ----------- UNIT TESTS ----------- +// ---------------------------------------- + +func TestDockerSecretFromRegistryAuth_basic(t *testing.T) { + authConfigs := make(map[string]dc.AuthConfiguration) + authConfigs["https://repo.my-company.com:8787"] = dc.AuthConfiguration{ + Username: "myuser", + Password: "mypass", + Email: "", + ServerAddress: "repo.my-company.com:8787", + } + + foundAuthConfig := fromRegistryAuth("repo.my-company.com:8787/my_image", authConfigs) + checkAttribute(t, "Username", foundAuthConfig.Username, "myuser") + checkAttribute(t, "Password", foundAuthConfig.Password, "mypass") + checkAttribute(t, "Email", foundAuthConfig.Email, "") + checkAttribute(t, "ServerAddress", foundAuthConfig.ServerAddress, "repo.my-company.com:8787") +} + +func TestDockerSecretFromRegistryAuth_multiple(t *testing.T) { + authConfigs := make(map[string]dc.AuthConfiguration) + authConfigs["https://repo.my-company.com:8787"] = dc.AuthConfiguration{ + Username: "myuser", + Password: "mypass", + Email: "", + ServerAddress: "repo.my-company.com:8787", + } + authConfigs["https://nexus.my-fancy-company.com"] = dc.AuthConfiguration{ + Username: "myuser33", + Password: "mypass123", + Email: "test@example.com", + ServerAddress: "nexus.my-fancy-company.com", + } + + foundAuthConfig := fromRegistryAuth("nexus.my-fancy-company.com/the_image", authConfigs) + checkAttribute(t, "Username", foundAuthConfig.Username, "myuser33") + checkAttribute(t, "Password", foundAuthConfig.Password, "mypass123") + checkAttribute(t, "Email", foundAuthConfig.Email, "test@example.com") + checkAttribute(t, "ServerAddress", foundAuthConfig.ServerAddress, "nexus.my-fancy-company.com") + + foundAuthConfig = fromRegistryAuth("alpine:3.1", authConfigs) + checkAttribute(t, "Username", foundAuthConfig.Username, "") + checkAttribute(t, "Password", foundAuthConfig.Password, "") + checkAttribute(t, "Email", foundAuthConfig.Email, "") + checkAttribute(t, "ServerAddress", foundAuthConfig.ServerAddress, "") +} + +func checkAttribute(t *testing.T, name, actual, expected string) error { + if actual != expected { + t.Fatalf("bad authconfig attribute for '%q'\nExpected: %s\n Got: %s", name, expected, actual) + } + + return nil +} + +// ---------------------------------------- +// ----------- ACCEPTANCE TESTS ----------- +// ---------------------------------------- +// Fire and Forget +var serviceIDRegex = regexp.MustCompile(`[A-Za-z0-9_\+\.-]+`) + +func TestAccDockerService_minimal(t *testing.T) { + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-basic" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + } + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-basic"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + ), + }, + }, + }) +} +func TestAccDockerService_full(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_volume" "test_volume" { + name = "tftest-volume" + } + + resource "docker_config" "service_config" { + name = "tftest-full-myconfig" + data = "ewogICJwcmVmaXgiOiAiMTIzIgp9" + } + + resource "docker_secret" "service_secret" { + name = "tftest-mysecret" + data = "ewogICJrZXkiOiAiUVdFUlRZIgp9" + } + + resource "docker_network" "test_network" { + name = "tftest-network" + driver = "overlay" + } + + resource "docker_service" "foo" { + name = "tftest-service-basic" + + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + + labels { + foo = "bar" + } + + command = ["ls"] + args = ["-las"] + hostname = "my-fancy-service" + + env { + MYFOO = "BAR" + } + + dir = "/root" + user = "root" + groups = ["docker", "foogroup"] + + privileges { + se_linux_context { + disable = true + user = "user-label" + role = "role-label" + type = "type-label" + level = "level-label" + } + } + + read_only = true + + mounts = [ + { + target = "/mount/test" + source = "${docker_volume.test_volume.name}" + type = "volume" + read_only = true + + volume_options { + no_copy = true + labels { + foo = "bar" + } + driver_name = "random-driver" + driver_options { + op1 = "val1" + } + } + }, + ] + + stop_signal = "SIGTERM" + stop_grace_period = "10s" + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "5s" + timeout = "2s" + retries = 4 + } + + hosts { + host = "testhost" + ip = "10.0.1.0" + } + + dns_config { + nameservers = ["8.8.8.8"] + search = ["example.org"] + options = ["timeout:3"] + } + + secrets = [ + { + secret_id = "${docker_secret.service_secret.id}" + secret_name = "${docker_secret.service_secret.name}" + file_name = "/secrets.json" + }, + ] + + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + } + + resources { + limits { + nano_cpus = 1000000 + memory_bytes = 536870912 + } + } + + restart_policy { + condition = "on-failure" + delay = "3s" + max_attempts = 4 + window = "10s" + } + + placement { + constraints = [ + "node.role==manager", + ] + + prefs = [ + "spread=node.role.manager", + ] + } + + force_update = 0 + runtime = "container" + networks = ["${docker_network.test_network.id}"] + + log_driver { + name = "json-file" + + options { + max-size = "10m" + max-file = "3" + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 2 + delay = "10s" + failure_action = "pause" + monitor = "5s" + max_failure_ratio = "0.1" + order = "start-first" + } + + rollback_config { + parallelism = 2 + delay = "5ms" 
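+ # delay and monitor are Go duration strings handed to time.ParseDuration + # by createUpdateOrRollbackConfig, so "5ms", "10s" and "10h" are all valid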
+ failure_action = "pause" + monitor = "10h" + max_failure_ratio = "0.9" + order = "stop-first" + } + + endpoint_spec { + mode = "vip" + + ports { + name = "random" + protocol = "tcp" + target_port = "8080" + published_port = "8080" + publish_mode = "ingress" + } + } + } + + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-basic"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.labels.foo", "bar"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.command.0", "ls"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.args.0", "-las"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.hostname", "my-fancy-service"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.env.MYFOO", "BAR"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.dir", "/root"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.user", "root"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.groups.0", "docker"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.groups.1", "foogroup"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.privileges.0.se_linux_context.0.disable", "true"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.privileges.0.se_linux_context.0.user", "user-label"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.privileges.0.se_linux_context.0.role", "role-label"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.privileges.0.se_linux_context.0.type", "type-label"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.privileges.0.se_linux_context.0.level", "level-label"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.read_only", "true"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.816078185.target", "/mount/test"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.816078185.source", "tftest-volume"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.816078185.type", "volume"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.816078185.read_only", "true"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.816078185.volume_options.0.no_copy", "true"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.816078185.volume_options.0.labels.foo", "bar"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.816078185.volume_options.0.driver_name", "random-driver"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.816078185.volume_options.0.driver_options.op1", "val1"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.stop_signal", 
"SIGTERM"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.stop_grace_period", "10s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "5s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "2s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.hosts.1878413705.host", "testhost"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.hosts.1878413705.ip", "10.0.1.0"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.dns_config.0.nameservers.0", "8.8.8.8"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.dns_config.0.search.0", "example.org"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.dns_config.0.options.0", "timeout:3"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.configs.#", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.secrets.#", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.resources.0.limits.0.nano_cpus", "1000000"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.resources.0.limits.0.memory_bytes", "536870912"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.condition", "on-failure"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.delay", "3s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.max_attempts", "4"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.window", "10s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.placement.0.constraints.4248571116", "node.role==manager"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.placement.0.prefs.1751004438", "spread=node.role.manager"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.force_update", "0"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.networks.#", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.name", "json-file"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.options.max-file", "3"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.options.max-size", "10m"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "10s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + 
resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "5s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "rollback_config.0.parallelism", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "rollback_config.0.delay", "5ms"), + resource.TestCheckResourceAttr("docker_service.foo", "rollback_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "rollback_config.0.monitor", "10h"), + resource.TestCheckResourceAttr("docker_service.foo", "rollback_config.0.max_failure_ratio", "0.9"), + resource.TestCheckResourceAttr("docker_service.foo", "rollback_config.0.order", "stop-first"), + ), + }, + }, + }) +} + +func TestAccDockerService_partialReplicated(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-basic" + task_spec { + container_spec = { + image = "127.0.0.1:15000/tftest-service:v1" + } + } + mode {} + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-basic"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "1"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-basic" + task_spec { + container_spec = { + image = "127.0.0.1:15000/tftest-service:v1" + } + } + mode { + replicated {} + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-basic"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "1"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-basic" + task_spec { + container_spec = { + image = "127.0.0.1:15000/tftest-service:v1" + } + } + mode { + replicated { + replicas = 2 + } + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-basic"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + ), + }, + }, + }) +} + +func TestAccDockerService_basicGlobal(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-basic" + task_spec { + container_spec = { + image = "127.0.0.1:15000/tftest-service:v1" + } + } + mode { + global = true + } + } + `, + Check: 
resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-basic"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.global", "true"), + ), + }, + }, + }) +} + +func TestAccDockerService_GlobalAndReplicated(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-basic" + task_spec { + container_spec = { + image = "127.0.0.1:15000/tftest-service:v1" + } + } + mode { + replicated { + replicas = 2 + } + global = true + } + } + `, + ExpectError: regexp.MustCompile(`.*conflicts with.*`), + }, + }, + }) +} +func TestAccDockerService_GlobalWithConvergeConfig(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-basic" + task_spec { + container_spec = { + image = "127.0.0.1:15000/tftest-service:v1" + } + } + mode { + global = true + } + converge_config { + delay = "7s" + timeout = "10s" + } + } + `, + ExpectError: regexp.MustCompile(`.*conflicts with.*`), + }, + }, + }) +} + +func TestAccDockerService_updateImage(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-fnf-service-up-image" + task_spec { + container_spec = { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 2 + } + } + } + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-fnf-service-up-image"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.stop_grace_period", "10s"), + 
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-fnf-service-up-image" + task_spec { + container_spec = { + image = "127.0.0.1:15000/tftest-service:v2" + stop_grace_period = "10s" + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 2 + } + } + } + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-fnf-service-up-image"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v2"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.stop_grace_period", "10s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + }, + }) +} + +func 
TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseReplicas(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiMTIzIgp9" + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-fnf-service-up-crihiadr" + + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + start_period = "0s" + retries = 2 + } + + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8081" + } + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-fnf-service-up-crihiadr"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiNTY3Igp9" # UPDATED to prefix: 567 + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-fnf-service-up-crihiadr" + + task_spec { + container_spec { + image = 
"127.0.0.1:15000/tftest-service:v2" + + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "2s" + timeout = "800ms" + retries = 4 + } + + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 6 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports = [ + { + target_port = "8080" + published_port = "8081" + }, + { + target_port = "8080" + published_port = "8082" + } + ] + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-fnf-service-up-crihiadr"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v2"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "6"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "2s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "800ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiNTY3Igp9" + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-fnf-service-up-crihiadr" + + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v2" + + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "2s" + timeout = "800ms" + retries = 4 + } + + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 3 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec 
{ + ports = [ + { + target_port = "8080" + published_port = "8081" + }, + { + target_port = "8080" + published_port = "8082" + } + ] + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-fnf-service-up-crihiadr"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v2"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "3"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "2s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "800ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + }, + }) +} + +// Converging tests +func TestAccDockerService_nonExistingPrivateImageConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-privateimagedoesnotexist" + task_spec { + container_spec = { + image = "127.0.0.1:15000/idonoexist:latest" + } + } + + mode { + replicated { + replicas = 2 + } + } + + converge_config { + delay = "7s" + timeout = "20s" + } + } + `, + ExpectError: regexp.MustCompile(`.*did not converge after.*`), + Check: resource.ComposeTestCheckFunc( + isServiceRemoved("tftest-service-privateimagedoesnotexist"), + ), + }, + }, + }) +} +func TestAccDockerService_nonExistingPublicImageConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-publicimagedoesnotexist" + task_spec { + container_spec = { + image = "stovogel/blablabla:part5" + } + } + + mode { + replicated { + replicas = 2 + } + } + + converge_config { + delay = "7s" + timeout = "10s" + } + } + `, + ExpectError: regexp.MustCompile(`.*did not converge after.*`), + Check: resource.ComposeTestCheckFunc( + isServiceRemoved("tftest-service-publicimagedoesnotexist"), + ), + }, + }, + }) +} + +func TestAccDockerService_basicConvergeAndStopGracefully(t 
*testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-basic-converge" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "5s" + timeout = "2s" + start_period = "0s" + retries = 4 + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + endpoint_spec { + ports { + target_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-basic-converge"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + ), + }, + }, + }) +} +func TestAccDockerService_updateFailsAndRollbackConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-updateFailsAndRollbackConverge" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "5s" + timeout = "2s" + start_period = "0s" + retries = 4 + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "5s" + failure_action = "rollback" + monitor = "10s" + max_failure_ratio = "0.0" + order = "stop-first" + } + + rollback_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "4s" + max_failure_ratio = "0.0" + order = "stop-first" + } + + endpoint_spec { + mode = "vip" + ports { + name = "random" + protocol = "tcp" + target_port = "8080" + published_port = "8080" + publish_mode = "ingress" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-updateFailsAndRollbackConverge"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-updateFailsAndRollbackConverge" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v3" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "5s" + timeout = "2s" + start_period = "0s" + retries = 4 + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "5s" + failure_action = "rollback" + monitor = "10s" + max_failure_ratio = "0.0" + order = "stop-first" + } + + rollback_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "4s" + max_failure_ratio = "0.0" + order = "stop-first" + } + + 
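# Note (added; assumes the v3 fixture image is unhealthy): this update is
+ # meant to fail its healthcheck so that failure_action = "rollback" restores
+ # v1; the ExpectError pattern and the v1 image assertion below verify it.
+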
endpoint_spec { + mode = "vip" + ports { + name = "random" + protocol = "tcp" + target_port = "8080" + published_port = "8080" + publish_mode = "ingress" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + ExpectError: regexp.MustCompile(`.*rollback completed.*`), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-updateFailsAndRollbackConverge"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + ), + }, + }, + }) +} + +func TestAccDockerService_updateNetworksConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_network" "test_network" { + name = "tftest-network" + driver = "overlay" + } + + resource "docker_network" "test_network2" { + name = "tftest-network2" + driver = "overlay" + } + + resource "docker_service" "foo" { + name = "tftest-service-up-network" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + } + networks = ["${docker_network.test_network.id}"] + } + mode { + replicated { + replicas = 2 + } + } + + + endpoint_spec { + mode = "vip" + } + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-network"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.networks.#", "1"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_network" "test_network" { + name = "tftest-network" + driver = "overlay" + } + + resource "docker_network" "test_network2" { + name = "tftest-network2" + driver = "overlay" + } + + resource "docker_service" "foo" { + name = "tftest-service-up-network" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + } + networks = ["${docker_network.test_network2.id}"] + } + mode { + replicated { + replicas = 2 + } + } + + endpoint_spec { + mode = "vip" + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-network"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.networks.#", "1"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_network" "test_network" { + name = "tftest-network" + driver = "overlay" + } + + resource "docker_network" "test_network2" { + name = "tftest-network2" + driver = "overlay" + } + + resource "docker_service" 
"foo" { + name = "tftest-service-up-network" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + } + networks = [ + "${docker_network.test_network.id}", + "${docker_network.test_network2.id}" + ] + } + + mode { + replicated { + replicas = 2 + } + } + + endpoint_spec { + mode = "vip" + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-network"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.networks.#", "2"), + ), + }, + }, + }) +} +func TestAccDockerService_updateMountsConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_volume" "foo" { + name = "tftest-volume" + } + + resource "docker_volume" "foo2" { + name = "tftest-volume2" + } + + resource "docker_service" "foo" { + name = "tftest-service-up-mounts" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + mounts = [ + { + source = "${docker_volume.foo.name}" + target = "/mount/test" + type = "volume" + read_only = true + volume_options { + labels { + env = "dev" + terraform = "true" + } + } + } + ] + stop_grace_period = "10s" + } + } + mode { + replicated { + replicas = 2 + } + } + + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-mounts"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.#", "1"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_volume" "foo" { + name = "tftest-volume" + } + + resource "docker_volume" "foo2" { + name = "tftest-volume2" + } + + resource "docker_service" "foo" { + name = "tftest-service-up-mounts" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + mounts = [ + { + source = "${docker_volume.foo.name}" + target = "/mount/test" + type = "volume" + read_only = true + volume_options { + labels { + env = "dev" + terraform = "true" + } + } + }, + { + source = "${docker_volume.foo2.name}" + target = "/mount/test2" + type = "volume" + read_only = true + volume_options { + labels { + env = "dev" + terraform = "true" + } + } + } + ] + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 2 + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-mounts"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", 
"127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.mounts.#", "2"), + ), + }, + }, + }) +} +func TestAccDockerService_updateHostsConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-hosts" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + hosts = [ + { + host = "testhost" + ip = "10.0.1.0" + } + ] + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 2 + } + } + + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-hosts"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.hosts.#", "1"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-hosts" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + hosts = [ + { + host = "testhost2" + ip = "10.0.2.2" + } + ] + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 2 + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-hosts"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.hosts.#", "1"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-hosts" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + hosts = [ + { + host = "testhost" + ip = "10.0.1.0" + }, + { + host = "testhost2" + ip = "10.0.2.2" + } + ] + stop_grace_period = "10s" + } + } + mode { + replicated { + replicas = 2 + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-hosts"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.hosts.#", "2"), + ), + }, + }, + }) +} +func TestAccDockerService_updateLoggingConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ 
+ resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-logging" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + } + + log_driver { + name = "json-file" + + options { + max-size = "10m" + max-file = "3" + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-logging"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.name", "json-file"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.options.%", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.options.max-size", "10m"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.options.max-file", "3"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-logging" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + } + log_driver { + name = "json-file" + + options { + max-size = "15m" + max-file = "5" + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-logging"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.name", "json-file"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.options.%", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.options.max-size", "15m"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.options.max-file", "5"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-logging" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 2 + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-logging"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + ), + }, + }, + }) +} + +func TestAccDockerService_updateHealthcheckConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-healthcheck" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-healthcheck"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-healthcheck" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "2s" + timeout = "800ms" + retries = 2 + } + } + } + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-healthcheck"), + 
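// The updated spec raises interval to 2s and timeout to 800ms and drops
+ // retries to 2; the healthcheck assertions below pin those new values.
+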
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "2s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "800ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + }, + }) +} + +func TestAccDockerService_updateIncreaseReplicasConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-increase-replicas" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + } + } + + mode { + replicated { + replicas = 1 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-increase-replicas"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", 
"update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-increase-replicas" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + } + } + + mode { + replicated { + replicas = 3 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-increase-replicas"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "3"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + }, + }) +} +func TestAccDockerService_updateDecreaseReplicasConverge(t *testing.T) 
{ + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-decrease-replicas" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + } + } + + mode { + replicated { + replicas = 5 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-decrease-replicas"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "5"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-decrease-replicas" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + + stop_grace_period = "10s" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + } + } + + mode { + replicated { + replicas = 1 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", 
serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-decrease-replicas"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + }, + }) +} + +func TestAccDockerService_updateImageConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-image" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 2 + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-image"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", 
"update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-image" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v2" + stop_grace_period = "10s" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 2 + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.5" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-image"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v2"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.5"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + }, 
+ }) +} + +func TestAccDockerService_updateConfigConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiMTIzIgp9" + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-config" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.5" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "30s" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-config"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.5"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiNTY3Igp9" # UPDATED to prefix: 567 + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-config" + task_spec { + container_spec { + image = 
"127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "30s" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-config"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + }, + }) +} +func TestAccDockerService_updateConfigAndSecretConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiMTIzIgp9" + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_secret" "service_secret" { + name = "tftest-tftest-mysecret-${replace(timestamp(),":", ".")}" + data = "ewogICJrZXkiOiAiUVdFUlRZIgp9" + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-config-secret" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = 
"${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + + secrets = [ + { + secret_id = "${docker_secret.service_secret.id}" + secret_name = "${docker_secret.service_secret.name}" + file_name = "/secrets.json" + }, + ] + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + stop_grace_period = "10s" + } + } + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-config-secret"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.configs.#", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.secrets.#", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiNTY3Igp9" # UPDATED to prefix: 567 + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_secret" "service_secret" { + name = "tftest-tftest-mysecret-${replace(timestamp(),":", ".")}" + data = "ewogICJrZXkiOiAiUVdFUlRZIgp9" # UPDATED to YXCVB + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-config-secret" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + + configs = [ + { + config_id = 
"${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + + secrets = [ + { + secret_id = "${docker_secret.service_secret.id}" + secret_name = "${docker_secret.service_secret.name}" + file_name = "/secrets.json" + }, + ] + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + stop_grace_period = "10s" + } + } + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-config-secret"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.configs.#", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.secrets.#", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + }, + }) +} +func TestAccDockerService_updatePortConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-port" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 2 + } + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" 
+ failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8081" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-port"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_service" "foo" { + name = "tftest-service-up-port" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + stop_grace_period = "10s" + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 2 + } + } + } + + mode { + replicated { + replicas = 4 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports = [ + { + target_port = "8080" + published_port = "8081" + }, + { + target_port = "8080" + published_port = "8082" + } + ] + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-port"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "4"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", 
"update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + }, + }) +} +func TestAccDockerService_updateConfigReplicasImageAndHealthConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiMTIzIgp9" + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-crihc" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 2 + } + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.5" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8081" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-crihc"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.5"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", 
"task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiNTY3Igp9" # UPDATED to prefix: 567 + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-crihc" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v2" + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "2s" + timeout = "800ms" + retries = 4 + } + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 4 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.5" + order = "start-first" + } + + endpoint_spec { + ports = [ + { + target_port = "8080" + published_port = "8081" + }, + { + target_port = "8080" + published_port = "8082" + } + ] + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-crihc"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v2"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "4"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.5"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", 
"task_spec.0.container_spec.0.healthcheck.0.interval", "2s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "800ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + }, + }) +} +func TestAccDockerService_updateConfigAndDecreaseReplicasConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiMTIzIgp9" + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-config-dec-repl" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 5 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-config-dec-repl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "5"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_config" 
"service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiNTY3Igp9" # UPDATED to prefix: 567 + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-config-dec-repl" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v1" + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 4 + } + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 1 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.1" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8080" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-config-dec-repl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + }, + }) +} +func TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseReplicasConverge(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiMTIzIgp9" + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-crihiadr" + task_spec { + container_spec { + image = 
"127.0.0.1:15000/tftest-service:v1" + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "1s" + timeout = "500ms" + retries = 2 + } + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 2 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.5" + order = "start-first" + } + + endpoint_spec { + ports { + target_port = "8080" + published_port = "8081" + } + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-crihiadr"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v1"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "2"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.5"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "500ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "2"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiNTY3Igp9" # UPDATED to prefix: 567 + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-crihiadr" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v2" + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "2s" + timeout = "800ms" + retries = 4 + } + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 6 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.5" + order = "start-first" + } + + endpoint_spec { + 
ports = [ + { + target_port = "8080" + published_port = "8081" + }, + { + target_port = "8080" + published_port = "8082" + } + ] + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-crihiadr"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v2"), + resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "6"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.5"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "2s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "800ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + resource.TestStep{ + Config: ` + resource "docker_config" "service_config" { + name = "tftest-myconfig-${uuid()}" + data = "ewogICJwcmVmaXgiOiAiNTY3Igp9" + + lifecycle { + ignore_changes = ["name"] + create_before_destroy = true + } + } + + resource "docker_service" "foo" { + name = "tftest-service-up-crihiadr" + task_spec { + container_spec { + image = "127.0.0.1:15000/tftest-service:v2" + configs = [ + { + config_id = "${docker_config.service_config.id}" + config_name = "${docker_config.service_config.name}" + file_name = "/configs.json" + }, + ] + healthcheck { + test = ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval = "2s" + timeout = "800ms" + retries = 4 + } + stop_grace_period = "10s" + } + } + + mode { + replicated { + replicas = 3 + } + } + + update_config { + parallelism = 1 + delay = "1s" + failure_action = "pause" + monitor = "1s" + max_failure_ratio = "0.5" + order = "start-first" + } + + endpoint_spec { + ports = [ + { + target_port = "8080" + published_port = "8081" + }, + { + target_port = "8080" + published_port = "8082" + } + ] + } + + converge_config { + delay = "7s" + timeout = "3m" + } + } + `, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.foo", "name", "tftest-service-up-crihiadr"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.image", "127.0.0.1:15000/tftest-service:v2"), + 
resource.TestCheckResourceAttr("docker_service.foo", "mode.0.replicated.0.replicas", "3"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.parallelism", "1"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.delay", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.failure_action", "pause"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.monitor", "1s"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.max_failure_ratio", "0.5"), + resource.TestCheckResourceAttr("docker_service.foo", "update_config.0.order", "start-first"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.0", "CMD"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.1", "curl"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.2", "-f"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.test.3", "http://localhost:8080/health"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.interval", "2s"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.timeout", "800ms"), + resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.healthcheck.0.retries", "4"), + ), + }, + }, + }) +} + +func TestAccDockerService_privateConverge(t *testing.T) { + registry := os.Getenv("DOCKER_REGISTRY_ADDRESS") + image := os.Getenv("DOCKER_PRIVATE_IMAGE") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf(` + provider "docker" { + alias = "private" + registry_auth { + address = "%s" + } + } + + resource "docker_service" "bar" { + provider = "docker.private" + name = "tftest-service-bar" + task_spec { + container_spec { + image = "%s" + } + } + mode { + replicated { + replicas = 2 + } + } + } + `, registry, image), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("docker_service.bar", "id", serviceIDRegex), + resource.TestCheckResourceAttr("docker_service.bar", "name", "tftest-service-bar"), + resource.TestCheckResourceAttr("docker_service.bar", "task_spec.0.container_spec.0.image", image), + ), + }, + }, + }) +} + +// Helpers +func isServiceRemoved(serviceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*ProviderConfig).DockerClient + filter := make(map[string][]string) + filter["name"] = []string{serviceName} + services, err := client.ListServices(dc.ListServicesOptions{ + Filters: filter, + }) + if err != nil { + return fmt.Errorf("Error listing service for name %s: %v", serviceName, err) + } + length := len(services) + if length != 0 { + return fmt.Errorf("Service should be removed but is running: %s", serviceName) + } + + return nil + } +} diff --git a/docker/resource_docker_volume.go b/docker/resource_docker_volume.go index 2b88204d..b9db721b 100644 --- a/docker/resource_docker_volume.go +++ b/docker/resource_docker_volume.go @@ -3,6 +3,7 @@ package docker import ( "fmt" "log" + "strings" "time" dc "github.com/fsouza/go-dockerclient" @@ -95,24 +96,30 @@ func resourceDockerVolumeRead(d *schema.ResourceData, meta interface{}) error { func resourceDockerVolumeDelete(d 
*schema.ResourceData, meta interface{}) error { client := meta.(*ProviderConfig).DockerClient + // TODO catch error if removal is already in progress + fix with statefunc if err := client.RemoveVolume(d.Id()); err != nil && err != dc.ErrNoSuchVolume { if err == dc.ErrVolumeInUse { - loops := 50 + loops := 20 sleepTime := 1000 * time.Millisecond for i := loops; i > 0; i-- { if err = client.RemoveVolume(d.Id()); err != nil { - log.Printf("[INFO] Volume remove loop: %d of %d due to error: %s", loops-i+1, loops, err) if err == dc.ErrVolumeInUse { + log.Printf("[INFO] Volume remove loop: %d of %d due to error: %s", loops-i+1, loops, err) time.Sleep(sleepTime) continue } if err == dc.ErrNoSuchVolume { - break // it's removed + log.Printf("[INFO] Volume successfully removed") + d.SetId("") + return nil + } + if !strings.Contains(err.Error(), "is already in progress") { + // if it's not in use any more (so it's deleted successfully) and another error occurred + return fmt.Errorf("Error deleting volume %s: %s", d.Id(), err) } - // if it's not in use any more (so it's deleted successfully) and another error occurred - return fmt.Errorf("Error deleting volume %s: %s", d.Id(), err) } } + return fmt.Errorf("Error deleting volume %s: %s after %d tries", d.Id(), err, loops) } } diff --git a/docker/structures_service.go b/docker/structures_service.go new file mode 100644 index 00000000..c4d2056e --- /dev/null +++ b/docker/structures_service.go @@ -0,0 +1,549 @@ +package docker + +import ( + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/hashicorp/terraform/helper/schema" +) + +func flattenTaskSpec(in swarm.TaskSpec) []interface{} { + m := make(map[string]interface{}) + if in.ContainerSpec != nil { + m["container_spec"] = flattenContainerSpec(in.ContainerSpec) + } + if in.Resources != nil { + m["resources"] = flattenTaskResources(in.Resources) + } + if in.RestartPolicy != nil { + m["restart_policy"] = flattenTaskRestartPolicy(in.RestartPolicy) + } + if in.Placement != nil { + m["placement"] = flattenTaskPlacement(in.Placement) + } + if in.ForceUpdate >= 0 { + m["force_update"] = in.ForceUpdate + } + if len(in.Runtime) > 0 { + m["runtime"] = in.Runtime + } + if len(in.Networks) > 0 { + m["networks"] = flattenTaskNetworks(in.Networks) + } + if in.LogDriver != nil { + m["log_driver"] = flattenTaskLogDriver(in.LogDriver) + } + + return []interface{}{m} +} + +func flattenServiceMode(in swarm.ServiceMode) []interface{} { + m := make(map[string]interface{}) + if in.Replicated != nil { + m["replicated"] = flattenReplicated(in.Replicated) + } + if in.Global != nil { + m["global"] = true + } else { + m["global"] = false + } + + return []interface{}{m} +} + +func flattenReplicated(in *swarm.ReplicatedService) []interface{} { + var out = make([]interface{}, 0, 0) + m := make(map[string]interface{}) + if in != nil { + if in.Replicas != nil { + replicas := int(*in.Replicas) + m["replicas"] = replicas + } + } + out = append(out, m) + return out +} + +func flattenServiceUpdateOrRollbackConfig(in *swarm.UpdateConfig) []interface{} { + var out = make([]interface{}, 0, 0) + if in == nil { + return out + } + + m := make(map[string]interface{}) + m["parallelism"] = in.Parallelism + m["delay"] = shortDur(in.Delay) + m["failure_action"] = in.FailureAction + m["monitor"] = shortDur(in.Monitor) + m["max_failure_ratio"] = strconv.FormatFloat(float64(in.MaxFailureRatio), 'f', 1, 64) + m["order"] 
= in.Order + out = append(out, m) + return out +} + +func flattenServiceEndpointSpec(in swarm.EndpointSpec) []interface{} { + var out = make([]interface{}, 0, 0) + m := make(map[string]interface{}) + if len(in.Mode) > 0 { + m["mode"] = in.Mode + } + if len(in.Ports) > 0 { + m["ports"] = flattenServicePorts(in.Ports) + } + + out = append(out, m) + return out +} + +///// start TaskSpec +func flattenContainerSpec(in *swarm.ContainerSpec) []interface{} { + var out = make([]interface{}, 0, 0) + m := make(map[string]interface{}) + if len(in.Image) > 0 { + m["image"] = in.Image + } + if len(in.Labels) > 0 { + m["labels"] = in.Labels + } + if len(in.Command) > 0 { + m["command"] = in.Command + } + if len(in.Args) > 0 { + m["args"] = in.Args + } + if len(in.Hostname) > 0 { + m["hostname"] = in.Hostname + } + if len(in.Env) > 0 { + m["env"] = mapStringSliceToMap(in.Env) + } + if len(in.User) > 0 { + m["user"] = in.User + } + if len(in.Dir) > 0 { + m["dir"] = in.Dir + } + if len(in.Groups) > 0 { + m["groups"] = in.Groups + } + if in.Privileges != nil { + m["privileges"] = flattenPrivileges(in.Privileges) + } + if in.ReadOnly { + m["read_only"] = in.ReadOnly + } + if len(in.Mounts) > 0 { + m["mounts"] = flattenServiceMounts(in.Mounts) + } + if len(in.StopSignal) > 0 { + m["stop_signal"] = in.StopSignal + } + if in.StopGracePeriod != nil { + m["stop_grace_period"] = shortDur(*in.StopGracePeriod) + } + if in.Healthcheck != nil { + m["healthcheck"] = flattenServiceHealthcheck(in.Healthcheck) + } + if len(in.Hosts) > 0 { + m["hosts"] = flattenServiceHosts(in.Hosts) + } + if in.DNSConfig != nil { + m["dns_config"] = flattenServiceDNSConfig(in.DNSConfig) + } + if len(in.Secrets) > 0 { + m["secrets"] = flattenServiceSecrets(in.Secrets) + } + if len(in.Configs) > 0 { + m["configs"] = flattenServiceConfigs(in.Configs) + } + out = append(out, m) + return out +} + +func flattenPrivileges(in *swarm.Privileges) []interface{} { + if in == nil { + return make([]interface{}, 0, 0) + } + + var out = make([]interface{}, 1, 1) + m := make(map[string]interface{}) + + if in.CredentialSpec != nil { + credSpec := make([]interface{}, 1, 1) + internal := make(map[string]interface{}) + internal["file"] = in.CredentialSpec.File + internal["registry"] = in.CredentialSpec.Registry + credSpec[0] = internal + m["credential_spec"] = credSpec + } + if in.SELinuxContext != nil { + seLinuxContext := make([]interface{}, 1, 1) + internal := make(map[string]interface{}) + internal["disable"] = in.SELinuxContext.Disable + internal["user"] = in.SELinuxContext.User + internal["role"] = in.SELinuxContext.Role + internal["type"] = in.SELinuxContext.Type + internal["level"] = in.SELinuxContext.Level + seLinuxContext[0] = internal + m["se_linux_context"] = seLinuxContext + } + out[0] = m + return out +} + +func flattenServiceMounts(in []mount.Mount) *schema.Set { + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + m := make(map[string]interface{}) + m["target"] = v.Target + m["source"] = v.Source + m["type"] = string(v.Type) + m["read_only"] = v.ReadOnly + if v.BindOptions != nil { + bindOptions := make([]interface{}, 0, 0) + bindOptionsItem := make(map[string]interface{}, 0) + + if len(v.BindOptions.Propagation) > 0 { + bindOptionsItem["propagation"] = string(v.BindOptions.Propagation) + } + + bindOptions = append(bindOptions, bindOptionsItem) + m["bind_options"] = bindOptions + } + + if v.VolumeOptions != nil { + volumeOptions := make([]interface{}, 0, 0) + volumeOptionsItem := make(map[string]interface{}, 0) + + 
volumeOptionsItem["no_copy"] = v.VolumeOptions.NoCopy + volumeOptionsItem["labels"] = mapStringStringToMapStringInterface(v.VolumeOptions.Labels) + if v.VolumeOptions.DriverConfig != nil { + if len(v.VolumeOptions.DriverConfig.Name) > 0 { + volumeOptionsItem["driver_name"] = v.VolumeOptions.DriverConfig.Name + } + volumeOptionsItem["driver_options"] = mapStringStringToMapStringInterface(v.VolumeOptions.DriverConfig.Options) + } + + volumeOptions = append(volumeOptions, volumeOptionsItem) + m["volume_options"] = volumeOptions + } + + if v.TmpfsOptions != nil { + tmpfsOptions := make([]interface{}, 0, 0) + tmpfsOptionsItem := make(map[string]interface{}, 0) + + tmpfsOptionsItem["size_bytes"] = int(v.TmpfsOptions.SizeBytes) + tmpfsOptionsItem["mode"] = v.TmpfsOptions.Mode.Perm + + tmpfsOptions = append(tmpfsOptions, tmpfsOptionsItem) + m["tmpfs_options"] = tmpfsOptions + } + + out[i] = m + } + taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource) + containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource) + mountsResource := containerSpecResource.Schema["mounts"].Elem.(*schema.Resource) + f := schema.HashResource(mountsResource) + return schema.NewSet(f, out) +} + +func flattenServiceHealthcheck(in *container.HealthConfig) []interface{} { + if in == nil { + return make([]interface{}, 0, 0) + } + + var out = make([]interface{}, 1, 1) + m := make(map[string]interface{}) + if len(in.Test) > 0 { + m["test"] = in.Test + } + m["interval"] = shortDur(in.Interval) + m["timeout"] = shortDur(in.Timeout) + m["start_period"] = shortDur(in.StartPeriod) + m["retries"] = in.Retries + out[0] = m + return out +} + +func flattenServiceHosts(in []string) *schema.Set { + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + m := make(map[string]interface{}) + split := strings.Split(v, ":") + m["host"] = split[0] + m["ip"] = split[1] + out[i] = m + } + taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource) + containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource) + hostsResource := containerSpecResource.Schema["hosts"].Elem.(*schema.Resource) + f := schema.HashResource(hostsResource) + return schema.NewSet(f, out) +} + +func flattenServiceDNSConfig(in *swarm.DNSConfig) []interface{} { + if in == nil { + return make([]interface{}, 0, 0) + } + + var out = make([]interface{}, 1, 1) + m := make(map[string]interface{}) + if len(in.Nameservers) > 0 { + m["nameservers"] = in.Nameservers + } + if len(in.Search) > 0 { + m["search"] = in.Search + } + if len(in.Options) > 0 { + m["options"] = in.Options + } + out[0] = m + return out +} + +func flattenServiceSecrets(in []*swarm.SecretReference) *schema.Set { + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + m := make(map[string]interface{}) + m["secret_id"] = v.SecretID + if len(v.SecretName) > 0 { + m["secret_name"] = v.SecretName + } + if v.File != nil { + m["file_name"] = v.File.Name + } + out[i] = m + } + taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource) + containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource) + secretsResource := containerSpecResource.Schema["secrets"].Elem.(*schema.Resource) + f := schema.HashResource(secretsResource) + return schema.NewSet(f, out) +} + +func flattenServiceConfigs(in []*swarm.ConfigReference) *schema.Set { + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + m 
:= make(map[string]interface{}) + m["config_id"] = v.ConfigID + if len(v.ConfigName) > 0 { + m["config_name"] = v.ConfigName + } + if v.File != nil { + m["file_name"] = v.File.Name + } + out[i] = m + } + taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource) + containerSpecResource := taskSpecResource.Schema["container_spec"].Elem.(*schema.Resource) + configsResource := containerSpecResource.Schema["configs"].Elem.(*schema.Resource) + f := schema.HashResource(configsResource) + return schema.NewSet(f, out) +} + +func flattenTaskResources(in *swarm.ResourceRequirements) []interface{} { + var out = make([]interface{}, 0, 0) + if in != nil { + m := make(map[string]interface{}) + m["limits"] = flattenResourceLimitsOrReservations(in.Limits) + m["reservation"] = flattenResourceLimitsOrReservations(in.Reservations) + out = append(out, m) + } + return out +} + +func flattenResourceLimitsOrReservations(in *swarm.Resources) []interface{} { + var out = make([]interface{}, 0, 0) + if in != nil { + m := make(map[string]interface{}) + m["nano_cpus"] = in.NanoCPUs + m["memory_bytes"] = in.MemoryBytes + m["generic_resources"] = flattenResourceGenericResource(in.GenericResources) + out = append(out, m) + } + return out +} + +func flattenResourceGenericResource(in []swarm.GenericResource) []interface{} { + var out = make([]interface{}, 0, 0) + if in != nil && len(in) > 0 { + m := make(map[string]interface{}) + named := make([]string, 0) + discrete := make([]string, 0) + for _, genericResource := range in { + if genericResource.NamedResourceSpec != nil { + named = append(named, genericResource.NamedResourceSpec.Kind+"="+genericResource.NamedResourceSpec.Value) + } + if genericResource.DiscreteResourceSpec != nil { + discrete = append(discrete, genericResource.DiscreteResourceSpec.Kind+"="+strconv.Itoa(int(genericResource.DiscreteResourceSpec.Value))) + } + } + m["named_resources_spec"] = newStringSet(schema.HashString, named) + m["discrete_resources_spec"] = newStringSet(schema.HashString, discrete) + out = append(out, m) + } + return out +} + +func flattenTaskRestartPolicy(in *swarm.RestartPolicy) map[string]interface{} { + m := make(map[string]interface{}) + if len(in.Condition) > 0 { + m["condition"] = string(in.Condition) + } + if in.Delay != nil { + m["delay"] = shortDur(*in.Delay) + } + if in.MaxAttempts != nil { + mapped := *in.MaxAttempts + m["max_attempts"] = strconv.Itoa(int(mapped)) + } + if in.Window != nil { + m["window"] = shortDur(*in.Window) + } + return m +} + +func flattenTaskPlacement(in *swarm.Placement) []interface{} { + if in == nil { + return make([]interface{}, 0, 0) + } + var out = make([]interface{}, 1, 1) + m := make(map[string]interface{}) + if len(in.Constraints) > 0 { + m["constraints"] = newStringSet(schema.HashString, in.Constraints) + } + if len(in.Preferences) > 0 { + m["prefs"] = flattenPlacementPrefs(in.Preferences) + } + if len(in.Platforms) > 0 { + m["platforms"] = flattenPlacementPlatforms(in.Platforms) + } + out[0] = m + return out +} + +func flattenPlacementPrefs(in []swarm.PlacementPreference) *schema.Set { + if in == nil || len(in) == 0 { + return schema.NewSet(schema.HashString, make([]interface{}, 0, 0)) + } + + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + out[i] = v.Spread.SpreadDescriptor + } + return schema.NewSet(schema.HashString, out) +} + +func flattenPlacementPlatforms(in []swarm.Platform) *schema.Set { + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + m := 
make(map[string]interface{})
+		m["architecture"] = v.Architecture
+		m["os"] = v.OS
+		out[i] = m
+	}
+	taskSpecResource := resourceDockerService().Schema["task_spec"].Elem.(*schema.Resource)
+	placementResource := taskSpecResource.Schema["placement"].Elem.(*schema.Resource)
+	f := schema.HashResource(placementResource)
+	return schema.NewSet(f, out)
+}
+
+func flattenTaskNetworks(in []swarm.NetworkAttachmentConfig) *schema.Set {
+	var out = make([]interface{}, len(in), len(in))
+	for i, v := range in {
+		out[i] = v.Target
+	}
+	return schema.NewSet(schema.HashString, out)
+}
+
+func flattenTaskLogDriver(in *swarm.Driver) []interface{} {
+	if in == nil {
+		return make([]interface{}, 0, 0)
+	}
+
+	var out = make([]interface{}, 1, 1)
+	m := make(map[string]interface{})
+	m["name"] = in.Name
+	if len(in.Options) > 0 {
+		m["options"] = in.Options
+	}
+	out[0] = m
+	return out
+}
+
+///// end TaskSpec
+///// start EndpointSpec
+func flattenServicePorts(in []swarm.PortConfig) *schema.Set {
+	var out = make([]interface{}, len(in), len(in))
+	for i, v := range in {
+		m := make(map[string]interface{})
+		if len(v.Name) > 0 {
+			m["name"] = v.Name
+		}
+		m["protocol"] = string(v.Protocol)
+		m["target_port"] = int(v.TargetPort)
+		if v.PublishedPort > 0 {
+			m["published_port"] = int(v.PublishedPort)
+		}
+		m["publish_mode"] = string(v.PublishMode)
+		out[i] = m
+	}
+	endpointSpecResource := resourceDockerService().Schema["endpoint_spec"].Elem.(*schema.Resource)
+	portsResource := endpointSpecResource.Schema["ports"].Elem.(*schema.Resource)
+	f := schema.HashResource(portsResource)
+	return schema.NewSet(f, out)
+}
+
+///// end EndpointSpec
+
+// HELPERS
+// shortDur renders a duration without trailing zero units, e.g. '1m0s' -> '1m'
+func shortDur(d time.Duration) string {
+	s := d.String()
+	if strings.HasSuffix(s, "m0s") {
+		s = s[:len(s)-2]
+	}
+	if strings.HasSuffix(s, "h0m") {
+		s = s[:len(s)-2]
+	}
+	return s
+}
+
+func newStringSet(f schema.SchemaSetFunc, in []string) *schema.Set {
+	var out = make([]interface{}, len(in), len(in))
+	for i, v := range in {
+		out[i] = v
+	}
+	return schema.NewSet(f, out)
+}
+
+// mapStringSliceToMap maps a slice with '=' delimiter to a map: e.g. 'foo=bar' -> foo = "bar"
+func mapStringSliceToMap(in []string) map[string]string {
+	mapped := make(map[string]string, len(in))
+	for _, v := range in {
+		if len(v) > 0 {
+			// split on the first '=' only so that values containing '=' stay intact
+			splitted := strings.SplitN(v, "=", 2)
+			key := splitted[0]
+			value := splitted[1]
+			mapped[key] = value
+		}
+	}
+	return mapped
+}
+
+// mapStringStringToMapStringInterface maps a string string map to a string interface map
+func mapStringStringToMapStringInterface(in map[string]string) map[string]interface{} {
+	if in == nil || len(in) == 0 {
+		return make(map[string]interface{}, 0)
+	}
+
+	mapped := make(map[string]interface{}, len(in))
+	for k, v := range in {
+		mapped[k] = v
+	}
+	return mapped
+}
diff --git a/docker/validators.go b/docker/validators.go
index 447f5bb4..7398f2d0 100644
--- a/docker/validators.go
+++ b/docker/validators.go
@@ -4,6 +4,7 @@ import (
 	"encoding/base64"
 	"fmt"
 	"regexp"
+	"strconv"
 	"time"
 
 	"github.com/hashicorp/terraform/helper/schema"
@@ -46,6 +47,34 @@ func validateFloatRatio() schema.SchemaValidateFunc {
 	}
 }
 
+func validateStringIsFloatRatio() schema.SchemaValidateFunc {
+	return func(v interface{}, k string) (ws []string, errors []error) {
+		switch v.(type) {
+		case string:
+			stringValue := v.(string)
+			value, err := strconv.ParseFloat(stringValue, 64)
+			if err != nil {
+				errors = append(errors, fmt.Errorf(
+					"%q is not a float", k))
+			}
+			if value < 0.0 || value > 1.0 {
+				errors = append(errors, fmt.Errorf(
+					"%q has to be between 0.0 and 1.0", k))
+			}
+		case int:
+			value := float64(v.(int))
+			if value < 0.0 || value > 1.0 {
+				errors = append(errors, fmt.Errorf(
+					"%q has to be between 0.0 and 1.0", k))
+			}
+		default:
+			errors = append(errors, fmt.Errorf(
+				"%q is not a string", k))
+		}
+		return
+	}
+}
+
 func validateDurationGeq0() schema.SchemaValidateFunc {
 	return func(v interface{}, k string) (ws []string, errors []error) {
 		value := v.(string)
diff --git a/docker/validators_test.go b/docker/validators_test.go
index 75725847..9809b844 100644
--- a/docker/validators_test.go
+++ b/docker/validators_test.go
@@ -50,6 +50,42 @@ func TestValidateFloatRatio(t *testing.T) {
 		t.Fatalf("%v should be an invalid float greater than 1.0", v)
 	}
 }
+func TestValidateStringIsFloatRatio(t *testing.T) {
+	v := "0.9"
+	if _, error := validateStringIsFloatRatio()(v, "name"); error != nil {
+		t.Fatalf("%v should be a float between 0.0 and 1.0", v)
+	}
+
+	v = "-4.5"
+	if _, error := validateStringIsFloatRatio()(v, "name"); error == nil {
+		t.Fatalf("%v should be an invalid float smaller than 0.0", v)
+	}
+
+	v = "1.1"
+	if _, error := validateStringIsFloatRatio()(v, "name"); error == nil {
+		t.Fatalf("%v should be an invalid float greater than 1.0", v)
+	}
+	v = "false"
+	if _, error := validateStringIsFloatRatio()(v, "name"); error == nil {
+		t.Fatalf("%v should be an invalid float because it is a bool in a string", v)
+	}
+	w := false
+	if _, error := validateStringIsFloatRatio()(w, "name"); error == nil {
+		t.Fatalf("%v should be an invalid float because it is a bool", w)
+	}
+	i := 0
+	if _, error := validateStringIsFloatRatio()(i, "name"); error != nil {
+		t.Fatalf("%v should be a valid float because int can be casted", i)
+	}
+	i = 1
+	if _, error := validateStringIsFloatRatio()(i, "name"); error != nil {
+		t.Fatalf("%v should be a valid float because int can be casted", i)
+	}
+	i = 4
+	if _, error := validateStringIsFloatRatio()(i, "name"); error == nil {
+		t.Fatalf("%v should be an invalid float because it is an int out of range", i)
+	}
+}
 func TestValidateDurationGeq0(t *testing.T) {
 	v := "1ms"
 	if _, 
error := validateDurationGeq0()(v, "name"); error != nil { diff --git a/scripts/runAccTests.sh b/scripts/runAccTests.sh index a146f369..486bf893 100755 --- a/scripts/runAccTests.sh +++ b/scripts/runAccTests.sh @@ -17,13 +17,12 @@ setup() { } run() { - # Run the acc test suite TF_ACC=1 go test ./docker -v -timeout 120m - # for a single test - # TF_LOG=INFO TF_ACC=1 go test -v github.com/terraform-providers/terraform-provider-docker/docker -run ^TestAccDockerContainer_basic$ -timeout 360s + # for a single test comment the previous line and uncomment the next line + #TF_LOG=INFO TF_ACC=1 go test -v github.com/terraform-providers/terraform-provider-docker/docker -run ^TestAccDockerService_full$ -timeout 360s - # keep the return for the scripts to fail and clean properly + # keep the return value for the scripts to fail and clean properly return $? } @@ -43,7 +42,7 @@ cleanup() { for r in $(docker $resource ls -f 'name=tftest-' -q); do docker $resource rm "$r"; done echo "### removed $resource ###" done - for i in $(docker images -aq 127.0.0.1:5000/tftest-service); do docker rmi -f "$i"; done + for i in $(docker images -aq 127.0.0.1:15000/tftest-service); do docker rmi -f "$i"; done echo "### removed service images ###" } diff --git a/scripts/testing/server_v3.js b/scripts/testing/server_v3.js index 71fb5dca..3467c972 100644 --- a/scripts/testing/server_v3.js +++ b/scripts/testing/server_v3.js @@ -8,4 +8,4 @@ var handleRequest = function (request, response) { response.end(configs.prefix + ' - Hello World!'); }; var www = http.createServer(handleRequest); -www.listen(8085); // changed here on purpose \ No newline at end of file +www.listen(8085); // changed here on purpose diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE~HEAD similarity index 100% rename from vendor/github.com/gogo/protobuf/LICENSE rename to vendor/github.com/gogo/protobuf/LICENSE~HEAD diff --git a/vendor/golang.org/x/sys/LICENSE~HEAD b/vendor/golang.org/x/sys/LICENSE~HEAD new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/sys/LICENSE~HEAD @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/website/docker.erb b/website/docker.erb
index 13da81be..6739ac94 100644
--- a/website/docker.erb
+++ b/website/docker.erb
@@ -39,6 +39,22 @@
+                    >
+                      Swarm Resources
+
+                <% end %>
diff --git a/website/docs/r/config.html.markdown b/website/docs/r/config.html.markdown
new file mode 100644
index 00000000..a8cacb76
--- /dev/null
+++ b/website/docs/r/config.html.markdown
@@ -0,0 +1,99 @@
+---
+layout: "docker"
+page_title: "Docker: docker_config"
+sidebar_current: "docs-docker-resource-config"
+description: |-
+  Manages the configs of a Docker service in a swarm.
+---
+
+# docker\_config
+
+Manages the configuration of a Docker service in a swarm.
+
+## Example Usage
+
+### Basic
+```hcl
+# Creates a config
+resource "docker_config" "foo_config" {
+  name = "foo_config"
+  data = "ewogICJzZXJIfQo="
+}
+```
+
+### Advanced
+#### Dynamically set config with a template
+In this example you can use the `${var.foo_port}` variable to dynamically
+set the `${port}` variable in the `foo.config.json.tpl` template and create
+the data of the `foo_config` with the help of the `base64encode` interpolation
+function.
+
+File `foo.config.json.tpl`
+
+```json
+{
+  "server": {
+    "public_port": ${port}
+  }
+}
+```
+
+File `main.tf`
+
+```hcl
+# Creates the template and renders the variable
+data "template_file" "foo_config_tpl" {
+  template = "${file("foo.config.json.tpl")}"
+
+  vars {
+    port = "${var.foo_port}"
+  }
+}
+
+# Creates the config
+resource "docker_config" "foo_config" {
+  name = "foo_config"
+  data = "${base64encode(data.template_file.foo_config_tpl.rendered)}"
+}
+```
+
+#### Update config with no downtime
+To update a `config`, Terraform will destroy the existing resource and create a replacement. To effectively use a `docker_config` resource with a `docker_service` resource, it's recommended to specify `create_before_destroy` in a `lifecycle` block. Provide a unique `name` attribute, for example
+with one of the interpolation functions `uuid` or `timestamp` as shown
+in the example below. The reason is [moby-35803](https://github.com/moby/moby/issues/35803).
+
+```hcl
+resource "docker_config" "service_config" {
+  name = "${var.service_name}-config-${replace(timestamp(),":", ".")}"
+  data = "${base64encode(data.template_file.service_config_tpl.rendered)}"
+
+  lifecycle {
+    ignore_changes        = ["name"]
+    create_before_destroy = true
+  }
+}
+resource "docker_service" "service" {
+  # ...
+  configs = [
+    {
+      config_id   = "${docker_config.service_config.id}"
+      config_name = "${docker_config.service_config.name}"
+      file_name   = "/root/configs/configs.json"
+    },
+  ]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required, string) The name of the Docker config.
+* `data` - (Required, string) The base64 encoded data of the config.
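+
+Because `data` must already be valid base64, it is usually safest to let
+Terraform do the encoding rather than hand-crafting the string. A minimal
+sketch (the local file `myconfig.json` is a hypothetical example):
+
+```hcl
+resource "docker_config" "from_file" {
+  name = "myconfig"
+  data = "${base64encode(file("myconfig.json"))}"
+}
+```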
+
+
+## Attributes Reference
+
+The following attributes are exported in addition to the above configuration:
+
+* `id` (string)
diff --git a/website/docs/r/secret.html.markdown b/website/docs/r/secret.html.markdown
new file mode 100644
index 00000000..4215c1d8
--- /dev/null
+++ b/website/docs/r/secret.html.markdown
@@ -0,0 +1,64 @@
+---
+layout: "docker"
+page_title: "Docker: docker_secret"
+sidebar_current: "docs-docker-resource-secret"
+description: |-
+  Manages the secrets of a Docker service in a swarm.
+---
+
+# docker\_secret
+
+Manages the secrets of a Docker service in a swarm.
+
+## Example Usage
+
+### Basic
+
+```hcl
+# Creates a secret
+resource "docker_secret" "foo_secret" {
+  name = "foo_secret"
+  data = "ewogICJzZXJsaasIfQo="
+}
+```
+
+#### Update secret with no downtime
+To update a `secret`, Terraform will destroy the existing resource and create a replacement. To effectively use a `docker_secret` resource with a `docker_service` resource, it's recommended to specify `create_before_destroy` in a `lifecycle` block. Provide a unique `name` attribute, for example
+with one of the interpolation functions `uuid` or `timestamp` as shown
+in the example below. The reason is [moby-35803](https://github.com/moby/moby/issues/35803).
+
+```hcl
+resource "docker_secret" "service_secret" {
+  name = "${var.service_name}-secret-${replace(timestamp(),":", ".")}"
+  data = "${base64encode(data.template_file.service_secret_tpl.rendered)}"
+
+  lifecycle {
+    ignore_changes        = ["name"]
+    create_before_destroy = true
+  }
+}
+resource "docker_service" "service" {
+  # ...
+  secrets = [
+    {
+      secret_id   = "${docker_secret.service_secret.id}"
+      secret_name = "${docker_secret.service_secret.name}"
+      file_name   = "/root/configs/configs.json"
+    },
+  ]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required, string) The name of the Docker secret.
+* `data` - (Required, string) The base64 encoded data of the secret.
+
+
+## Attributes Reference
+
+The following attributes are exported in addition to the above configuration:
+
+* `id` (string)
diff --git a/website/docs/r/service.html.markdown b/website/docs/r/service.html.markdown
new file mode 100644
index 00000000..91c198f6
--- /dev/null
+++ b/website/docs/r/service.html.markdown
@@ -0,0 +1,558 @@
+---
+layout: "docker"
+page_title: "Docker: docker_service"
+sidebar_current: "docs-docker-resource-service"
+description: |-
+  Manages the lifecycle of a Docker service.
+---
+
+# docker\_service
+
+This resource manages the lifecycle of a Docker service. By default, the creation, update, and deletion of services are detached operations.
+
+With the [Converge Config](#convergeconfig), the behavior of the `docker` CLI is imitated: Terraform can guarantee,
+for example, that all tasks of a service are running or were successfully updated, and it can report that a service could not
+be updated and was successfully rolled back.
+
+## Example Usage
+The following examples show the basic and advanced usage of the
+Docker Service resource, assuming the host machine is already part of a Swarm. 
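+
+For example, adding a `converge_config` block to any of the configurations below
+makes Terraform wait until the service is running (or successfully updated), or
+fail after the given timeout. A minimal sketch, reusing the values from the
+acceptance tests:
+
+```hcl
+converge_config {
+  delay   = "7s"
+  timeout = "3m"
+}
+```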
+
+### Basic
+The following configuration starts a Docker Service with
+- the given image,
+- 1 replica,
+- the port `8080` exposed in `vip` mode to the host machine,
+- and the `container` runtime.
+
+```hcl
+resource "docker_service" "foo" {
+  name = "foo-service"
+  task_spec {
+    container_spec {
+      image = "repo.mycompany.com:8080/foo-service:v1"
+    }
+  }
+
+  endpoint_spec {
+    ports {
+      target_port = "8080"
+    }
+  }
+}
+```
+
+The following command is equivalent:
+
+```bash
+$ docker service create -d -p 8080 --name foo-service repo.mycompany.com:8080/foo-service:v1
+```
+
+### Advanced
+The following configuration shows the full capabilities of a Docker Service. Currently, the [Docker API 1.32](https://docs.docker.com/engine/api/v1.32) is implemented.
+
+```hcl
+resource "docker_volume" "test_volume" {
+  name = "tftest-volume"
+}
+
+resource "docker_config" "service_config" {
+  name = "tftest-full-myconfig"
+  data = "ewogICJwcmVmaXgiOiAiMTIzIgp9"
+}
+
+resource "docker_secret" "service_secret" {
+  name = "tftest-mysecret"
+  data = "ewogICJrZXkiOiAiUVdFUlRZIgp9"
+}
+
+resource "docker_network" "test_network" {
+  name   = "tftest-network"
+  driver = "overlay"
+}
+
+resource "docker_service" "foo" {
+  name = "tftest-service-basic"
+
+  task_spec {
+    container_spec {
+      image = "repo.mycompany.com:8080/foo-service:v1"
+
+      labels {
+        foo = "bar"
+      }
+
+      command  = ["ls"]
+      args     = ["-las"]
+      hostname = "my-fancy-service"
+
+      env {
+        MYFOO = "BAR"
+      }
+
+      dir    = "/root"
+      user   = "root"
+      groups = ["docker", "foogroup"]
+
+      privileges {
+        se_linux_context {
+          disable = true
+          user    = "user-label"
+          role    = "role-label"
+          type    = "type-label"
+          level   = "level-label"
+        }
+      }
+
+      read_only = true
+
+      mounts = [
+        {
+          target    = "/mount/test"
+          source    = "${docker_volume.test_volume.name}"
+          type      = "volume"
+          read_only = true
+
+          bind_options {
+            propagation = "private"
+          }
+        },
+      ]
+
+      stop_signal       = "SIGTERM"
+      stop_grace_period = "10s"
+
+      healthcheck {
+        test     = ["CMD", "curl", "-f", "http://localhost:8080/health"]
+        interval = "5s"
+        timeout  = "2s"
+        retries  = 4
+      }
+
+      hosts {
+        host = "testhost"
+        ip   = "10.0.1.0"
+      }
+
+      dns_config {
+        nameservers = ["8.8.8.8"]
+        search      = ["example.org"]
+        options     = ["timeout:3"]
+      }
+
+      secrets = [
+        {
+          secret_id   = "${docker_secret.service_secret.id}"
+          secret_name = "${docker_secret.service_secret.name}"
+          file_name   = "/secrets.json"
+        },
+      ]
+
+      configs = [
+        {
+          config_id   = "${docker_config.service_config.id}"
+          config_name = "${docker_config.service_config.name}"
+          file_name   = "/configs.json"
+        },
+      ]
+    }
+
+    resources {
+      limits {
+        nano_cpus    = 1000000
+        memory_bytes = 536870912
+
+        generic_resources {
+          named_resources_spec = [
+            "GPU=UUID1"
+          ]
+
+          discrete_resources_spec = [
+            "SSD=3"
+          ]
+        }
+      }
+
+      reservation {
+        nano_cpus    = 1000000
+        memory_bytes = 536870912
+
+        generic_resources {
+          named_resources_spec = [
+            "GPU=UUID1"
+          ]
+
+          discrete_resources_spec = [
+            "SSD=3"
+          ]
+        }
+      }
+    }
+
+    restart_policy {
+      condition    = "on-failure"
+      delay        = "3s"
+      max_attempts = 4
+      window       = "10s"
+    }
+
+    placement {
+      constraints = [
+        "node.role==manager",
+      ]
+
+      prefs = [
+        "spread=node.role.manager",
+      ]
+    }
+
+    force_update = 0
+    runtime      = "container"
+    networks     = ["${docker_network.test_network.id}"]
+
+    log_driver {
+      name = "json-file"
+
+      options {
+        max-size = "10m"
+        max-file = "3"
+      }
+    }
+  }
+
+  mode {
+    replicated {
+      replicas = 2
+    }
+  }
+
+  update_config {
+    parallelism       = 2
+    delay             = "10s"
+    failure_action    = "pause"
+    monitor           = "5s"
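+    # max_failure_ratio is the failure rate to tolerate during the monitor
+    # window before failure_action is triggered; it is passed as a string
+    # ratio between "0.0" and "1.0"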
max_failure_ratio = "0.1" + order = "start-first" + } + + rollback_config { + parallelism = 2 + delay = "5ms" + failure_action = "pause" + monitor = "10h" + max_failure_ratio = "0.9" + order = "stop-first" + } + + endpoint_spec { + mode = "vip" + + ports { + name = "random" + protocol = "tcp" + target_port = "8080" + published_port = "8080" + publish_mode = "ingress" + } + } +} +``` + +See also the `TestAccDockerService_full` test or all the other tests for a complete overview. + +## Argument Reference + +The following arguments are supported: + +* `auth` - (Optional, block) See [Auth](#auth) below for details. +* `name` - (Required, string) The name of the Docker service. +* `task_spec` - (Required, block) See [TaskSpec](#task-spec) below for details. +* `mode` - (Optional, block) See [Mode](#mode) below for details. +* `update_config` - (Optional, block) See [UpdateConfig](#update-rollback-config) below for details. +* `rollback_config` - (Optional, block) See [RollbackConfig](#update-rollback-config) below for details. +* `endpoint_spec` - (Optional, block) See [EndpointSpec](#endpoint-spec) below for details. +* `converge_config` - (Optional, block) See [Converge Config](#converge-config) below for details. + + +### Auth + +`auth` can be used additionally to the `registry_auth`. If both properties are given the `auth` wins and overwrites the auth of the provider. + +* `server_address` - (Required, string) The address of the registry server +* `username` - (Optional, string) The username to use for authenticating to the registry. If this is blank, the `DOCKER_REGISTRY_USER` is also be checked. +* `password` - (Optional, string) The password to use for authenticating to the registry. If this is blank, the `DOCKER_REGISTRY_PASS` is also be checked. + + + +### TaskSpec + +`task_spec` is a block within the configuration that can be repeated only **once** to specify the mode configuration for the service. The `task_spec` block is the user modifiable task configuration and supports the following: + +* `container_spec` (Required, block) See [ContainerSpec](#container-spec) below for details. +* `resources` (Optional, block) See [Resources](#resources) below for details. +* `restart_policy` (Optional, block) See [Restart Policy](#restart-policy) below for details. +* `placement` (Optional, block) See [Placement](#placement) below for details. +* `force_update` (Optional, int) A counter that triggers an update even if no relevant parameters have been changed. See [Docker Spec](https://github.com/docker/swarmkit/blob/master/api/specs.proto#L126). +* `runtime` (Optional, string) Runtime is the type of runtime specified for the task executor. See [Docker Runtime](https://github.com/moby/moby/blob/master/api/types/swarm/runtime.go). +* `networks` - (Optional, set of strings) Ids of the networks in which the container will be put in. +* `log_driver` - (Optional, block) See [Log Driver](#log-driver) below for details. + + + + +#### ContainerSpec + +`container_spec` is a block within the configuration that can be repeated only **once** to specify the mode configuration for the service. The `container_spec` block is the spec for each container and supports the following: + +* `image` - (Required, string) The image used to create the Docker service. +* `labels` - (Optional, map of string/string key/value pairs) User-defined key/value metadata. +* `command` - (Optional, list of strings) The command to be run in the image. +* `args` - (Optional, list of strings) Arguments to the command. 
### TaskSpec

`task_spec` is a block within the configuration that can be repeated only **once**. It is the user-modifiable task configuration and supports the following:

* `container_spec` - (Required, block) See [ContainerSpec](#container-spec) below for details.
* `resources` - (Optional, block) See [Resources](#resources) below for details.
* `restart_policy` - (Optional, block) See [Restart Policy](#restart-policy) below for details.
* `placement` - (Optional, block) See [Placement](#placement) below for details.
* `force_update` - (Optional, int) A counter that triggers an update even if no relevant parameters have been changed. See the [Docker Spec](https://github.com/docker/swarmkit/blob/master/api/specs.proto#L126).
* `runtime` - (Optional, string) The type of runtime specified for the task executor. See [Docker Runtime](https://github.com/moby/moby/blob/master/api/types/swarm/runtime.go).
* `networks` - (Optional, set of strings) The IDs of the networks the service's containers are attached to.
* `log_driver` - (Optional, block) See [Log Driver](#log-driver) below for details.

#### ContainerSpec

`container_spec` is a block within the configuration that can be repeated only **once**. It is the spec for each container created as part of the service and supports the following:

* `image` - (Required, string) The image used to create the Docker service.
* `labels` - (Optional, map of string/string key/value pairs) User-defined key/value metadata.
* `command` - (Optional, list of strings) The command to be run in the image.
* `args` - (Optional, list of strings) Arguments to the command.
* `hostname` - (Optional, string) The hostname to use for the container, as a valid RFC 1123 hostname.
* `env` - (Optional, map of string/string) Environment variables in the form `VAR=value`.
* `dir` - (Optional, string) The working directory for commands to run in.
* `user` - (Optional, string) The user inside the container.
* `groups` - (Optional, list of strings) A list of additional groups that the container process will run as.
* `privileges` - (Optional, block) See [Privileges](#privileges) below for details.
* `read_only` - (Optional, bool) Mount the container's root filesystem as read only.
* `mounts` - (Optional, set of blocks) See [Mounts](#mounts) below for details.
* `stop_signal` - (Optional, string) Signal to stop the container.
* `stop_grace_period` - (Optional, string) Amount of time to wait for the container to terminate before forcefully removing it `(ms|s|m|h)`.
* `healthcheck` - (Optional, block) See [Healthcheck](#healthcheck) below for details.
* `hosts` - (Optional, set of blocks) Hostname/IP mappings to add to the container's hosts file.
  * `ip` - (Required, string) The IP address.
  * `host` - (Required, string) The hostname.
* `dns_config` - (Optional, block) See [DNS Config](#dnsconfig) below for details.
* `secrets` - (Optional, set of blocks) See [Secrets](#secrets) below for details.
* `configs` - (Optional, set of blocks) See [Configs](#configs) below for details.

#### Privileges

`privileges` is a block within the configuration that can be repeated only **once**. It holds the security options for the container and supports the following:

* `credential_spec` - (Optional, block) For managed service accounts (Windows only).
  * `file` - (Optional, string) Load credential spec from this file.
  * `registry` - (Optional, string) Load credential spec from this value in the Windows registry.
* `se_linux_context` - (Optional, block) SELinux labels of the container.
  * `disable` - (Optional, bool) Disable SELinux.
  * `user` - (Optional, string) SELinux user label.
  * `role` - (Optional, string) SELinux role label.
  * `type` - (Optional, string) SELinux type label.
  * `level` - (Optional, string) SELinux level label.

#### Mounts

`mounts` is a set of blocks within the configuration that can be repeated to specify the mount mappings of the container. Each block is the specification for a mount to be added to containers created as part of the service and supports the following:

* `target` - (Required, string) The container path.
* `source` - (Required, string) The mount source (e.g., a volume name or a host path).
* `type` - (Required, string) The mount type: valid values are `bind|volume|tmpfs`.
* `read_only` - (Optional, bool) Whether the mount should be read-only.
* `bind_options` - (Optional, map) Optional configuration for the `bind` type.
  * `propagation` - (Optional, string) The propagation mode.
* `volume_options` - (Optional, map) Optional configuration for the `volume` type.
  * `no_copy` - (Optional, bool) Whether to populate the volume with data from the target.
  * `labels` - (Optional, map of key/value pairs) User-defined key/value metadata.
  * `driver_config` - (Optional, map) The configuration of the driver used to create the volume.
    * `name` - (Optional, string) The name of the driver.
    * `options` - (Optional, map of key/value pairs) Options for the driver.
* `tmpfs_options` - (Optional, map) Optional configuration for the `tmpfs` type.
  * `size_bytes` - (Optional, int) The size of the tmpfs mount in bytes.
  * `mode` - (Optional, int) The permission mode for the tmpfs mount as an integer.
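A `tmpfs` mount could be sketched as follows, assuming the attribute names listed above; the values are illustrative, and `448` is the decimal form of the octal mode `0700`:

```hcl
mounts = [
  {
    target = "/var/cache/foo"
    type   = "tmpfs"

    tmpfs_options {
      size_bytes = 65536 # 64 KiB of scratch space
      mode       = 448   # decimal for octal 0700
    }
  },
]
```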
#### Healthcheck

`healthcheck` is a block within the configuration that can be repeated only **once**. It describes the test to perform to check that containers of this service are healthy and supports the following:

* `test` - (Required, list of strings) Command to run to check health. For example, to run `curl -f http://localhost/health`, set the command to `["CMD", "curl", "-f", "http://localhost/health"]`.
* `interval` - (Optional, string) Time between running the check `(ms|s|m|h)`. Default: `0s`.
* `timeout` - (Optional, string) Maximum time to allow one check to run `(ms|s|m|h)`. Default: `0s`.
* `start_period` - (Optional, string) Start period for the container to initialize before counting retries towards unstable `(ms|s|m|h)`. Default: `0s`.
* `retries` - (Optional, int) Consecutive failures needed to report unhealthy. Default: `0`.
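For instance, the following sketch gives the container 15 seconds to boot before failed probes start counting towards `retries`; all attributes are documented above:

```hcl
healthcheck {
  test         = ["CMD", "curl", "-f", "http://localhost:8080/health"]
  interval     = "5s"  # run the check every 5 seconds
  timeout      = "2s"  # each check may take at most 2 seconds
  start_period = "15s" # grace period before failures count
  retries      = 4     # report unhealthy after 4 consecutive failures
}
```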
#### DNS Config

`dns_config` is a block within the configuration that can be repeated only **once** to specify the DNS configuration for the containers of the service. The `dns_config` block supports the following:

* `nameservers` - (Required, list of strings) The IP addresses of the name servers, for example `8.8.8.8`.
* `search` - (Optional, list of strings) A search list for host-name lookup.
* `options` - (Optional, list of strings) A list of internal resolver variables to be modified, for example `debug` or `ndots:3`.

#### Secrets

`secrets` is a set of blocks within the configuration that can be repeated to expose secrets to the service. Each `secrets` block is a reference to a secret and supports the following:

* `secret_id` - (Required, string) The ID of the specific secret.
* `secret_name` - (Optional, string) The name of the secret that this references; internally it is only provided for lookup/display purposes.
* `file_name` - (Required, string) The final filename in the filesystem, i.e. the target file inside the Docker container that the secret data is written to, e.g. `/root/secret/secret.json`.

#### Configs

`configs` is a set of blocks within the configuration that can be repeated to expose configs to the service. Each `configs` block is a reference to a config and supports the following:

* `config_id` - (Required, string) The ID of the specific config.
* `config_name` - (Optional, string) The name of the config that this references; internally it is only provided for lookup/display purposes.
* `file_name` - (Required, string) The final filename in the filesystem, i.e. the target file inside the Docker container that the config data is written to, e.g. `/root/config/config.json`.

#### Resources

`resources` is a block within the configuration that can be repeated only **once**. It represents the resource requirements which apply to each container created as part of the service and supports the following:

* `limits` - (Optional, block) Describes the resource limits which apply to each container of the service.
  * `nano_cpus` - (Optional, int) CPU shares in units of 1/1e9 (or 10^-9) of a CPU. Should be at least `1000000`.
  * `memory_bytes` - (Optional, int) The amount of memory, in bytes, the container may allocate.
  * `generic_resources` - (Optional, map) User-defined resources, either Integer resources (e.g., `SSD=3`) or String resources (e.g., `GPU=UUID1`).
    * `named_resources_spec` - (Optional, set of strings) The String resources, delimited by `=`.
    * `discrete_resources_spec` - (Optional, set of strings) The Integer resources, delimited by `=`.
* `reservation` - (Optional, block) Describes the resources which will be reserved for each container of the service. It has the same structure as `limits`:
  * `nano_cpus` - (Optional, int) CPU shares in units of 1/1e9 (or 10^-9) of a CPU. Should be at least `1000000`.
  * `memory_bytes` - (Optional, int) The amount of memory, in bytes, the container reserves.
  * `generic_resources` - (Optional, map) User-defined resources, either Integer resources (e.g., `SSD=3`) or String resources (e.g., `GPU=UUID1`).
    * `named_resources_spec` - (Optional, set of strings) The String resources, delimited by `=`.
    * `discrete_resources_spec` - (Optional, set of strings) The Integer resources, delimited by `=`.

#### Restart Policy

`restart_policy` is a block within the configuration that can be repeated only **once**. It specifies the restart policy which applies to containers created as part of this service and supports the following:

* `condition` - (Optional, string) Condition for restart: `(none|on-failure|any)`.
* `delay` - (Optional, string) Delay between restart attempts `(ms|s|m|h)`.
* `max_attempts` - (Optional, int) Maximum attempts to restart a given container before giving up (default: `0`, which is ignored).
* `window` - (Optional, string) The time window used to evaluate the restart policy (default: `0`, which is unbounded) `(ms|s|m|h)`.

#### Placement

`placement` is a block within the configuration that can be repeated only **once**. It specifies the placement preferences and supports the following (see also the sketch after this list):

* `constraints` - (Optional, set of strings) An array of constraints, e.g. `node.role==manager`.
* `prefs` - (Optional, set of strings) Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence, e.g. `spread=node.role.manager`.
* `platforms` - (Optional, set of blocks) All the platforms that the service's image can run on.
  * `architecture` - (Required, string) The architecture, e.g., `amd64`.
  * `os` - (Required, string) The operating system, e.g., `linux`.
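Combining constraints, preferences, and platforms might look like the following sketch; the values are illustrative:

```hcl
placement {
  # Only schedule tasks on manager nodes.
  constraints = [
    "node.role==manager",
  ]

  # Spread tasks evenly over the matching nodes.
  prefs = [
    "spread=node.role.manager",
  ]

  # The platforms the service's image can run on.
  platforms {
    architecture = "amd64"
    os           = "linux"
  }
}
```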
#### Log Driver

`log_driver` is a block within the configuration that can be repeated only **once**. It specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if that is not specified either. The block supports the following:

* `name` - (Required, string) The logging driver to use. One of `(none|json-file|syslog|journald|gelf|fluentd|awslogs|splunk|etwlogs|gcplogs)`.
* `options` - (Optional, map of strings) The options for the logging driver, e.g.

```hcl
options {
  awslogs-region = "us-west-2"
  awslogs-group  = "dev/foo-service"
}
```

### Mode

`mode` is a block within the configuration that can be repeated only **once** to specify the mode configuration for the service. The `mode` block supports the following:

* `global` - (Optional, bool) Set to `true` to run the service in global mode.

```hcl
resource "docker_service" "foo" {
  ...
  mode {
    global = true
  }
  ...
}
```
* `replicated` - (Optional, block) Currently only contains the number of `replicas`.

```hcl
resource "docker_service" "foo" {
  ...
  mode {
    replicated {
      replicas = 2
    }
  }
  ...
}
```

~> **NOTE on `mode`:** If neither `global` nor `replicated` is specified, the service is started in `replicated` mode with 1 replica. Changing the mode of a running service is not possible; the service has to be destroyed and recreated in the new mode.

### UpdateConfig and RollbackConfig

`update_config` and `rollback_config` are blocks within the configuration that can each be repeated only **once**. They specify how the service is updated or rolled back, respectively. Both blocks support the following:

* `parallelism` - (Optional, int) The maximum number of tasks to be updated in one iteration simultaneously (`0` to update all at once).
* `delay` - (Optional, string) Delay between updates `(ns|us|ms|s|m|h)`, e.g. `5s`.
* `failure_action` - (Optional, string) Action on update failure: `(pause|continue|rollback)`.
* `monitor` - (Optional, string) Duration after each task update to monitor for failure `(ns|us|ms|s|m|h)`.
* `max_failure_ratio` - (Optional, string) The failure rate to tolerate during an update as a `float`. **Important:** the `float` needs to be wrapped in a `string` to avoid internal casting and precision errors.
* `order` - (Optional, string) Update order: either `start-first` or `stop-first`.

### EndpointSpec

`endpoint_spec` is a block within the configuration that can be repeated only **once** to specify the properties that can be configured to access and load balance a service. The block supports the following:

* `mode` - (Optional, string) The mode of resolution to use for internal load balancing between tasks: `(vip|dnsrr)`. Default: `vip`.
* `ports` - (Optional, block) See [Ports](#ports) below for details.

#### Ports

`ports` is a block within the configuration that can be repeated to specify the port mappings of the service. Each `ports` block supports the following (see also the sketch after this list):

* `name` - (Optional, string) A name for the port.
* `protocol` - (Optional, string) Protocol that can be used over this port: `(tcp|udp)`. Default: `tcp`.
* `target_port` - (Required, int) The port inside the container.
* `published_port` - (Optional, int) The port on the swarm hosts. If not set, the value of `target_port` is used.
* `publish_mode` - (Optional, string) The mode in which the port is published: `(ingress|host)`.
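As a sketch, the following publishes the container's port `8080` as port `8081` directly on the nodes where a task runs; `publish_mode = "host"` bypasses the routing mesh:

```hcl
endpoint_spec {
  mode = "vip"

  ports {
    name           = "web"
    protocol       = "tcp"
    target_port    = "8080"
    published_port = "8081"
    publish_mode   = "host" # publish on the task's node instead of the ingress mesh
  }
}
```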
### Converge Config

`converge_config` is a block within the configuration that can be repeated only **once** to specify the converging configuration for the service. This mirrors the behavior of the `docker` CLI: when this block is present, the Docker endpoint is polled at the given interval until, e.g., all tasks/replicas of the service are up and healthy, or until the timeout is reached.

The `converge_config` block supports the following:

* `delay` - (Optional, string) Time between each poll of the Docker endpoint `(ms|s|m|h)`, for example to check that all tasks are up when a service is created, or that all tasks are successfully updated on an update. Default: `7s`.
* `timeout` - (Optional, string) The timeout for the service to reach the desired state `(s|m)`. Default: `3m`.

## Attributes Reference

The following attributes are exported in addition to the above configuration:

* `id` (string)
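The exported `id` can be referenced from other parts of the configuration, for example as an output (a minimal sketch):

```hcl
# Expose the service ID so it can be consumed by other configurations.
output "foo_service_id" {
  value = "${docker_service.foo.id}"
}
```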