terraform-provider-docker/testdata/resources/docker_service/testAccDockerServiceFullSpec.tf
Manuel Vogel 0588c2071b
chore/refactor tests (#201)
* chore: format test configs for datasources

* chore: outlines load test config helper and structure

* docs(contributing): add command for resource tests

to have an example of the regex

* refactor: move container test configs into separate files

* fix: add insecure_skip_verify for image pulls

to fix the local test setup with invalid certs

* chore(ci): remove insecure registry adaption

* chore: regenerate website

* chore: update gitignore for scripts/testing dir

* fix: replace nodejs services with go versions

* fix: move testing program versions in separate files

* test: reactivate flaky test from travis

* chore: fix linter on all go files

* fix(linter): testing go servers

* chore(ci): add env for go version

* chore(ci): name workflow steps

also moves the description of available docker versions into the acc dockerfile

* Revert "test: reactivate flaky test from travis"

This reverts commit b02654acc4d6b7d02c8f3ba090e6a3f248741b10.

* docs: fix provider-ssh example

* chore: use alpine as the final image for tests

* refactor: move test configs from folder into testname.tf files

* refactor: image delete log is now debug and indented

* refactor: image test config into separate files

* refactor: move network test config into separate files

* refactor: move plugin test config into separate files

* chore: rename registry image test file

* refactor: move registry_image test config into separate files

* chore: format secret test configs

* refactor: inline volume test configs

* fix: remove unused volume label test function

* refactor: move service test configs into separate files

* test: reactivate and fix service test

* chore: simplify insecure skip verify add to http client

* chore(ci): debug into service test

* chore(ci): add testacc setup

* chore: format tf config for provider test

* chore(ci): add debug output for config.json

* fix: check service auth for emptiness

* fix: remove re-read of provider auth config

because the bug occurred only in CI, as the meta object might be GC'd

* test: pass auth to service instead of provider

* chore: reactivate all acc tests

* test: outlines service inspect json check for full spec

* test: add service inspect json checks

* test: finish service inspect json checks

* chore(service): move test helper to the end of the file

* chore: move mapEquals to test helpers

* test: add json inspect for config

* chore: add debug inspect log for plugin, secret and volume

* test: add json inspect for secret

* test: add json inspect for image

* test: add json inspect for network

* test: add json inspect for plugin

* test: add json inspect for volume

* test: inline ds plugin test configs

* test: inline network configs

* test: move ds reg image configs into separate files

* test: reactivates container upload checks

* chore: adapt issues ref from old to new xw repo

* fix: reactivate network ingress test

and provide helpers for removing the default ingress network and leaving the swarm

* docs: rerun website gen

* test: fix reg image build and keep test

* chore: add name to todo

* chore: move ds network and plugin specs to file

* chore: format provider test spec

* chore: use simpler error message for empty strings

provider "docker" {
registry_auth {
address = "127.0.0.1:15000"
}
}
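# The registry_auth block above points the provider at the local registry
# started for the acceptance tests. If that registry required credentials,
# they could also be supplied here (for example username/password or a
# config_file argument, assuming the provider version in use supports those
# registry_auth attributes).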
resource "docker_volume" "test_volume" {
name = "tftest-volume"
}
resource "docker_config" "service_config" {
name = "tftest-full-myconfig"
data = "ewogICJwcmVmaXgiOiAiMTIzIgp9"
}
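# The config data above is the base64 encoding of {"prefix": "123"}.
# A sketch of producing an equivalent payload with Terraform's built-in
# functions instead of a hard-coded string (note that jsonencode emits
# compact JSON, so the stored bytes differ slightly from the pretty-printed
# literal above):
#
#   data = base64encode(jsonencode({ prefix = "123" }))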
resource "docker_secret" "service_secret" {
name = "tftest-mysecret"
data = "ewogICJrZXkiOiAiUVdFUlRZIgp9"
}
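# The secret data above is the base64 encoding of {"key": "QWERTY"}.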
resource "docker_network" "test_network" {
name = "tftest-network"
driver = "overlay"
}
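# An overlay network is used so the swarm service below can attach to it via
# task_spec.networks. The dedicated ingress network mentioned in the commit
# notes would look similar but with ingress = true (assuming the provider
# version exposes that attribute).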
resource "docker_service" "foo" {
name = "tftest-service-basic"
labels {
label = "servicelabel"
value = "true"
}
task_spec {
container_spec {
image = "127.0.0.1:15000/tftest-service:v1"
labels {
label = "foo"
value = "bar"
}
command = ["ls"]
args = ["-las"]
hostname = "my-fancy-service"
env = {
MYFOO = "BAR"
URI = "/api-call?param1=value1"
}
dir = "/root"
user = "root"
groups = ["docker", "foogroup"]
privileges {
se_linux_context {
disable = true
user = "user-label"
role = "role-label"
type = "type-label"
level = "level-label"
}
}
read_only = true
mounts {
target = "/mount/test"
source = docker_volume.test_volume.name
type = "volume"
read_only = true
volume_options {
no_copy = true
labels {
label = "foo"
value = "bar"
}
driver_name = "random-driver"
driver_options = {
op1 = "val1"
}
}
}
stop_signal = "SIGTERM"
stop_grace_period = "10s"
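      # The healthcheck uses Docker's exec form ("CMD", ...); interval,
      # timeout and the other durations in this file are Go-style duration
      # strings such as "5s" or "2s".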
      healthcheck {
        test     = ["CMD", "curl", "-f", "localhost:8080/health"]
        interval = "5s"
        timeout  = "2s"
        retries  = 4
      }
      hosts {
        host = "testhost"
        ip   = "10.0.1.0"
      }
      dns_config {
        nameservers = ["8.8.8.8"]
        search      = ["example.org"]
        options     = ["timeout:3"]
      }
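      # The secrets and configs blocks below mount the docker_secret and
      # docker_config resources defined above as files inside each task
      # container at the given file_name paths.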
      secrets {
        secret_id   = docker_secret.service_secret.id
        secret_name = docker_secret.service_secret.name
        file_name   = "/secrets.json"
        file_uid    = "0"
        file_gid    = "0"
        file_mode   = 0777
      }
      configs {
        config_id   = docker_config.service_config.id
        config_name = docker_config.service_config.name
        file_name   = "/configs.json"
      }
    }
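    # Limits are expressed in Docker's native units: nano_cpus are billionths
    # of a CPU (1000000 is roughly 0.001 CPU) and memory_bytes is plain bytes
    # (536870912 is 512 MiB).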
    resources {
      limits {
        nano_cpus    = 1000000
        memory_bytes = 536870912
      }
    }
    restart_policy {
      condition    = "on-failure"
      delay        = "3s"
      max_attempts = 4
      window       = "10s"
    }
    placement {
      constraints = [
        "node.role==manager",
      ]
      prefs = [
        "spread=node.role.manager",
      ]
      platforms {
        architecture = "amd64"
        os           = "linux"
      }
      max_replicas = 2
    }
    force_update = 0
    runtime      = "container"
    networks     = [docker_network.test_network.id]
    log_driver {
      name = "json-file"
      options = {
        max-size = "10m"
        max-file = "3"
      }
    }
  }
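  # Two replicas of the task are kept running. The alternative scheduling
  # mode would be a global service, e.g. mode { global = true }, assuming
  # that form matches the provider schema in use.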
  mode {
    replicated {
      replicas = 2
    }
  }
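  # update_config and rollback_config take the same arguments: delay and
  # monitor are Go-style durations, and max_failure_ratio is the fraction of
  # failed tasks tolerated during the rollout, passed as a string.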
  update_config {
    parallelism       = 2
    delay             = "10s"
    failure_action    = "pause"
    monitor           = "5s"
    max_failure_ratio = "0.1"
    order             = "start-first"
  }
  rollback_config {
    parallelism       = 2
    delay             = "5ms"
    failure_action    = "pause"
    monitor           = "10h"
    max_failure_ratio = "0.9"
    order             = "stop-first"
  }
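  # The service VIP is published on port 8080 through the swarm routing mesh
  # (publish_mode "ingress"); "host" would instead bind the port on each node
  # running a task, and mode "dnsrr" would replace the VIP with DNS round
  # robin.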
  endpoint_spec {
    mode = "vip"
    ports {
      name           = "random"
      protocol       = "tcp"
      target_port    = "8080"
      published_port = "8080"
      publish_mode   = "ingress"
    }
  }
}
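
# This spec is loaded by test name through the acceptance-test config helper
# described in the commit notes above; the Go test then compares the created
# service against `docker service inspect` JSON.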