Migrate/docker-client (#70)

Migrates to the official Docker client to connect to the Docker API/daemon. Closes #32
Manuel Vogel 2018-07-03 17:30:53 +02:00 committed by GitHub
parent e372355ab7
commit 1f563cc913
366 changed files with 19695 additions and 11476 deletions
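
For orientation, here is a minimal, self-contained sketch of the client-construction pattern this commit adopts (`NewClientWithOpts` with a pinned API version, followed by a `Ping`), as seen in the hunks below. The socket path and error handling are illustrative only, not taken from the provider.

```go
package main

import (
	"context"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// Pin the API version the provider targets (the commit uses "1.37").
	c, err := client.NewClientWithOpts(
		client.WithHost("unix:///var/run/docker.sock"), // illustrative host
		client.WithVersion("1.37"),
	)
	if err != nil {
		log.Fatalf("error initializing Docker client: %s", err)
	}

	// The provider pings the daemon once after construction to fail fast.
	if _, err := c.Ping(context.Background()); err != nil {
		log.Fatalf("error pinging Docker server: %s", err)
	}
	log.Println("connected to the Docker daemon")
}
```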

3
.gitignore vendored
View file

@ -31,3 +31,6 @@ website/vendor
!command/test-fixtures/**/.terraform/
scripts/testing/auth
scripts/testing/certs
# build outputs
results

View file

@ -25,10 +25,11 @@ install:
- sudo service docker restart
script:
- make test
- make testacc
- make vendor-status
- make vet
- make test
- make testacc
- make compile
- make website-test
branches:

View file

@ -16,6 +16,9 @@ test: fmtcheck
testacc: fmtcheck
@sh -c "'$(CURDIR)/scripts/runAccTests.sh'"
compile: fmtcheck
@sh -c "'$(CURDIR)/scripts/compile.sh'"
vet:
@echo "go vet ."
@go vet $$(go list ./... | grep -v vendor/) ; if [ $$? -eq 1 ]; then \

View file

@ -5,12 +5,15 @@ import (
"path/filepath"
"strings"
dc "github.com/fsouza/go-dockerclient"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
)
// DockerConfig is the structure that stores the configuration to talk to a
const apiVersion = "1.37"
// Config is the structure that stores the configuration to talk to a
// Docker API compatible host.
type DockerConfig struct {
type Config struct {
Host string
Ca string
Cert string
@ -18,8 +21,8 @@ type DockerConfig struct {
CertPath string
}
// NewClient() returns a new Docker client.
func (c *DockerConfig) NewClient() (*dc.Client, error) {
// NewClient returns a new Docker client.
func (c *Config) NewClient() (*client.Client, error) {
if c.Ca != "" || c.Cert != "" || c.Key != "" {
if c.Ca == "" || c.Cert == "" || c.Key == "" {
return nil, fmt.Errorf("ca_material, cert_material, and key_material must be specified")
@ -29,7 +32,11 @@ func (c *DockerConfig) NewClient() (*dc.Client, error) {
return nil, fmt.Errorf("cert_path must not be specified")
}
return dc.NewTLSClientFromBytes(c.Host, []byte(c.Cert), []byte(c.Key), []byte(c.Ca))
return client.NewClientWithOpts(
client.WithHost(c.Host),
client.WithTLSClientConfig(c.Ca, c.Cert, c.Key),
client.WithVersion(apiVersion),
)
}
if c.CertPath != "" {
@ -37,22 +44,29 @@ func (c *DockerConfig) NewClient() (*dc.Client, error) {
ca := filepath.Join(c.CertPath, "ca.pem")
cert := filepath.Join(c.CertPath, "cert.pem")
key := filepath.Join(c.CertPath, "key.pem")
return dc.NewTLSClient(c.Host, cert, key, ca)
return client.NewClientWithOpts(
client.WithHost(c.Host),
client.WithTLSClientConfig(ca, cert, key),
client.WithVersion(apiVersion),
)
}
// If there is no cert information, then just return the direct client
return dc.NewClient(c.Host)
return client.NewClientWithOpts(
client.WithHost(c.Host),
client.WithVersion(apiVersion),
)
}
// Data structure for holding data that we fetch from Docker.
type Data struct {
DockerImages map[string]*dc.APIImages
DockerImages map[string]*types.ImageSummary
}
// ProviderConfig for the custom registry provider
type ProviderConfig struct {
DockerClient *dc.Client
AuthConfigs *dc.AuthConfigurations
DockerClient *client.Client
AuthConfigs *AuthConfigs
}
// The registry address can be referenced in various places (registry auth, docker config file, image name)

View file

@ -171,10 +171,10 @@ func getImageDigest(registry, image, tag, username, password string, fallback bo
}
return digestResponse.Header.Get("Docker-Content-Digest"), nil
} else {
return "", fmt.Errorf("Bad credentials: " + resp.Status)
}
return "", fmt.Errorf("Bad credentials: " + resp.Status)
// Some unexpected status was given, return an error
default:
return "", fmt.Errorf("Got bad response from registry: " + resp.Status)

View file

@ -1,12 +1,18 @@
package docker
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/user"
"strings"
dc "github.com/fsouza/go-dockerclient"
"github.com/docker/docker/api/types"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
@ -107,7 +113,7 @@ func Provider() terraform.ResourceProvider {
}
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := DockerConfig{
config := Config{
Host: d.Get("host").(string),
Ca: d.Get("ca_material").(string),
Cert: d.Get("cert_material").(string),
@ -120,12 +126,13 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
return nil, fmt.Errorf("Error initializing Docker client: %s", err)
}
err = client.Ping()
ctx := context.Background()
_, err = client.Ping(ctx)
if err != nil {
return nil, fmt.Errorf("Error pinging Docker server: %s", err)
}
authConfigs := &dc.AuthConfigurations{}
authConfigs := &AuthConfigs{}
if v, ok := d.GetOk("registry_auth"); ok {
authConfigs, err = providerSetToRegistryAuth(v.(*schema.Set))
@ -143,15 +150,31 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
return &providerConfig, nil
}
// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed.
var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg")
// AuthConfigs represents authentication options to use for the
// PushImage method accommodating the new X-Registry-Config header
type AuthConfigs struct {
Configs map[string]types.AuthConfig `json:"configs"`
}
// dockerConfig represents a registry authentication configuration from the
// .dockercfg file.
type dockerConfig struct {
Auth string `json:"auth"`
Email string `json:"email"`
}
// Take the given registry_auth schemas and return a map of registry auth configurations
func providerSetToRegistryAuth(authSet *schema.Set) (*dc.AuthConfigurations, error) {
authConfigs := dc.AuthConfigurations{
Configs: make(map[string]dc.AuthConfiguration),
func providerSetToRegistryAuth(authSet *schema.Set) (*AuthConfigs, error) {
authConfigs := AuthConfigs{
Configs: make(map[string]types.AuthConfig),
}
for _, authInt := range authSet.List() {
auth := authInt.(map[string]interface{})
authConfig := dc.AuthConfiguration{}
authConfig := types.AuthConfig{}
authConfig.ServerAddress = normalizeRegistryAddress(auth["address"].(string))
// For each registry_auth block, generate an AuthConfiguration using either
@ -174,7 +197,7 @@ func providerSetToRegistryAuth(authSet *schema.Set) (*dc.AuthConfigurations, err
return nil, fmt.Errorf("Error opening docker registry config file: %v", err)
}
auths, err := dc.NewAuthConfigurations(r)
auths, err := newAuthConfigurations(r)
if err != nil {
return nil, fmt.Errorf("Error parsing docker registry config json: %v", err)
}
@ -199,3 +222,68 @@ func providerSetToRegistryAuth(authSet *schema.Set) (*dc.AuthConfigurations, err
return &authConfigs, nil
}
// newAuthConfigurations returns AuthConfigs from a JSON encoded string in the
// same format as the .dockercfg file.
func newAuthConfigurations(r io.Reader) (*AuthConfigs, error) {
var auth *AuthConfigs
confs, err := parseDockerConfig(r)
if err != nil {
return nil, err
}
auth, err = authConfigs(confs)
if err != nil {
return nil, err
}
return auth, nil
}
// parseDockerConfig parses the docker config file for auths
func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
buf := new(bytes.Buffer)
buf.ReadFrom(r)
byteData := buf.Bytes()
confsWrapper := struct {
Auths map[string]dockerConfig `json:"auths"`
}{}
if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
if len(confsWrapper.Auths) > 0 {
return confsWrapper.Auths, nil
}
}
var confs map[string]dockerConfig
if err := json.Unmarshal(byteData, &confs); err != nil {
return nil, err
}
return confs, nil
}
// authConfigs converts a dockerConfigs map to a AuthConfigs object.
func authConfigs(confs map[string]dockerConfig) (*AuthConfigs, error) {
c := &AuthConfigs{
Configs: make(map[string]types.AuthConfig),
}
for reg, conf := range confs {
if conf.Auth == "" {
continue
}
data, err := base64.StdEncoding.DecodeString(conf.Auth)
if err != nil {
return nil, err
}
userpass := strings.SplitN(string(data), ":", 2)
if len(userpass) != 2 {
return nil, ErrCannotParseDockercfg
}
c.Configs[reg] = types.AuthConfig{
Email: conf.Email,
Username: userpass[0],
Password: userpass[1],
ServerAddress: reg,
Auth: conf.Auth,
}
}
return c, nil
}
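
To make the accepted input concrete, below is a small standalone sketch of the credential format these helpers parse: a config.json-style `auths` map whose `auth` field is base64("username:password"). The registry address and credentials are made up for illustration.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
	"strings"
)

func main() {
	// A config.json-style payload (the "auths" wrapper handled by parseDockerConfig).
	// Registry and credentials are illustrative only.
	raw := `{"auths": {"repo.example.com:8787": {"auth": "bXl1c2VyOm15cGFzcw==", "email": ""}}}`

	var wrapper struct {
		Auths map[string]struct {
			Auth  string `json:"auth"`
			Email string `json:"email"`
		} `json:"auths"`
	}
	if err := json.Unmarshal([]byte(raw), &wrapper); err != nil {
		log.Fatal(err)
	}

	for registry, conf := range wrapper.Auths {
		// The "auth" field is base64("username:password"), as authConfigs() expects.
		data, err := base64.StdEncoding.DecodeString(conf.Auth)
		if err != nil {
			log.Fatal(err)
		}
		userpass := strings.SplitN(string(data), ":", 2)
		if len(userpass) != 2 {
			log.Fatal("cannot parse dockercfg auth entry")
		}
		fmt.Printf("registry=%s user=%s\n", registry, userpass[0])
	}
}
```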

View file

@ -4,8 +4,9 @@ import (
"encoding/base64"
"log"
"context"
"github.com/docker/docker/api/types/swarm"
dc "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/terraform/helper/schema"
)
@ -39,16 +40,14 @@ func resourceDockerConfigCreate(d *schema.ResourceData, meta interface{}) error
client := meta.(*ProviderConfig).DockerClient
data, _ := base64.StdEncoding.DecodeString(d.Get("data").(string))
createConfigOpts := dc.CreateConfigOptions{
ConfigSpec: swarm.ConfigSpec{
Annotations: swarm.Annotations{
Name: d.Get("name").(string),
},
Data: data,
configSpec := swarm.ConfigSpec{
Annotations: swarm.Annotations{
Name: d.Get("name").(string),
},
Data: data,
}
config, err := client.CreateConfig(createConfigOpts)
config, err := client.ConfigCreate(context.Background(), configSpec)
if err != nil {
return err
}
@ -59,15 +58,12 @@ func resourceDockerConfigCreate(d *schema.ResourceData, meta interface{}) error
func resourceDockerConfigRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
config, err := client.InspectConfig(d.Id())
config, _, err := client.ConfigInspectWithRaw(context.Background(), d.Id())
if err != nil {
if _, ok := err.(*dc.NoSuchConfig); ok {
log.Printf("[WARN] Config (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
return err
log.Printf("[WARN] Config (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
d.SetId(config.ID)
return nil
@ -75,9 +71,7 @@ func resourceDockerConfigRead(d *schema.ResourceData, meta interface{}) error {
func resourceDockerConfigDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
err := client.RemoveConfig(dc.RemoveConfigOptions{
ID: d.Id(),
})
err := client.ConfigRemove(context.Background(), d.Id())
if err != nil {
return err
}

View file

@ -4,6 +4,7 @@ import (
"fmt"
"testing"
"context"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@ -82,9 +83,9 @@ func testCheckDockerConfigDestroy(s *terraform.State) error {
}
id := rs.Primary.Attributes["id"]
config, err := client.InspectConfig(id)
_, _, err := client.ConfigInspectWithRaw(context.Background(), id)
if err == nil || config != nil {
if err == nil {
return fmt.Errorf("Config with id '%s' still exists", id)
}
return nil

View file

@ -1,10 +1,6 @@
package docker
import (
"fmt"
"regexp"
"github.com/hashicorp/terraform/helper/schema"
)
@ -443,13 +439,3 @@ func resourceDockerContainer() *schema.Resource {
},
}
}
func validateDockerContainerPath(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[a-zA-Z]:\\|^/`).MatchString(value) {
errors = append(errors, fmt.Errorf("%q must be an absolute path", k))
}
return
}

View file

@ -8,8 +8,15 @@ import (
"strconv"
"time"
dc "github.com/fsouza/go-dockerclient"
"context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
"github.com/hashicorp/terraform/helper/schema"
"math/rand"
)
var (
@ -33,27 +40,19 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
image = image + ":latest"
}
// The awesome, wonderful, splendiferous, sensical
// Docker API now lets you specify a HostConfig in
// CreateContainerOptions, but in my testing it still only
// actually applies HostConfig options set in StartContainer.
// How cool is that?
createOpts := dc.CreateContainerOptions{
Name: d.Get("name").(string),
Config: &dc.Config{
Image: image,
Hostname: d.Get("hostname").(string),
Domainname: d.Get("domainname").(string),
},
config := &container.Config{
Image: image,
Hostname: d.Get("hostname").(string),
Domainname: d.Get("domainname").(string),
}
if v, ok := d.GetOk("env"); ok {
createOpts.Config.Env = stringSetToStringSlice(v.(*schema.Set))
config.Env = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("command"); ok {
createOpts.Config.Cmd = stringListToStringSlice(v.([]interface{}))
for _, v := range createOpts.Config.Cmd {
config.Cmd = stringListToStringSlice(v.([]interface{}))
for _, v := range config.Cmd {
if v == "" {
return fmt.Errorf("values for command may not be empty")
}
@ -61,21 +60,21 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
}
if v, ok := d.GetOk("entrypoint"); ok {
createOpts.Config.Entrypoint = stringListToStringSlice(v.([]interface{}))
config.Entrypoint = stringListToStringSlice(v.([]interface{}))
}
if v, ok := d.GetOk("user"); ok {
createOpts.Config.User = v.(string)
config.User = v.(string)
}
exposedPorts := map[dc.Port]struct{}{}
portBindings := map[dc.Port][]dc.PortBinding{}
exposedPorts := map[nat.Port]struct{}{}
portBindings := map[nat.Port][]nat.PortBinding{}
if v, ok := d.GetOk("ports"); ok {
exposedPorts, portBindings = portSetToDockerPorts(v.(*schema.Set))
}
if len(exposedPorts) != 0 {
createOpts.Config.ExposedPorts = exposedPorts
config.ExposedPorts = exposedPorts
}
extraHosts := []string{}
@ -83,7 +82,7 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
extraHosts = extraHostsSetToDockerExtraHosts(v.(*schema.Set))
}
extraUlimits := []dc.ULimit{}
extraUlimits := []*units.Ulimit{}
if v, ok := d.GetOk("ulimit"); ok {
extraUlimits = ulimitsToDockerUlimits(v.(*schema.Set))
}
@ -98,21 +97,21 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
}
}
if len(volumes) != 0 {
createOpts.Config.Volumes = volumes
config.Volumes = volumes
}
if v, ok := d.GetOk("labels"); ok {
createOpts.Config.Labels = mapTypeMapValsToString(v.(map[string]interface{}))
config.Labels = mapTypeMapValsToString(v.(map[string]interface{}))
}
hostConfig := &dc.HostConfig{
hostConfig := &container.HostConfig{
Privileged: d.Get("privileged").(bool),
PublishAllPorts: d.Get("publish_all_ports").(bool),
RestartPolicy: dc.RestartPolicy{
RestartPolicy: container.RestartPolicy{
Name: d.Get("restart").(string),
MaximumRetryCount: d.Get("max_retry_count").(int),
},
LogConfig: dc.LogConfig{
LogConfig: container.LogConfig{
Type: d.Get("log_driver").(string),
},
}
@ -182,36 +181,29 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
hostConfig.LogConfig.Config = mapTypeMapValsToString(v.(map[string]interface{}))
}
networkingConfig := &network.NetworkingConfig{}
if v, ok := d.GetOk("network_mode"); ok {
hostConfig.NetworkMode = v.(string)
hostConfig.NetworkMode = container.NetworkMode(v.(string))
}
createOpts.HostConfig = hostConfig
var retContainer container.ContainerCreateCreatedBody
var retContainer *dc.Container
if retContainer, err = client.CreateContainer(createOpts); err != nil {
if retContainer, err = client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, d.Get("name").(string)); err != nil {
return fmt.Errorf("Unable to create container: %s", err)
}
if retContainer == nil {
return fmt.Errorf("Returned container is nil")
}
d.SetId(retContainer.ID)
if v, ok := d.GetOk("networks"); ok {
var connectionOpts dc.NetworkConnectionOptions
endpointConfig := &network.EndpointSettings{}
if v, ok := d.GetOk("network_alias"); ok {
endpointConfig := &dc.EndpointConfig{}
endpointConfig.Aliases = stringSetToStringSlice(v.(*schema.Set))
connectionOpts = dc.NetworkConnectionOptions{Container: retContainer.ID, EndpointConfig: endpointConfig}
} else {
connectionOpts = dc.NetworkConnectionOptions{Container: retContainer.ID}
}
for _, rawNetwork := range v.(*schema.Set).List() {
network := rawNetwork.(string)
if err := client.ConnectNetwork(network, connectionOpts); err != nil {
return fmt.Errorf("Unable to connect to network '%s': %s", network, err)
networkID := rawNetwork.(string)
if err := client.NetworkConnect(context.Background(), networkID, retContainer.ID, endpointConfig); err != nil {
return fmt.Errorf("Unable to connect to network '%s': %s", networkID, err)
}
}
}
@ -246,19 +238,18 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
return fmt.Errorf("Error creating tar archive: %s", err)
}
uploadOpts := dc.UploadToContainerOptions{
InputStream: bytes.NewReader(buf.Bytes()),
Path: "/",
}
if err := client.UploadToContainer(retContainer.ID, uploadOpts); err != nil {
dstPath := "/"
uploadContent := bytes.NewReader(buf.Bytes())
options := types.CopyToContainerOptions{}
if err := client.CopyToContainer(context.Background(), retContainer.ID, dstPath, uploadContent, options); err != nil {
return fmt.Errorf("Unable to upload volume content: %s", err)
}
}
}
creationTime = time.Now()
if err := client.StartContainer(retContainer.ID, nil); err != nil {
options := types.ContainerStartOptions{}
if err := client.ContainerStart(context.Background(), retContainer.ID, options); err != nil {
return fmt.Errorf("Unable to start container: %s", err)
}
@ -278,7 +269,7 @@ func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error
return nil
}
var container *dc.Container
var container types.ContainerJSON
// TODO fix this with statefunc
loops := 1 // if it hasn't just been created, don't delay
@ -288,7 +279,7 @@ func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error
sleepTime := 500 * time.Millisecond
for i := loops; i > 0; i-- {
container, err = client.InspectContainer(apiContainer.ID)
container, err = client.ContainerInspect(context.Background(), apiContainer.ID)
if err != nil {
return fmt.Errorf("Error inspecting container %s: %s", apiContainer.ID, err)
}
@ -302,7 +293,11 @@ func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error
return resourceDockerContainerDelete(d, meta)
}
if container.State.FinishedAt.After(creationTime) {
finishTime, err := time.Parse(time.RFC3339, container.State.FinishedAt)
if err != nil {
return fmt.Errorf("Container finish time could not be parsed: %s", container.State.FinishedAt)
}
if finishTime.After(creationTime) {
// It exited immediately, so error out so dependent containers
// aren't started
resourceDockerContainerDelete(d, meta)
@ -338,19 +333,20 @@ func resourceDockerContainerDelete(d *schema.ResourceData, meta interface{}) err
// Stop the container before removing if destroy_grace_seconds is defined
if d.Get("destroy_grace_seconds").(int) > 0 {
var timeout = uint(d.Get("destroy_grace_seconds").(int))
if err := client.StopContainer(d.Id(), timeout); err != nil {
mapped := int32(d.Get("destroy_grace_seconds").(int))
timeoutInSeconds := rand.Int31n(mapped)
timeout := time.Duration(time.Duration(timeoutInSeconds) * time.Second)
if err := client.ContainerStop(context.Background(), d.Id(), &timeout); err != nil {
return fmt.Errorf("Error stopping container %s: %s", d.Id(), err)
}
}
removeOpts := dc.RemoveContainerOptions{
ID: d.Id(),
removeOpts := types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
}
if err := client.RemoveContainer(removeOpts); err != nil {
if err := client.ContainerRemove(context.Background(), d.Id(), removeOpts); err != nil {
return fmt.Errorf("Error deleting container %s: %s", d.Id(), err)
}
@ -398,8 +394,8 @@ func mapTypeMapValsToStringSlice(typeMap map[string]interface{}) []string {
return mapped
}
func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) {
apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true})
func fetchDockerContainer(ID string, client *client.Client) (*types.Container, error) {
apiContainers, err := client.ContainerList(context.Background(), types.ContainerListOptions{All: true})
if err != nil {
return nil, fmt.Errorf("Error fetching container information from Docker: %s\n", err)
@ -414,23 +410,23 @@ func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, erro
return nil, nil
}
func portSetToDockerPorts(ports *schema.Set) (map[dc.Port]struct{}, map[dc.Port][]dc.PortBinding) {
retExposedPorts := map[dc.Port]struct{}{}
retPortBindings := map[dc.Port][]dc.PortBinding{}
func portSetToDockerPorts(ports *schema.Set) (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding) {
retExposedPorts := map[nat.Port]struct{}{}
retPortBindings := map[nat.Port][]nat.PortBinding{}
for _, portInt := range ports.List() {
port := portInt.(map[string]interface{})
internal := port["internal"].(int)
protocol := port["protocol"].(string)
exposedPort := dc.Port(strconv.Itoa(internal) + "/" + protocol)
exposedPort := nat.Port(strconv.Itoa(internal) + "/" + protocol)
retExposedPorts[exposedPort] = struct{}{}
external, extOk := port["external"].(int)
ip, ipOk := port["ip"].(string)
if extOk {
portBinding := dc.PortBinding{
portBinding := nat.PortBinding{
HostPort: strconv.Itoa(external),
}
if ipOk {
@ -443,12 +439,12 @@ func portSetToDockerPorts(ports *schema.Set) (map[dc.Port]struct{}, map[dc.Port]
return retExposedPorts, retPortBindings
}
func ulimitsToDockerUlimits(extraUlimits *schema.Set) []dc.ULimit {
retExtraUlimits := []dc.ULimit{}
func ulimitsToDockerUlimits(extraUlimits *schema.Set) []*units.Ulimit {
retExtraUlimits := []*units.Ulimit{}
for _, ulimitInt := range extraUlimits.List() {
ulimits := ulimitInt.(map[string]interface{})
u := dc.ULimit{
u := &units.Ulimit{
Name: ulimits["name"].(string),
Soft: int64(ulimits["soft"].(int)),
Hard: int64(ulimits["hard"].(int)),
@ -508,8 +504,8 @@ func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []strin
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, nil
}
func deviceSetToDockerDevices(devices *schema.Set) []dc.Device {
retDevices := []dc.Device{}
func deviceSetToDockerDevices(devices *schema.Set) []container.DeviceMapping {
retDevices := []container.DeviceMapping{}
for _, deviceInt := range devices.List() {
deviceMap := deviceInt.(map[string]interface{})
hostPath := deviceMap["host_path"].(string)
@ -524,7 +520,7 @@ func deviceSetToDockerDevices(devices *schema.Set) []dc.Device {
permissions = "rwm"
}
device := dc.Device{
device := container.DeviceMapping{
PathOnHost: hostPath,
PathInContainer: containerPath,
CgroupPermissions: permissions,

View file

@ -4,18 +4,18 @@ import (
"archive/tar"
"bytes"
"fmt"
"os"
"strconv"
"strings"
"testing"
dc "github.com/fsouza/go-dockerclient"
"context"
"github.com/docker/docker/api/types"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDockerContainer_basic(t *testing.T) {
var c dc.Container
var c types.ContainerJSON
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -54,7 +54,7 @@ func TestAccDockerContainerPath_validation(t *testing.T) {
}
func TestAccDockerContainer_volume(t *testing.T) {
var c dc.Container
var c types.ContainerJSON
testCheck := func(*terraform.State) error {
if len(c.Mounts) != 1 {
@ -96,7 +96,7 @@ func TestAccDockerContainer_volume(t *testing.T) {
}
func TestAccDockerContainer_customized(t *testing.T) {
var c dc.Container
var c types.ContainerJSON
testCheck := func(*terraform.State) error {
if len(c.Config.Entrypoint) < 3 ||
@ -257,24 +257,18 @@ func TestAccDockerContainer_customized(t *testing.T) {
}
func TestAccDockerContainer_upload(t *testing.T) {
var c dc.Container
var c types.ContainerJSON
testCheck := func(*terraform.State) error {
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
buf := new(bytes.Buffer)
opts := dc.DownloadFromContainerOptions{
OutputStream: buf,
Path: "/terraform/test.txt",
}
if err := client.DownloadFromContainer(c.ID, opts); err != nil {
srcPath := "/terraform/test.txt"
r, _, err := client.CopyFromContainer(context.Background(), c.ID, srcPath)
if err != nil {
return fmt.Errorf("Unable to download a file from container: %s", err)
}
r := bytes.NewReader(buf.Bytes())
tr := tar.NewReader(r)
if header, err := tr.Next(); err != nil {
return fmt.Errorf("Unable to read content of tar archive: %s", err)
} else {
@ -311,43 +305,32 @@ func TestAccDockerContainer_upload(t *testing.T) {
}
func TestAccDockerContainer_device(t *testing.T) {
var c dc.Container
var c types.ContainerJSON
testCheck := func(*terraform.State) error {
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
createExecOpts := dc.CreateExecOptions{
Cmd: []string{"dd", "if=/dev/zero_test", "of=/tmp/test.txt", "count=10", "bs=1"},
Container: c.ID,
createExecOpts := types.ExecConfig{
Cmd: []string{"dd", "if=/dev/zero_test", "of=/tmp/test.txt", "count=10", "bs=1"},
}
exec, err := client.CreateExec(createExecOpts)
exec, err := client.ContainerExecCreate(context.Background(), c.ID, createExecOpts)
if err != nil {
return fmt.Errorf("Unable to create a exec instance on container: %s", err)
}
startExecOpts := dc.StartExecOptions{
OutputStream: os.Stdout,
ErrorStream: os.Stdout,
}
if err := client.StartExec(exec.ID, startExecOpts); err != nil {
startExecOpts := types.ExecStartCheck{}
if err := client.ContainerExecStart(context.Background(), exec.ID, startExecOpts); err != nil {
return fmt.Errorf("Unable to run exec a instance on container: %s", err)
}
buf := new(bytes.Buffer)
downloadFileOpts := dc.DownloadFromContainerOptions{
OutputStream: buf,
Path: "/tmp/test.txt",
}
if err := client.DownloadFromContainer(c.ID, downloadFileOpts); err != nil {
srcPath := "/tmp/test.txt"
out, _, err := client.CopyFromContainer(context.Background(), c.ID, srcPath)
if err != nil {
return fmt.Errorf("Unable to download a file from container: %s", err)
}
r := bytes.NewReader(buf.Bytes())
tr := tar.NewReader(r)
tr := tar.NewReader(out)
if _, err := tr.Next(); err != nil {
return fmt.Errorf("Unable to read content of tar archive: %s", err)
}
@ -383,7 +366,7 @@ func TestAccDockerContainer_device(t *testing.T) {
})
}
func testAccContainerRunning(n string, container *dc.Container) resource.TestCheckFunc {
func testAccContainerRunning(n string, container *types.ContainerJSON) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@ -395,18 +378,18 @@ func testAccContainerRunning(n string, container *dc.Container) resource.TestChe
}
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
containers, err := client.ListContainers(dc.ListContainersOptions{})
containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
return err
}
for _, c := range containers {
if c.ID == rs.Primary.ID {
inspected, err := client.InspectContainer(c.ID)
inspected, err := client.ContainerInspect(context.Background(), c.ID)
if err != nil {
return fmt.Errorf("Container could not be inspected: %s", err)
}
*container = *inspected
*container = inspected
return nil
}
}

View file

@ -1,10 +1,16 @@
package docker
import (
"context"
"fmt"
"log"
"strings"
dc "github.com/fsouza/go-dockerclient"
"bytes"
"encoding/base64"
"encoding/json"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/hashicorp/terraform/helper/schema"
)
@ -18,7 +24,7 @@ func resourceDockerImageCreate(d *schema.ResourceData, meta interface{}) error {
d.SetId(apiImage.ID + d.Get("name").(string))
d.Set("latest", apiImage.ID)
return nil
return resourceDockerImageRead(d, meta)
}
func resourceDockerImageRead(d *schema.ResourceData, meta interface{}) error {
@ -49,7 +55,7 @@ func resourceDockerImageUpdate(d *schema.ResourceData, meta interface{}) error {
d.Set("latest", apiImage.ID)
return nil
return resourceDockerImageRead(d, meta)
}
func resourceDockerImageDelete(d *schema.ResourceData, meta interface{}) error {
@ -62,7 +68,7 @@ func resourceDockerImageDelete(d *schema.ResourceData, meta interface{}) error {
return nil
}
func searchLocalImages(data Data, imageName string) *dc.APIImages {
func searchLocalImages(data Data, imageName string) *types.ImageSummary {
if apiImage, ok := data.DockerImages[imageName]; ok {
return apiImage
}
@ -73,7 +79,7 @@ func searchLocalImages(data Data, imageName string) *dc.APIImages {
return nil
}
func removeImage(d *schema.ResourceData, client *dc.Client) error {
func removeImage(d *schema.ResourceData, client *client.Client) error {
var data Data
if keepLocally := d.Get("keep_locally").(bool); keepLocally {
@ -92,23 +98,24 @@ func removeImage(d *schema.ResourceData, client *dc.Client) error {
foundImage := searchLocalImages(data, imageName)
if foundImage != nil {
err := client.RemoveImage(foundImage.ID)
imageDeleteResponseItems, err := client.ImageRemove(context.Background(), foundImage.ID, types.ImageRemoveOptions{})
if err != nil {
return err
}
log.Printf("[INFO] Deleted image items: %v", imageDeleteResponseItems)
}
return nil
}
func fetchLocalImages(data *Data, client *dc.Client) error {
images, err := client.ListImages(dc.ListImagesOptions{All: false})
func fetchLocalImages(data *Data, client *client.Client) error {
images, err := client.ImageList(context.Background(), types.ImageListOptions{All: false})
if err != nil {
return fmt.Errorf("Unable to list Docker images: %s", err)
}
if data.DockerImages == nil {
data.DockerImages = make(map[string]*dc.APIImages)
data.DockerImages = make(map[string]*types.ImageSummary)
}
// Docker uses different nomenclatures in different places...sometimes a short
@ -125,11 +132,11 @@ func fetchLocalImages(data *Data, client *dc.Client) error {
return nil
}
func pullImage(data *Data, client *dc.Client, authConfig *dc.AuthConfigurations, image string) error {
func pullImage(data *Data, client *client.Client, authConfig *AuthConfigs, image string) error {
pullOpts := parseImageOptions(image)
// If a registry was specified in the image name, try to find auth for it
auth := dc.AuthConfiguration{}
auth := types.AuthConfig{}
if pullOpts.Registry != "" {
if authConfig, ok := authConfig.Configs[normalizeRegistryAddress(pullOpts.Registry)]; ok {
auth = authConfig
@ -141,16 +148,39 @@ func pullImage(data *Data, client *dc.Client, authConfig *dc.AuthConfigurations,
}
}
if err := client.PullImage(pullOpts, auth); err != nil {
return fmt.Errorf("Error pulling image %s: %s\n", image, err)
encodedJSON, err := json.Marshal(auth)
if err != nil {
return fmt.Errorf("error creating auth config: %s", err)
}
out, err := client.ImagePull(context.Background(), image, types.ImagePullOptions{
RegistryAuth: base64.URLEncoding.EncodeToString(encodedJSON),
})
if err != nil {
return fmt.Errorf("error pulling image %s: %s", image, err)
}
defer out.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(out)
s := buf.String()
log.Printf("[DEBUG] pulled image %v: %v", image, s)
return fetchLocalImages(data, client)
}
func parseImageOptions(image string) dc.PullImageOptions {
pullOpts := dc.PullImageOptions{}
type internalPullImageOptions struct {
Repository string `qs:"fromImage"`
Tag string
// Only required for Docker Engine 1.9 or 1.10 w/ Remote API < 1.21
// and Docker Engine < 1.9
// This parameter was removed in Docker Engine 1.11
Registry string
}
func parseImageOptions(image string) internalPullImageOptions {
pullOpts := internalPullImageOptions{}
splitImageName := strings.Split(image, ":")
switch len(splitImageName) {
@ -195,11 +225,11 @@ func parseImageOptions(image string) dc.PullImageOptions {
return pullOpts
}
func findImage(d *schema.ResourceData, client *dc.Client, authConfig *dc.AuthConfigurations) (*dc.APIImages, error) {
func findImage(d *schema.ResourceData, client *client.Client, authConfig *AuthConfigs) (*types.ImageSummary, error) {
var data Data
if err := fetchLocalImages(&data, client); err != nil {
return nil, err
}
//if err := fetchLocalImages(&data, client); err != nil {
// return nil, err
//} Is done in pullImage
imageName := d.Get("name").(string)
if imageName == "" {

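As an aside, the registry-credential handling introduced here (and reused in the service create/update hunks below) follows the SDK convention of JSON-marshalling a types.AuthConfig and base64-URL-encoding it into RegistryAuth. A standalone sketch with made-up registry and credentials:

```go
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatalf("error initializing Docker client: %s", err)
	}

	// Credentials and registry are illustrative only.
	auth := types.AuthConfig{
		Username:      "myuser",
		Password:      "mypass",
		ServerAddress: "repo.example.com:8787",
	}
	encodedJSON, err := json.Marshal(auth)
	if err != nil {
		log.Fatalf("error creating auth config: %s", err)
	}

	// ImagePull returns a progress stream that must be read (or closed) for the pull to finish.
	out, err := cli.ImagePull(context.Background(), "repo.example.com:8787/myimage:v1", types.ImagePullOptions{
		RegistryAuth: base64.URLEncoding.EncodeToString(encodedJSON),
	})
	if err != nil {
		log.Fatalf("error pulling image: %s", err)
	}
	defer out.Close()

	buf := new(bytes.Buffer)
	buf.ReadFrom(out) // drain the progress stream
	log.Printf("[DEBUG] pull output: %s", buf.String())
}
```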
View file

@ -1,12 +1,12 @@
package docker
import (
"context"
"fmt"
"os"
"regexp"
"testing"
dc "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@ -56,7 +56,7 @@ func TestAccDockerImage_destroy(t *testing.T) {
}
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
_, err := client.InspectImage(rs.Primary.Attributes["latest"])
_, _, err := client.ImageInspectWithRaw(context.Background(), rs.Primary.Attributes["latest"])
if err != nil {
return err
}
@ -132,11 +132,9 @@ func testAccDockerImageDestroy(s *terraform.State) error {
}
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
_, err := client.InspectImage(rs.Primary.Attributes["latest"])
_, _, err := client.ImageInspectWithRaw(context.Background(), rs.Primary.Attributes["latest"])
if err == nil {
return fmt.Errorf("Image still exists")
} else if err != dc.ErrNoSuchImage {
return err
}
}
return nil

View file

@ -3,16 +3,20 @@ package docker
import (
"fmt"
dc "github.com/fsouza/go-dockerclient"
"context"
"encoding/json"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/network"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"log"
"time"
)
func resourceDockerNetworkCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
createOpts := dc.CreateNetworkOptions{
Name: d.Get("name").(string),
}
createOpts := types.NetworkCreate{}
if v, ok := d.GetOk("check_duplicate"); ok {
createOpts.CheckDuplicate = v.(bool)
}
@ -20,13 +24,13 @@ func resourceDockerNetworkCreate(d *schema.ResourceData, meta interface{}) error
createOpts.Driver = v.(string)
}
if v, ok := d.GetOk("options"); ok {
createOpts.Options = v.(map[string]interface{})
createOpts.Options = mapTypeMapValsToString(v.(map[string]interface{}))
}
if v, ok := d.GetOk("internal"); ok {
createOpts.Internal = v.(bool)
}
ipamOpts := &dc.IPAMOptions{}
ipamOpts := &network.IPAM{}
ipamOptsSet := false
if v, ok := d.GetOk("ipam_driver"); ok {
ipamOpts.Driver = v.(string)
@ -41,46 +45,34 @@ func resourceDockerNetworkCreate(d *schema.ResourceData, meta interface{}) error
createOpts.IPAM = ipamOpts
}
var err error
var retNetwork *dc.Network
if retNetwork, err = client.CreateNetwork(createOpts); err != nil {
retNetwork := types.NetworkCreateResponse{}
retNetwork, err := client.NetworkCreate(context.Background(), d.Get("name").(string), createOpts)
if err != nil {
return fmt.Errorf("Unable to create network: %s", err)
}
if retNetwork == nil {
return fmt.Errorf("Returned network is nil")
}
d.SetId(retNetwork.ID)
d.Set("name", retNetwork.Name)
d.Set("scope", retNetwork.Scope)
d.Set("driver", retNetwork.Driver)
d.Set("options", retNetwork.Options)
// The 'internal' property is not send back when create network
d.Set("internal", createOpts.Internal)
return nil
return resourceDockerNetworkRead(d, meta)
}
func resourceDockerNetworkRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
log.Printf("[INFO] Waiting for network: '%s' to expose all fields: max '%v seconds'", d.Id(), 30)
var err error
var retNetwork *dc.Network
if retNetwork, err = client.NetworkInfo(d.Id()); err != nil {
if _, ok := err.(*dc.NoSuchNetwork); !ok {
return fmt.Errorf("Unable to inspect network: %s", err)
}
}
if retNetwork == nil {
d.SetId("")
return nil
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
Target: []string{"all_fields", "removed"},
Refresh: resourceDockerNetworkReadRefreshFunc(d, meta),
Timeout: 30 * time.Second,
MinTimeout: 5 * time.Second,
Delay: 2 * time.Second,
}
d.Set("scope", retNetwork.Scope)
d.Set("driver", retNetwork.Driver)
d.Set("options", retNetwork.Options)
d.Set("internal", retNetwork.Internal)
// Wait, catching any errors
_, err := stateConf.WaitForState()
if err != nil {
return err
}
return nil
}
@ -88,23 +80,21 @@ func resourceDockerNetworkRead(d *schema.ResourceData, meta interface{}) error {
func resourceDockerNetworkDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
if err := client.RemoveNetwork(d.Id()); err != nil {
if _, ok := err.(*dc.NoSuchNetwork); !ok {
return fmt.Errorf("Error deleting network %s: %s", d.Id(), err)
}
if err := client.NetworkRemove(context.Background(), d.Id()); err != nil {
return fmt.Errorf("Error deleting network %s: %s", d.Id(), err)
}
d.SetId("")
return nil
}
func ipamConfigSetToIpamConfigs(ipamConfigSet *schema.Set) []dc.IPAMConfig {
ipamConfigs := make([]dc.IPAMConfig, ipamConfigSet.Len())
func ipamConfigSetToIpamConfigs(ipamConfigSet *schema.Set) []network.IPAMConfig {
ipamConfigs := make([]network.IPAMConfig, ipamConfigSet.Len())
for i, ipamConfigInt := range ipamConfigSet.List() {
ipamConfigRaw := ipamConfigInt.(map[string]interface{})
ipamConfig := dc.IPAMConfig{}
ipamConfig := network.IPAMConfig{}
ipamConfig.Subnet = ipamConfigRaw["subnet"].(string)
ipamConfig.IPRange = ipamConfigRaw["ip_range"].(string)
ipamConfig.Gateway = ipamConfigRaw["gateway"].(string)
@ -120,3 +110,38 @@ func ipamConfigSetToIpamConfigs(ipamConfigSet *schema.Set) []dc.IPAMConfig {
return ipamConfigs
}
func resourceDockerNetworkReadRefreshFunc(
d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
client := meta.(*ProviderConfig).DockerClient
networkID := d.Id()
retNetwork, _, err := client.NetworkInspectWithRaw(context.Background(), networkID, types.NetworkInspectOptions{})
if err != nil {
log.Printf("[WARN] Network (%s) not found, removing from state", networkID)
d.SetId("")
return networkID, "removed", err
}
jsonObj, _ := json.MarshalIndent(retNetwork, "", "\t")
log.Printf("[DEBUG] Docker network inspect: %s", jsonObj)
d.Set("internal", retNetwork.Internal)
d.Set("driver", retNetwork.Driver)
d.Set("scope", retNetwork.Scope)
if retNetwork.Scope == "overlay" {
if retNetwork.Options != nil && len(retNetwork.Options) != 0 {
d.Set("options", retNetwork.Options)
} else {
log.Printf("[DEBUG] options: %v not exposed", retNetwork.Options)
return networkID, "pending", nil
}
} else {
d.Set("options", retNetwork.Options)
}
log.Println("[DEBUG] all network fields exposed")
return networkID, "all_fields", nil
}
}

View file

@ -4,13 +4,14 @@ import (
"fmt"
"testing"
dc "github.com/fsouza/go-dockerclient"
"context"
"github.com/docker/docker/api/types"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDockerNetwork_basic(t *testing.T) {
var n dc.Network
var n types.NetworkResource
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -26,7 +27,7 @@ func TestAccDockerNetwork_basic(t *testing.T) {
})
}
func testAccNetwork(n string, network *dc.Network) resource.TestCheckFunc {
func testAccNetwork(n string, network *types.NetworkResource) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@ -38,18 +39,18 @@ func testAccNetwork(n string, network *dc.Network) resource.TestCheckFunc {
}
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
networks, err := client.ListNetworks()
networks, err := client.NetworkList(context.Background(), types.NetworkListOptions{})
if err != nil {
return err
}
for _, n := range networks {
if n.ID == rs.Primary.ID {
inspected, err := client.NetworkInfo(n.ID)
inspected, err := client.NetworkInspect(context.Background(), n.ID, types.NetworkInspectOptions{})
if err != nil {
return fmt.Errorf("Network could not be obtained: %s", err)
}
*network = *inspected
*network = inspected
return nil
}
}
@ -65,7 +66,7 @@ resource "docker_network" "foo" {
`
func TestAccDockerNetwork_internal(t *testing.T) {
var n dc.Network
var n types.NetworkResource
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -82,7 +83,7 @@ func TestAccDockerNetwork_internal(t *testing.T) {
})
}
func testAccNetworkInternal(network *dc.Network, internal bool) resource.TestCheckFunc {
func testAccNetworkInternal(network *types.NetworkResource, internal bool) resource.TestCheckFunc {
return func(s *terraform.State) error {
if network.Internal != internal {
return fmt.Errorf("Bad value for attribute 'internal': %t", network.Internal)

View file

@ -4,8 +4,8 @@ import (
"encoding/base64"
"log"
"context"
"github.com/docker/docker/api/types/swarm"
dc "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/terraform/helper/schema"
)
@ -39,16 +39,14 @@ func resourceDockerSecretCreate(d *schema.ResourceData, meta interface{}) error
client := meta.(*ProviderConfig).DockerClient
data, _ := base64.StdEncoding.DecodeString(d.Get("data").(string))
createSecretOpts := dc.CreateSecretOptions{
SecretSpec: swarm.SecretSpec{
Annotations: swarm.Annotations{
Name: d.Get("name").(string),
},
Data: data,
secretSpec := swarm.SecretSpec{
Annotations: swarm.Annotations{
Name: d.Get("name").(string),
},
Data: data,
}
secret, err := client.CreateSecret(createSecretOpts)
secret, err := client.SecretCreate(context.Background(), secretSpec)
if err != nil {
return err
}
@ -60,15 +58,12 @@ func resourceDockerSecretCreate(d *schema.ResourceData, meta interface{}) error
func resourceDockerSecretRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
secret, err := client.InspectSecret(d.Id())
secret, _, err := client.SecretInspectWithRaw(context.Background(), d.Id())
if err != nil {
if _, ok := err.(*dc.NoSuchSecret); ok {
log.Printf("[WARN] Secret (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
return err
log.Printf("[WARN] Secret (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
d.SetId(secret.ID)
return nil
@ -76,9 +71,7 @@ func resourceDockerSecretRead(d *schema.ResourceData, meta interface{}) error {
func resourceDockerSecretDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
err := client.RemoveSecret(dc.RemoveSecretOptions{
ID: d.Id(),
})
err := client.SecretRemove(context.Background(), d.Id())
if err != nil {
return err

View file

@ -4,6 +4,7 @@ import (
"fmt"
"testing"
"context"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@ -29,7 +30,7 @@ func TestAccDockerSecret_basic(t *testing.T) {
},
})
}
func TestAccDockerSecret_basicUpdateble(t *testing.T) {
func TestAccDockerSecret_basicUpdatable(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -82,9 +83,9 @@ func testCheckDockerSecretDestroy(s *terraform.State) error {
}
id := rs.Primary.Attributes["id"]
secret, err := client.InspectSecret(id)
_, _, err := client.SecretInspectWithRaw(context.Background(), id)
if err == nil || secret != nil {
if err == nil {
return fmt.Errorf("Secret with id '%s' still exists", id)
}
return nil

View file

@ -10,10 +10,13 @@ import (
"strings"
"time"
"encoding/base64"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/swarm"
dc "github.com/fsouza/go-dockerclient"
"github.com/docker/docker/client"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@ -53,17 +56,20 @@ func resourceDockerServiceCreate(d *schema.ResourceData, meta interface{}) error
return err
}
createOpts := dc.CreateServiceOptions{
ServiceSpec: serviceSpec,
}
serviceOptions := types.ServiceCreateOptions{}
auth := types.AuthConfig{}
if v, ok := d.GetOk("auth"); ok {
createOpts.Auth = authToServiceAuth(v.(map[string]interface{}))
auth = authToServiceAuth(v.(map[string]interface{}))
} else {
createOpts.Auth = fromRegistryAuth(d.Get("task_spec.0.container_spec.0.image").(string), meta.(*ProviderConfig).AuthConfigs.Configs)
auth = fromRegistryAuth(d.Get("task_spec.0.container_spec.0.image").(string), meta.(*ProviderConfig).AuthConfigs.Configs)
}
encodedJSON, err := json.Marshal(auth)
if err != nil {
return fmt.Errorf("error creating auth config: %s", err)
}
serviceOptions.EncodedRegistryAuth = base64.URLEncoding.EncodeToString(encodedJSON)
service, err := client.CreateService(createOpts)
service, err := client.ServiceCreate(context.Background(), serviceSpec, serviceOptions)
if err != nil {
return err
}
@ -108,7 +114,7 @@ func resourceDockerServiceRead(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
service, err := client.InspectService(apiService.ID)
service, _, err := client.ServiceInspectWithRaw(context.Background(), apiService.ID, types.ServiceInspectOptions{})
if err != nil {
return fmt.Errorf("Error inspecting service %s: %s", apiService.ID, err)
}
@ -142,7 +148,7 @@ func resourceDockerServiceRead(d *schema.ResourceData, meta interface{}) error {
func resourceDockerServiceUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
service, err := client.InspectService(d.Id())
service, _, err := client.ServiceInspectWithRaw(context.Background(), d.Id(), types.ServiceInspectOptions{})
if err != nil {
return err
}
@ -152,20 +158,26 @@ func resourceDockerServiceUpdate(d *schema.ResourceData, meta interface{}) error
return err
}
updateOpts := dc.UpdateServiceOptions{
ServiceSpec: serviceSpec,
Version: service.Version.Index,
}
updateOptions := types.ServiceUpdateOptions{}
auth := types.AuthConfig{}
if v, ok := d.GetOk("auth"); ok {
updateOpts.Auth = authToServiceAuth(v.(map[string]interface{}))
auth = authToServiceAuth(v.(map[string]interface{}))
} else {
updateOpts.Auth = fromRegistryAuth(d.Get("task_spec.0.container_spec.0.image").(string), meta.(*ProviderConfig).AuthConfigs.Configs)
auth = fromRegistryAuth(d.Get("task_spec.0.container_spec.0.image").(string), meta.(*ProviderConfig).AuthConfigs.Configs)
}
encodedJSON, err := json.Marshal(auth)
if err != nil {
return fmt.Errorf("error creating auth config: %s", err)
}
updateOptions.EncodedRegistryAuth = base64.URLEncoding.EncodeToString(encodedJSON)
if err = client.UpdateService(d.Id(), updateOpts); err != nil {
updateResponse, err := client.ServiceUpdate(context.Background(), d.Id(), service.Version, serviceSpec, updateOptions)
if err != nil {
return err
}
if len(updateResponse.Warnings) > 0 {
log.Printf("[INFO] Warning while updating Service '%s': %v", service.ID, updateResponse.Warnings)
}
if v, ok := d.GetOk("converge_config"); ok {
convergeConfig := createConvergeConfig(v.([]interface{}))
@ -209,8 +221,8 @@ func resourceDockerServiceDelete(d *schema.ResourceData, meta interface{}) error
// Helpers
/////////////////
// fetchDockerService fetches a service by its name or id
func fetchDockerService(ID string, name string, client *dc.Client) (*swarm.Service, error) {
apiServices, err := client.ListServices(dc.ListServicesOptions{})
func fetchDockerService(ID string, name string, client *client.Client) (*swarm.Service, error) {
apiServices, err := client.ServiceList(context.Background(), types.ServiceListOptions{})
if err != nil {
return nil, fmt.Errorf("Error fetching service information from Docker: %s", err)
@ -226,39 +238,34 @@ func fetchDockerService(ID string, name string, client *dc.Client) (*swarm.Servi
}
// deleteService deletes the service with the given id
func deleteService(serviceID string, d *schema.ResourceData, client *dc.Client) error {
func deleteService(serviceID string, d *schema.ResourceData, client *client.Client) error {
// get containerIDs of the running service because they do not exist after the service is deleted
serviceContainerIds := make([]string, 0)
if _, ok := d.GetOk("task_spec.0.container_spec.0.stop_grace_period"); ok {
filter := make(map[string][]string)
filter["service"] = []string{d.Get("name").(string)}
tasks, err := client.ListTasks(dc.ListTasksOptions{
Filters: filter,
filters := filters.NewArgs()
filters.Add("service", d.Get("name").(string))
tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
Filters: filters,
})
if err != nil {
return err
}
for _, t := range tasks {
task, _ := client.InspectTask(t.ID)
log.Printf("[INFO] Found container ['%s'] for destroying: '%s'", task.Status.State, task.Status.ContainerStatus.ContainerID)
if strings.TrimSpace(task.Status.ContainerStatus.ContainerID) != "" && task.Status.State != swarm.TaskStateShutdown {
serviceContainerIds = append(serviceContainerIds, task.Status.ContainerStatus.ContainerID)
task, _, _ := client.TaskInspectWithRaw(context.Background(), t.ID)
containerID := ""
if task.Status.ContainerStatus != nil {
containerID = task.Status.ContainerStatus.ContainerID
}
log.Printf("[INFO] Found container ['%s'] for destroying: '%s'", task.Status.State, containerID)
if strings.TrimSpace(containerID) != "" && task.Status.State != swarm.TaskStateShutdown {
serviceContainerIds = append(serviceContainerIds, containerID)
}
}
}
// delete the service
log.Printf("[INFO] Deleting service: '%s'", serviceID)
removeOpts := dc.RemoveServiceOptions{
ID: serviceID,
}
if err := client.RemoveService(removeOpts); err != nil {
if _, ok := err.(*dc.NoSuchService); ok {
log.Printf("[WARN] Service (%s) not found, removing from state", serviceID)
d.SetId("")
return nil
}
if err := client.ServiceRemove(context.Background(), serviceID); err != nil {
return fmt.Errorf("Error deleting service %s: %s", serviceID, err)
}
@ -269,17 +276,16 @@ func deleteService(serviceID string, d *schema.ResourceData, client *dc.Client)
log.Printf("[INFO] Waiting for container: '%s' to exit: max %v", containerID, destroyGraceSeconds)
ctx, cancel := context.WithTimeout(context.Background(), destroyGraceSeconds)
defer cancel()
exitCode, _ := client.WaitContainerWithContext(containerID, ctx)
exitCode, _ := client.ContainerWait(ctx, containerID, container.WaitConditionRemoved)
log.Printf("[INFO] Container exited with code [%v]: '%s'", exitCode, containerID)
removeOpts := dc.RemoveContainerOptions{
ID: containerID,
removeOpts := types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
}
log.Printf("[INFO] Removing container: '%s'", containerID)
if err := client.RemoveContainer(removeOpts); err != nil {
if err := client.ContainerRemove(context.Background(), containerID, removeOpts); err != nil {
if !(strings.Contains(err.Error(), "No such container") || strings.Contains(err.Error(), "is already in progress")) {
return fmt.Errorf("Error deleting container %s: %s", containerID, err)
}
@ -321,18 +327,17 @@ func resourceDockerServiceCreateRefreshFunc(
updater = &replicatedConsoleLogUpdater{}
}
filter := make(map[string][]string)
filter["service"] = []string{serviceID}
filter["desired-state"] = []string{"running"}
filters := filters.NewArgs()
filters.Add("service", serviceID)
filters.Add("desired-state", "running")
getUpToDateTasks := func() ([]swarm.Task, error) {
return client.ListTasks(dc.ListTasksOptions{
Filters: filter,
Context: ctx,
return client.TaskList(ctx, types.TaskListOptions{
Filters: filters,
})
}
var service *swarm.Service
service, err := client.InspectService(serviceID)
service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
if err != nil {
return nil, "", err
}
@ -347,7 +352,7 @@ func resourceDockerServiceCreateRefreshFunc(
return nil, "", err
}
serviceCreateStatus, err := updater.update(service, tasks, activeNodes, false)
serviceCreateStatus, err := updater.update(&service, tasks, activeNodes, false)
if err != nil {
return nil, "", err
}
@ -377,18 +382,17 @@ func resourceDockerServiceUpdateRefreshFunc(
}
rollback = false
filter := make(map[string][]string)
filter["service"] = []string{serviceID}
filter["desired-state"] = []string{"running"}
filters := filters.NewArgs()
filters.Add("service", serviceID)
filters.Add("desired-state", "running")
getUpToDateTasks := func() ([]swarm.Task, error) {
return client.ListTasks(dc.ListTasksOptions{
Filters: filter,
Context: ctx,
return client.TaskList(ctx, types.TaskListOptions{
Filters: filters,
})
}
var service *swarm.Service
service, err := client.InspectService(serviceID)
service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
if err != nil {
return nil, "", err
}
@ -421,7 +425,7 @@ func resourceDockerServiceUpdateRefreshFunc(
return nil, "", err
}
isUpdateCompleted, err := updater.update(service, tasks, activeNodes, rollback)
isUpdateCompleted, err := updater.update(&service, tasks, activeNodes, rollback)
if err != nil {
return nil, "", err
}
@ -438,8 +442,8 @@ func resourceDockerServiceUpdateRefreshFunc(
}
// getActiveNodes gets the active nodes within a swarm
func getActiveNodes(ctx context.Context, client *dc.Client) (map[string]struct{}, error) {
nodes, err := client.ListNodes(dc.ListNodesOptions{Context: ctx})
func getActiveNodes(ctx context.Context, client *client.Client) (map[string]struct{}, error) {
nodes, err := client.NodeList(ctx, types.NodeListOptions{})
if err != nil {
return nil, err
}
@ -1265,20 +1269,20 @@ func createConvergeConfig(config []interface{}) *convergeConfig {
}
// authToServiceAuth maps the auth to AuthConfiguration
func authToServiceAuth(auth map[string]interface{}) dc.AuthConfiguration {
func authToServiceAuth(auth map[string]interface{}) types.AuthConfig {
if auth["username"] != nil && len(auth["username"].(string)) > 0 && auth["password"] != nil && len(auth["password"].(string)) > 0 {
return dc.AuthConfiguration{
return types.AuthConfig{
Username: auth["username"].(string),
Password: auth["password"].(string),
ServerAddress: auth["server_address"].(string),
}
}
return dc.AuthConfiguration{}
return types.AuthConfig{}
}
// fromRegistryAuth extract the desired AuthConfiguration for the given image
func fromRegistryAuth(image string, configs map[string]dc.AuthConfiguration) dc.AuthConfiguration {
func fromRegistryAuth(image string, configs map[string]types.AuthConfig) types.AuthConfig {
// Remove normalized prefixes to simlify substring
image = strings.Replace(strings.Replace(image, "http://", "", 1), "https://", "", 1)
// Get the registry with optional port
@ -1291,7 +1295,7 @@ func fromRegistryAuth(image string, configs map[string]dc.AuthConfiguration) dc.
}
}
return dc.AuthConfiguration{}
return types.AuthConfig{}
}
// stringSetToPlacementPrefs maps a string set to PlacementPreference

View file

@ -6,7 +6,9 @@ import (
"regexp"
"testing"
dc "github.com/fsouza/go-dockerclient"
"context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@ -16,8 +18,8 @@ import (
// ----------------------------------------
func TestDockerSecretFromRegistryAuth_basic(t *testing.T) {
authConfigs := make(map[string]dc.AuthConfiguration)
authConfigs["https://repo.my-company.com:8787"] = dc.AuthConfiguration{
authConfigs := make(map[string]types.AuthConfig)
authConfigs["https://repo.my-company.com:8787"] = types.AuthConfig{
Username: "myuser",
Password: "mypass",
Email: "",
@ -32,14 +34,14 @@ func TestDockerSecretFromRegistryAuth_basic(t *testing.T) {
}
func TestDockerSecretFromRegistryAuth_multiple(t *testing.T) {
authConfigs := make(map[string]dc.AuthConfiguration)
authConfigs["https://repo.my-company.com:8787"] = dc.AuthConfiguration{
authConfigs := make(map[string]types.AuthConfig)
authConfigs["https://repo.my-company.com:8787"] = types.AuthConfig{
Username: "myuser",
Password: "mypass",
Email: "",
ServerAddress: "repo.my-company.com:8787",
}
authConfigs["https://nexus.my-fancy-company.com"] = dc.AuthConfiguration{
authConfigs["https://nexus.my-fancy-company.com"] = types.AuthConfig{
Username: "myuser33",
Password: "mypass123",
Email: "test@example.com",
@ -98,6 +100,7 @@ func TestAccDockerService_minimal(t *testing.T) {
},
})
}
func TestAccDockerService_full(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -113,7 +116,7 @@ func TestAccDockerService_full(t *testing.T) {
name = "tftest-full-myconfig"
data = "ewogICJwcmVmaXgiOiAiMTIzIgp9"
}
resource "docker_secret" "service_secret" {
name = "tftest-mysecret"
data = "ewogICJrZXkiOiAiUVdFUlRZIgp9"
@ -126,27 +129,27 @@ func TestAccDockerService_full(t *testing.T) {
resource "docker_service" "foo" {
name = "tftest-service-basic"
task_spec {
container_spec {
image = "127.0.0.1:15000/tftest-service:v1"
labels {
foo = "bar"
}
command = ["ls"]
args = ["-las"]
hostname = "my-fancy-service"
env {
MYFOO = "BAR"
}
dir = "/root"
user = "root"
groups = ["docker", "foogroup"]
privileges {
se_linux_context {
disable = true
@ -156,9 +159,9 @@ func TestAccDockerService_full(t *testing.T) {
level = "level-label"
}
}
read_only = true
mounts = [
{
target = "/mount/test"
@ -178,28 +181,28 @@ func TestAccDockerService_full(t *testing.T) {
}
},
]
stop_signal = "SIGTERM"
stop_grace_period = "10s"
healthcheck {
test = ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval = "5s"
timeout = "2s"
retries = 4
}
hosts {
host = "testhost"
ip = "10.0.1.0"
}
dns_config {
nameservers = ["8.8.8.8"]
search = ["example.org"]
options = ["timeout:3"]
}
secrets = [
{
secret_id = "${docker_secret.service_secret.id}"
@ -207,7 +210,7 @@ func TestAccDockerService_full(t *testing.T) {
file_name = "/secrets.json"
},
]
configs = [
{
config_id = "${docker_config.service_config.id}"
@ -216,51 +219,51 @@ func TestAccDockerService_full(t *testing.T) {
},
]
}
resources {
limits {
nano_cpus = 1000000
memory_bytes = 536870912
}
}
restart_policy {
condition = "on-failure"
delay = "3s"
max_attempts = 4
window = "10s"
}
placement {
constraints = [
"node.role==manager",
]
prefs = [
"spread=node.role.manager",
]
}
force_update = 0
runtime = "container"
networks = ["${docker_network.test_network.id}"]
log_driver {
name = "json-file"
options {
max-size = "10m"
max-file = "3"
}
}
}
mode {
replicated {
replicas = 2
}
}
update_config {
parallelism = 2
delay = "10s"
@ -269,7 +272,7 @@ func TestAccDockerService_full(t *testing.T) {
max_failure_ratio = "0.1"
order = "start-first"
}
rollback_config {
parallelism = 2
delay = "5ms"
@ -278,10 +281,10 @@ func TestAccDockerService_full(t *testing.T) {
max_failure_ratio = "0.9"
order = "stop-first"
}
endpoint_spec {
mode = "vip"
ports {
name = "random"
protocol = "tcp"
@ -291,7 +294,7 @@ func TestAccDockerService_full(t *testing.T) {
}
}
}
`,
Check: resource.ComposeTestCheckFunc(
resource.TestMatchResourceAttr("docker_service.foo", "id", serviceIDRegex),
@ -338,12 +341,12 @@ func TestAccDockerService_full(t *testing.T) {
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.container_spec.0.secrets.#", "1"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.resources.0.limits.0.nano_cpus", "1000000"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.resources.0.limits.0.memory_bytes", "536870912"),
// resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.condition", "on-failure"),
// resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.delay", "3s"),
// resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.max_attempts", "4"),
// resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.window", "10s"),
// resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.placement.0.constraints.4248571116", "node.role==manager"),
// resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.placement.0.prefs.1751004438", "spread=node.role.manager"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.condition", "on-failure"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.delay", "3s"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.max_attempts", "4"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.restart_policy.window", "10s"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.placement.0.constraints.4248571116", "node.role==manager"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.placement.0.prefs.1751004438", "spread=node.role.manager"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.force_update", "0"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.networks.#", "1"),
resource.TestCheckResourceAttr("docker_service.foo", "task_spec.0.log_driver.0.name", "json-file"),
@ -503,6 +506,7 @@ func TestAccDockerService_GlobalAndReplicated(t *testing.T) {
},
})
}
func TestAccDockerService_GlobalWithConvergeConfig(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -692,7 +696,7 @@ func TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseR
task_spec {
container_spec {
image = "127.0.0.1:15000/tftest-service:v1"
configs = [
{
config_id = "${docker_config.service_config.id}"
@ -700,7 +704,7 @@ func TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseR
file_name = "/configs.json"
},
]
healthcheck {
test = ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval = "1s"
@ -708,7 +712,7 @@ func TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseR
start_period = "0s"
retries = 2
}
stop_grace_period = "10s"
}
}
@ -822,7 +826,7 @@ func TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseR
target_port = "8080"
published_port = "8082"
}
]
]
}
}
`,
@ -914,7 +918,7 @@ func TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseR
target_port = "8080"
published_port = "8082"
}
]
]
}
}
`,
@ -961,7 +965,7 @@ func TestAccDockerService_nonExistingPrivateImageConverge(t *testing.T) {
container_spec = {
image = "127.0.0.1:15000/idonoexist:latest"
}
}
}
mode {
replicated {
@ -983,6 +987,7 @@ func TestAccDockerService_nonExistingPrivateImageConverge(t *testing.T) {
},
})
}
func TestAccDockerService_nonExistingPublicImageConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -996,7 +1001,7 @@ func TestAccDockerService_nonExistingPublicImageConverge(t *testing.T) {
container_spec = {
image = "stovogel/blablabla:part5"
}
}
}
mode {
replicated {
@ -1070,6 +1075,7 @@ func TestAccDockerService_basicConvergeAndStopGracefully(t *testing.T) {
},
})
}
func TestAccDockerService_updateFailsAndRollbackConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -1082,7 +1088,7 @@ func TestAccDockerService_updateFailsAndRollbackConverge(t *testing.T) {
task_spec {
container_spec {
image = "127.0.0.1:15000/tftest-service:v1"
healthcheck {
test = ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval = "5s"
@ -1212,6 +1218,7 @@ func TestAccDockerService_updateFailsAndRollbackConverge(t *testing.T) {
}
func TestAccDockerService_updateNetworksConverge(t *testing.T) {
// t.Skip("Skipped because response from daemon is not always consistent")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -1242,8 +1249,7 @@ func TestAccDockerService_updateNetworksConverge(t *testing.T) {
replicas = 2
}
}
endpoint_spec {
mode = "vip"
@ -1253,8 +1259,8 @@ func TestAccDockerService_updateNetworksConverge(t *testing.T) {
}
converge_config {
delay = "7s"
timeout = "3m"
delay = "5s"
timeout = "60s"
}
}
@ -1303,8 +1309,8 @@ func TestAccDockerService_updateNetworksConverge(t *testing.T) {
}
converge_config {
delay = "7s"
timeout = "3m"
delay = "5s"
timeout = "60s"
}
}
`,
@ -1356,8 +1362,8 @@ func TestAccDockerService_updateNetworksConverge(t *testing.T) {
}
converge_config {
delay = "7s"
timeout = "3m"
delay = "5s"
timeout = "60s"
}
}
`,
@ -1372,6 +1378,7 @@ func TestAccDockerService_updateNetworksConverge(t *testing.T) {
},
})
}
func TestAccDockerService_updateMountsConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -1383,10 +1390,6 @@ func TestAccDockerService_updateMountsConverge(t *testing.T) {
name = "tftest-volume"
}
resource "docker_volume" "foo2" {
name = "tftest-volume2"
}
resource "docker_service" "foo" {
name = "tftest-service-up-mounts"
task_spec {
@ -1415,10 +1418,9 @@ func TestAccDockerService_updateMountsConverge(t *testing.T) {
}
}
converge_config {
delay = "7s"
timeout = "3m"
delay = "5s"
timeout = "60s"
}
}
`,
@ -1482,8 +1484,8 @@ func TestAccDockerService_updateMountsConverge(t *testing.T) {
}
converge_config {
delay = "7s"
timeout = "3m"
delay = "5s"
timeout = "60s"
}
}
`,
@ -1498,6 +1500,7 @@ func TestAccDockerService_updateMountsConverge(t *testing.T) {
},
})
}
func TestAccDockerService_updateHostsConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -1526,7 +1529,6 @@ func TestAccDockerService_updateHostsConverge(t *testing.T) {
}
}
converge_config {
delay = "7s"
timeout = "3m"
@ -1622,6 +1624,7 @@ func TestAccDockerService_updateHostsConverge(t *testing.T) {
},
})
}
func TestAccDockerService_updateLoggingConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -1639,7 +1642,7 @@ func TestAccDockerService_updateLoggingConverge(t *testing.T) {
log_driver {
name = "json-file"
options {
max-size = "10m"
max-file = "3"
@ -1681,7 +1684,7 @@ func TestAccDockerService_updateLoggingConverge(t *testing.T) {
}
log_driver {
name = "json-file"
options {
max-size = "15m"
max-file = "5"
@ -1760,7 +1763,7 @@ func TestAccDockerService_updateHealthcheckConverge(t *testing.T) {
container_spec {
image = "127.0.0.1:15000/tftest-service:v1"
stop_grace_period = "10s"
healthcheck {
test = ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval = "1s"
@ -1775,7 +1778,7 @@ func TestAccDockerService_updateHealthcheckConverge(t *testing.T) {
replicas = 2
}
}
update_config {
parallelism = 1
delay = "1s"
@ -1843,7 +1846,7 @@ func TestAccDockerService_updateHealthcheckConverge(t *testing.T) {
replicas = 2
}
}
update_config {
parallelism = 1
delay = "1s"
@ -1859,7 +1862,7 @@ func TestAccDockerService_updateHealthcheckConverge(t *testing.T) {
published_port = "8080"
}
}
converge_config {
delay = "7s"
timeout = "3m"
@ -1922,7 +1925,7 @@ func TestAccDockerService_updateIncreaseReplicasConverge(t *testing.T) {
replicas = 1
}
}
update_config {
parallelism = 1
delay = "1s"
@ -1977,7 +1980,7 @@ func TestAccDockerService_updateIncreaseReplicasConverge(t *testing.T) {
container_spec {
image = "127.0.0.1:15000/tftest-service:v1"
stop_grace_period = "10s"
healthcheck {
test = ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval = "1s"
@ -1992,7 +1995,7 @@ func TestAccDockerService_updateIncreaseReplicasConverge(t *testing.T) {
replicas = 3
}
}
update_config {
parallelism = 1
delay = "1s"
@ -2008,7 +2011,7 @@ func TestAccDockerService_updateIncreaseReplicasConverge(t *testing.T) {
published_port = "8080"
}
}
converge_config {
delay = "7s"
timeout = "3m"
@ -2042,6 +2045,7 @@ func TestAccDockerService_updateIncreaseReplicasConverge(t *testing.T) {
},
})
}
func TestAccDockerService_updateDecreaseReplicasConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -2055,7 +2059,7 @@ func TestAccDockerService_updateDecreaseReplicasConverge(t *testing.T) {
container_spec {
image = "127.0.0.1:15000/tftest-service:v1"
stop_grace_period = "10s"
healthcheck {
test = ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval = "1s"
@ -2070,7 +2074,7 @@ func TestAccDockerService_updateDecreaseReplicasConverge(t *testing.T) {
replicas = 5
}
}
update_config {
parallelism = 1
delay = "1s"
@ -2139,7 +2143,7 @@ func TestAccDockerService_updateDecreaseReplicasConverge(t *testing.T) {
replicas = 1
}
}
update_config {
parallelism = 1
delay = "1s"
@ -2155,7 +2159,7 @@ func TestAccDockerService_updateDecreaseReplicasConverge(t *testing.T) {
published_port = "8080"
}
}
converge_config {
delay = "7s"
timeout = "3m"
@ -2381,7 +2385,7 @@ func TestAccDockerService_updateConfigConverge(t *testing.T) {
replicas = 2
}
}
update_config {
parallelism = 1
delay = "1s"
@ -2397,7 +2401,7 @@ func TestAccDockerService_updateConfigConverge(t *testing.T) {
published_port = "8080"
}
}
converge_config {
delay = "7s"
timeout = "30s"
@ -2467,7 +2471,7 @@ func TestAccDockerService_updateConfigConverge(t *testing.T) {
replicas = 2
}
}
update_config {
parallelism = 1
delay = "1s"
@ -2483,7 +2487,7 @@ func TestAccDockerService_updateConfigConverge(t *testing.T) {
published_port = "8080"
}
}
converge_config {
delay = "7s"
timeout = "30s"
@ -2516,6 +2520,7 @@ func TestAccDockerService_updateConfigConverge(t *testing.T) {
},
})
}
func TestAccDockerService_updateConfigAndSecretConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -2578,7 +2583,7 @@ func TestAccDockerService_updateConfigAndSecretConverge(t *testing.T) {
replicas = 2
}
}
update_config {
parallelism = 1
delay = "1s"
@ -2595,7 +2600,6 @@ func TestAccDockerService_updateConfigAndSecretConverge(t *testing.T) {
}
}
converge_config {
delay = "7s"
timeout = "3m"
@ -2684,7 +2688,7 @@ func TestAccDockerService_updateConfigAndSecretConverge(t *testing.T) {
replicas = 2
}
}
update_config {
parallelism = 1
delay = "1s"
@ -2701,7 +2705,6 @@ func TestAccDockerService_updateConfigAndSecretConverge(t *testing.T) {
}
}
converge_config {
delay = "7s"
timeout = "3m"
@ -2736,6 +2739,7 @@ func TestAccDockerService_updateConfigAndSecretConverge(t *testing.T) {
},
})
}
func TestAccDockerService_updatePortConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -2852,7 +2856,7 @@ func TestAccDockerService_updatePortConverge(t *testing.T) {
target_port = "8080"
published_port = "8082"
}
]
]
}
converge_config {
@ -2889,6 +2893,7 @@ func TestAccDockerService_updatePortConverge(t *testing.T) {
},
})
}
func TestAccDockerService_updateConfigReplicasImageAndHealthConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -3038,7 +3043,7 @@ func TestAccDockerService_updateConfigReplicasImageAndHealthConverge(t *testing.
target_port = "8080"
published_port = "8082"
}
]
]
}
converge_config {
@ -3075,6 +3080,7 @@ func TestAccDockerService_updateConfigReplicasImageAndHealthConverge(t *testing.
},
})
}
func TestAccDockerService_updateConfigAndDecreaseReplicasConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -3119,7 +3125,7 @@ func TestAccDockerService_updateConfigAndDecreaseReplicasConverge(t *testing.T)
replicas = 5
}
}
update_config {
parallelism = 1
delay = "1s"
@ -3204,7 +3210,7 @@ func TestAccDockerService_updateConfigAndDecreaseReplicasConverge(t *testing.T)
replicas = 1
}
}
update_config {
parallelism = 1
delay = "1s"
@ -3253,6 +3259,7 @@ func TestAccDockerService_updateConfigAndDecreaseReplicasConverge(t *testing.T)
},
})
}
func TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseReplicasConverge(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -3402,7 +3409,7 @@ func TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseR
target_port = "8080"
published_port = "8082"
}
]
]
}
converge_config {
@ -3495,7 +3502,7 @@ func TestAccDockerService_updateConfigReplicasImageAndHealthIncreaseAndDecreaseR
target_port = "8080"
published_port = "8082"
}
]
]
}
converge_config {
@ -3579,10 +3586,10 @@ func TestAccDockerService_privateConverge(t *testing.T) {
func isServiceRemoved(serviceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
filter := make(map[string][]string)
filter["name"] = []string{serviceName}
services, err := client.ListServices(dc.ListServicesOptions{
Filters: filter,
filters := filters.NewArgs()
filters.Add("name", serviceName)
services, err := client.ServiceList(context.Background(), types.ServiceListOptions{
Filters: filters,
})
if err != nil {
return fmt.Errorf("Error listing service for name %s: %v", serviceName, err)

View file

@ -1,13 +1,15 @@
package docker
import (
"context"
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/volume"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"log"
"strings"
"time"
dc "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceDockerVolume() *schema.Resource {
@ -44,8 +46,10 @@ func resourceDockerVolume() *schema.Resource {
func resourceDockerVolumeCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
ctx := context.Background()
createOpts := volume.VolumeCreateBody{}
createOpts := dc.CreateVolumeOptions{}
if v, ok := d.GetOk("name"); ok {
createOpts.Name = v.(string)
}
@ -57,34 +61,32 @@ func resourceDockerVolumeCreate(d *schema.ResourceData, meta interface{}) error
}
var err error
var retVolume *dc.Volume
if retVolume, err = client.CreateVolume(createOpts); err != nil {
var retVolume types.Volume
retVolume, err = client.VolumeCreate(ctx, createOpts)
if err != nil {
return fmt.Errorf("Unable to create volume: %s", err)
}
if retVolume == nil {
return fmt.Errorf("Returned volume is nil")
}
d.SetId(retVolume.Name)
d.Set("name", retVolume.Name)
d.Set("driver", retVolume.Driver)
d.Set("mountpoint", retVolume.Mountpoint)
return nil
return resourceDockerVolumeRead(d, meta)
}
func resourceDockerVolumeRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
ctx := context.Background()
var err error
var retVolume *dc.Volume
if retVolume, err = client.InspectVolume(d.Id()); err != nil && err != dc.ErrNoSuchVolume {
var retVolume types.Volume
retVolume, err = client.VolumeInspect(ctx, d.Id())
if err != nil {
return fmt.Errorf("Unable to inspect volume: %s", err)
}
if retVolume == nil {
d.SetId("")
return nil
}
d.Set("name", retVolume.Name)
d.Set("driver", retVolume.Driver)
@ -94,35 +96,42 @@ func resourceDockerVolumeRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceDockerVolumeDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ProviderConfig).DockerClient
log.Printf("[INFO] Waiting for volume: '%s' to get removed: max '%v seconds'", d.Id(), 30)
// TODO catch error if removal is already in progress + fix with statefunc
if err := client.RemoveVolume(d.Id()); err != nil && err != dc.ErrNoSuchVolume {
if err == dc.ErrVolumeInUse {
loops := 20
sleepTime := 1000 * time.Millisecond
for i := loops; i > 0; i-- {
if err = client.RemoveVolume(d.Id()); err != nil {
if err == dc.ErrVolumeInUse {
log.Printf("[INFO] Volume remove loop: %d of %d due to error: %s", loops-i+1, loops, err)
time.Sleep(sleepTime)
continue
}
if err == dc.ErrNoSuchVolume {
log.Printf("[INFO] Volume successfully removed")
d.SetId("")
return nil
}
if !strings.Contains(err.Error(), "is already in progress") {
// if it's not in use any more (so it's deleted successfully) and another error occurred
return fmt.Errorf("Error deleting volume %s: %s", d.Id(), err)
}
}
}
return fmt.Errorf("Error deleting volume %s: %s after %d tries", d.Id(), err, loops)
}
stateConf := &resource.StateChangeConf{
Pending: []string{"in_use"},
Target: []string{"removed"},
Refresh: resourceDockerVolumeRemoveRefreshFunc(d.Id(), meta),
Timeout: 30 * time.Second,
MinTimeout: 5 * time.Second,
Delay: 2 * time.Second,
}
// Wait, catching any errors
_, err := stateConf.WaitForState()
if err != nil {
return err
}
d.SetId("")
return nil
}
func resourceDockerVolumeRemoveRefreshFunc(
volumeID string, meta interface{}) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
client := meta.(*ProviderConfig).DockerClient
forceDelete := true
if err := client.VolumeRemove(context.Background(), volumeID, forceDelete); err != nil {
if strings.Contains(err.Error(), "volume is in use") { // store.IsInUse(err)
log.Printf("[INFO] Volume with id '%v' is still in use", volumeID)
return volumeID, "in_use", nil
}
log.Printf("[INFO] Removing volume with id '%v' caused an error: %v", volumeID, err)
return nil, "", err
}
log.Printf("[INFO] Removing volume with id '%v' got removed", volumeID)
return volumeID, "removed", nil
}
}
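For context on the raw client calls the volume resource now wraps, here is a minimal, self-contained sketch of the create/inspect/remove round trip against the official client, pinned to the same API version and the volume.VolumeCreateBody type vendored here (newer client releases rename that type). The volume name and environment-based client setup are illustrative assumptions:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	// Build a client from DOCKER_HOST / DOCKER_CERT_PATH etc. and pin the API version.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.37"))
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Create, inspect, then force-remove a volume, mirroring the calls
	// made by resourceDockerVolumeCreate/Read/Delete above.
	vol, err := cli.VolumeCreate(ctx, volume.VolumeCreateBody{Name: "tftest-example"})
	if err != nil {
		panic(err)
	}
	inspected, err := cli.VolumeInspect(ctx, vol.Name)
	if err != nil {
		panic(err)
	}
	fmt.Println(inspected.Name, inspected.Driver, inspected.Mountpoint)

	if err := cli.VolumeRemove(ctx, vol.Name, true); err != nil {
		panic(err)
	}
}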

View file

@ -1,16 +1,16 @@
package docker
import (
"context"
"fmt"
"testing"
dc "github.com/fsouza/go-dockerclient"
"github.com/docker/docker/api/types"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"testing"
)
func TestAccDockerVolume_basic(t *testing.T) {
var v dc.Volume
var v types.Volume
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -28,7 +28,7 @@ func TestAccDockerVolume_basic(t *testing.T) {
})
}
func checkDockerVolume(n string, volume *dc.Volume) resource.TestCheckFunc {
func checkDockerVolume(n string, volume *types.Volume) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@ -39,24 +39,16 @@ func checkDockerVolume(n string, volume *dc.Volume) resource.TestCheckFunc {
return fmt.Errorf("No ID is set")
}
ctx := context.Background()
client := testAccProvider.Meta().(*ProviderConfig).DockerClient
volumes, err := client.ListVolumes(dc.ListVolumesOptions{})
v, err := client.VolumeInspect(ctx, rs.Primary.ID)
if err != nil {
return err
}
for _, v := range volumes {
if v.Name == rs.Primary.ID {
inspected, err := client.InspectVolume(v.Name)
if err != nil {
return fmt.Errorf("Volume could not be inspected: %s", err)
}
*volume = *inspected
return nil
}
}
*volume = v
return fmt.Errorf("Volume not found: %s", rs.Primary.ID)
return nil
}
}

View file

@ -122,3 +122,13 @@ func validateStringIsBase64Encoded() schema.SchemaValidateFunc {
return
}
}
func validateDockerContainerPath(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[a-zA-Z]:\\|^/`).MatchString(value) {
errors = append(errors, fmt.Errorf("%q must be an absolute path", k))
}
return
}
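The validator accepts both Unix absolute paths and Windows drive paths. A quick standalone sketch of the same regular expression, with hypothetical sample inputs, shows the intended behavior:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as validateDockerContainerPath: a Windows drive prefix
	// (e.g. C:\) or a leading slash marks an absolute container path.
	absPath := regexp.MustCompile(`^[a-zA-Z]:\\|^/`)
	for _, p := range []string{"/var/lib/data", `C:\data`, "relative/path"} {
		fmt.Printf("%-16s absolute: %v\n", p, absPath.MatchString(p))
	}
}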

30
scripts/compile.sh Executable file
View file

@ -0,0 +1,30 @@
#!/bin/bash
set -e
# Prerequisites
if ! command -v gox > /dev/null; then
go get -u github.com/mitchellh/gox
fi
# setup environment
PROVIDER_NAME="docker"
TARGET_DIR="$(pwd)/results"
XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
XC_OS=${XC_OS:=linux darwin windows freebsd openbsd solaris}
XC_EXCLUDE_OSARCH="!darwin/arm !darwin/386 !solaris/amd64"
LD_FLAGS="-s -w"
export CGO_ENABLED=0
rm -rf "${TARGET_DIR}"
mkdir -p "${TARGET_DIR}"
# Compile
gox \
-os="${XC_OS}" \
-arch="${XC_ARCH}" \
-osarch="${XC_EXCLUDE_OSARCH}" \
-ldflags "${LD_FLAGS}" \
-output "$TARGET_DIR/{{.OS}}_{{.Arch}}/terraform-provider-${PROVIDER_NAME}_v0.0.0_x4" \
-verbose \
-rebuild \
.

202
vendor/github.com/docker/distribution/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

247
vendor/github.com/docker/distribution/digestset/set.go generated vendored Normal file
View file

@ -0,0 +1,247 @@
package digestset
import (
"errors"
"sort"
"strings"
"sync"
digest "github.com/opencontainers/go-digest"
)
var (
// ErrDigestNotFound is used when a matching digest
// could not be found in a set.
ErrDigestNotFound = errors.New("digest not found")
// ErrDigestAmbiguous is used when multiple digests
// are found in a set. None of the matching digests
// should be considered valid matches.
ErrDigestAmbiguous = errors.New("ambiguous digest string")
)
// Set is used to hold a unique set of digests which
// may be easily referenced by a string representation of the digest
// as well as a short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
mutex sync.RWMutex
entries digestEntries
}
// NewSet creates an empty set of digests
// which may have digests added.
func NewSet() *Set {
return &Set{
entries: digestEntries{},
}
}
// checkShortMatch checks whether two digests match as either whole
// values or short values. This function does not test equality,
// rather whether the second value could match against the first
// value.
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
if len(hex) == len(shortHex) {
if hex != shortHex {
return false
}
if len(shortAlg) > 0 && string(alg) != shortAlg {
return false
}
} else if !strings.HasPrefix(hex, shortHex) {
return false
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
return false
}
return true
}
// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
if len(dst.entries) == 0 {
return "", ErrDigestNotFound
}
var (
searchFunc func(int) bool
alg digest.Algorithm
hex string
)
dgst, err := digest.Parse(d)
if err == digest.ErrDigestInvalidFormat {
hex = d
searchFunc = func(i int) bool {
return dst.entries[i].val >= d
}
} else {
hex = dgst.Hex()
alg = dgst.Algorithm()
searchFunc = func(i int) bool {
if dst.entries[i].val == hex {
return dst.entries[i].alg >= alg
}
return dst.entries[i].val >= hex
}
}
idx := sort.Search(len(dst.entries), searchFunc)
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
return "", ErrDigestNotFound
}
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
return dst.entries[idx].digest, nil
}
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
return "", ErrDigestAmbiguous
}
return dst.entries[idx].digest, nil
}
// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d digest.Digest) error {
if err := d.Validate(); err != nil {
return err
}
dst.mutex.Lock()
defer dst.mutex.Unlock()
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
searchFunc := func(i int) bool {
if dst.entries[i].val == entry.val {
return dst.entries[i].alg >= entry.alg
}
return dst.entries[i].val >= entry.val
}
idx := sort.Search(len(dst.entries), searchFunc)
if idx == len(dst.entries) {
dst.entries = append(dst.entries, entry)
return nil
} else if dst.entries[idx].digest == d {
return nil
}
entries := append(dst.entries, nil)
copy(entries[idx+1:], entries[idx:len(entries)-1])
entries[idx] = entry
dst.entries = entries
return nil
}
// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d digest.Digest) error {
if err := d.Validate(); err != nil {
return err
}
dst.mutex.Lock()
defer dst.mutex.Unlock()
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
searchFunc := func(i int) bool {
if dst.entries[i].val == entry.val {
return dst.entries[i].alg >= entry.alg
}
return dst.entries[i].val >= entry.val
}
idx := sort.Search(len(dst.entries), searchFunc)
// Not found if idx is after or value at idx is not digest
if idx == len(dst.entries) || dst.entries[idx].digest != d {
return nil
}
entries := dst.entries
copy(entries[idx:], entries[idx+1:])
entries = entries[:len(entries)-1]
dst.entries = entries
return nil
}
// All returns all the digests in the set
func (dst *Set) All() []digest.Digest {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
retValues := make([]digest.Digest, len(dst.entries))
for i := range dst.entries {
retValues[i] = dst.entries[i].digest
}
return retValues
}
// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
m := make(map[digest.Digest]string, len(dst.entries))
l := length
resetIdx := 0
for i := 0; i < len(dst.entries); i++ {
var short string
extended := true
for extended {
extended = false
if len(dst.entries[i].val) <= l {
short = dst.entries[i].digest.String()
} else {
short = dst.entries[i].val[:l]
for j := i + 1; j < len(dst.entries); j++ {
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
if j > resetIdx {
resetIdx = j
}
extended = true
} else {
break
}
}
if extended {
l++
}
}
}
m[dst.entries[i].digest] = short
if i >= resetIdx {
l = length
}
}
return m
}
type digestEntry struct {
alg digest.Algorithm
val string
digest digest.Digest
}
type digestEntries []*digestEntry
func (d digestEntries) Len() int {
return len(d)
}
func (d digestEntries) Less(i, j int) bool {
if d[i].val != d[j].val {
return d[i].val < d[j].val
}
return d[i].alg < d[j].alg
}
func (d digestEntries) Swap(i, j int) {
d[i], d[j] = d[j], d[i]
}
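As a rough illustration of the digestset API added by this vendored package, the sketch below builds a set and then resolves a digest again from a short hex prefix. The digest value comes from go-digest's FromString helper; the 12-character prefix length is an arbitrary choice for the example:

package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	set := digestset.NewSet()

	d := digest.FromString("hello world") // sha256:b94d27b9...
	if err := set.Add(d); err != nil {
		panic(err)
	}

	// Resolve the digest again from a short prefix of its hex value.
	found, err := set.Lookup(d.Hex()[:12])
	if err != nil {
		panic(err)
	}
	fmt.Println(found == d) // true
}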

View file

@ -0,0 +1,42 @@
package reference
import "path"
// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
if _, ok := ref.(NamedTagged); ok {
return false
}
if _, ok := ref.(Canonical); ok {
return false
}
return true
}
// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
if nn, ok := ref.(normalizedNamed); ok {
return nn.Familiar().Name()
}
return ref.Name()
}
// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
if nn, ok := ref.(normalizedNamed); ok {
return nn.Familiar().String()
}
return ref.String()
}
// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
matched, err := path.Match(pattern, FamiliarString(ref))
if namedRef, isNamed := ref.(Named); isNamed && !matched {
matched, _ = path.Match(pattern, FamiliarName(namedRef))
}
return matched, err
}

View file

@ -0,0 +1,170 @@
package reference
import (
"errors"
"fmt"
"strings"
"github.com/docker/distribution/digestset"
"github.com/opencontainers/go-digest"
)
var (
legacyDefaultDomain = "index.docker.io"
defaultDomain = "docker.io"
officialRepoName = "library"
defaultTag = "latest"
)
// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
Named
Familiar() Named
}
// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) {
if ok := anchoredIdentifierRegexp.MatchString(s); ok {
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
}
domain, remainder := splitDockerDomain(s)
var remoteName string
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
remoteName = remainder[:tagSep]
} else {
remoteName = remainder
}
if strings.ToLower(remoteName) != remoteName {
return nil, errors.New("invalid reference format: repository name must be lowercase")
}
ref, err := Parse(domain + "/" + remainder)
if err != nil {
return nil, err
}
named, isNamed := ref.(Named)
if !isNamed {
return nil, fmt.Errorf("reference %s has no name", ref.String())
}
return named, nil
}
// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) {
i := strings.IndexRune(name, '/')
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
domain, remainder = defaultDomain, name
} else {
domain, remainder = name[:i], name[i+1:]
}
if domain == legacyDefaultDomain {
domain = defaultDomain
}
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
remainder = officialRepoName + "/" + remainder
}
return
}
// familiarizeName returns a shortened version of the name familiar
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named only reference.
func familiarizeName(named namedRepository) repository {
repo := repository{
domain: named.Domain(),
path: named.Path(),
}
if repo.domain == defaultDomain {
repo.domain = ""
// Handle official repositories which have the pattern "library/<official repo name>"
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
repo.path = split[1]
}
}
return repo
}
func (r reference) Familiar() Named {
return reference{
namedRepository: familiarizeName(r.namedRepository),
tag: r.tag,
digest: r.digest,
}
}
func (r repository) Familiar() Named {
return familiarizeName(r)
}
func (t taggedReference) Familiar() Named {
return taggedReference{
namedRepository: familiarizeName(t.namedRepository),
tag: t.tag,
}
}
func (c canonicalReference) Familiar() Named {
return canonicalReference{
namedRepository: familiarizeName(c.namedRepository),
digest: c.digest,
}
}
// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
func TagNameOnly(ref Named) Named {
if IsNameOnly(ref) {
namedTagged, err := WithTag(ref, defaultTag)
if err != nil {
// Default tag must be valid, to create a NamedTagged
// type with non-validated input the WithTag function
// should be used instead
panic(err)
}
return namedTagged
}
return ref
}
// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
return digestReference("sha256:" + ref), nil
}
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
return ParseNormalizedNamed(ref)
}
// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
dgst, err := ds.Lookup(ref)
if err == nil {
return digestReference(dgst), nil
}
} else {
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
}
return ParseNormalizedNamed(ref)
}
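To see how normalization and familiarization fit together, here is a small sketch using the exported helpers from this file (ParseNormalizedNamed, FamiliarString, TagNameOnly); the "redis" input is only an example:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A familiar name is expanded to its fully qualified form ...
	named, err := reference.ParseNormalizedNamed("redis")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/redis

	// ... and can be shortened back to the familiar form, with the
	// default "latest" tag added when no tag is present.
	fmt.Println(reference.FamiliarString(named))                        // redis
	fmt.Println(reference.FamiliarString(reference.TagNameOnly(named))) // redis:latest
}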

View file

@ -0,0 +1,433 @@
// Package reference provides a general type to represent any way of referencing images within the registry.
// Its main purpose is to abstract tags and digests (content-addressable hash).
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
//
// digest := digest-algorithm ":" digest-hex
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
// digest-algorithm-separator := /[+.-_]/
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
// identifier := /[a-f0-9]{64}/
// short-identifier := /[a-f0-9]{6,64}/
package reference
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/go-digest"
)
const (
// NameTotalLengthMax is the maximum total number of characters in a repository name.
NameTotalLengthMax = 255
)
var (
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
ErrReferenceInvalidFormat = errors.New("invalid reference format")
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
ErrTagInvalidFormat = errors.New("invalid tag format")
// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
ErrDigestInvalidFormat = errors.New("invalid digest format")
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
// ErrNameEmpty is returned for empty, invalid repository names.
ErrNameEmpty = errors.New("repository name must have at least one component")
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
// ErrNameNotCanonical is returned when a name is not canonical.
ErrNameNotCanonical = errors.New("repository name must be canonical")
)
// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
// String returns the full reference
String() string
}
// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
reference Reference
}
// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
return Field{reference}
}
// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
return f.reference
}
// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
return []byte(f.reference.String()), nil
}
// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
r, err := Parse(string(p))
if err != nil {
return err
}
f.reference = r
return nil
}
// Named is an object with a full name
type Named interface {
Reference
Name() string
}
// Tagged is an object which has a tag
type Tagged interface {
Reference
Tag() string
}
// NamedTagged is an object including a name and tag.
type NamedTagged interface {
Named
Tag() string
}
// Digested is an object which has a digest
// in which it can be referenced by
type Digested interface {
Reference
Digest() digest.Digest
}
// Canonical reference is an object with a fully unique
// name including a name with domain and digest
type Canonical interface {
Named
Digest() digest.Digest
}
// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
Named
Domain() string
Path() string
}
// Domain returns the domain part of the Named reference
func Domain(named Named) string {
if r, ok := named.(namedRepository); ok {
return r.Domain()
}
domain, _ := splitDomain(named.Name())
return domain
}
// Path returns the name without the domain part of the Named reference
func Path(named Named) (name string) {
if r, ok := named.(namedRepository); ok {
return r.Path()
}
_, path := splitDomain(named.Name())
return path
}
func splitDomain(name string) (string, string) {
match := anchoredNameRegexp.FindStringSubmatch(name)
if len(match) != 3 {
return "", name
}
return match[1], match[2]
}
// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path()
}
return splitDomain(named.Name())
}
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
matches := ReferenceRegexp.FindStringSubmatch(s)
if matches == nil {
if s == "" {
return nil, ErrNameEmpty
}
if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
return nil, ErrNameContainsUppercase
}
return nil, ErrReferenceInvalidFormat
}
if len(matches[1]) > NameTotalLengthMax {
return nil, ErrNameTooLong
}
var repo repository
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
if nameMatch != nil && len(nameMatch) == 3 {
repo.domain = nameMatch[1]
repo.path = nameMatch[2]
} else {
repo.domain = ""
repo.path = matches[1]
}
ref := reference{
namedRepository: repo,
tag: matches[2],
}
if matches[3] != "" {
var err error
ref.digest, err = digest.Parse(matches[3])
if err != nil {
return nil, err
}
}
r := getBestReferenceType(ref)
if r == nil {
return nil, ErrNameEmpty
}
return r, nil
}
// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
named, err := ParseNormalizedNamed(s)
if err != nil {
return nil, err
}
if named.String() != s {
return nil, ErrNameNotCanonical
}
return named, nil
}
// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
if len(name) > NameTotalLengthMax {
return nil, ErrNameTooLong
}
match := anchoredNameRegexp.FindStringSubmatch(name)
if match == nil || len(match) != 3 {
return nil, ErrReferenceInvalidFormat
}
return repository{
domain: match[1],
path: match[2],
}, nil
}
// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
if !anchoredTagRegexp.MatchString(tag) {
return nil, ErrTagInvalidFormat
}
var repo repository
if r, ok := name.(namedRepository); ok {
repo.domain = r.Domain()
repo.path = r.Path()
} else {
repo.path = name.Name()
}
if canonical, ok := name.(Canonical); ok {
return reference{
namedRepository: repo,
tag: tag,
digest: canonical.Digest(),
}, nil
}
return taggedReference{
namedRepository: repo,
tag: tag,
}, nil
}
// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
if !anchoredDigestRegexp.MatchString(digest.String()) {
return nil, ErrDigestInvalidFormat
}
var repo repository
if r, ok := name.(namedRepository); ok {
repo.domain = r.Domain()
repo.path = r.Path()
} else {
repo.path = name.Name()
}
if tagged, ok := name.(Tagged); ok {
return reference{
namedRepository: repo,
tag: tagged.Tag(),
digest: digest,
}, nil
}
return canonicalReference{
namedRepository: repo,
digest: digest,
}, nil
}
// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
domain, path := SplitHostname(ref)
return repository{
domain: domain,
path: path,
}
}
func getBestReferenceType(ref reference) Reference {
if ref.Name() == "" {
// Allow digest only references
if ref.digest != "" {
return digestReference(ref.digest)
}
return nil
}
if ref.tag == "" {
if ref.digest != "" {
return canonicalReference{
namedRepository: ref.namedRepository,
digest: ref.digest,
}
}
return ref.namedRepository
}
if ref.digest == "" {
return taggedReference{
namedRepository: ref.namedRepository,
tag: ref.tag,
}
}
return ref
}
type reference struct {
namedRepository
tag string
digest digest.Digest
}
func (r reference) String() string {
return r.Name() + ":" + r.tag + "@" + r.digest.String()
}
func (r reference) Tag() string {
return r.tag
}
func (r reference) Digest() digest.Digest {
return r.digest
}
type repository struct {
domain string
path string
}
func (r repository) String() string {
return r.Name()
}
func (r repository) Name() string {
if r.domain == "" {
return r.path
}
return r.domain + "/" + r.path
}
func (r repository) Domain() string {
return r.domain
}
func (r repository) Path() string {
return r.path
}
type digestReference digest.Digest
func (d digestReference) String() string {
return digest.Digest(d).String()
}
func (d digestReference) Digest() digest.Digest {
return digest.Digest(d)
}
type taggedReference struct {
namedRepository
tag string
}
func (t taggedReference) String() string {
return t.Name() + ":" + t.tag
}
func (t taggedReference) Tag() string {
return t.tag
}
type canonicalReference struct {
namedRepository
digest digest.Digest
}
func (c canonicalReference) String() string {
return c.Name() + "@" + c.digest.String()
}
func (c canonicalReference) Digest() digest.Digest {
return c.digest
}
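The strict Parse entry point defined above splits a reference into its domain, path, tag, and digest components according to the grammar documented at the top of the file. A brief sketch with an illustrative registry address:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	ref, err := reference.Parse("localhost:5000/my/app:v1")
	if err != nil {
		panic(err)
	}
	if tagged, ok := ref.(reference.NamedTagged); ok {
		// Domain, path, and tag are recovered from the parsed reference.
		fmt.Println(reference.Domain(tagged), reference.Path(tagged), tagged.Tag())
		// Output: localhost:5000 my/app v1
	}
}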

View file

@ -0,0 +1,143 @@
package reference
import "regexp"
var (
// alphaNumericRegexp defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumericRegexp = match(`[a-z0-9]+`)
// separatorRegexp defines the separators allowed to be embedded in name
// components. This allows one period, one or two underscores, and multiple
// dashes.
separatorRegexp = match(`(?:[._]|__|[-]*)`)
// nameComponentRegexp restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscores, and multiple dashes.
nameComponentRegexp = expression(
alphaNumericRegexp,
optional(repeated(separatorRegexp, alphaNumericRegexp)))
// domainComponentRegexp restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp
// and followed by an optional port.
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
DomainRegexp = expression(
domainComponentRegexp,
optional(repeated(literal(`.`), domainComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`)))
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = match(`[\w][\w.-]{0,127}`)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = anchored(TagRegexp)
// DigestRegexp matches valid digests.
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = anchored(DigestRegexp)
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = expression(
optional(DomainRegexp, literal(`/`)),
nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = anchored(
optional(capture(DomainRegexp), literal(`/`)),
capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = anchored(capture(NameRegexp),
optional(literal(":"), capture(TagRegexp)),
optional(literal("@"), capture(DigestRegexp)))
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = match(`([a-f0-9]{64})`)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
// anchoredShortIdentifierRegexp is used to check if a value
// is a possible identifier prefix, anchored at start and end
// of string.
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)
// match compiles the string to a regular expression.
var match = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
re := match(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
var s string
for _, re := range res {
s += re.String()
}
return match(s)
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `?`)
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `+`)
}
// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(?:` + expression(res...).String() + `)`)
}
// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(` + expression(res...).String() + `)`)
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
return match(`^` + expression(res...).String() + `$`)
}
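
For orientation, a minimal sketch of how the exported helpers of this `reference` package are typically used; the image name is an arbitrary example and the values in the comments assume the standard Docker Hub normalization defaults:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Parse a short image name the way the Docker CLI does; the normalized
	// form fills in the default registry and repository path.
	ref, err := reference.ParseNormalizedNamed("nginx:1.15")
	if err != nil {
		panic(err)
	}

	fmt.Println(reference.Domain(ref)) // docker.io
	fmt.Println(reference.Path(ref))   // library/nginx

	// TagNameOnly adds the "latest" tag only when neither tag nor digest is set.
	if tagged, ok := reference.TagNameOnly(ref).(reference.Tagged); ok {
		fmt.Println(tagged.Tag()) // 1.15
	}
}
```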

42
vendor/github.com/docker/docker/api/README.md generated vendored Normal file
View file

@ -0,0 +1,42 @@
# Working on the Engine API
The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
It consists of various components in this repository:
- `api/swagger.yaml` A Swagger definition of the API.
- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
- `cli/` The command-line client.
- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
- `daemon/` The daemon, which serves the API.
## Swagger definition
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
1. Automatically generate documentation.
2. Automatically generate the Go server and client. (A work-in-progress.)
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
## Updating the API documentation
The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
The file is split into two main sections:
- `definitions`, which defines re-usable objects used in requests and responses
- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
## Viewing the API documentation
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).

11
vendor/github.com/docker/docker/api/common.go generated vendored Normal file
View file

@ -0,0 +1,11 @@
package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
DefaultVersion = "1.38"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.
NoBaseImageSpecifier = "scratch"
)

6
vendor/github.com/docker/docker/api/common_unix.go generated vendored Normal file
View file

@ -0,0 +1,6 @@
// +build !windows
package api // import "github.com/docker/docker/api"
// MinVersion represents Minimum REST API version supported
const MinVersion = "1.12"

View file

@ -0,0 +1,8 @@
package api // import "github.com/docker/docker/api"
// MinVersion represents Minimum REST API version supported
// Technically the first daemon API version released on Windows is v1.25 in
// engine version 1.13. However, some clients are explicitly using downlevel
// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
// Hence also allowing 1.24 on Windows.
const MinVersion string = "1.24"

12
vendor/github.com/docker/docker/api/swagger-gen.yaml generated vendored Normal file
View file

@ -0,0 +1,12 @@
layout:
models:
- name: definition
source: asset:model
target: "{{ joinFilePath .Target .ModelPackage }}"
file_name: "{{ (snakize (pascalize .Name)) }}.go"
operations:
- name: handler
source: asset:serverOperation
target: "{{ joinFilePath .Target .APIPackage .Package }}"
file_name: "{{ (snakize (pascalize .Name)) }}.go"

10115
vendor/github.com/docker/docker/api/swagger.yaml generated vendored Normal file

File diff suppressed because it is too large

View file

@ -1,4 +1,4 @@
package types
package types // import "github.com/docker/docker/api/types"
// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {

View file

@ -1,4 +1,4 @@
package blkiodev
package blkiodev // import "github.com/docker/docker/api/types/blkiodev"
import "fmt"

View file

@ -1,4 +1,4 @@
package types
package types // import "github.com/docker/docker/api/types"
import (
"bufio"
@ -7,7 +7,7 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
units "github.com/docker/go-units"
"github.com/docker/go-units"
)
// CheckpointCreateOptions holds parameters to create a checkpoint from a container

View file

@ -1,4 +1,4 @@
package types
package types // import "github.com/docker/docker/api/types"
import (
"github.com/docker/docker/api/types/container"
@ -25,19 +25,6 @@ type ContainerRmConfig struct {
ForceRemove, RemoveVolume, RemoveLink bool
}
// ContainerCommitConfig contains build configs for commit operation,
// and is used when making a commit with the current state of the container.
type ContainerCommitConfig struct {
Pause bool
Repo string
Tag string
Author string
Comment string
// merge container config into commit config before commit
MergeConfigs bool
Config *container.Config
}
// ExecConfig is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecConfig struct {

View file

@ -1,4 +1,4 @@
package container
package container // import "github.com/docker/docker/api/types/container"
import (
"time"

View file

@ -1,4 +1,4 @@
package container
package container // import "github.com/docker/docker/api/types/container"
import (
"strings"

View file

@ -1,6 +1,6 @@
// +build !windows
package container
package container // import "github.com/docker/docker/api/types/container"
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {

View file

@ -1,4 +1,4 @@
package container
package container // import "github.com/docker/docker/api/types/container"
// IsBridge indicates whether container uses the bridge network stack
// in windows it is given the name NAT

View file

@ -1,4 +1,4 @@
package container
package container // import "github.com/docker/docker/api/types/container"
// WaitCondition is a type used to specify a container state for which
// to wait.

View file

@ -0,0 +1,52 @@
package events // import "github.com/docker/docker/api/types/events"
const (
// ContainerEventType is the event type that containers generate
ContainerEventType = "container"
// DaemonEventType is the event type that daemon generate
// DaemonEventType is the event type that the daemon generates
// ImageEventType is the event type that images generate
ImageEventType = "image"
// NetworkEventType is the event type that networks generate
NetworkEventType = "network"
// PluginEventType is the event type that plugins generate
PluginEventType = "plugin"
// VolumeEventType is the event type that volumes generate
VolumeEventType = "volume"
// ServiceEventType is the event type that services generate
ServiceEventType = "service"
// NodeEventType is the event type that nodes generate
NodeEventType = "node"
// SecretEventType is the event type that secrets generate
SecretEventType = "secret"
// ConfigEventType is the event type that configs generate
ConfigEventType = "config"
)
// Actor describes something that generates events,
// like a container, or a network, or a volume.
// It has a defined name and a set of attributes.
// The container attributes are its labels; other actors
// can generate these attributes from other properties.
type Actor struct {
ID string
Attributes map[string]string
}
// Message represents the information an event contains
type Message struct {
// Deprecated information from JSONMessage.
// With data only in container events.
Status string `json:"status,omitempty"`
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
Type string
Action string
Actor Actor
// Engine events are local scope. Cluster events are swarm scope.
Scope string `json:"scope,omitempty"`
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
}
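
A hedged sketch of how these event types usually surface through the client's Events call (the stream runs until the context is cancelled; error handling is minimal):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Events returns two channels: one for messages, one for errors.
	msgs, errs := cli.Events(ctx, types.EventsOptions{})
	for {
		select {
		case m := <-msgs:
			fmt.Printf("%s %s %s\n", m.Type, m.Action, m.Actor.ID)
		case err := <-errs:
			if err != nil {
				panic(err)
			}
			return
		}
	}
}
```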

View file

@ -1,7 +1,7 @@
/*Package filters provides tools for encoding a mapping of keys to a set of
multiple values.
*/
package filters
package filters // import "github.com/docker/docker/api/types/filters"
import (
"encoding/json"

View file

@ -0,0 +1,37 @@
package image
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// HistoryResponseItem individual image layer information in response to ImageHistory operation
// swagger:model HistoryResponseItem
type HistoryResponseItem struct {
// comment
// Required: true
Comment string `json:"Comment"`
// created
// Required: true
Created int64 `json:"Created"`
// created by
// Required: true
CreatedBy string `json:"CreatedBy"`
// Id
// Required: true
ID string `json:"Id"`
// size
// Required: true
Size int64 `json:"Size"`
// tags
// Required: true
Tags []string `json:"Tags"`
}

View file

@ -1,4 +1,4 @@
package mount
package mount // import "github.com/docker/docker/api/types/mount"
import (
"os"

View file

@ -1,4 +1,4 @@
package network
package network // import "github.com/docker/docker/api/types/network"
// Address represents an IP address
type Address struct {

View file

@ -1,4 +1,4 @@
package types
package types // import "github.com/docker/docker/api/types"
import (
"encoding/json"

View file

@ -7,7 +7,7 @@ package types
// swagger:model Port
type Port struct {
// IP
// Host IP address that the container's port is mapped to
IP string `json:"IP,omitempty"`
// Port on the container

View file

@ -1,4 +1,4 @@
package registry
package registry // import "github.com/docker/docker/api/types/registry"
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

View file

@ -1,4 +1,4 @@
package registry
package registry // import "github.com/docker/docker/api/types/registry"
import (
"encoding/json"

View file

@ -1,4 +1,4 @@
package types
package types // import "github.com/docker/docker/api/types"
// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {

View file

@ -1,6 +1,6 @@
// Package types is used for API stability in the types and response to the
// consumers of the API stats endpoint.
package types
package types // import "github.com/docker/docker/api/types"
import "time"

View file

@ -1,4 +1,4 @@
package strslice
package strslice // import "github.com/docker/docker/api/types/strslice"
import "encoding/json"

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
import "time"

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
import "os"
@ -13,6 +13,10 @@ type Config struct {
type ConfigSpec struct {
Annotations
Data []byte `json:",omitempty"`
// Templating controls whether and how to evaluate the config payload as
// a template. If it is not set, no templating is used.
Templating *Driver `json:",omitempty"`
}
// ConfigReferenceFileTarget is a file target in a config reference

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"time"

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"github.com/docker/docker/api/types/network"
@ -62,6 +62,8 @@ const (
PortConfigProtocolTCP PortConfigProtocol = "tcp"
// PortConfigProtocolUDP UDP
PortConfigProtocolUDP PortConfigProtocol = "udp"
// PortConfigProtocolSCTP SCTP
PortConfigProtocolSCTP PortConfigProtocol = "sctp"
)
// EndpointVirtualIP represents the virtual ip of a port.

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
// Node represents a node.
type Node struct {

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
// RuntimeType is the type of runtime used for the TaskSpec
type RuntimeType string
@ -11,9 +11,17 @@ const (
RuntimeContainer RuntimeType = "container"
// RuntimePlugin is the plugin based runtime
RuntimePlugin RuntimeType = "plugin"
// RuntimeNetworkAttachment is the network attachment runtime
RuntimeNetworkAttachment RuntimeType = "attachment"
// RuntimeURLContainer is the proto url for the container type
RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
// RuntimeURLPlugin is the proto url for the plugin type
RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
)
// NetworkAttachmentSpec represents the runtime spec type for network
// attachment tasks
type NetworkAttachmentSpec struct {
ContainerID string
}

View file

@ -1,3 +1,3 @@
//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
package runtime
package runtime // import "github.com/docker/docker/api/types/swarm/runtime"

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
import "os"
@ -14,6 +14,10 @@ type SecretSpec struct {
Annotations
Data []byte `json:",omitempty"`
Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store
// Templating controls whether and how to evaluate the secret payload as
// a template. If it is not set, no templating is used.
Templating *Driver `json:",omitempty"`
}
// SecretReferenceFileTarget is a file target in a secret reference

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
import "time"

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
import "time"

View file

@ -1,4 +1,4 @@
package swarm
package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"time"
@ -36,6 +36,10 @@ const (
TaskStateFailed TaskState = "failed"
// TaskStateRejected REJECTED
TaskStateRejected TaskState = "rejected"
// TaskStateRemove REMOVE
TaskStateRemove TaskState = "remove"
// TaskStateOrphaned ORPHANED
TaskStateOrphaned TaskState = "orphaned"
)
// Task represents a task.
@ -56,10 +60,13 @@ type Task struct {
// TaskSpec represents the spec of a task.
type TaskSpec struct {
// ContainerSpec and PluginSpec are mutually exclusive.
// PluginSpec will only be used when the `Runtime` field is set to `plugin`
ContainerSpec *ContainerSpec `json:",omitempty"`
PluginSpec *runtime.PluginSpec `json:",omitempty"`
// ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive.
// PluginSpec is only used when the `Runtime` field is set to `plugin`
// NetworkAttachmentSpec is used if the `Runtime` field is set to
// `attachment`.
ContainerSpec *ContainerSpec `json:",omitempty"`
PluginSpec *runtime.PluginSpec `json:",omitempty"`
NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"`
Resources *ResourceRequirements `json:",omitempty"`
RestartPolicy *RestartPolicy `json:",omitempty"`
@ -162,19 +169,19 @@ const (
// TaskStatus represents the status of a task.
type TaskStatus struct {
Timestamp time.Time `json:",omitempty"`
State TaskState `json:",omitempty"`
Message string `json:",omitempty"`
Err string `json:",omitempty"`
ContainerStatus ContainerStatus `json:",omitempty"`
PortStatus PortStatus `json:",omitempty"`
Timestamp time.Time `json:",omitempty"`
State TaskState `json:",omitempty"`
Message string `json:",omitempty"`
Err string `json:",omitempty"`
ContainerStatus *ContainerStatus `json:",omitempty"`
PortStatus PortStatus `json:",omitempty"`
}
// ContainerStatus represents the status of a container.
type ContainerStatus struct {
ContainerID string `json:",omitempty"`
PID int `json:",omitempty"`
ExitCode int `json:",omitempty"`
ContainerID string
PID int
ExitCode int
}
// PortStatus represents the port status of a task's host ports whose

View file

@ -0,0 +1,12 @@
package time // import "github.com/docker/docker/api/types/time"
import (
"strconv"
"time"
)
// DurationToSecondsString converts the specified duration to the number of
// seconds it represents, formatted as a string.
func DurationToSecondsString(duration time.Duration) string {
return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
}

View file

@ -0,0 +1,129 @@
package time // import "github.com/docker/docker/api/types/time"
import (
"fmt"
"math"
"strconv"
"strings"
"time"
)
// These are additional predefined layouts for use in Time.Format and Time.Parse
// with --since and --until parameters for `docker logs` and `docker events`
const (
rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
)
// GetTimestamp tries to parse given string as golang duration,
// then RFC3339 time and finally as a Unix timestamp. If
// any of these were successful, it returns a Unix timestamp
// as string otherwise returns the given value back.
// In case of duration input, the returned timestamp is computed
// as the given reference time minus the amount of the duration.
func GetTimestamp(value string, reference time.Time) (string, error) {
if d, err := time.ParseDuration(value); value != "0" && err == nil {
return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
}
var format string
// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
if strings.Contains(value, ".") {
if parseInLocation {
format = rFC3339NanoLocal
} else {
format = time.RFC3339Nano
}
} else if strings.Contains(value, "T") {
// we want the number of colons in the T portion of the timestamp
tcolons := strings.Count(value, ":")
// if parseInLocation is off and we have a +/- zone offset (not Z) then
// there will be an extra colon in the input for the tz offset subtract that
// colon from the tcolons count
if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
tcolons--
}
if parseInLocation {
switch tcolons {
case 0:
format = "2006-01-02T15"
case 1:
format = "2006-01-02T15:04"
default:
format = rFC3339Local
}
} else {
switch tcolons {
case 0:
format = "2006-01-02T15Z07:00"
case 1:
format = "2006-01-02T15:04Z07:00"
default:
format = time.RFC3339
}
}
} else if parseInLocation {
format = dateLocal
} else {
format = dateWithZone
}
var t time.Time
var err error
if parseInLocation {
t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
} else {
t, err = time.Parse(format, value)
}
if err != nil {
// if there is a `-` then it's an RFC3339 like timestamp
if strings.Contains(value, "-") {
return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
}
if _, _, err := parseTimestamp(value); err != nil {
return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
}
return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
}
return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
}
// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
// the format "%d.%09d" (i.e. time.Unix(), int64(time.Nanosecond())).
// If the incoming nanosecond portion is longer or shorter than 9 digits, it is
// converted to nanoseconds. The expectation is that the seconds and
// nanoseconds will be used to create a time variable. For example:
//     seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
//     if err == nil { since := time.Unix(seconds, nanoseconds) }
// It returns def (the default seconds) as the seconds value if value == "".
func ParseTimestamps(value string, def int64) (int64, int64, error) {
if value == "" {
return def, 0, nil
}
return parseTimestamp(value)
}
func parseTimestamp(value string) (int64, int64, error) {
sa := strings.SplitN(value, ".", 2)
s, err := strconv.ParseInt(sa[0], 10, 64)
if err != nil {
return s, 0, err
}
if len(sa) != 2 {
return s, 0, nil
}
n, err := strconv.ParseInt(sa[1], 10, 64)
if err != nil {
return s, n, err
}
// should already be in nanoseconds but just in case convert n to nanoseconds
n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
return s, n, nil
}
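
A small usage sketch for these helpers; note the package must be import-aliased because its name collides with the standard library time package:

```go
package main

import (
	"fmt"
	"time"

	timetypes "github.com/docker/docker/api/types/time"
)

func main() {
	// A relative duration is turned into an absolute Unix timestamp, the
	// form the daemon expects for --since/--until style parameters.
	since, err := timetypes.GetTimestamp("10m", time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println("since:", since)

	// Split a "<seconds>.<nanoseconds>" value back into its two parts.
	secs, nanos, err := timetypes.ParseTimestamps("1136073600.000000001", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(secs, nanos).UTC())
}
```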

View file

@ -1,4 +1,4 @@
package types
package types // import "github.com/docker/docker/api/types"
import (
"errors"

View file

@ -1,4 +1,4 @@
package versions
package versions // import "github.com/docker/docker/api/types/versions"
import (
"strconv"

View file

@ -0,0 +1,29 @@
package volume
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// VolumeCreateBody
// swagger:model VolumeCreateBody
type VolumeCreateBody struct {
// Name of the volume driver to use.
// Required: true
Driver string `json:"Driver"`
// A mapping of driver options and values. These options are passed directly to the driver and are driver specific.
// Required: true
DriverOpts map[string]string `json:"DriverOpts"`
// User-defined key/value metadata.
// Required: true
Labels map[string]string `json:"Labels"`
// The new volume's name. If not specified, Docker generates a name.
// Required: true
Name string `json:"Name"`
}

View file

@ -0,0 +1,23 @@
package volume
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
import "github.com/docker/docker/api/types"
// VolumeListOKBody
// swagger:model VolumeListOKBody
type VolumeListOKBody struct {
// List of volumes
// Required: true
Volumes []*types.Volume `json:"Volumes"`
// Warnings that occurred when fetching the list of volumes
// Required: true
Warnings []string `json:"Warnings"`
}
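
Assuming the client's VolumeCreate helper accepts this generated body (as it does elsewhere in this vendored tree), a minimal sketch; the volume name and label are placeholders:

```go
package main

import (
	"context"
	"fmt"

	volumetypes "github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// Every field of the generated body is marked required by swagger,
	// but empty maps and strings are accepted by the daemon.
	vol, err := cli.VolumeCreate(context.Background(), volumetypes.VolumeCreateBody{
		Name:   "example-data", // placeholder name
		Driver: "local",
		Labels: map[string]string{"com.example.purpose": "demo"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created volume:", vol.Name, "at", vol.Mountpoint)
}
```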

35
vendor/github.com/docker/docker/client/README.md generated vendored Normal file
View file

@ -0,0 +1,35 @@
# Go client for the Docker Engine API
The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does: running containers, pulling images, managing swarms, etc.
For example, to list running containers (the equivalent of `docker ps`):
```go
package main
import (
"context"
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
)
func main() {
cli, err := client.NewEnvClient()
if err != nil {
panic(err)
}
containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
panic(err)
}
for _, container := range containers {
fmt.Printf("%s %s\n", container.ID[:10], container.Image)
}
}
```
[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client)
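
Since this vendored client marks NewEnvClient as deprecated (see client.go below), an equivalent sketch using NewClientWithOpts together with API version negotiation:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// FromEnv reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and
	// DOCKER_TLS_VERIFY; NegotiateAPIVersion then lowers the client version
	// if the daemon only supports an older API.
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	cli.NegotiateAPIVersion(context.Background())

	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("connected to", info.Name, "using API version", cli.ClientVersion())
}
```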

30
vendor/github.com/docker/docker/client/build_prune.go generated vendored Normal file
View file

@ -0,0 +1,30 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"fmt"
"github.com/docker/docker/api/types"
)
// BuildCachePrune requests the daemon to delete unused cache data
func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) {
if err := cli.NewVersionError("1.31", "build prune"); err != nil {
return nil, err
}
report := types.BuildCachePruneReport{}
serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil)
if err != nil {
return nil, err
}
defer ensureReaderClosed(serverResp)
if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
return nil, fmt.Errorf("Error retrieving disk usage: %v", err)
}
return &report, nil
}

View file

@ -0,0 +1,14 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"github.com/docker/docker/api/types"
)
// CheckpointCreate creates a checkpoint from the given container with the given name
func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
ensureReaderClosed(resp)
return err
}

View file

@ -0,0 +1,20 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"net/url"
"github.com/docker/docker/api/types"
)
// CheckpointDelete deletes the checkpoint with the given name from the given container
func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error {
query := url.Values{}
if options.CheckpointDir != "" {
query.Set("dir", options.CheckpointDir)
}
resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil)
ensureReaderClosed(resp)
return err
}

View file

@ -0,0 +1,28 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"net/url"
"github.com/docker/docker/api/types"
)
// CheckpointList returns the checkpoints of the given container in the docker host
func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
var checkpoints []types.Checkpoint
query := url.Values{}
if options.CheckpointDir != "" {
query.Set("dir", options.CheckpointDir)
}
resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
if err != nil {
return checkpoints, wrapResponseError(err, resp, "container", container)
}
err = json.NewDecoder(resp.body).Decode(&checkpoints)
ensureReaderClosed(resp)
return checkpoints, err
}

402
vendor/github.com/docker/docker/client/client.go generated vendored Normal file
View file

@ -0,0 +1,402 @@
/*
Package client is a Go client for the Docker Engine API.
For more information about the Engine API, see the documentation:
https://docs.docker.com/engine/reference/api/
Usage
You use the library by creating a client object and calling methods on it. The
client can be created either from environment variables with NewEnvClient, or
configured manually with NewClient.
For example, to list running containers (the equivalent of "docker ps"):
package main
import (
"context"
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
)
func main() {
cli, err := client.NewEnvClient()
if err != nil {
panic(err)
}
containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
panic(err)
}
for _, container := range containers {
fmt.Printf("%s %s\n", container.ID[:10], container.Image)
}
}
*/
package client // import "github.com/docker/docker/client"
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
)
// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
var ErrRedirect = errors.New("unexpected redirect in response")
// Client is the API client that performs all operations
// against a docker server.
type Client struct {
// scheme sets the scheme for the client
scheme string
// host holds the server address to connect to
host string
// proto holds the client protocol i.e. unix.
proto string
// addr holds the client address.
addr string
// basePath holds the path to prepend to the requests.
basePath string
// client used to send and receive http requests.
client *http.Client
// version of the server to talk to.
version string
// custom http headers configured by users.
customHTTPHeaders map[string]string
// manualOverride is set to true when the version was set by users.
manualOverride bool
}
// CheckRedirect specifies the policy for dealing with redirect responses:
// If the request is non-GET return `ErrRedirect`. Otherwise use the last response.
//
// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client.
// The Docker client (and by extension the docker API client) can be made to send a request
// like POST /containers//start where what would normally be in the name section of the URL is empty.
// This triggers an HTTP 301 from the daemon.
// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon.
// This behavior change manifests in the client in that before the 301 was not followed and
// the client did not generate an error, but now results in a message like Error response from daemon: page not found.
func CheckRedirect(req *http.Request, via []*http.Request) error {
if via[0].Method == http.MethodGet {
return http.ErrUseLastResponse
}
return ErrRedirect
}
// NewEnvClient initializes a new API client based on environment variables.
// See FromEnv for a list of supported environment variables.
//
// Deprecated: use NewClientWithOpts(FromEnv)
func NewEnvClient() (*Client, error) {
return NewClientWithOpts(FromEnv)
}
// FromEnv configures the client with values from environment variables.
//
// Supported environment variables:
// DOCKER_HOST to set the url to the docker server.
// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
// DOCKER_CERT_PATH to load the TLS certificates from.
// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
func FromEnv(c *Client) error {
if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
options := tlsconfig.Options{
CAFile: filepath.Join(dockerCertPath, "ca.pem"),
CertFile: filepath.Join(dockerCertPath, "cert.pem"),
KeyFile: filepath.Join(dockerCertPath, "key.pem"),
InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return err
}
c.client = &http.Client{
Transport: &http.Transport{TLSClientConfig: tlsc},
CheckRedirect: CheckRedirect,
}
}
if host := os.Getenv("DOCKER_HOST"); host != "" {
if err := WithHost(host)(c); err != nil {
return err
}
}
if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
c.version = version
c.manualOverride = true
}
return nil
}
// WithTLSClientConfig applies a tls config to the client transport.
func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) error {
return func(c *Client) error {
opts := tlsconfig.Options{
CAFile: cacertPath,
CertFile: certPath,
KeyFile: keyPath,
ExclusiveRootPools: true,
}
config, err := tlsconfig.Client(opts)
if err != nil {
return errors.Wrap(err, "failed to create tls config")
}
if transport, ok := c.client.Transport.(*http.Transport); ok {
transport.TLSClientConfig = config
return nil
}
return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport)
}
}
// WithDialer applies the dialer.DialContext to the client transport. This can be
// used to set the Timeout and KeepAlive settings of the client.
func WithDialer(dialer *net.Dialer) func(*Client) error {
return func(c *Client) error {
if transport, ok := c.client.Transport.(*http.Transport); ok {
transport.DialContext = dialer.DialContext
return nil
}
return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport)
}
}
// WithVersion overrides the client version with the specified one
func WithVersion(version string) func(*Client) error {
return func(c *Client) error {
c.version = version
return nil
}
}
// WithHost overrides the client host with the specified one.
func WithHost(host string) func(*Client) error {
return func(c *Client) error {
hostURL, err := ParseHostURL(host)
if err != nil {
return err
}
c.host = host
c.proto = hostURL.Scheme
c.addr = hostURL.Host
c.basePath = hostURL.Path
if transport, ok := c.client.Transport.(*http.Transport); ok {
return sockets.ConfigureTransport(transport, c.proto, c.addr)
}
return errors.Errorf("cannot apply host to transport: %T", c.client.Transport)
}
}
// WithHTTPClient overrides the client http client with the specified one
func WithHTTPClient(client *http.Client) func(*Client) error {
return func(c *Client) error {
if client != nil {
c.client = client
}
return nil
}
}
// WithHTTPHeaders overrides the client default http headers
func WithHTTPHeaders(headers map[string]string) func(*Client) error {
return func(c *Client) error {
c.customHTTPHeaders = headers
return nil
}
}
// NewClientWithOpts initializes a new API client with default values. It takes functors
// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))`
// It also initializes the custom http headers to add to each request.
//
// It won't send any version information if the version number is empty. It is
// highly recommended that you set a version or your client may break if the
// server is upgraded.
func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) {
client, err := defaultHTTPClient(DefaultDockerHost)
if err != nil {
return nil, err
}
c := &Client{
host: DefaultDockerHost,
version: api.DefaultVersion,
scheme: "http",
client: client,
proto: defaultProto,
addr: defaultAddr,
}
for _, op := range ops {
if err := op(c); err != nil {
return nil, err
}
}
if _, ok := c.client.Transport.(http.RoundTripper); !ok {
return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport)
}
tlsConfig := resolveTLSConfig(c.client.Transport)
if tlsConfig != nil {
// TODO(stevvooe): This isn't really the right way to write clients in Go.
// `NewClient` should probably only take an `*http.Client` and work from there.
// Unfortunately, the model of having a host-ish/url-thingy as the connection
// string has us confusing protocol and transport layers. We continue doing
// this to avoid breaking existing clients but this should be addressed.
c.scheme = "https"
}
return c, nil
}
func defaultHTTPClient(host string) (*http.Client, error) {
url, err := ParseHostURL(host)
if err != nil {
return nil, err
}
transport := new(http.Transport)
sockets.ConfigureTransport(transport, url.Scheme, url.Host)
return &http.Client{
Transport: transport,
CheckRedirect: CheckRedirect,
}, nil
}
// NewClient initializes a new API client for the given host and API version.
// It uses the given http client as transport.
// It also initializes the custom http headers to add to each request.
//
// It won't send any version information if the version number is empty. It is
// highly recommended that you set a version or your client may break if the
// server is upgraded.
// Deprecated: use NewClientWithOpts
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders))
}
// Close the transport used by the client
func (cli *Client) Close() error {
if t, ok := cli.client.Transport.(*http.Transport); ok {
t.CloseIdleConnections()
}
return nil
}
// getAPIPath returns the versioned request path to call the api.
// It appends the query parameters to the path if they are not empty.
func (cli *Client) getAPIPath(p string, query url.Values) string {
var apiPath string
if cli.version != "" {
v := strings.TrimPrefix(cli.version, "v")
apiPath = path.Join(cli.basePath, "/v"+v, p)
} else {
apiPath = path.Join(cli.basePath, p)
}
return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
}
// ClientVersion returns the API version used by this client.
func (cli *Client) ClientVersion() string {
return cli.version
}
// NegotiateAPIVersion queries the API and updates the version to match the
// API version. Any errors are silently ignored.
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
ping, _ := cli.Ping(ctx)
cli.NegotiateAPIVersionPing(ping)
}
// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
// if the ping version is less than the default version.
func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
if cli.manualOverride {
return
}
// try the latest version before versioning headers existed
if p.APIVersion == "" {
p.APIVersion = "1.24"
}
// if the client is not initialized with a version, start with the latest supported version
if cli.version == "" {
cli.version = api.DefaultVersion
}
// if server version is lower than the client version, downgrade
if versions.LessThan(p.APIVersion, cli.version) {
cli.version = p.APIVersion
}
}
// DaemonHost returns the host address used by the client
func (cli *Client) DaemonHost() string {
return cli.host
}
// HTTPClient returns a copy of the HTTP client bound to the server
func (cli *Client) HTTPClient() *http.Client {
return &*cli.client
}
// ParseHostURL parses a url string, validates the string is a host url, and
// returns the parsed URL
func ParseHostURL(host string) (*url.URL, error) {
protoAddrParts := strings.SplitN(host, "://", 2)
if len(protoAddrParts) == 1 {
return nil, fmt.Errorf("unable to parse docker host `%s`", host)
}
var basePath string
proto, addr := protoAddrParts[0], protoAddrParts[1]
if proto == "tcp" {
parsed, err := url.Parse("tcp://" + addr)
if err != nil {
return nil, err
}
addr = parsed.Host
basePath = parsed.Path
}
return &url.URL{
Scheme: proto,
Host: addr,
Path: basePath,
}, nil
}
// CustomHTTPHeaders returns the custom http headers stored by the client.
func (cli *Client) CustomHTTPHeaders() map[string]string {
m := make(map[string]string)
for k, v := range cli.customHTTPHeaders {
m[k] = v
}
return m
}
// SetCustomHTTPHeaders that will be set on every HTTP request made by the client.
// Deprecated: use WithHTTPHeaders when creating the client.
func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
cli.customHTTPHeaders = headers
}
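
A sketch of the functional-options pattern defined above, wiring an explicit host, TLS material, a dial timeout and a pinned API version; the host address and certificate paths are hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/docker/docker/client"
)

func main() {
	// Each option is a func(*client.Client) error applied in order by
	// NewClientWithOpts; later options may override earlier ones.
	cli, err := client.NewClientWithOpts(
		client.WithHost("tcp://10.0.0.5:2376"),                                            // hypothetical daemon address
		client.WithTLSClientConfig("/certs/ca.pem", "/certs/cert.pem", "/certs/key.pem"), // hypothetical paths
		client.WithDialer(&net.Dialer{Timeout: 5 * time.Second}),
		client.WithVersion("1.37"),
	)
	if err != nil {
		panic(err)
	}

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("daemon API version:", ping.APIVersion)
}
```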

View file

@ -0,0 +1,9 @@
// +build linux freebsd openbsd darwin
package client // import "github.com/docker/docker/client"
// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
const DefaultDockerHost = "unix:///var/run/docker.sock"
const defaultProto = "unix"
const defaultAddr = "/var/run/docker.sock"

View file

@ -0,0 +1,7 @@
package client // import "github.com/docker/docker/client"
// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
const DefaultDockerHost = "npipe:////./pipe/docker_engine"
const defaultProto = "npipe"
const defaultAddr = "//./pipe/docker_engine"

View file

@ -0,0 +1,25 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
)
// ConfigCreate creates a new Config.
func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
var response types.ConfigCreateResponse
if err := cli.NewVersionError("1.30", "config create"); err != nil {
return response, err
}
resp, err := cli.post(ctx, "/configs/create", nil, config, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}
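
A minimal sketch of creating a swarm config with this call; the config name and payload are placeholders, and the daemon must be a swarm manager running API 1.30 or newer:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// NewVersionError rejects the call client-side if the negotiated
	// API version is older than 1.30.
	resp, err := cli.ConfigCreate(context.Background(), swarm.ConfigSpec{
		Annotations: swarm.Annotations{Name: "app-settings"}, // placeholder name
		Data:        []byte("key=value\n"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("config ID:", resp.ID)
}
```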

View file

@ -0,0 +1,36 @@
package client // import "github.com/docker/docker/client"
import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"github.com/docker/docker/api/types/swarm"
)
// ConfigInspectWithRaw returns the config information with raw data
func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
if id == "" {
return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id}
}
if err := cli.NewVersionError("1.30", "config inspect"); err != nil {
return swarm.Config{}, nil, err
}
resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
if err != nil {
return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
}
defer ensureReaderClosed(resp)
body, err := ioutil.ReadAll(resp.body)
if err != nil {
return swarm.Config{}, nil, err
}
var config swarm.Config
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&config)
return config, body, err
}

38
vendor/github.com/docker/docker/client/config_list.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"net/url"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
)
// ConfigList returns the list of configs.
func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
if err := cli.NewVersionError("1.30", "config list"); err != nil {
return nil, err
}
query := url.Values{}
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
query.Set("filters", filterJSON)
}
resp, err := cli.get(ctx, "/configs", query, nil)
if err != nil {
return nil, err
}
var configs []swarm.Config
err = json.NewDecoder(resp.body).Decode(&configs)
ensureReaderClosed(resp)
return configs, err
}

View file

@ -0,0 +1,13 @@
package client // import "github.com/docker/docker/client"
import "context"
// ConfigRemove removes a Config.
func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
if err := cli.NewVersionError("1.30", "config remove"); err != nil {
return err
}
resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
ensureReaderClosed(resp)
return wrapResponseError(err, resp, "config", id)
}

View file

@ -0,0 +1,21 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"net/url"
"strconv"
"github.com/docker/docker/api/types/swarm"
)
// ConfigUpdate attempts to update a Config
func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
if err := cli.NewVersionError("1.30", "config update"); err != nil {
return err
}
query := url.Values{}
query.Set("version", strconv.FormatUint(version.Index, 10))
resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
ensureReaderClosed(resp)
return err
}

View file

@ -0,0 +1,57 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"net/url"
"github.com/docker/docker/api/types"
)
// ContainerAttach attaches a connection to a container in the server.
// It returns a types.HijackedConnection with the hijacked connection
// and a reader to get output. It's up to the caller to close
// the hijacked connection by calling types.HijackedResponse.Close.
//
// The stream format on the response will be in one of two formats:
//
// If the container is using a TTY, there is only a single stream (stdout), and
// data is copied directly from the container output stream, no extra
// multiplexing or headers.
//
// If the container is *not* using a TTY, streams for stdout and stderr are
// multiplexed.
// The format of the multiplexed stream is as follows:
//
// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
//
// STREAM_TYPE can be 1 for stdout and 2 for stderr
//
// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
// This is the size of OUTPUT.
//
// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
// stream.
func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
query := url.Values{}
if options.Stream {
query.Set("stream", "1")
}
if options.Stdin {
query.Set("stdin", "1")
}
if options.Stdout {
query.Set("stdout", "1")
}
if options.Stderr {
query.Set("stderr", "1")
}
if options.DetachKeys != "" {
query.Set("detachKeys", options.DetachKeys)
}
if options.Logs {
query.Set("logs", "1")
}
headers := map[string][]string{"Content-Type": {"text/plain"}}
return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
}
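
A hedged sketch that attaches to a (hypothetical) non-TTY container and demultiplexes the stream with stdcopy.StdCopy, as the comment above suggests:

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// "my-container" is a placeholder for an existing container name or ID.
	resp, err := cli.ContainerAttach(context.Background(), "my-container", types.ContainerAttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Close()

	// For non-TTY containers the stream is multiplexed; StdCopy splits it
	// back into separate stdout and stderr writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		panic(err)
	}
}
```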

View file

@ -0,0 +1,55 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"errors"
"net/url"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
)
// ContainerCommit applies changes into a container and creates a new tagged image.
func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) {
var repository, tag string
if options.Reference != "" {
ref, err := reference.ParseNormalizedNamed(options.Reference)
if err != nil {
return types.IDResponse{}, err
}
if _, isCanonical := ref.(reference.Canonical); isCanonical {
return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference")
}
ref = reference.TagNameOnly(ref)
if tagged, ok := ref.(reference.Tagged); ok {
tag = tagged.Tag()
}
repository = reference.FamiliarName(ref)
}
query := url.Values{}
query.Set("container", container)
query.Set("repo", repository)
query.Set("tag", tag)
query.Set("comment", options.Comment)
query.Set("author", options.Author)
for _, change := range options.Changes {
query.Add("changes", change)
}
if !options.Pause {
query.Set("pause", "0")
}
var response types.IDResponse
resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}
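
A short usage sketch; the container name and reference are placeholders, and the reference is normalized exactly as described above (a digest would be rejected):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// "example/web" becomes the repository and "v1" the tag.
	resp, err := cli.ContainerCommit(context.Background(), "my-container", types.ContainerCommitOptions{
		Reference: "example/web:v1",
		Comment:   "snapshot after configuration",
		Pause:     true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("new image ID:", resp.ID)
}
```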

View file

@ -0,0 +1,101 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path/filepath"
"strings"
"github.com/docker/docker/api/types"
)
// ContainerStatPath returns Stat information about a path inside the container filesystem.
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
query := url.Values{}
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
urlStr := "/containers/" + containerID + "/archive"
response, err := cli.head(ctx, urlStr, query, nil)
if err != nil {
return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path)
}
defer ensureReaderClosed(response)
return getContainerPathStatFromHeader(response.header)
}
// CopyToContainer copies content into the container filesystem.
// Note that `content` must be a Reader for a TAR archive
func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error {
query := url.Values{}
query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
if !options.AllowOverwriteDirWithFile {
query.Set("noOverwriteDirNonDir", "true")
}
if options.CopyUIDGID {
query.Set("copyUIDGID", "true")
}
apiPath := "/containers/" + containerID + "/archive"
response, err := cli.putRaw(ctx, apiPath, query, content, nil)
if err != nil {
return wrapResponseError(err, response, "container:path", containerID+":"+dstPath)
}
defer ensureReaderClosed(response)
if response.statusCode != http.StatusOK {
return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
return nil
}
// CopyFromContainer gets the content from the container and returns it as a Reader
// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader.
func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
query := make(url.Values, 1)
query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
apiPath := "/containers/" + containerID + "/archive"
response, err := cli.get(ctx, apiPath, query, nil)
if err != nil {
return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath)
}
if response.statusCode != http.StatusOK {
return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
// In order to get the copy behavior right, we need to know information
// about both the source and the destination. The response headers include
// stat info about the source that we can use in deciding exactly how to
// copy it locally. Along with the stat info about the local destination,
// we have everything we need to handle the multiple possibilities there
// can be when copying a file/dir from one location to another file/dir.
stat, err := getContainerPathStatFromHeader(response.header)
if err != nil {
return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
}
return response.body, stat, err
}
func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
var stat types.ContainerPathStat
encodedStat := header.Get("X-Docker-Container-Path-Stat")
statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
err := json.NewDecoder(statDecoder).Decode(&stat)
if err != nil {
err = fmt.Errorf("unable to decode container path stat header: %s", err)
}
return stat, err
}
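
Because CopyToContainer only accepts tar streams, a minimal sketch that builds a single-file tar archive in memory and uploads it; the container name and destination path are placeholders:

```go
package main

import (
	"archive/tar"
	"bytes"
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// Build a single-file tar archive in memory; the API never accepts
	// bare file contents.
	content := []byte("hello from the host\n")
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(content))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(content); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// "my-container" and "/tmp" are placeholders.
	if err := cli.CopyToContainer(context.Background(), "my-container", "/tmp", &buf, types.CopyToContainerOptions{}); err != nil {
		panic(err)
	}
}
```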

View file

@ -0,0 +1,56 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"net/url"
"strings"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions"
)
type configWrapper struct {
*container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
}
// ContainerCreate creates a new container based on the given configuration.
// It can be associated with a name, but it's not mandatory.
func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
var response container.ContainerCreateCreatedBody
if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
return response, err
}
// When using API 1.24 and under, the client is responsible for removing the container
if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") {
hostConfig.AutoRemove = false
}
query := url.Values{}
if containerName != "" {
query.Set("name", containerName)
}
body := configWrapper{
Config: config,
HostConfig: hostConfig,
NetworkingConfig: networkingConfig,
}
serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
if err != nil {
if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
return response, objectNotFoundError{object: "image", id: config.Image}
}
return response, err
}
err = json.NewDecoder(serverResp.body).Decode(&response)
ensureReaderClosed(serverResp)
return response, err
}
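
A minimal create-and-start sketch; it assumes the image is already present locally, and the empty name lets the daemon generate one:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Host and networking config may be nil; only the container config is required.
	created, err := cli.ContainerCreate(ctx, &container.Config{
		Image: "alpine:3.7", // assumed to be pulled already
		Cmd:   []string{"echo", "hello"},
	}, nil, nil, "")
	if err != nil {
		panic(err)
	}

	if err := cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("started", created.ID)
}
```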

View file

@ -0,0 +1,23 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"net/url"
"github.com/docker/docker/api/types/container"
)
// ContainerDiff shows differences in a container filesystem since it was started.
func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) {
var changes []container.ContainerChangeResponseItem
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
if err != nil {
return changes, err
}
err = json.NewDecoder(serverResp.body).Decode(&changes)
ensureReaderClosed(serverResp)
return changes, err
}

View file

@ -0,0 +1,54 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"github.com/docker/docker/api/types"
)
// ContainerExecCreate creates a new exec configuration to run an exec process.
func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
var response types.IDResponse
if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
return response, err
}
resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}
// ContainerExecStart starts an exec process already created in the docker host.
func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
ensureReaderClosed(resp)
return err
}
// ContainerExecAttach attaches a connection to an exec process in the server.
// It returns a types.HijackedResponse with the hijacked connection
// and a reader to get output. It's up to the caller to close
// the hijacked connection by calling types.HijackedResponse.Close.
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
headers := map[string][]string{"Content-Type": {"application/json"}}
return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
}
// ContainerExecInspect returns information about a specific exec process on the docker host.
func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
var response types.ContainerExecInspect
resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}
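
A sketch that ties ContainerExecCreate and ContainerExecAttach together and demultiplexes the non-TTY output with the stdcopy package; the container name and command are illustrative only:

package main

import (
	"context"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatal(err)
	}
	// Create the exec instance on a hypothetical running container "example-web".
	execResp, err := cli.ContainerExecCreate(ctx, "example-web", types.ExecConfig{
		Cmd:          []string{"ls", "-l", "/"},
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Attaching starts the process and hijacks the connection for its output.
	hijack, err := cli.ContainerExecAttach(ctx, execResp.ID, types.ExecStartCheck{})
	if err != nil {
		log.Fatal(err)
	}
	defer hijack.Close()
	// The attached stream is multiplexed (no TTY), so split stdout and stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, hijack.Reader); err != nil {
		log.Fatal(err)
	}
}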


@ -0,0 +1,19 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"io"
"net/url"
)
// ContainerExport retrieves the raw contents of a container
// and returns them as an io.ReadCloser. It's up to the caller
// to close the stream.
func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
if err != nil {
return nil, err
}
return serverResp.body, nil
}
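
An illustrative sketch that streams the export into a local tar file; the container ID and output path are hypothetical:

package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatal(err)
	}
	rc, err := cli.ContainerExport(context.Background(), "example-web")
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // the caller owns the stream

	out, err := os.Create("example-web.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		log.Fatal(err)
	}
}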


@ -0,0 +1,53 @@
package client // import "github.com/docker/docker/client"
import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"net/url"
"github.com/docker/docker/api/types"
)
// ContainerInspect returns the container information.
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
if containerID == "" {
return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID}
}
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
if err != nil {
return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID)
}
var response types.ContainerJSON
err = json.NewDecoder(serverResp.body).Decode(&response)
ensureReaderClosed(serverResp)
return response, err
}
// ContainerInspectWithRaw returns the container information and its raw representation.
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
if containerID == "" {
return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID}
}
query := url.Values{}
if getSize {
query.Set("size", "1")
}
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
if err != nil {
return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
}
defer ensureReaderClosed(serverResp)
body, err := ioutil.ReadAll(serverResp.body)
if err != nil {
return types.ContainerJSON{}, nil, err
}
var response types.ContainerJSON
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&response)
return response, body, err
}
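
A minimal sketch of ContainerInspect, printing a few commonly used fields; the container ID is hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatal(err)
	}
	info, err := cli.ContainerInspect(context.Background(), "example-web")
	if err != nil {
		log.Fatal(err)
	}
	// A few fields from the decoded types.ContainerJSON response.
	fmt.Println("name:  ", info.Name)
	fmt.Println("image: ", info.Config.Image)
	fmt.Println("status:", info.State.Status)
}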


@ -0,0 +1,16 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"net/url"
)
// ContainerKill terminates the container process but does not remove the container from the docker host.
func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
query := url.Values{}
query.Set("signal", signal)
resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
ensureReaderClosed(resp)
return err
}
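
For illustration, sending SIGTERM to a hypothetical container:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatal(err)
	}
	// Signal names ("SIGTERM", "SIGKILL") or numbers are passed through to the daemon.
	if err := cli.ContainerKill(context.Background(), "example-web", "SIGTERM"); err != nil {
		log.Fatal(err)
	}
}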


@ -0,0 +1,56 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"net/url"
"strconv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
)
// ContainerList returns the list of containers in the docker host.
func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
query := url.Values{}
if options.All {
query.Set("all", "1")
}
if options.Limit != -1 {
query.Set("limit", strconv.Itoa(options.Limit))
}
if options.Since != "" {
query.Set("since", options.Since)
}
if options.Before != "" {
query.Set("before", options.Before)
}
if options.Size {
query.Set("size", "1")
}
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
if err != nil {
return nil, err
}
query.Set("filters", filterJSON)
}
resp, err := cli.get(ctx, "/containers/json", query, nil)
if err != nil {
return nil, err
}
var containers []types.Container
err = json.NewDecoder(resp.body).Decode(&containers)
ensureReaderClosed(resp)
return containers, err
}
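
A sketch listing all containers that carry a hypothetical label, using the filters package imported above:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatal(err)
	}
	args := filters.NewArgs()
	args.Add("label", "com.example.role=web") // hypothetical label
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
		All:     true,
		Filters: args,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Printf("%s  %s  %s\n", c.ID[:12], c.Image, c.Status)
	}
}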


@ -0,0 +1,80 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"io"
"net/url"
"time"
"github.com/docker/docker/api/types"
timetypes "github.com/docker/docker/api/types/time"
"github.com/pkg/errors"
)
// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
// It's up to the caller to close the stream.
//
// The stream format on the response will be in one of two formats:
//
// If the container is using a TTY, there is only a single stream (stdout), and
// data is copied directly from the container output stream, with no extra
// multiplexing or headers.
//
// If the container is *not* using a TTY, streams for stdout and stderr are
// multiplexed.
// The format of the multiplexed stream is as follows:
//
// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
//
// STREAM_TYPE can be 1 for stdout and 2 for stderr
//
// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
// This is the size of OUTPUT.
//
// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
// stream.
func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
query := url.Values{}
if options.ShowStdout {
query.Set("stdout", "1")
}
if options.ShowStderr {
query.Set("stderr", "1")
}
if options.Since != "" {
ts, err := timetypes.GetTimestamp(options.Since, time.Now())
if err != nil {
return nil, errors.Wrap(err, `invalid value for "since"`)
}
query.Set("since", ts)
}
if options.Until != "" {
ts, err := timetypes.GetTimestamp(options.Until, time.Now())
if err != nil {
return nil, errors.Wrap(err, `invalid value for "until"`)
}
query.Set("until", ts)
}
if options.Timestamps {
query.Set("timestamps", "1")
}
if options.Details {
query.Set("details", "1")
}
if options.Follow {
query.Set("follow", "1")
}
query.Set("tail", options.Tail)
resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
if err != nil {
return nil, wrapResponseError(err, resp, "container", container)
}
return resp.body, nil
}
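
A sketch that reads the multiplexed (non-TTY) log stream and splits it with stdcopy.StdCopy, as the comment above suggests; the container name is hypothetical:

package main

import (
	"context"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatal(err)
	}
	rc, err := cli.ContainerLogs(context.Background(), "example-web", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       "100",
		Timestamps: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	// Demultiplex the stream into stdout/stderr (only valid for non-TTY containers).
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
		log.Fatal(err)
	}
}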


@ -0,0 +1,10 @@
package client // import "github.com/docker/docker/client"
import "context"
// ContainerPause pauses the main process of a given container without terminating it.
func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
ensureReaderClosed(resp)
return err
}
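
An illustrative call, pausing a hypothetical container:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatal(err)
	}
	if err := cli.ContainerPause(context.Background(), "example-web"); err != nil {
		log.Fatal(err)
	}
}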


@ -0,0 +1,36 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
)
// ContainersPrune requests the daemon to delete unused data
func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) {
var report types.ContainersPruneReport
if err := cli.NewVersionError("1.25", "container prune"); err != nil {
return report, err
}
query, err := getFiltersQuery(pruneFilters)
if err != nil {
return report, err
}
serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
if err != nil {
return report, err
}
defer ensureReaderClosed(serverResp)
if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
return report, fmt.Errorf("Error retrieving containers prune report: %v", err)
}
return report, nil
}
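
A sketch pruning stopped containers with an age filter; the "until" filter value is an assumption about what the daemon accepts for this endpoint:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatal(err)
	}
	args := filters.NewArgs()
	args.Add("until", "24h") // assumed filter: only containers created more than 24h ago
	report, err := cli.ContainersPrune(context.Background(), args)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("deleted %d containers, reclaimed %d bytes\n",
		len(report.ContainersDeleted), report.SpaceReclaimed)
}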


@ -0,0 +1,27 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"net/url"
"github.com/docker/docker/api/types"
)
// ContainerRemove kills and removes a container from the docker host.
func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
query := url.Values{}
if options.RemoveVolumes {
query.Set("v", "1")
}
if options.RemoveLinks {
query.Set("link", "1")
}
if options.Force {
query.Set("force", "1")
}
resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
ensureReaderClosed(resp)
return wrapResponseError(err, resp, "container", containerID)
}
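
A sketch forcing removal of a hypothetical container together with its anonymous volumes:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithVersion("1.37"))
	if err != nil {
		log.Fatal(err)
	}
	err = cli.ContainerRemove(context.Background(), "example-web", types.ContainerRemoveOptions{
		RemoveVolumes: true, // also delete anonymous volumes
		Force:         true, // kill the container first if it is still running
	})
	if err != nil {
		log.Fatal(err)
	}
}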

Some files were not shown because too many files have changed in this diff.