Mirror of https://github.com/kreuzwerker/terraform-provider-docker.git (synced 2026-01-18 04:32:55 -05:00)

Merge branch 'akomic-image_build' into master

commit 7432d8677e
348 changed files with 24768 additions and 833 deletions
@@ -42,6 +42,84 @@ func resourceDockerImage() *schema.Resource {
                Elem:     &schema.Schema{Type: schema.TypeString},
                Set:      schema.HashString,
            },

            "output": {
                Type:     schema.TypeString,
                Computed: true,
                Elem: &schema.Schema{
                    Type: schema.TypeString,
                },
            },

            "build": {
                Type:          schema.TypeSet,
                Optional:      true,
                MaxItems:      1,
                ConflictsWith: []string{"pull_triggers", "pull_trigger"},
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "path": {
                            Type:        schema.TypeString,
                            Description: "Context path",
                            Required:    true,
                            ForceNew:    true,
                        },
                        "dockerfile": {
                            Type:        schema.TypeString,
                            Description: "Name of the Dockerfile (Default is 'PATH/Dockerfile')",
                            Optional:    true,
                            Default:     "Dockerfile",
                            ForceNew:    true,
                        },
                        "tag": {
                            Type:        schema.TypeList,
                            Description: "Name and optionally a tag in the 'name:tag' format",
                            Optional:    true,
                            Elem: &schema.Schema{
                                Type: schema.TypeString,
                            },
                        },
                        "force_remove": {
                            Type:        schema.TypeBool,
                            Description: "Always remove intermediate containers",
                            Optional:    true,
                        },
                        "remove": {
                            Type:        schema.TypeBool,
                            Description: "Remove intermediate containers after a successful build (default true)",
                            Default:     true,
                            Optional:    true,
                        },
                        "no_cache": {
                            Type:        schema.TypeBool,
                            Description: "Do not use cache when building the image",
                            Optional:    true,
                        },
                        "target": {
                            Type:        schema.TypeString,
                            Description: "Set the target build stage to build",
                            Optional:    true,
                        },
                        "build_arg": {
                            Type:        schema.TypeMap,
                            Description: "Set build-time variables",
                            Optional:    true,
                            Elem: &schema.Schema{
                                Type: schema.TypeString,
                            },
                            ForceNew: true,
                        },
                        "label": {
                            Type:        schema.TypeMap,
                            Description: "Set metadata for an image",
                            Optional:    true,
                            Elem: &schema.Schema{
                                Type: schema.TypeString,
                            },
                        },
                    },
                },
            },
        },
    }
}
@@ -3,6 +3,7 @@ package docker
import (
    "context"
    "fmt"
    "io"
    "log"
    "strings"
@@ -10,14 +11,60 @@ import (
    "encoding/base64"
    "encoding/json"

    "github.com/docker/cli/cli/command/image/build"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/jsonmessage"
    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
    "github.com/mitchellh/go-homedir"
)

func getBuildContext(filePath string, excludes []string) io.Reader {
    filePath, _ = homedir.Expand(filePath)
    ctx, _ := archive.TarWithOptions(filePath, &archive.TarOptions{
        ExcludePatterns: excludes,
    })
    return ctx
}

func decodeBuildMessages(response types.ImageBuildResponse) (string, error) {
    buf := new(bytes.Buffer)
    buildErr := error(nil)

    dec := json.NewDecoder(response.Body)
    for dec.More() {
        var m jsonmessage.JSONMessage
        err := dec.Decode(&m)
        if err != nil {
            return buf.String(), fmt.Errorf("Problem decoding message from docker daemon: %s", err)
        }

        m.Display(buf, false)

        if m.Error != nil {
            buildErr = fmt.Errorf("Unable to build image")
        }
    }
    log.Printf("[DEBUG] %s", buf.String())

    return buf.String(), buildErr
}

func resourceDockerImageCreate(d *schema.ResourceData, meta interface{}) error {
    client := meta.(*ProviderConfig).DockerClient
    imageName := d.Get("name").(string)

    if value, ok := d.GetOk("build"); ok {
        for _, rawBuild := range value.(*schema.Set).List() {
            rawBuild := rawBuild.(map[string]interface{})

            err := buildDockerImage(rawBuild, imageName, client)
            if err != nil {
                return err
            }
        }
    }
    apiImage, err := findImage(imageName, client, meta.(*ProviderConfig).AuthConfigs)
    if err != nil {
        return fmt.Errorf("Unable to read Docker image into resource: %s", err)
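For orientation, decodeBuildMessages above consumes the newline-delimited JSON messages the Docker daemon emits while building. The following is a minimal standalone sketch of the same decoding pattern, assuming a generic io.Reader and a canned input string (neither is part of this commit):

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "os"
    "strings"

    "github.com/docker/docker/pkg/jsonmessage"
)

// decodeStream mirrors decodeBuildMessages above: it renders each JSON message
// the way the docker CLI would and surfaces any error reported by the daemon.
func decodeStream(r io.Reader) (string, error) {
    buf := new(bytes.Buffer)
    dec := json.NewDecoder(r)
    for dec.More() {
        var m jsonmessage.JSONMessage
        if err := dec.Decode(&m); err != nil {
            return buf.String(), fmt.Errorf("decoding daemon message: %s", err)
        }
        m.Display(buf, false)
        if m.Error != nil {
            return buf.String(), fmt.Errorf("build failed: %s", m.Error.Message)
        }
    }
    return buf.String(), nil
}

func main() {
    // Canned example input; in the provider this is response.Body from ImageBuild.
    stream := `{"stream":"Step 1/2 : FROM alpine\n"}` + "\n" +
        `{"stream":"Successfully built abc123\n"}` + "\n"
    out, err := decodeStream(strings.NewReader(stream))
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
    fmt.Print(out)
}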
@@ -248,3 +295,56 @@ func findImage(imageName string, client *client.Client, authConfig *AuthConfigs)

    return nil, fmt.Errorf("Unable to find or pull image %s", imageName)
}

func buildDockerImage(rawBuild map[string]interface{}, imageName string, client *client.Client) error {
    buildOptions := types.ImageBuildOptions{}

    buildOptions.Version = types.BuilderV1
    buildOptions.Dockerfile = rawBuild["dockerfile"].(string)

    tags := []string{imageName}
    for _, t := range rawBuild["tag"].([]interface{}) {
        tags = append(tags, t.(string))
    }
    buildOptions.Tags = tags

    buildOptions.ForceRemove = rawBuild["force_remove"].(bool)
    buildOptions.Remove = rawBuild["remove"].(bool)
    buildOptions.NoCache = rawBuild["no_cache"].(bool)
    buildOptions.Target = rawBuild["target"].(string)

    buildArgs := make(map[string]*string)
    for k, v := range rawBuild["build_arg"].(map[string]interface{}) {
        val := v.(string)
        buildArgs[k] = &val
    }
    buildOptions.BuildArgs = buildArgs
    log.Printf("[DEBUG] Build Args: %v\n", buildArgs)

    labels := make(map[string]string)
    for k, v := range rawBuild["label"].(map[string]interface{}) {
        labels[k] = v.(string)
    }
    buildOptions.Labels = labels
    log.Printf("[DEBUG] Labels: %v\n", labels)

    contextDir := rawBuild["path"].(string)
    excludes, err := build.ReadDockerignore(contextDir)
    if err != nil {
        return err
    }
    excludes = build.TrimBuildFilesFromExcludes(excludes, buildOptions.Dockerfile, false)

    var response types.ImageBuildResponse
    response, err = client.ImageBuild(context.Background(), getBuildContext(contextDir, excludes), buildOptions)
    if err != nil {
        return err
    }
    defer response.Body.Close()

    buildResult, err := decodeBuildMessages(response)
    if err != nil {
        return fmt.Errorf("%s\n\n%s", err, buildResult)
    }
    return nil
}
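buildDockerImage chains together the pieces introduced in this commit: a tarred build context that honours .dockerignore, an ImageBuildOptions value assembled from the schema block, and the client's ImageBuild call. Below is a minimal standalone sketch of that same flow, assuming a hypothetical ./example context directory, an example:latest tag, and a daemon reachable via the standard environment configuration; error handling is reduced to log.Fatal for brevity:

package main

import (
    "context"
    "io"
    "io/ioutil"
    "log"

    "github.com/docker/cli/cli/command/image/build"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
    "github.com/docker/docker/pkg/archive"
)

func main() {
    contextDir := "./example" // hypothetical directory containing a Dockerfile

    // Respect .dockerignore, as buildDockerImage does.
    excludes, err := build.ReadDockerignore(contextDir)
    if err != nil {
        log.Fatal(err)
    }
    excludes = build.TrimBuildFilesFromExcludes(excludes, "Dockerfile", false)

    // Tar the build context, excluding ignored files.
    buildCtx, err := archive.TarWithOptions(contextDir, &archive.TarOptions{
        ExcludePatterns: excludes,
    })
    if err != nil {
        log.Fatal(err)
    }

    cli, err := client.NewClientWithOpts(client.FromEnv)
    if err != nil {
        log.Fatal(err)
    }

    resp, err := cli.ImageBuild(context.Background(), buildCtx, types.ImageBuildOptions{
        Dockerfile: "Dockerfile",
        Tags:       []string{"example:latest"},
        Remove:     true,
    })
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    // Drain the response; decodeBuildMessages above shows how to turn this
    // stream into readable logs and a build error.
    if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
        log.Fatal(err)
    }
    log.Println("build request completed")
}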
@@ -3,7 +3,9 @@ package docker
import (
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "regexp"
    "testing"
@@ -201,6 +203,26 @@ func testAccDockerImageDestroy(s *terraform.State) error {
    return nil
}

func TestAccDockerImage_build(t *testing.T) {
    wd, _ := os.Getwd()
    dfPath := path.Join(wd, "Dockerfile")
    ioutil.WriteFile(dfPath, []byte(testDockerFileExample), 0644)
    defer os.Remove(dfPath)
    resource.Test(t, resource.TestCase{
        PreCheck:     func() { testAccPreCheck(t) },
        Providers:    testAccProviders,
        CheckDestroy: testAccDockerImageDestroy,
        Steps: []resource.TestStep{
            {
                Config: testCreateDockerImage,
                Check: resource.ComposeTestCheckFunc(
                    resource.TestMatchResourceAttr("docker_image.test", "name", contentDigestRegexp),
                ),
            },
        },
    })
}

const testAccDockerImageConfig = `
resource "docker_image" "foo" {
    name = "alpine:3.1"
@@ -292,3 +314,33 @@ resource "docker_image" "foobar" {
    name = "stocard/gotthard@sha256:ed752380c07940c651b46c97ca2101034b3be112f4d86198900aa6141f37fe7b"
}
`

const testCreateDockerImage = `
resource "docker_image" "test" {
    name = "ubuntu:11"
    build {
        path = "."
        dockerfile = "Dockerfile"
        force_remove = true
        build_arg = {
            test_arg = "kenobi"
        }
        label = {
            test_label1 = "han"
            test_label2 = "solo"
        }
    }
}
`

const testDockerFileExample = `
FROM python:3-stretch

WORKDIR /app

ARG test_arg

RUN echo ${test_arg} > test_arg.txt

RUN apt-get update -qq
`
14  go.mod
@@ -2,22 +2,20 @@ module github.com/terraform-providers/terraform-provider-docker

require (
    github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
    github.com/Microsoft/go-winio v0.4.11 // indirect
    github.com/Microsoft/hcsshim v0.8.9 // indirect
    github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe // indirect
    github.com/docker/cli v0.0.0-20200303215952-eb310fca4956 // v19.03.8
    github.com/docker/distribution v0.0.0-20180522175653-f0cc92778478 // indirect
    github.com/docker/docker v0.7.3-0.20190525203055-f25e0c6f3093
    github.com/docker/docker-credential-helpers v0.6.3 // indirect
    github.com/docker/docker-credential-helpers v0.6.3
    github.com/docker/go-connections v0.4.0
    github.com/docker/go-units v0.0.0-20171221200356-d59758554a3d
    github.com/gogo/protobuf v1.3.0 // indirect
    github.com/docker/go-units v0.4.0
    github.com/gorilla/mux v1.7.2 // indirect
    github.com/hashicorp/terraform-plugin-sdk v1.0.0
    github.com/mitchellh/go-homedir v1.1.0
    github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
    github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
    github.com/opencontainers/image-spec v0.0.0-20171125024018-577479e4dc27 // indirect
    github.com/pkg/errors v0.8.1 // indirect
    github.com/sirupsen/logrus v1.4.2 // indirect
    gotest.tools v2.2.0+incompatible // indirect
    github.com/opencontainers/runc v0.1.1 // indirect
)

go 1.13
81  go.sum (generated module checksum file; entries regenerated to match the dependency changes in go.mod above)
@@ -10,7 +10,7 @@ fi
PROVIDER_NAME="docker"
TARGET_DIR="$(pwd)/results"
XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
XC_OS=${XC_OS:=linux darwin windows freebsd openbsd solaris}
XC_OS=${XC_OS:=linux darwin windows openbsd solaris}
XC_EXCLUDE_OSARCH="!darwin/arm !darwin/386 !solaris/amd64"
LD_FLAGS="-s -w"
export CGO_ENABLED=0
21
vendor/github.com/Azure/go-ansiterm/LICENSE
generated
vendored
Normal file
21
vendor/github.com/Azure/go-ansiterm/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Microsoft Corporation
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
12
vendor/github.com/Azure/go-ansiterm/README.md
generated
vendored
Normal file
12
vendor/github.com/Azure/go-ansiterm/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
# go-ansiterm
|
||||
|
||||
This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent.
|
||||
|
||||
For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position.
|
||||
|
||||
The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).
|
||||
|
||||
See parser_test.go for examples exercising the state machine and generating appropriate function calls.
|
||||
|
||||
-----
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||
188
vendor/github.com/Azure/go-ansiterm/constants.go
generated
vendored
Normal file
188
vendor/github.com/Azure/go-ansiterm/constants.go
generated
vendored
Normal file
|
|
@ -0,0 +1,188 @@
|
|||
package ansiterm
|
||||
|
||||
const LogEnv = "DEBUG_TERMINAL"
|
||||
|
||||
// ANSI constants
|
||||
// References:
|
||||
// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
|
||||
// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
|
||||
// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
|
||||
// -- http://en.wikipedia.org/wiki/ANSI_escape_code
|
||||
// -- http://vt100.net/emu/dec_ansi_parser
|
||||
// -- http://vt100.net/emu/vt500_parser.svg
|
||||
// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
|
||||
// -- http://www.inwap.com/pdp10/ansicode.txt
|
||||
const (
|
||||
// ECMA-48 Set Graphics Rendition
|
||||
// Note:
|
||||
// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
|
||||
// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
|
||||
// -- Windows does not expose the per-window cursor (i.e., caret) blink times
|
||||
ANSI_SGR_RESET = 0
|
||||
ANSI_SGR_BOLD = 1
|
||||
ANSI_SGR_DIM = 2
|
||||
_ANSI_SGR_ITALIC = 3
|
||||
ANSI_SGR_UNDERLINE = 4
|
||||
_ANSI_SGR_BLINKSLOW = 5
|
||||
_ANSI_SGR_BLINKFAST = 6
|
||||
ANSI_SGR_REVERSE = 7
|
||||
_ANSI_SGR_INVISIBLE = 8
|
||||
_ANSI_SGR_LINETHROUGH = 9
|
||||
_ANSI_SGR_FONT_00 = 10
|
||||
_ANSI_SGR_FONT_01 = 11
|
||||
_ANSI_SGR_FONT_02 = 12
|
||||
_ANSI_SGR_FONT_03 = 13
|
||||
_ANSI_SGR_FONT_04 = 14
|
||||
_ANSI_SGR_FONT_05 = 15
|
||||
_ANSI_SGR_FONT_06 = 16
|
||||
_ANSI_SGR_FONT_07 = 17
|
||||
_ANSI_SGR_FONT_08 = 18
|
||||
_ANSI_SGR_FONT_09 = 19
|
||||
_ANSI_SGR_FONT_10 = 20
|
||||
_ANSI_SGR_DOUBLEUNDERLINE = 21
|
||||
ANSI_SGR_BOLD_DIM_OFF = 22
|
||||
_ANSI_SGR_ITALIC_OFF = 23
|
||||
ANSI_SGR_UNDERLINE_OFF = 24
|
||||
_ANSI_SGR_BLINK_OFF = 25
|
||||
_ANSI_SGR_RESERVED_00 = 26
|
||||
ANSI_SGR_REVERSE_OFF = 27
|
||||
_ANSI_SGR_INVISIBLE_OFF = 28
|
||||
_ANSI_SGR_LINETHROUGH_OFF = 29
|
||||
ANSI_SGR_FOREGROUND_BLACK = 30
|
||||
ANSI_SGR_FOREGROUND_RED = 31
|
||||
ANSI_SGR_FOREGROUND_GREEN = 32
|
||||
ANSI_SGR_FOREGROUND_YELLOW = 33
|
||||
ANSI_SGR_FOREGROUND_BLUE = 34
|
||||
ANSI_SGR_FOREGROUND_MAGENTA = 35
|
||||
ANSI_SGR_FOREGROUND_CYAN = 36
|
||||
ANSI_SGR_FOREGROUND_WHITE = 37
|
||||
_ANSI_SGR_RESERVED_01 = 38
|
||||
ANSI_SGR_FOREGROUND_DEFAULT = 39
|
||||
ANSI_SGR_BACKGROUND_BLACK = 40
|
||||
ANSI_SGR_BACKGROUND_RED = 41
|
||||
ANSI_SGR_BACKGROUND_GREEN = 42
|
||||
ANSI_SGR_BACKGROUND_YELLOW = 43
|
||||
ANSI_SGR_BACKGROUND_BLUE = 44
|
||||
ANSI_SGR_BACKGROUND_MAGENTA = 45
|
||||
ANSI_SGR_BACKGROUND_CYAN = 46
|
||||
ANSI_SGR_BACKGROUND_WHITE = 47
|
||||
_ANSI_SGR_RESERVED_02 = 48
|
||||
ANSI_SGR_BACKGROUND_DEFAULT = 49
|
||||
// 50 - 65: Unsupported
|
||||
|
||||
ANSI_MAX_CMD_LENGTH = 4096
|
||||
|
||||
MAX_INPUT_EVENTS = 128
|
||||
DEFAULT_WIDTH = 80
|
||||
DEFAULT_HEIGHT = 24
|
||||
|
||||
ANSI_BEL = 0x07
|
||||
ANSI_BACKSPACE = 0x08
|
||||
ANSI_TAB = 0x09
|
||||
ANSI_LINE_FEED = 0x0A
|
||||
ANSI_VERTICAL_TAB = 0x0B
|
||||
ANSI_FORM_FEED = 0x0C
|
||||
ANSI_CARRIAGE_RETURN = 0x0D
|
||||
ANSI_ESCAPE_PRIMARY = 0x1B
|
||||
ANSI_ESCAPE_SECONDARY = 0x5B
|
||||
ANSI_OSC_STRING_ENTRY = 0x5D
|
||||
ANSI_COMMAND_FIRST = 0x40
|
||||
ANSI_COMMAND_LAST = 0x7E
|
||||
DCS_ENTRY = 0x90
|
||||
CSI_ENTRY = 0x9B
|
||||
OSC_STRING = 0x9D
|
||||
ANSI_PARAMETER_SEP = ";"
|
||||
ANSI_CMD_G0 = '('
|
||||
ANSI_CMD_G1 = ')'
|
||||
ANSI_CMD_G2 = '*'
|
||||
ANSI_CMD_G3 = '+'
|
||||
ANSI_CMD_DECPNM = '>'
|
||||
ANSI_CMD_DECPAM = '='
|
||||
ANSI_CMD_OSC = ']'
|
||||
ANSI_CMD_STR_TERM = '\\'
|
||||
|
||||
KEY_CONTROL_PARAM_2 = ";2"
|
||||
KEY_CONTROL_PARAM_3 = ";3"
|
||||
KEY_CONTROL_PARAM_4 = ";4"
|
||||
KEY_CONTROL_PARAM_5 = ";5"
|
||||
KEY_CONTROL_PARAM_6 = ";6"
|
||||
KEY_CONTROL_PARAM_7 = ";7"
|
||||
KEY_CONTROL_PARAM_8 = ";8"
|
||||
KEY_ESC_CSI = "\x1B["
|
||||
KEY_ESC_N = "\x1BN"
|
||||
KEY_ESC_O = "\x1BO"
|
||||
|
||||
FILL_CHARACTER = ' '
|
||||
)
|
||||
|
||||
func getByteRange(start byte, end byte) []byte {
|
||||
bytes := make([]byte, 0, 32)
|
||||
for i := start; i <= end; i++ {
|
||||
bytes = append(bytes, byte(i))
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
var toGroundBytes = getToGroundBytes()
|
||||
var executors = getExecuteBytes()
|
||||
|
||||
// SPACE 20+A0 hex Always and everywhere a blank space
|
||||
// Intermediate 20-2F hex !"#$%&'()*+,-./
|
||||
var intermeds = getByteRange(0x20, 0x2F)
|
||||
|
||||
// Parameters 30-3F hex 0123456789:;<=>?
|
||||
// CSI Parameters 30-39, 3B hex 0123456789;
|
||||
var csiParams = getByteRange(0x30, 0x3F)
|
||||
|
||||
var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
|
||||
|
||||
// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
|
||||
var upperCase = getByteRange(0x40, 0x5F)
|
||||
|
||||
// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~
|
||||
var lowerCase = getByteRange(0x60, 0x7E)
|
||||
|
||||
// Alphabetics 40-7E hex (all of upper and lower case)
|
||||
var alphabetics = append(upperCase, lowerCase...)
|
||||
|
||||
var printables = getByteRange(0x20, 0x7F)
|
||||
|
||||
var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
|
||||
var escapeToGroundBytes = getEscapeToGroundBytes()
|
||||
|
||||
// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
|
||||
// byte ranges below
|
||||
|
||||
func getEscapeToGroundBytes() []byte {
|
||||
escapeToGroundBytes := getByteRange(0x30, 0x4F)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
|
||||
return escapeToGroundBytes
|
||||
}
|
||||
|
||||
func getExecuteBytes() []byte {
|
||||
executeBytes := getByteRange(0x00, 0x17)
|
||||
executeBytes = append(executeBytes, 0x19)
|
||||
executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
|
||||
return executeBytes
|
||||
}
|
||||
|
||||
func getToGroundBytes() []byte {
|
||||
groundBytes := []byte{0x18}
|
||||
groundBytes = append(groundBytes, 0x1A)
|
||||
groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
|
||||
groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
|
||||
groundBytes = append(groundBytes, 0x99)
|
||||
groundBytes = append(groundBytes, 0x9A)
|
||||
groundBytes = append(groundBytes, 0x9C)
|
||||
return groundBytes
|
||||
}
|
||||
|
||||
// Delete 7F hex Always and everywhere ignored
|
||||
// C1 Control 80-9F hex 32 additional control characters
|
||||
// G1 Displayable A1-FE hex 94 additional displayable characters
|
||||
// Special A0+FF hex Same as SPACE and DELETE
|
||||
7
vendor/github.com/Azure/go-ansiterm/context.go
generated
vendored
Normal file
7
vendor/github.com/Azure/go-ansiterm/context.go
generated
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
package ansiterm
|
||||
|
||||
type ansiContext struct {
|
||||
currentChar byte
|
||||
paramBuffer []byte
|
||||
interBuffer []byte
|
||||
}
|
||||
49
vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
generated
vendored
Normal file
49
vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
generated
vendored
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
package ansiterm
|
||||
|
||||
type csiEntryState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (csiState csiEntryState) Handle(b byte) (s state, e error) {
|
||||
csiState.parser.logf("CsiEntry::Handle %#x", b)
|
||||
|
||||
nextState, err := csiState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case sliceContains(alphabetics, b):
|
||||
return csiState.parser.ground, nil
|
||||
case sliceContains(csiCollectables, b):
|
||||
return csiState.parser.csiParam, nil
|
||||
case sliceContains(executors, b):
|
||||
return csiState, csiState.parser.execute()
|
||||
}
|
||||
|
||||
return csiState, nil
|
||||
}
|
||||
|
||||
func (csiState csiEntryState) Transition(s state) error {
|
||||
csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
|
||||
csiState.baseState.Transition(s)
|
||||
|
||||
switch s {
|
||||
case csiState.parser.ground:
|
||||
return csiState.parser.csiDispatch()
|
||||
case csiState.parser.csiParam:
|
||||
switch {
|
||||
case sliceContains(csiParams, csiState.parser.context.currentChar):
|
||||
csiState.parser.collectParam()
|
||||
case sliceContains(intermeds, csiState.parser.context.currentChar):
|
||||
csiState.parser.collectInter()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (csiState csiEntryState) Enter() error {
|
||||
csiState.parser.clear()
|
||||
return nil
|
||||
}
|
||||
38
vendor/github.com/Azure/go-ansiterm/csi_param_state.go
generated
vendored
Normal file
38
vendor/github.com/Azure/go-ansiterm/csi_param_state.go
generated
vendored
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
package ansiterm
|
||||
|
||||
type csiParamState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (csiState csiParamState) Handle(b byte) (s state, e error) {
|
||||
csiState.parser.logf("CsiParam::Handle %#x", b)
|
||||
|
||||
nextState, err := csiState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case sliceContains(alphabetics, b):
|
||||
return csiState.parser.ground, nil
|
||||
case sliceContains(csiCollectables, b):
|
||||
csiState.parser.collectParam()
|
||||
return csiState, nil
|
||||
case sliceContains(executors, b):
|
||||
return csiState, csiState.parser.execute()
|
||||
}
|
||||
|
||||
return csiState, nil
|
||||
}
|
||||
|
||||
func (csiState csiParamState) Transition(s state) error {
|
||||
csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
|
||||
csiState.baseState.Transition(s)
|
||||
|
||||
switch s {
|
||||
case csiState.parser.ground:
|
||||
return csiState.parser.csiDispatch()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
36
vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
generated
vendored
Normal file
36
vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
generated
vendored
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
package ansiterm
|
||||
|
||||
type escapeIntermediateState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
|
||||
escState.parser.logf("escapeIntermediateState::Handle %#x", b)
|
||||
nextState, err := escState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case sliceContains(intermeds, b):
|
||||
return escState, escState.parser.collectInter()
|
||||
case sliceContains(executors, b):
|
||||
return escState, escState.parser.execute()
|
||||
case sliceContains(escapeIntermediateToGroundBytes, b):
|
||||
return escState.parser.ground, nil
|
||||
}
|
||||
|
||||
return escState, nil
|
||||
}
|
||||
|
||||
func (escState escapeIntermediateState) Transition(s state) error {
|
||||
escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
|
||||
escState.baseState.Transition(s)
|
||||
|
||||
switch s {
|
||||
case escState.parser.ground:
|
||||
return escState.parser.escDispatch()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
47
vendor/github.com/Azure/go-ansiterm/escape_state.go
generated
vendored
Normal file
47
vendor/github.com/Azure/go-ansiterm/escape_state.go
generated
vendored
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
package ansiterm
|
||||
|
||||
type escapeState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (escState escapeState) Handle(b byte) (s state, e error) {
|
||||
escState.parser.logf("escapeState::Handle %#x", b)
|
||||
nextState, err := escState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case b == ANSI_ESCAPE_SECONDARY:
|
||||
return escState.parser.csiEntry, nil
|
||||
case b == ANSI_OSC_STRING_ENTRY:
|
||||
return escState.parser.oscString, nil
|
||||
case sliceContains(executors, b):
|
||||
return escState, escState.parser.execute()
|
||||
case sliceContains(escapeToGroundBytes, b):
|
||||
return escState.parser.ground, nil
|
||||
case sliceContains(intermeds, b):
|
||||
return escState.parser.escapeIntermediate, nil
|
||||
}
|
||||
|
||||
return escState, nil
|
||||
}
|
||||
|
||||
func (escState escapeState) Transition(s state) error {
|
||||
escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
|
||||
escState.baseState.Transition(s)
|
||||
|
||||
switch s {
|
||||
case escState.parser.ground:
|
||||
return escState.parser.escDispatch()
|
||||
case escState.parser.escapeIntermediate:
|
||||
return escState.parser.collectInter()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (escState escapeState) Enter() error {
|
||||
escState.parser.clear()
|
||||
return nil
|
||||
}
|
||||
90
vendor/github.com/Azure/go-ansiterm/event_handler.go
generated
vendored
Normal file
90
vendor/github.com/Azure/go-ansiterm/event_handler.go
generated
vendored
Normal file
|
|
@ -0,0 +1,90 @@
|
|||
package ansiterm
|
||||
|
||||
type AnsiEventHandler interface {
|
||||
// Print
|
||||
Print(b byte) error
|
||||
|
||||
// Execute C0 commands
|
||||
Execute(b byte) error
|
||||
|
||||
// CUrsor Up
|
||||
CUU(int) error
|
||||
|
||||
// CUrsor Down
|
||||
CUD(int) error
|
||||
|
||||
// CUrsor Forward
|
||||
CUF(int) error
|
||||
|
||||
// CUrsor Backward
|
||||
CUB(int) error
|
||||
|
||||
// Cursor to Next Line
|
||||
CNL(int) error
|
||||
|
||||
// Cursor to Previous Line
|
||||
CPL(int) error
|
||||
|
||||
// Cursor Horizontal position Absolute
|
||||
CHA(int) error
|
||||
|
||||
// Vertical line Position Absolute
|
||||
VPA(int) error
|
||||
|
||||
// CUrsor Position
|
||||
CUP(int, int) error
|
||||
|
||||
// Horizontal and Vertical Position (depends on PUM)
|
||||
HVP(int, int) error
|
||||
|
||||
// Text Cursor Enable Mode
|
||||
DECTCEM(bool) error
|
||||
|
||||
// Origin Mode
|
||||
DECOM(bool) error
|
||||
|
||||
// 132 Column Mode
|
||||
DECCOLM(bool) error
|
||||
|
||||
// Erase in Display
|
||||
ED(int) error
|
||||
|
||||
// Erase in Line
|
||||
EL(int) error
|
||||
|
||||
// Insert Line
|
||||
IL(int) error
|
||||
|
||||
// Delete Line
|
||||
DL(int) error
|
||||
|
||||
// Insert Character
|
||||
ICH(int) error
|
||||
|
||||
// Delete Character
|
||||
DCH(int) error
|
||||
|
||||
// Set Graphics Rendition
|
||||
SGR([]int) error
|
||||
|
||||
// Pan Down
|
||||
SU(int) error
|
||||
|
||||
// Pan Up
|
||||
SD(int) error
|
||||
|
||||
// Device Attributes
|
||||
DA([]string) error
|
||||
|
||||
// Set Top and Bottom Margins
|
||||
DECSTBM(int, int) error
|
||||
|
||||
// Index
|
||||
IND() error
|
||||
|
||||
// Reverse Index
|
||||
RI() error
|
||||
|
||||
// Flush updates from previous commands
|
||||
Flush() error
|
||||
}
|
||||
24
vendor/github.com/Azure/go-ansiterm/ground_state.go
generated
vendored
Normal file
24
vendor/github.com/Azure/go-ansiterm/ground_state.go
generated
vendored
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
package ansiterm
|
||||
|
||||
type groundState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (gs groundState) Handle(b byte) (s state, e error) {
|
||||
gs.parser.context.currentChar = b
|
||||
|
||||
nextState, err := gs.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case sliceContains(printables, b):
|
||||
return gs, gs.parser.print()
|
||||
|
||||
case sliceContains(executors, b):
|
||||
return gs, gs.parser.execute()
|
||||
}
|
||||
|
||||
return gs, nil
|
||||
}
|
||||
31
vendor/github.com/Azure/go-ansiterm/osc_string_state.go
generated
vendored
Normal file
31
vendor/github.com/Azure/go-ansiterm/osc_string_state.go
generated
vendored
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
package ansiterm
|
||||
|
||||
type oscStringState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (oscState oscStringState) Handle(b byte) (s state, e error) {
|
||||
oscState.parser.logf("OscString::Handle %#x", b)
|
||||
nextState, err := oscState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case isOscStringTerminator(b):
|
||||
return oscState.parser.ground, nil
|
||||
}
|
||||
|
||||
return oscState, nil
|
||||
}
|
||||
|
||||
// See below for OSC string terminators for linux
|
||||
// http://man7.org/linux/man-pages/man4/console_codes.4.html
|
||||
func isOscStringTerminator(b byte) bool {
|
||||
|
||||
if b == ANSI_BEL || b == 0x5C {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
151 vendor/github.com/Azure/go-ansiterm/parser.go generated vendored Normal file
@@ -0,0 +1,151 @@
package ansiterm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
type AnsiParser struct {
|
||||
currState state
|
||||
eventHandler AnsiEventHandler
|
||||
context *ansiContext
|
||||
csiEntry state
|
||||
csiParam state
|
||||
dcsEntry state
|
||||
escape state
|
||||
escapeIntermediate state
|
||||
error state
|
||||
ground state
|
||||
oscString state
|
||||
stateMap []state
|
||||
|
||||
logf func(string, ...interface{})
|
||||
}
|
||||
|
||||
type Option func(*AnsiParser)
|
||||
|
||||
func WithLogf(f func(string, ...interface{})) Option {
|
||||
return func(ap *AnsiParser) {
|
||||
ap.logf = f
|
||||
}
|
||||
}
|
||||
|
||||
func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
|
||||
ap := &AnsiParser{
|
||||
eventHandler: evtHandler,
|
||||
context: &ansiContext{},
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(ap)
|
||||
}
|
||||
|
||||
if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
|
||||
logFile, _ := os.Create("ansiParser.log")
|
||||
logger := log.New(logFile, "", log.LstdFlags)
|
||||
if ap.logf != nil {
|
||||
l := ap.logf
|
||||
ap.logf = func(s string, v ...interface{}) {
|
||||
l(s, v...)
|
||||
logger.Printf(s, v...)
|
||||
}
|
||||
} else {
|
||||
ap.logf = logger.Printf
|
||||
}
|
||||
}
|
||||
|
||||
if ap.logf == nil {
|
||||
ap.logf = func(string, ...interface{}) {}
|
||||
}
|
||||
|
||||
ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
|
||||
ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
|
||||
ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
|
||||
ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
|
||||
ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
|
||||
ap.error = errorState{baseState{name: "Error", parser: ap}}
|
||||
ap.ground = groundState{baseState{name: "Ground", parser: ap}}
|
||||
ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
|
||||
|
||||
ap.stateMap = []state{
|
||||
ap.csiEntry,
|
||||
ap.csiParam,
|
||||
ap.dcsEntry,
|
||||
ap.escape,
|
||||
ap.escapeIntermediate,
|
||||
ap.error,
|
||||
ap.ground,
|
||||
ap.oscString,
|
||||
}
|
||||
|
||||
ap.currState = getState(initialState, ap.stateMap)
|
||||
|
||||
ap.logf("CreateParser: parser %p", ap)
|
||||
return ap
|
||||
}
|
||||
|
||||
func getState(name string, states []state) state {
|
||||
for _, el := range states {
|
||||
if el.Name() == name {
|
||||
return el
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
|
||||
for i, b := range bytes {
|
||||
if err := ap.handle(b); err != nil {
|
||||
return i, err
|
||||
}
|
||||
}
|
||||
|
||||
return len(bytes), ap.eventHandler.Flush()
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) handle(b byte) error {
|
||||
ap.context.currentChar = b
|
||||
newState, err := ap.currState.Handle(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if newState == nil {
|
||||
ap.logf("WARNING: newState is nil")
|
||||
return errors.New("New state of 'nil' is invalid.")
|
||||
}
|
||||
|
||||
if newState != ap.currState {
|
||||
if err := ap.changeState(newState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) changeState(newState state) error {
|
||||
ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
|
||||
|
||||
// Exit old state
|
||||
if err := ap.currState.Exit(); err != nil {
|
||||
ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Perform transition action
|
||||
if err := ap.currState.Transition(newState); err != nil {
|
||||
ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Enter new state
|
||||
if err := newState.Enter(); err != nil {
|
||||
ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
ap.currState = newState
|
||||
return nil
|
||||
}
|
||||
99 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go generated vendored Normal file
@@ -0,0 +1,99 @@
package ansiterm

import (
	"strconv"
)

func parseParams(bytes []byte) ([]string, error) {
	paramBuff := make([]byte, 0, 0)
	params := []string{}

	for _, v := range bytes {
		if v == ';' {
			if len(paramBuff) > 0 {
				// Completed parameter, append it to the list
				s := string(paramBuff)
				params = append(params, s)
				paramBuff = make([]byte, 0, 0)
			}
		} else {
			paramBuff = append(paramBuff, v)
		}
	}

	// Last parameter may not be terminated with ';'
	if len(paramBuff) > 0 {
		s := string(paramBuff)
		params = append(params, s)
	}

	return params, nil
}

func parseCmd(context ansiContext) (string, error) {
	return string(context.currentChar), nil
}

func getInt(params []string, dflt int) int {
	i := getInts(params, 1, dflt)[0]
	return i
}

func getInts(params []string, minCount int, dflt int) []int {
	ints := []int{}

	for _, v := range params {
		i, _ := strconv.Atoi(v)
		// Zero is mapped to the default value in VT100.
		if i == 0 {
			i = dflt
		}
		ints = append(ints, i)
	}

	if len(ints) < minCount {
		remaining := minCount - len(ints)
		for i := 0; i < remaining; i++ {
			ints = append(ints, dflt)
		}
	}

	return ints
}

func (ap *AnsiParser) modeDispatch(param string, set bool) error {
	switch param {
	case "?3":
		return ap.eventHandler.DECCOLM(set)
	case "?6":
		return ap.eventHandler.DECOM(set)
	case "?25":
		return ap.eventHandler.DECTCEM(set)
	}
	return nil
}

func (ap *AnsiParser) hDispatch(params []string) error {
	if len(params) == 1 {
		return ap.modeDispatch(params[0], true)
	}

	return nil
}

func (ap *AnsiParser) lDispatch(params []string) error {
	if len(params) == 1 {
		return ap.modeDispatch(params[0], false)
	}

	return nil
}

func getEraseParam(params []string) int {
	param := getInt(params, 0)
	if param < 0 || 3 < param {
		param = 0
	}

	return param
}
119 vendor/github.com/Azure/go-ansiterm/parser_actions.go generated vendored Normal file
@@ -0,0 +1,119 @@
package ansiterm
|
||||
|
||||
func (ap *AnsiParser) collectParam() error {
|
||||
currChar := ap.context.currentChar
|
||||
ap.logf("collectParam %#x", currChar)
|
||||
ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) collectInter() error {
|
||||
currChar := ap.context.currentChar
|
||||
ap.logf("collectInter %#x", currChar)
|
||||
ap.context.paramBuffer = append(ap.context.interBuffer, currChar)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) escDispatch() error {
|
||||
cmd, _ := parseCmd(*ap.context)
|
||||
intermeds := ap.context.interBuffer
|
||||
ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
|
||||
ap.logf("escDispatch: %v(%v)", cmd, intermeds)
|
||||
|
||||
switch cmd {
|
||||
case "D": // IND
|
||||
return ap.eventHandler.IND()
|
||||
case "E": // NEL, equivalent to CRLF
|
||||
err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
|
||||
if err == nil {
|
||||
err = ap.eventHandler.Execute(ANSI_LINE_FEED)
|
||||
}
|
||||
return err
|
||||
case "M": // RI
|
||||
return ap.eventHandler.RI()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) csiDispatch() error {
|
||||
cmd, _ := parseCmd(*ap.context)
|
||||
params, _ := parseParams(ap.context.paramBuffer)
|
||||
ap.logf("Parsed params: %v with length: %d", params, len(params))
|
||||
|
||||
ap.logf("csiDispatch: %v(%v)", cmd, params)
|
||||
|
||||
switch cmd {
|
||||
case "@":
|
||||
return ap.eventHandler.ICH(getInt(params, 1))
|
||||
case "A":
|
||||
return ap.eventHandler.CUU(getInt(params, 1))
|
||||
case "B":
|
||||
return ap.eventHandler.CUD(getInt(params, 1))
|
||||
case "C":
|
||||
return ap.eventHandler.CUF(getInt(params, 1))
|
||||
case "D":
|
||||
return ap.eventHandler.CUB(getInt(params, 1))
|
||||
case "E":
|
||||
return ap.eventHandler.CNL(getInt(params, 1))
|
||||
case "F":
|
||||
return ap.eventHandler.CPL(getInt(params, 1))
|
||||
case "G":
|
||||
return ap.eventHandler.CHA(getInt(params, 1))
|
||||
case "H":
|
||||
ints := getInts(params, 2, 1)
|
||||
x, y := ints[0], ints[1]
|
||||
return ap.eventHandler.CUP(x, y)
|
||||
case "J":
|
||||
param := getEraseParam(params)
|
||||
return ap.eventHandler.ED(param)
|
||||
case "K":
|
||||
param := getEraseParam(params)
|
||||
return ap.eventHandler.EL(param)
|
||||
case "L":
|
||||
return ap.eventHandler.IL(getInt(params, 1))
|
||||
case "M":
|
||||
return ap.eventHandler.DL(getInt(params, 1))
|
||||
case "P":
|
||||
return ap.eventHandler.DCH(getInt(params, 1))
|
||||
case "S":
|
||||
return ap.eventHandler.SU(getInt(params, 1))
|
||||
case "T":
|
||||
return ap.eventHandler.SD(getInt(params, 1))
|
||||
case "c":
|
||||
return ap.eventHandler.DA(params)
|
||||
case "d":
|
||||
return ap.eventHandler.VPA(getInt(params, 1))
|
||||
case "f":
|
||||
ints := getInts(params, 2, 1)
|
||||
x, y := ints[0], ints[1]
|
||||
return ap.eventHandler.HVP(x, y)
|
||||
case "h":
|
||||
return ap.hDispatch(params)
|
||||
case "l":
|
||||
return ap.lDispatch(params)
|
||||
case "m":
|
||||
return ap.eventHandler.SGR(getInts(params, 1, 0))
|
||||
case "r":
|
||||
ints := getInts(params, 2, 1)
|
||||
top, bottom := ints[0], ints[1]
|
||||
return ap.eventHandler.DECSTBM(top, bottom)
|
||||
default:
|
||||
ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) print() error {
|
||||
return ap.eventHandler.Print(ap.context.currentChar)
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) clear() error {
|
||||
ap.context = &ansiContext{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) execute() error {
|
||||
return ap.eventHandler.Execute(ap.context.currentChar)
|
||||
}
|
||||
71 vendor/github.com/Azure/go-ansiterm/states.go generated vendored Normal file
@@ -0,0 +1,71 @@
package ansiterm

type stateID int

type state interface {
	Enter() error
	Exit() error
	Handle(byte) (state, error)
	Name() string
	Transition(state) error
}

type baseState struct {
	name   string
	parser *AnsiParser
}

func (base baseState) Enter() error {
	return nil
}

func (base baseState) Exit() error {
	return nil
}

func (base baseState) Handle(b byte) (s state, e error) {

	switch {
	case b == CSI_ENTRY:
		return base.parser.csiEntry, nil
	case b == DCS_ENTRY:
		return base.parser.dcsEntry, nil
	case b == ANSI_ESCAPE_PRIMARY:
		return base.parser.escape, nil
	case b == OSC_STRING:
		return base.parser.oscString, nil
	case sliceContains(toGroundBytes, b):
		return base.parser.ground, nil
	}

	return nil, nil
}

func (base baseState) Name() string {
	return base.name
}

func (base baseState) Transition(s state) error {
	if s == base.parser.ground {
		execBytes := []byte{0x18}
		execBytes = append(execBytes, 0x1A)
		execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
		execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
		execBytes = append(execBytes, 0x99)
		execBytes = append(execBytes, 0x9A)

		if sliceContains(execBytes, base.parser.context.currentChar) {
			return base.parser.execute()
		}
	}

	return nil
}

type dcsEntryState struct {
	baseState
}

type errorState struct {
	baseState
}
21 vendor/github.com/Azure/go-ansiterm/utilities.go generated vendored Normal file
@@ -0,0 +1,21 @@
package ansiterm

import (
	"strconv"
)

func sliceContains(bytes []byte, b byte) bool {
	for _, v := range bytes {
		if v == b {
			return true
		}
	}

	return false
}

func convertBytesToInteger(bytes []byte) int {
	s := string(bytes)
	i, _ := strconv.Atoi(s)
	return i
}
182 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go generated vendored Normal file
@@ -0,0 +1,182 @@
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/Azure/go-ansiterm"
|
||||
)
|
||||
|
||||
// Windows keyboard constants
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
|
||||
const (
|
||||
VK_PRIOR = 0x21 // PAGE UP key
|
||||
VK_NEXT = 0x22 // PAGE DOWN key
|
||||
VK_END = 0x23 // END key
|
||||
VK_HOME = 0x24 // HOME key
|
||||
VK_LEFT = 0x25 // LEFT ARROW key
|
||||
VK_UP = 0x26 // UP ARROW key
|
||||
VK_RIGHT = 0x27 // RIGHT ARROW key
|
||||
VK_DOWN = 0x28 // DOWN ARROW key
|
||||
VK_SELECT = 0x29 // SELECT key
|
||||
VK_PRINT = 0x2A // PRINT key
|
||||
VK_EXECUTE = 0x2B // EXECUTE key
|
||||
VK_SNAPSHOT = 0x2C // PRINT SCREEN key
|
||||
VK_INSERT = 0x2D // INS key
|
||||
VK_DELETE = 0x2E // DEL key
|
||||
VK_HELP = 0x2F // HELP key
|
||||
VK_F1 = 0x70 // F1 key
|
||||
VK_F2 = 0x71 // F2 key
|
||||
VK_F3 = 0x72 // F3 key
|
||||
VK_F4 = 0x73 // F4 key
|
||||
VK_F5 = 0x74 // F5 key
|
||||
VK_F6 = 0x75 // F6 key
|
||||
VK_F7 = 0x76 // F7 key
|
||||
VK_F8 = 0x77 // F8 key
|
||||
VK_F9 = 0x78 // F9 key
|
||||
VK_F10 = 0x79 // F10 key
|
||||
VK_F11 = 0x7A // F11 key
|
||||
VK_F12 = 0x7B // F12 key
|
||||
|
||||
RIGHT_ALT_PRESSED = 0x0001
|
||||
LEFT_ALT_PRESSED = 0x0002
|
||||
RIGHT_CTRL_PRESSED = 0x0004
|
||||
LEFT_CTRL_PRESSED = 0x0008
|
||||
SHIFT_PRESSED = 0x0010
|
||||
NUMLOCK_ON = 0x0020
|
||||
SCROLLLOCK_ON = 0x0040
|
||||
CAPSLOCK_ON = 0x0080
|
||||
ENHANCED_KEY = 0x0100
|
||||
)
|
||||
|
||||
type ansiCommand struct {
|
||||
CommandBytes []byte
|
||||
Command string
|
||||
Parameters []string
|
||||
IsSpecial bool
|
||||
}
|
||||
|
||||
func newAnsiCommand(command []byte) *ansiCommand {
|
||||
|
||||
if isCharacterSelectionCmdChar(command[1]) {
|
||||
// Is Character Set Selection commands
|
||||
return &ansiCommand{
|
||||
CommandBytes: command,
|
||||
Command: string(command),
|
||||
IsSpecial: true,
|
||||
}
|
||||
}
|
||||
|
||||
// last char is command character
|
||||
lastCharIndex := len(command) - 1
|
||||
|
||||
ac := &ansiCommand{
|
||||
CommandBytes: command,
|
||||
Command: string(command[lastCharIndex]),
|
||||
IsSpecial: false,
|
||||
}
|
||||
|
||||
// more than a single escape
|
||||
if lastCharIndex != 0 {
|
||||
start := 1
|
||||
// skip if double char escape sequence
|
||||
if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
|
||||
start++
|
||||
}
|
||||
// convert this to GetNextParam method
|
||||
ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
|
||||
}
|
||||
|
||||
return ac
|
||||
}
|
||||
|
||||
func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
|
||||
if index < 0 || index >= len(ac.Parameters) {
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
|
||||
if err != nil {
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
return int16(param)
|
||||
}
|
||||
|
||||
func (ac *ansiCommand) String() string {
|
||||
return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
|
||||
bytesToHex(ac.CommandBytes),
|
||||
ac.Command,
|
||||
strings.Join(ac.Parameters, "\",\""))
|
||||
}
|
||||
|
||||
// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
|
||||
// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
|
||||
func isAnsiCommandChar(b byte) bool {
|
||||
switch {
|
||||
case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
|
||||
return true
|
||||
case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
|
||||
// non-CSI escape sequence terminator
|
||||
return true
|
||||
case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
|
||||
// String escape sequence terminator
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isXtermOscSequence(command []byte, current byte) bool {
|
||||
return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
|
||||
}
|
||||
|
||||
func isCharacterSelectionCmdChar(b byte) bool {
|
||||
return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
|
||||
}
|
||||
|
||||
// bytesToHex converts a slice of bytes to a human-readable string.
|
||||
func bytesToHex(b []byte) string {
|
||||
hex := make([]string, len(b))
|
||||
for i, ch := range b {
|
||||
hex[i] = fmt.Sprintf("%X", ch)
|
||||
}
|
||||
return strings.Join(hex, "")
|
||||
}
|
||||
|
||||
// ensureInRange adjusts the passed value, if necessary, to ensure it is within
|
||||
// the passed min / max range.
|
||||
func ensureInRange(n int16, min int16, max int16) int16 {
|
||||
if n < min {
|
||||
return min
|
||||
} else if n > max {
|
||||
return max
|
||||
} else {
|
||||
return n
|
||||
}
|
||||
}
|
||||
|
||||
func GetStdFile(nFile int) (*os.File, uintptr) {
|
||||
var file *os.File
|
||||
switch nFile {
|
||||
case syscall.STD_INPUT_HANDLE:
|
||||
file = os.Stdin
|
||||
case syscall.STD_OUTPUT_HANDLE:
|
||||
file = os.Stdout
|
||||
case syscall.STD_ERROR_HANDLE:
|
||||
file = os.Stderr
|
||||
default:
|
||||
panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
|
||||
}
|
||||
|
||||
fd, err := syscall.GetStdHandle(nFile)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
|
||||
}
|
||||
|
||||
return file, uintptr(fd)
|
||||
}
|
||||
327 vendor/github.com/Azure/go-ansiterm/winterm/api.go generated vendored Normal file
@@ -0,0 +1,327 @@
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//===========================================================================================================
|
||||
// IMPORTANT NOTE:
|
||||
//
|
||||
// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
|
||||
// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
|
||||
// variables) the pointers reference *before* the API completes.
|
||||
//
|
||||
// As a result, in those cases, the code must hint that the variables remain active by invoking the
|
||||
// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
|
||||
// require unsafe pointers.
|
||||
//
|
||||
// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
|
||||
// the garbage collector the variables remain in use if:
|
||||
//
|
||||
// -- The value is not a pointer (e.g., int32, struct)
|
||||
// -- The value is not referenced by the method after passing the pointer to Windows
|
||||
//
|
||||
// See http://golang.org/doc/go1.3.
|
||||
//===========================================================================================================
|
||||
|
||||
var (
|
||||
kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
|
||||
|
||||
getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo")
|
||||
setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo")
|
||||
setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
|
||||
setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
|
||||
getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
|
||||
setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
|
||||
scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
|
||||
setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
|
||||
setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo")
|
||||
writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW")
|
||||
readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW")
|
||||
waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject")
|
||||
)
|
||||
|
||||
// Windows Console constants
|
||||
const (
|
||||
// Console modes
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
|
||||
ENABLE_PROCESSED_INPUT = 0x0001
|
||||
ENABLE_LINE_INPUT = 0x0002
|
||||
ENABLE_ECHO_INPUT = 0x0004
|
||||
ENABLE_WINDOW_INPUT = 0x0008
|
||||
ENABLE_MOUSE_INPUT = 0x0010
|
||||
ENABLE_INSERT_MODE = 0x0020
|
||||
ENABLE_QUICK_EDIT_MODE = 0x0040
|
||||
ENABLE_EXTENDED_FLAGS = 0x0080
|
||||
ENABLE_AUTO_POSITION = 0x0100
|
||||
ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
|
||||
|
||||
ENABLE_PROCESSED_OUTPUT = 0x0001
|
||||
ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
|
||||
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
|
||||
DISABLE_NEWLINE_AUTO_RETURN = 0x0008
|
||||
ENABLE_LVB_GRID_WORLDWIDE = 0x0010
|
||||
|
||||
// Character attributes
|
||||
// Note:
|
||||
// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
|
||||
// Clearing all foreground or background colors results in black; setting all creates white.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
|
||||
FOREGROUND_BLUE uint16 = 0x0001
|
||||
FOREGROUND_GREEN uint16 = 0x0002
|
||||
FOREGROUND_RED uint16 = 0x0004
|
||||
FOREGROUND_INTENSITY uint16 = 0x0008
|
||||
FOREGROUND_MASK uint16 = 0x000F
|
||||
|
||||
BACKGROUND_BLUE uint16 = 0x0010
|
||||
BACKGROUND_GREEN uint16 = 0x0020
|
||||
BACKGROUND_RED uint16 = 0x0040
|
||||
BACKGROUND_INTENSITY uint16 = 0x0080
|
||||
BACKGROUND_MASK uint16 = 0x00F0
|
||||
|
||||
COMMON_LVB_MASK uint16 = 0xFF00
|
||||
COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
|
||||
COMMON_LVB_UNDERSCORE uint16 = 0x8000
|
||||
|
||||
// Input event types
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
|
||||
KEY_EVENT = 0x0001
|
||||
MOUSE_EVENT = 0x0002
|
||||
WINDOW_BUFFER_SIZE_EVENT = 0x0004
|
||||
MENU_EVENT = 0x0008
|
||||
FOCUS_EVENT = 0x0010
|
||||
|
||||
// WaitForSingleObject return codes
|
||||
WAIT_ABANDONED = 0x00000080
|
||||
WAIT_FAILED = 0xFFFFFFFF
|
||||
WAIT_SIGNALED = 0x0000000
|
||||
WAIT_TIMEOUT = 0x00000102
|
||||
|
||||
// WaitForSingleObject wait duration
|
||||
WAIT_INFINITE = 0xFFFFFFFF
|
||||
WAIT_ONE_SECOND = 1000
|
||||
WAIT_HALF_SECOND = 500
|
||||
WAIT_QUARTER_SECOND = 250
|
||||
)
|
||||
|
||||
// Windows API Console types
|
||||
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
|
||||
// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
|
||||
type (
|
||||
CHAR_INFO struct {
|
||||
UnicodeChar uint16
|
||||
Attributes uint16
|
||||
}
|
||||
|
||||
CONSOLE_CURSOR_INFO struct {
|
||||
Size uint32
|
||||
Visible int32
|
||||
}
|
||||
|
||||
CONSOLE_SCREEN_BUFFER_INFO struct {
|
||||
Size COORD
|
||||
CursorPosition COORD
|
||||
Attributes uint16
|
||||
Window SMALL_RECT
|
||||
MaximumWindowSize COORD
|
||||
}
|
||||
|
||||
COORD struct {
|
||||
X int16
|
||||
Y int16
|
||||
}
|
||||
|
||||
SMALL_RECT struct {
|
||||
Left int16
|
||||
Top int16
|
||||
Right int16
|
||||
Bottom int16
|
||||
}
|
||||
|
||||
// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
|
||||
INPUT_RECORD struct {
|
||||
EventType uint16
|
||||
KeyEvent KEY_EVENT_RECORD
|
||||
}
|
||||
|
||||
KEY_EVENT_RECORD struct {
|
||||
KeyDown int32
|
||||
RepeatCount uint16
|
||||
VirtualKeyCode uint16
|
||||
VirtualScanCode uint16
|
||||
UnicodeChar uint16
|
||||
ControlKeyState uint32
|
||||
}
|
||||
|
||||
WINDOW_BUFFER_SIZE struct {
|
||||
Size COORD
|
||||
}
|
||||
)
|
||||
|
||||
// boolToBOOL converts a Go bool into a Windows int32.
|
||||
func boolToBOOL(f bool) int32 {
|
||||
if f {
|
||||
return int32(1)
|
||||
} else {
|
||||
return int32(0)
|
||||
}
|
||||
}
|
||||
|
||||
// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
|
||||
func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
|
||||
r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleCursorInfo sets the size and visibility of the console cursor.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
|
||||
func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
|
||||
r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleCursorPosition sets the location of the console cursor.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
|
||||
func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
|
||||
r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
|
||||
use(coord)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// GetConsoleMode gets the console mode for given file descriptor
|
||||
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
|
||||
func GetConsoleMode(handle uintptr) (mode uint32, err error) {
|
||||
err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
|
||||
return mode, err
|
||||
}
|
||||
|
||||
// SetConsoleMode sets the console mode for given file descriptor
|
||||
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
|
||||
func SetConsoleMode(handle uintptr, mode uint32) error {
|
||||
r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
|
||||
use(mode)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
|
||||
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
|
||||
func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
|
||||
info := CONSOLE_SCREEN_BUFFER_INFO{}
|
||||
err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
|
||||
r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
|
||||
use(scrollRect)
|
||||
use(clipRect)
|
||||
use(destOrigin)
|
||||
use(char)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleScreenBufferSize sets the size of the console screen buffer.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
|
||||
func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
|
||||
r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
|
||||
use(coord)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleTextAttribute sets the attributes of characters written to the
|
||||
// console screen buffer by the WriteFile or WriteConsole function.
|
||||
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
|
||||
func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
|
||||
r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
|
||||
use(attribute)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
|
||||
// Note that the size and location must be within and no larger than the backing console screen buffer.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
|
||||
func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
|
||||
r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
|
||||
use(isAbsolute)
|
||||
use(rect)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
|
||||
func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
|
||||
r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
|
||||
use(buffer)
|
||||
use(bufferSize)
|
||||
use(bufferCoord)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// ReadConsoleInput reads (and removes) data from the console input buffer.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
|
||||
func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
|
||||
r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
|
||||
use(buffer)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// WaitForSingleObject waits for the passed handle to be signaled.
|
||||
// It returns true if the handle was signaled; false otherwise.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
|
||||
func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
|
||||
r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
|
||||
switch r1 {
|
||||
case WAIT_ABANDONED, WAIT_TIMEOUT:
|
||||
return false, nil
|
||||
case WAIT_SIGNALED:
|
||||
return true, nil
|
||||
}
|
||||
use(msWait)
|
||||
return false, err
|
||||
}
|
||||
|
||||
// String helpers
|
||||
func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
|
||||
return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
|
||||
}
|
||||
|
||||
func (coord COORD) String() string {
|
||||
return fmt.Sprintf("%v,%v", coord.X, coord.Y)
|
||||
}
|
||||
|
||||
func (rect SMALL_RECT) String() string {
|
||||
return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
|
||||
}
|
||||
|
||||
// checkError evaluates the results of a Windows API call and returns the error if it failed.
|
||||
func checkError(r1, r2 uintptr, err error) error {
|
||||
// Windows APIs return non-zero to indicate success
|
||||
if r1 != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return the error if provided, otherwise default to EINVAL
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return syscall.EINVAL
|
||||
}
|
||||
|
||||
// coordToPointer converts a COORD into a uintptr (by fooling the type system).
|
||||
func coordToPointer(c COORD) uintptr {
|
||||
// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
|
||||
return uintptr(*((*uint32)(unsafe.Pointer(&c))))
|
||||
}
|
||||
|
||||
// use is a no-op, but the compiler cannot see that it is.
|
||||
// Calling use(p) ensures that p is kept live until that point.
|
||||
func use(p interface{}) {}
|
||||
100 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go generated vendored Normal file
@@ -0,0 +1,100 @@
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import "github.com/Azure/go-ansiterm"
|
||||
|
||||
const (
|
||||
FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
|
||||
BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
|
||||
)
|
||||
|
||||
// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
|
||||
// request represented by the passed ANSI mode.
|
||||
func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
|
||||
switch ansiMode {
|
||||
|
||||
// Mode styles
|
||||
case ansiterm.ANSI_SGR_BOLD:
|
||||
windowsMode = windowsMode | FOREGROUND_INTENSITY
|
||||
|
||||
case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
|
||||
windowsMode &^= FOREGROUND_INTENSITY
|
||||
|
||||
case ansiterm.ANSI_SGR_UNDERLINE:
|
||||
windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
|
||||
|
||||
case ansiterm.ANSI_SGR_REVERSE:
|
||||
inverted = true
|
||||
|
||||
case ansiterm.ANSI_SGR_REVERSE_OFF:
|
||||
inverted = false
|
||||
|
||||
case ansiterm.ANSI_SGR_UNDERLINE_OFF:
|
||||
windowsMode &^= COMMON_LVB_UNDERSCORE
|
||||
|
||||
// Foreground colors
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_RED:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
|
||||
|
||||
// Background colors
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
|
||||
// Black with no intensity
|
||||
windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_RED:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
|
||||
}
|
||||
|
||||
return windowsMode, inverted
|
||||
}
|
||||
|
||||
// invertAttributes inverts the foreground and background colors of a Windows attributes value
|
||||
func invertAttributes(windowsMode uint16) uint16 {
|
||||
return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
|
||||
}
|
||||
101 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go generated vendored Normal file
@@ -0,0 +1,101 @@
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
const (
|
||||
horizontal = iota
|
||||
vertical
|
||||
)
|
||||
|
||||
func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
|
||||
if h.originMode {
|
||||
sr := h.effectiveSr(info.Window)
|
||||
return SMALL_RECT{
|
||||
Top: sr.top,
|
||||
Bottom: sr.bottom,
|
||||
Left: 0,
|
||||
Right: info.Size.X - 1,
|
||||
}
|
||||
} else {
|
||||
return SMALL_RECT{
|
||||
Top: info.Window.Top,
|
||||
Bottom: info.Window.Bottom,
|
||||
Left: 0,
|
||||
Right: info.Size.X - 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setCursorPosition sets the cursor to the specified position, bounded to the screen size
|
||||
func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
|
||||
position.X = ensureInRange(position.X, window.Left, window.Right)
|
||||
position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
|
||||
err := SetConsoleCursorPosition(h.fd, position)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
|
||||
return h.moveCursor(vertical, param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
|
||||
return h.moveCursor(horizontal, param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
position := info.CursorPosition
|
||||
switch moveMode {
|
||||
case horizontal:
|
||||
position.X += int16(param)
|
||||
case vertical:
|
||||
position.Y += int16(param)
|
||||
}
|
||||
|
||||
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
position := info.CursorPosition
|
||||
position.X = 0
|
||||
position.Y += int16(param)
|
||||
|
||||
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
position := info.CursorPosition
|
||||
position.X = int16(param) - 1
|
||||
|
||||
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
84 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go generated vendored Normal file
@@ -0,0 +1,84 @@
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import "github.com/Azure/go-ansiterm"
|
||||
|
||||
func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
|
||||
// Ignore an invalid (negative area) request
|
||||
if toCoord.Y < fromCoord.Y {
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
var coordStart = COORD{}
|
||||
var coordEnd = COORD{}
|
||||
|
||||
xCurrent, yCurrent := fromCoord.X, fromCoord.Y
|
||||
xEnd, yEnd := toCoord.X, toCoord.Y
|
||||
|
||||
// Clear any partial initial line
|
||||
if xCurrent > 0 {
|
||||
coordStart.X, coordStart.Y = xCurrent, yCurrent
|
||||
coordEnd.X, coordEnd.Y = xEnd, yCurrent
|
||||
|
||||
err = h.clearRect(attributes, coordStart, coordEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
xCurrent = 0
|
||||
yCurrent += 1
|
||||
}
|
||||
|
||||
// Clear intervening rectangular section
|
||||
if yCurrent < yEnd {
|
||||
coordStart.X, coordStart.Y = xCurrent, yCurrent
|
||||
coordEnd.X, coordEnd.Y = xEnd, yEnd-1
|
||||
|
||||
err = h.clearRect(attributes, coordStart, coordEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
xCurrent = 0
|
||||
yCurrent = yEnd
|
||||
}
|
||||
|
||||
// Clear remaining partial ending line
|
||||
coordStart.X, coordStart.Y = xCurrent, yCurrent
|
||||
coordEnd.X, coordEnd.Y = xEnd, yEnd
|
||||
|
||||
err = h.clearRect(attributes, coordStart, coordEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
|
||||
region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
|
||||
width := toCoord.X - fromCoord.X + 1
|
||||
height := toCoord.Y - fromCoord.Y + 1
|
||||
size := uint32(width) * uint32(height)
|
||||
|
||||
if size <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
buffer := make([]CHAR_INFO, size)
|
||||
|
||||
char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
|
||||
for i := 0; i < int(size); i++ {
|
||||
buffer[i] = char
|
||||
}
|
||||
|
||||
err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
118 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go generated vendored Normal file
@@ -0,0 +1,118 @@
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
// effectiveSr gets the current effective scroll region in buffer coordinates
|
||||
func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
|
||||
top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
|
||||
bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
|
||||
if top >= bottom {
|
||||
top = window.Top
|
||||
bottom = window.Bottom
|
||||
}
|
||||
return scrollRegion{top: top, bottom: bottom}
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) scrollUp(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sr := h.effectiveSr(info.Window)
|
||||
return h.scroll(param, sr, info)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) scrollDown(param int) error {
|
||||
return h.scrollUp(-param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) deleteLines(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
start := info.CursorPosition.Y
|
||||
sr := h.effectiveSr(info.Window)
|
||||
// Lines cannot be inserted or deleted outside the scrolling region.
|
||||
if start >= sr.top && start <= sr.bottom {
|
||||
sr.top = start
|
||||
return h.scroll(param, sr, info)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) insertLines(param int) error {
|
||||
return h.deleteLines(-param)
|
||||
}
|
||||
|
||||
// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
|
||||
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
|
||||
h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
|
||||
h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
|
||||
|
||||
// Copy from and clip to the scroll region (full buffer width)
|
||||
scrollRect := SMALL_RECT{
|
||||
Top: sr.top,
|
||||
Bottom: sr.bottom,
|
||||
Left: 0,
|
||||
Right: info.Size.X - 1,
|
||||
}
|
||||
|
||||
// Origin to which area should be copied
|
||||
destOrigin := COORD{
|
||||
X: 0,
|
||||
Y: sr.top - int16(param),
|
||||
}
|
||||
|
||||
char := CHAR_INFO{
|
||||
UnicodeChar: ' ',
|
||||
Attributes: h.attributes,
|
||||
}
|
||||
|
||||
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return h.scrollLine(param, info.CursorPosition, info)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
|
||||
return h.deleteCharacters(-param)
|
||||
}
|
||||
|
||||
// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
|
||||
func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
|
||||
// Copy from and clip to the scroll region (full buffer width)
|
||||
scrollRect := SMALL_RECT{
|
||||
Top: position.Y,
|
||||
Bottom: position.Y,
|
||||
Left: position.X,
|
||||
Right: info.Size.X - 1,
|
||||
}
|
||||
|
||||
// Origin to which area should be copied
|
||||
destOrigin := COORD{
|
||||
X: position.X - int16(columns),
|
||||
Y: position.Y,
|
||||
}
|
||||
|
||||
char := CHAR_INFO{
|
||||
UnicodeChar: ' ',
|
||||
Attributes: h.attributes,
|
||||
}
|
||||
|
||||
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
9 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go generated vendored Normal file
@@ -0,0 +1,9 @@
// +build windows

package winterm

// addInRange increments a value by the passed quantity while ensuring the values
// always remain within the supplied min / max range.
func addInRange(n int16, increment int16, min int16, max int16) int16 {
	return ensureInRange(n+increment, min, max)
}
743 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go generated vendored Normal file
@@ -0,0 +1,743 @@
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/Azure/go-ansiterm"
|
||||
)
|
||||
|
||||
type windowsAnsiEventHandler struct {
|
||||
fd uintptr
|
||||
file *os.File
|
||||
infoReset *CONSOLE_SCREEN_BUFFER_INFO
|
||||
sr scrollRegion
|
||||
buffer bytes.Buffer
|
||||
attributes uint16
|
||||
inverted bool
|
||||
wrapNext bool
|
||||
drewMarginByte bool
|
||||
originMode bool
|
||||
marginByte byte
|
||||
curInfo *CONSOLE_SCREEN_BUFFER_INFO
|
||||
curPos COORD
|
||||
logf func(string, ...interface{})
|
||||
}
|
||||
|
||||
type Option func(*windowsAnsiEventHandler)
|
||||
|
||||
func WithLogf(f func(string, ...interface{})) Option {
|
||||
return func(w *windowsAnsiEventHandler) {
|
||||
w.logf = f
|
||||
}
|
||||
}
|
||||
|
||||
func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
|
||||
infoReset, err := GetConsoleScreenBufferInfo(fd)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
h := &windowsAnsiEventHandler{
|
||||
fd: fd,
|
||||
file: file,
|
||||
infoReset: infoReset,
|
||||
attributes: infoReset.Attributes,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(h)
|
||||
}
|
||||
|
||||
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
|
||||
logFile, _ := os.Create("winEventHandler.log")
|
||||
logger := log.New(logFile, "", log.LstdFlags)
|
||||
if h.logf != nil {
|
||||
l := h.logf
|
||||
h.logf = func(s string, v ...interface{}) {
|
||||
l(s, v...)
|
||||
logger.Printf(s, v...)
|
||||
}
|
||||
} else {
|
||||
h.logf = logger.Printf
|
||||
}
|
||||
}
|
||||
|
||||
if h.logf == nil {
|
||||
h.logf = func(string, ...interface{}) {}
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
type scrollRegion struct {
|
||||
top int16
|
||||
bottom int16
|
||||
}
|
||||
|
||||
// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
|
||||
// current cursor position and scroll region settings, in which case it returns
|
||||
// true. If no special handling is necessary, then it does nothing and returns
|
||||
// false.
|
||||
//
|
||||
// In the false case, the caller should ensure that a carriage return
|
||||
// and line feed are inserted or that the text is otherwise wrapped.
|
||||
func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
|
||||
if h.wrapNext {
|
||||
if err := h.Flush(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
h.clearWrap()
|
||||
}
|
||||
pos, info, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
sr := h.effectiveSr(info.Window)
|
||||
if pos.Y == sr.bottom {
|
||||
// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
|
||||
// is the full window.
|
||||
if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
|
||||
if includeCR {
|
||||
pos.X = 0
|
||||
h.updatePos(pos)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// A custom scroll region is active. Scroll the window manually to simulate
|
||||
// the LF.
|
||||
if err := h.Flush(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
h.logf("Simulating LF inside scroll region")
|
||||
if err := h.scrollUp(1); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if includeCR {
|
||||
pos.X = 0
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
|
||||
} else if pos.Y < info.Window.Bottom {
|
||||
// Let Windows handle the LF.
|
||||
pos.Y++
|
||||
if includeCR {
|
||||
pos.X = 0
|
||||
}
|
||||
h.updatePos(pos)
|
||||
return false, nil
|
||||
} else {
|
||||
// The cursor is at the bottom of the screen but outside the scroll
|
||||
// region. Skip the LF.
|
||||
h.logf("Simulating LF outside scroll region")
|
||||
if includeCR {
|
||||
if err := h.Flush(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
pos.X = 0
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// executeLF executes a LF without a CR.
|
||||
func (h *windowsAnsiEventHandler) executeLF() error {
|
||||
handled, err := h.simulateLF(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !handled {
|
||||
// Windows LF will reset the cursor column position. Write the LF
|
||||
// and restore the cursor position.
|
||||
pos, _, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
|
||||
if pos.X != 0 {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("Resetting cursor position for LF without CR")
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) Print(b byte) error {
|
||||
if h.wrapNext {
|
||||
h.buffer.WriteByte(h.marginByte)
|
||||
h.clearWrap()
|
||||
if _, err := h.simulateLF(true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
pos, info, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pos.X == info.Size.X-1 {
|
||||
h.wrapNext = true
|
||||
h.marginByte = b
|
||||
} else {
|
||||
pos.X++
|
||||
h.updatePos(pos)
|
||||
h.buffer.WriteByte(b)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) Execute(b byte) error {
|
||||
switch b {
|
||||
case ansiterm.ANSI_TAB:
|
||||
h.logf("Execute(TAB)")
|
||||
// Move to the next tab stop, but preserve auto-wrap if already set.
|
||||
if !h.wrapNext {
|
||||
pos, info, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pos.X = (pos.X + 8) - pos.X%8
|
||||
if pos.X >= info.Size.X {
|
||||
pos.X = info.Size.X - 1
|
||||
}
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
case ansiterm.ANSI_BEL:
|
||||
h.buffer.WriteByte(ansiterm.ANSI_BEL)
|
||||
return nil
|
||||
|
||||
case ansiterm.ANSI_BACKSPACE:
|
||||
if h.wrapNext {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.clearWrap()
|
||||
}
|
||||
pos, _, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pos.X > 0 {
|
||||
pos.X--
|
||||
h.updatePos(pos)
|
||||
h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
|
||||
}
|
||||
return nil
|
||||
|
||||
case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
|
||||
// Treat as true LF.
|
||||
return h.executeLF()
|
||||
|
||||
case ansiterm.ANSI_LINE_FEED:
|
||||
// Simulate a CR and LF for now since there is no way in go-ansiterm
|
||||
// to tell if the LF should include CR (and more things break when it's
|
||||
// missing than when it's incorrectly added).
|
||||
handled, err := h.simulateLF(true)
|
||||
if handled || err != nil {
|
||||
return err
|
||||
}
|
||||
return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
|
||||
|
||||
case ansiterm.ANSI_CARRIAGE_RETURN:
|
||||
if h.wrapNext {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.clearWrap()
|
||||
}
|
||||
pos, _, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pos.X != 0 {
|
||||
pos.X = 0
|
||||
h.updatePos(pos)
|
||||
h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
|
||||
}
|
||||
return nil
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUU(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorVertical(-param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUD(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorVertical(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUF(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorHorizontal(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUB(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorHorizontal(-param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CNL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorLine(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CPL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorLine(-param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CHA(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorColumn(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) VPA(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("VPA: [[%d]]", param)
|
||||
h.clearWrap()
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
window := h.getCursorWindow(info)
|
||||
position := info.CursorPosition
|
||||
position.Y = window.Top + int16(param) - 1
|
||||
return h.setCursorPosition(position, window)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUP: [[%d %d]]", row, col)
|
||||
h.clearWrap()
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
window := h.getCursorWindow(info)
|
||||
position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
|
||||
return h.setCursorPosition(position, window)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("HVP: [[%d %d]]", row, col)
|
||||
h.clearWrap()
|
||||
return h.CUP(row, col)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
|
||||
h.clearWrap()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
|
||||
h.clearWrap()
|
||||
h.originMode = enable
|
||||
return h.CUP(1, 1)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
|
||||
h.clearWrap()
|
||||
if err := h.ED(2); err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
targetWidth := int16(80)
|
||||
if use132 {
|
||||
targetWidth = 132
|
||||
}
|
||||
if info.Size.X < targetWidth {
|
||||
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
|
||||
h.logf("set buffer failed: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
window := info.Window
|
||||
window.Left = 0
|
||||
window.Right = targetWidth - 1
|
||||
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
|
||||
h.logf("set window failed: %v", err)
|
||||
return err
|
||||
}
|
||||
if info.Size.X > targetWidth {
|
||||
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
|
||||
h.logf("set buffer failed: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return SetConsoleCursorPosition(h.fd, COORD{0, 0})
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) ED(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("ED: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
|
||||
// [J -- Erases from the cursor to the end of the screen, including the cursor position.
|
||||
// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
|
||||
// [2J -- Erases the complete display. The cursor does not move.
|
||||
// Notes:
|
||||
// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
|
||||
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var start COORD
|
||||
var end COORD
|
||||
|
||||
switch param {
|
||||
case 0:
|
||||
start = info.CursorPosition
|
||||
end = COORD{info.Size.X - 1, info.Size.Y - 1}
|
||||
|
||||
case 1:
|
||||
start = COORD{0, 0}
|
||||
end = info.CursorPosition
|
||||
|
||||
case 2:
|
||||
start = COORD{0, 0}
|
||||
end = COORD{info.Size.X - 1, info.Size.Y - 1}
|
||||
}
|
||||
|
||||
err = h.clearRange(h.attributes, start, end)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the whole buffer was cleared, move the window to the top while preserving
|
||||
// the window-relative cursor position.
|
||||
if param == 2 {
|
||||
pos := info.CursorPosition
|
||||
window := info.Window
|
||||
pos.Y -= window.Top
|
||||
window.Bottom -= window.Top
|
||||
window.Top = 0
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) EL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("EL: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
|
||||
// [K -- Erases from the cursor to the end of the line, including the cursor position.
|
||||
// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
|
||||
// [2K -- Erases the complete line.
|
||||
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var start COORD
|
||||
var end COORD
|
||||
|
||||
switch param {
|
||||
case 0:
|
||||
start = info.CursorPosition
|
||||
end = COORD{info.Size.X, info.CursorPosition.Y}
|
||||
|
||||
case 1:
|
||||
start = COORD{0, info.CursorPosition.Y}
|
||||
end = info.CursorPosition
|
||||
|
||||
case 2:
|
||||
start = COORD{0, info.CursorPosition.Y}
|
||||
end = COORD{info.Size.X, info.CursorPosition.Y}
|
||||
}
|
||||
|
||||
err = h.clearRange(h.attributes, start, end)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) IL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("IL: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
return h.insertLines(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DL: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
return h.deleteLines(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) ICH(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("ICH: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
return h.insertCharacters(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DCH(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DCH: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
return h.deleteCharacters(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) SGR(params []int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
strings := []string{}
|
||||
for _, v := range params {
|
||||
strings = append(strings, strconv.Itoa(v))
|
||||
}
|
||||
|
||||
h.logf("SGR: [%v]", strings)
|
||||
|
||||
if len(params) <= 0 {
|
||||
h.attributes = h.infoReset.Attributes
|
||||
h.inverted = false
|
||||
} else {
|
||||
for _, attr := range params {
|
||||
|
||||
if attr == ansiterm.ANSI_SGR_RESET {
|
||||
h.attributes = h.infoReset.Attributes
|
||||
h.inverted = false
|
||||
continue
|
||||
}
|
||||
|
||||
h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
|
||||
}
|
||||
}
|
||||
|
||||
attributes := h.attributes
|
||||
if h.inverted {
|
||||
attributes = invertAttributes(attributes)
|
||||
}
|
||||
err := SetConsoleTextAttribute(h.fd, attributes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) SU(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("SU: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.scrollUp(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) SD(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("SD: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.scrollDown(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DA(params []string) error {
|
||||
h.logf("DA: [%v]", params)
|
||||
// DA cannot be implemented because it must send data on the VT100 input stream,
|
||||
// which is not available to go-ansiterm.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DECSTBM: [%d, %d]", top, bottom)
|
||||
|
||||
// Windows is 0 indexed, Linux is 1 indexed
|
||||
h.sr.top = int16(top - 1)
|
||||
h.sr.bottom = int16(bottom - 1)
|
||||
|
||||
// This command also moves the cursor to the origin.
|
||||
h.clearWrap()
|
||||
return h.CUP(1, 1)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) RI() error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("RI: []")
|
||||
h.clearWrap()
|
||||
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sr := h.effectiveSr(info.Window)
|
||||
if info.CursorPosition.Y == sr.top {
|
||||
return h.scrollDown(1)
|
||||
}
|
||||
|
||||
return h.moveCursorVertical(-1)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) IND() error {
|
||||
h.logf("IND: []")
|
||||
return h.executeLF()
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) Flush() error {
|
||||
h.curInfo = nil
|
||||
if h.buffer.Len() > 0 {
|
||||
h.logf("Flush: [%s]", h.buffer.Bytes())
|
||||
if _, err := h.buffer.WriteTo(h.file); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if h.wrapNext && !h.drewMarginByte {
|
||||
h.logf("Flush: drawing margin byte '%c'", h.marginByte)
|
||||
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
|
||||
size := COORD{1, 1}
|
||||
position := COORD{0, 0}
|
||||
region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
|
||||
if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil {
|
||||
return err
|
||||
}
|
||||
h.drewMarginByte = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// cacheConsoleInfo ensures that the current console screen information has been queried
|
||||
// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
|
||||
func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
|
||||
if h.curInfo == nil {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return COORD{}, nil, err
|
||||
}
|
||||
h.curInfo = info
|
||||
h.curPos = info.CursorPosition
|
||||
}
|
||||
return h.curPos, h.curInfo, nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
|
||||
if h.curInfo == nil {
|
||||
panic("failed to call getCurrentInfo before calling updatePos")
|
||||
}
|
||||
h.curPos = pos
|
||||
}
|
||||
|
||||
// clearWrap clears the state where the cursor is in the margin
|
||||
// waiting for the next character before wrapping the line. This must
|
||||
// be done before most operations that act on the cursor.
|
||||
func (h *windowsAnsiEventHandler) clearWrap() {
|
||||
h.wrapNext = false
|
||||
h.drewMarginByte = false
|
||||
}
|
||||
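The vendored event handler above translates parsed ANSI events (Print, Execute, CUP, ED, SGR, ...) into Win32 console calls. A minimal usage sketch follows; it is not part of this diff, is Windows-only, and the winterm.CreateWinEventHandler / ansiterm.CreateParser names are taken from the vendored go-ansiterm API as understood here, so treat them as assumptions.

package main

import (
	"os"

	"github.com/Azure/go-ansiterm"
	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	// Wrap stdout's console handle in the Windows ANSI event handler (assumed constructor).
	handler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
	// The parser turns raw ANSI escape sequences into calls on the handler.
	parser := ansiterm.CreateParser("Ground", handler)
	// Clear the screen, home the cursor, and print a line.
	parser.Parse([]byte("\x1b[2J\x1b[1;1Hhello\r\n"))
}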
18 vendor/github.com/Microsoft/go-winio/file.go generated vendored
@ -16,6 +16,7 @@ import (
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult

type atomicBool int32
@ -79,6 +80,7 @@ type win32File struct {
	wg            sync.WaitGroup
	wgLock        sync.RWMutex
	closing       atomicBool
	socket        bool
	readDeadline  deadlineHandler
	writeDeadline deadlineHandler
}
@ -109,7 +111,13 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
}

func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
	return makeWin32File(h)
	// If we return the result of makeWin32File directly, it can result in an
	// interface-wrapped nil, rather than a nil interface value.
	f, err := makeWin32File(h)
	if err != nil {
		return nil, err
	}
	return f, nil
}

// closeHandle closes the resources associated with a Win32 handle
@ -190,6 +198,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
			if f.closing.isSet() {
				err = ErrFileClosed
			}
		} else if err != nil && f.socket {
			// err is from Win32. Query the overlapped structure to get the winsock error.
			var bytes, flags uint32
			err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
		}
	case <-timeout:
		cancelIoEx(f.handle, &c.o)
@ -265,6 +277,10 @@ func (f *win32File) Flush() error {
	return syscall.FlushFileBuffers(f.handle)
}

func (f *win32File) Fd() uintptr {
	return uintptr(f.handle)
}

func (d *deadlineHandler) set(deadline time.Time) error {
	d.setLock.Lock()
	defer d.setLock.Unlock()
9 vendor/github.com/Microsoft/go-winio/go.mod generated vendored Normal file
@ -0,0 +1,9 @@
module github.com/Microsoft/go-winio

go 1.12

require (
	github.com/pkg/errors v0.8.1
	github.com/sirupsen/logrus v1.4.1
	golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
)
18 vendor/github.com/Microsoft/go-winio/go.sum generated vendored Normal file
@ -0,0 +1,18 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
305 vendor/github.com/Microsoft/go-winio/hvsock.go generated vendored Normal file
@ -0,0 +1,305 @@
|
|||
package winio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Microsoft/go-winio/pkg/guid"
|
||||
)
|
||||
|
||||
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
|
||||
|
||||
const (
|
||||
afHvSock = 34 // AF_HYPERV
|
||||
|
||||
socketError = ^uintptr(0)
|
||||
)
|
||||
|
||||
// An HvsockAddr is an address for a AF_HYPERV socket.
|
||||
type HvsockAddr struct {
|
||||
VMID guid.GUID
|
||||
ServiceID guid.GUID
|
||||
}
|
||||
|
||||
type rawHvsockAddr struct {
|
||||
Family uint16
|
||||
_ uint16
|
||||
VMID guid.GUID
|
||||
ServiceID guid.GUID
|
||||
}
|
||||
|
||||
// Network returns the address's network name, "hvsock".
|
||||
func (addr *HvsockAddr) Network() string {
|
||||
return "hvsock"
|
||||
}
|
||||
|
||||
func (addr *HvsockAddr) String() string {
|
||||
return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
|
||||
}
|
||||
|
||||
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
|
||||
func VsockServiceID(port uint32) guid.GUID {
|
||||
g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
|
||||
g.Data1 = port
|
||||
return g
|
||||
}
|
||||
|
||||
func (addr *HvsockAddr) raw() rawHvsockAddr {
|
||||
return rawHvsockAddr{
|
||||
Family: afHvSock,
|
||||
VMID: addr.VMID,
|
||||
ServiceID: addr.ServiceID,
|
||||
}
|
||||
}
|
||||
|
||||
func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
|
||||
addr.VMID = raw.VMID
|
||||
addr.ServiceID = raw.ServiceID
|
||||
}
|
||||
|
||||
// HvsockListener is a socket listener for the AF_HYPERV address family.
|
||||
type HvsockListener struct {
|
||||
sock *win32File
|
||||
addr HvsockAddr
|
||||
}
|
||||
|
||||
// HvsockConn is a connected socket of the AF_HYPERV address family.
|
||||
type HvsockConn struct {
|
||||
sock *win32File
|
||||
local, remote HvsockAddr
|
||||
}
|
||||
|
||||
func newHvSocket() (*win32File, error) {
|
||||
fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("socket", err)
|
||||
}
|
||||
f, err := makeWin32File(fd)
|
||||
if err != nil {
|
||||
syscall.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
f.socket = true
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// ListenHvsock listens for connections on the specified hvsock address.
|
||||
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
|
||||
l := &HvsockListener{addr: *addr}
|
||||
sock, err := newHvSocket()
|
||||
if err != nil {
|
||||
return nil, l.opErr("listen", err)
|
||||
}
|
||||
sa := addr.raw()
|
||||
err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
|
||||
if err != nil {
|
||||
return nil, l.opErr("listen", os.NewSyscallError("socket", err))
|
||||
}
|
||||
err = syscall.Listen(sock.handle, 16)
|
||||
if err != nil {
|
||||
return nil, l.opErr("listen", os.NewSyscallError("listen", err))
|
||||
}
|
||||
return &HvsockListener{sock: sock, addr: *addr}, nil
|
||||
}
|
||||
|
||||
func (l *HvsockListener) opErr(op string, err error) error {
|
||||
return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
|
||||
}
|
||||
|
||||
// Addr returns the listener's network address.
|
||||
func (l *HvsockListener) Addr() net.Addr {
|
||||
return &l.addr
|
||||
}
|
||||
|
||||
// Accept waits for the next connection and returns it.
|
||||
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
|
||||
sock, err := newHvSocket()
|
||||
if err != nil {
|
||||
return nil, l.opErr("accept", err)
|
||||
}
|
||||
defer func() {
|
||||
if sock != nil {
|
||||
sock.Close()
|
||||
}
|
||||
}()
|
||||
c, err := l.sock.prepareIo()
|
||||
if err != nil {
|
||||
return nil, l.opErr("accept", err)
|
||||
}
|
||||
defer l.sock.wg.Done()
|
||||
|
||||
// AcceptEx, per documentation, requires an extra 16 bytes per address.
|
||||
const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
|
||||
var addrbuf [addrlen * 2]byte
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
|
||||
_, err = l.sock.asyncIo(c, nil, bytes, err)
|
||||
if err != nil {
|
||||
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
|
||||
}
|
||||
conn := &HvsockConn{
|
||||
sock: sock,
|
||||
}
|
||||
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
|
||||
conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
|
||||
sock = nil
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// Close closes the listener, causing any pending Accept calls to fail.
|
||||
func (l *HvsockListener) Close() error {
|
||||
return l.sock.Close()
|
||||
}
|
||||
|
||||
/* Need to finish ConnectEx handling
|
||||
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
|
||||
sock, err := newHvSocket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if sock != nil {
|
||||
sock.Close()
|
||||
}
|
||||
}()
|
||||
c, err := sock.prepareIo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer sock.wg.Done()
|
||||
var bytes uint32
|
||||
err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
|
||||
_, err = sock.asyncIo(ctx, c, nil, bytes, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn := &HvsockConn{
|
||||
sock: sock,
|
||||
remote: *addr,
|
||||
}
|
||||
sock = nil
|
||||
return conn, nil
|
||||
}
|
||||
*/
|
||||
|
||||
func (conn *HvsockConn) opErr(op string, err error) error {
|
||||
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) Read(b []byte) (int, error) {
|
||||
c, err := conn.sock.prepareIo()
|
||||
if err != nil {
|
||||
return 0, conn.opErr("read", err)
|
||||
}
|
||||
defer conn.sock.wg.Done()
|
||||
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
|
||||
var flags, bytes uint32
|
||||
err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
|
||||
n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
|
||||
if err != nil {
|
||||
if _, ok := err.(syscall.Errno); ok {
|
||||
err = os.NewSyscallError("wsarecv", err)
|
||||
}
|
||||
return 0, conn.opErr("read", err)
|
||||
} else if n == 0 {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) Write(b []byte) (int, error) {
|
||||
t := 0
|
||||
for len(b) != 0 {
|
||||
n, err := conn.write(b)
|
||||
if err != nil {
|
||||
return t + n, err
|
||||
}
|
||||
t += n
|
||||
b = b[n:]
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) write(b []byte) (int, error) {
|
||||
c, err := conn.sock.prepareIo()
|
||||
if err != nil {
|
||||
return 0, conn.opErr("write", err)
|
||||
}
|
||||
defer conn.sock.wg.Done()
|
||||
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
|
||||
var bytes uint32
|
||||
err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
|
||||
n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
|
||||
if err != nil {
|
||||
if _, ok := err.(syscall.Errno); ok {
|
||||
err = os.NewSyscallError("wsasend", err)
|
||||
}
|
||||
return 0, conn.opErr("write", err)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close closes the socket connection, failing any pending read or write calls.
|
||||
func (conn *HvsockConn) Close() error {
|
||||
return conn.sock.Close()
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) shutdown(how int) error {
	// Shut down the direction the caller asked for rather than always SHUT_RD.
	err := syscall.Shutdown(conn.sock.handle, how)
	if err != nil {
		return os.NewSyscallError("shutdown", err)
	}
	return nil
}
|
||||
|
||||
// CloseRead shuts down the read end of the socket.
|
||||
func (conn *HvsockConn) CloseRead() error {
|
||||
err := conn.shutdown(syscall.SHUT_RD)
|
||||
if err != nil {
|
||||
return conn.opErr("close", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
|
||||
// no more data will be written.
|
||||
func (conn *HvsockConn) CloseWrite() error {
|
||||
err := conn.shutdown(syscall.SHUT_WR)
|
||||
if err != nil {
|
||||
return conn.opErr("close", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LocalAddr returns the local address of the connection.
|
||||
func (conn *HvsockConn) LocalAddr() net.Addr {
|
||||
return &conn.local
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote address of the connection.
|
||||
func (conn *HvsockConn) RemoteAddr() net.Addr {
|
||||
return &conn.remote
|
||||
}
|
||||
|
||||
// SetDeadline implements the net.Conn SetDeadline method.
|
||||
func (conn *HvsockConn) SetDeadline(t time.Time) error {
|
||||
conn.SetReadDeadline(t)
|
||||
conn.SetWriteDeadline(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetReadDeadline implements the net.Conn SetReadDeadline method.
|
||||
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
|
||||
return conn.sock.SetReadDeadline(t)
|
||||
}
|
||||
|
||||
// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
|
||||
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
|
||||
return conn.sock.SetWriteDeadline(t)
|
||||
}
|
||||
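For context, a minimal usage sketch of the hvsock API added above (not part of this diff). The VMID is a placeholder zero GUID; a real caller supplies the partition or wildcard GUID for its environment, and the service port 5000 is arbitrary.

package main

import (
	"log"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	addr := &winio.HvsockAddr{
		VMID:      guid.GUID{},               // placeholder VM/partition ID
		ServiceID: winio.VsockServiceID(5000), // AF_VSOCK-style port 5000
	}
	l, err := winio.ListenHvsock(addr)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	conn, err := l.Accept()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Printf("accepted hvsock connection from %s", conn.RemoteAddr())
}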
247 vendor/github.com/Microsoft/go-winio/pipe.go generated vendored
@ -3,10 +3,13 @@
|
|||
package winio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
|
@ -18,6 +21,48 @@ import (
|
|||
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
|
||||
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
|
||||
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
|
||||
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
|
||||
|
||||
type ioStatusBlock struct {
|
||||
Status, Information uintptr
|
||||
}
|
||||
|
||||
type objectAttributes struct {
|
||||
Length uintptr
|
||||
RootDirectory uintptr
|
||||
ObjectName *unicodeString
|
||||
Attributes uintptr
|
||||
SecurityDescriptor *securityDescriptor
|
||||
SecurityQoS uintptr
|
||||
}
|
||||
|
||||
type unicodeString struct {
|
||||
Length uint16
|
||||
MaximumLength uint16
|
||||
Buffer uintptr
|
||||
}
|
||||
|
||||
type securityDescriptor struct {
|
||||
Revision byte
|
||||
Sbz1 byte
|
||||
Control uint16
|
||||
Owner uintptr
|
||||
Group uintptr
|
||||
Sacl uintptr
|
||||
Dacl uintptr
|
||||
}
|
||||
|
||||
type ntstatus int32
|
||||
|
||||
func (status ntstatus) Err() error {
|
||||
if status >= 0 {
|
||||
return nil
|
||||
}
|
||||
return rtlNtStatusToDosError(status)
|
||||
}
|
||||
|
||||
const (
|
||||
cERROR_PIPE_BUSY = syscall.Errno(231)
|
||||
|
|
@ -25,21 +70,20 @@ const (
|
|||
cERROR_PIPE_CONNECTED = syscall.Errno(535)
|
||||
cERROR_SEM_TIMEOUT = syscall.Errno(121)
|
||||
|
||||
cPIPE_ACCESS_DUPLEX = 0x3
|
||||
cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
|
||||
cSECURITY_SQOS_PRESENT = 0x100000
|
||||
cSECURITY_ANONYMOUS = 0
|
||||
|
||||
cPIPE_REJECT_REMOTE_CLIENTS = 0x8
|
||||
|
||||
cPIPE_UNLIMITED_INSTANCES = 255
|
||||
|
||||
cNMPWAIT_USE_DEFAULT_WAIT = 0
|
||||
cNMPWAIT_NOWAIT = 1
|
||||
cSECURITY_SQOS_PRESENT = 0x100000
|
||||
cSECURITY_ANONYMOUS = 0
|
||||
|
||||
cPIPE_TYPE_MESSAGE = 4
|
||||
|
||||
cPIPE_READMODE_MESSAGE = 2
|
||||
|
||||
cFILE_OPEN = 1
|
||||
cFILE_CREATE = 2
|
||||
|
||||
cFILE_PIPE_MESSAGE_TYPE = 1
|
||||
cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
|
||||
|
||||
cSE_DACL_PRESENT = 4
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -137,9 +181,30 @@ func (s pipeAddress) String() string {
|
|||
return string(s)
|
||||
}
|
||||
|
||||
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
|
||||
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return syscall.Handle(0), ctx.Err()
|
||||
default:
|
||||
h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
if err == nil {
|
||||
return h, nil
|
||||
}
|
||||
if err != cERROR_PIPE_BUSY {
|
||||
return h, &os.PathError{Err: err, Op: "open", Path: *path}
|
||||
}
|
||||
// Wait 10 msec and try again. This is a rather simplistic
|
||||
// view, as we always try each 10 milliseconds.
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DialPipe connects to a named pipe by path, timing out if the connection
|
||||
// takes longer than the specified duration. If timeout is nil, then we use
|
||||
// a default timeout of 5 seconds. (We do not use WaitNamedPipe.)
|
||||
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
|
||||
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
||||
var absTimeout time.Time
|
||||
if timeout != nil {
|
||||
|
|
@ -147,23 +212,22 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
|||
} else {
|
||||
absTimeout = time.Now().Add(time.Second * 2)
|
||||
}
|
||||
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
|
||||
conn, err := DialPipeContext(ctx, path)
|
||||
if err == context.DeadlineExceeded {
|
||||
return nil, ErrTimeout
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
|
||||
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
|
||||
// cancellation or timeout.
|
||||
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
|
||||
var err error
|
||||
var h syscall.Handle
|
||||
for {
|
||||
h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
if err != cERROR_PIPE_BUSY {
|
||||
break
|
||||
}
|
||||
if time.Now().After(absTimeout) {
|
||||
return nil, ErrTimeout
|
||||
}
|
||||
|
||||
// Wait 10 msec and try again. This is a rather simplistic
|
||||
// view, as we always try each 10 milliseconds.
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
h, err = tryDialPipe(ctx, &path)
|
||||
if err != nil {
|
||||
return nil, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var flags uint32
|
||||
|
|
@ -194,43 +258,87 @@ type acceptResponse struct {
|
|||
}
|
||||
|
||||
type win32PipeListener struct {
|
||||
firstHandle syscall.Handle
|
||||
path string
|
||||
securityDescriptor []byte
|
||||
config PipeConfig
|
||||
acceptCh chan (chan acceptResponse)
|
||||
closeCh chan int
|
||||
doneCh chan int
|
||||
firstHandle syscall.Handle
|
||||
path string
|
||||
config PipeConfig
|
||||
acceptCh chan (chan acceptResponse)
|
||||
closeCh chan int
|
||||
doneCh chan int
|
||||
}
|
||||
|
||||
func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
|
||||
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
|
||||
if first {
|
||||
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
|
||||
}
|
||||
|
||||
var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
|
||||
if c.MessageMode {
|
||||
mode |= cPIPE_TYPE_MESSAGE
|
||||
}
|
||||
|
||||
sa := &syscall.SecurityAttributes{}
|
||||
sa.Length = uint32(unsafe.Sizeof(*sa))
|
||||
if securityDescriptor != nil {
|
||||
len := uint32(len(securityDescriptor))
|
||||
sa.SecurityDescriptor = localAlloc(0, len)
|
||||
defer localFree(sa.SecurityDescriptor)
|
||||
copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
|
||||
}
|
||||
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
|
||||
func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
|
||||
path16, err := syscall.UTF16FromString(path)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
|
||||
var oa objectAttributes
|
||||
oa.Length = unsafe.Sizeof(oa)
|
||||
|
||||
var ntPath unicodeString
|
||||
if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
defer localFree(ntPath.Buffer)
|
||||
oa.ObjectName = &ntPath
|
||||
|
||||
// The security descriptor is only needed for the first pipe.
|
||||
if first {
|
||||
if sd != nil {
|
||||
len := uint32(len(sd))
|
||||
sdb := localAlloc(0, len)
|
||||
defer localFree(sdb)
|
||||
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
|
||||
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
|
||||
} else {
|
||||
// Construct the default named pipe security descriptor.
|
||||
var dacl uintptr
|
||||
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
|
||||
return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
|
||||
}
|
||||
defer localFree(dacl)
|
||||
|
||||
sdb := &securityDescriptor{
|
||||
Revision: 1,
|
||||
Control: cSE_DACL_PRESENT,
|
||||
Dacl: dacl,
|
||||
}
|
||||
oa.SecurityDescriptor = sdb
|
||||
}
|
||||
}
|
||||
|
||||
typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
|
||||
if c.MessageMode {
|
||||
typ |= cFILE_PIPE_MESSAGE_TYPE
|
||||
}
|
||||
|
||||
disposition := uint32(cFILE_OPEN)
|
||||
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
|
||||
if first {
|
||||
disposition = cFILE_CREATE
|
||||
// By not asking for read or write access, the named pipe file system
|
||||
// will put this pipe into an initially disconnected state, blocking
|
||||
// client connections until the next call with first == false.
|
||||
access = syscall.SYNCHRONIZE
|
||||
}
|
||||
|
||||
timeout := int64(-50 * 10000) // 50ms
|
||||
|
||||
var (
|
||||
h syscall.Handle
|
||||
iosb ioStatusBlock
|
||||
)
|
||||
err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
|
||||
if err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
|
||||
runtime.KeepAlive(ntPath)
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
|
||||
h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
|
||||
h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -341,32 +449,13 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Create a client handle and connect it. This results in the pipe
|
||||
// instance always existing, so that clients see ERROR_PIPE_BUSY
|
||||
// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
|
||||
// up so that no other instances can be used. This would have been
|
||||
// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
|
||||
// instead of CreateNamedPipe. (Apparently created named pipes are
|
||||
// considered to be in listening state regardless of whether any
|
||||
// active calls to ConnectNamedPipe are outstanding.)
|
||||
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
if err != nil {
|
||||
syscall.Close(h)
|
||||
return nil, err
|
||||
}
|
||||
// Close the client handle. The server side of the instance will
|
||||
// still be busy, leading to ERROR_PIPE_BUSY instead of
|
||||
// ERROR_NOT_FOUND, as long as we don't close the server handle,
|
||||
// or disconnect the client with DisconnectNamedPipe.
|
||||
syscall.Close(h2)
|
||||
l := &win32PipeListener{
|
||||
firstHandle: h,
|
||||
path: path,
|
||||
securityDescriptor: sd,
|
||||
config: *c,
|
||||
acceptCh: make(chan (chan acceptResponse)),
|
||||
closeCh: make(chan int),
|
||||
doneCh: make(chan int),
|
||||
firstHandle: h,
|
||||
path: path,
|
||||
config: *c,
|
||||
acceptCh: make(chan (chan acceptResponse)),
|
||||
closeCh: make(chan int),
|
||||
doneCh: make(chan int),
|
||||
}
|
||||
go l.listenerRoutine()
|
||||
return l, nil
|
||||
|
|
|
|||
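A short usage sketch of the named-pipe API touched by this diff (not part of the change itself); the pipe name is invented for the example, and the explicit two-second timeout mirrors the default documented above.

package main

import (
	"log"
	"time"

	"github.com/Microsoft/go-winio"
)

func main() {
	// nil PipeConfig uses the defaults (byte mode, default buffer sizes).
	l, err := winio.ListenPipe(`\\.\pipe\example-pipe`, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	go func() {
		c, err := l.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		c.Write([]byte("hello\n"))
	}()

	timeout := 2 * time.Second
	conn, err := winio.DialPipe(`\\.\pipe\example-pipe`, &timeout)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	buf := make([]byte, 64)
	n, _ := conn.Read(buf)
	log.Printf("read %q", buf[:n])
}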
235 vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go generated vendored Normal file
@ -0,0 +1,235 @@
|
|||
// Package guid provides a GUID type. The backing structure for a GUID is
|
||||
// identical to that used by the golang.org/x/sys/windows GUID type.
|
||||
// There are two main binary encodings used for a GUID, the big-endian encoding,
|
||||
// and the Windows (mixed-endian) encoding. See here for details:
|
||||
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
|
||||
package guid
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Variant specifies which GUID variant (or "type") of the GUID. It determines
|
||||
// how the entirety of the rest of the GUID is interpreted.
|
||||
type Variant uint8
|
||||
|
||||
// The variants specified by RFC 4122.
|
||||
const (
|
||||
// VariantUnknown specifies a GUID variant which does not conform to one of
|
||||
// the variant encodings specified in RFC 4122.
|
||||
VariantUnknown Variant = iota
|
||||
VariantNCS
|
||||
VariantRFC4122
|
||||
VariantMicrosoft
|
||||
VariantFuture
|
||||
)
|
||||
|
||||
// Version specifies how the bits in the GUID were generated. For instance, a
|
||||
// version 4 GUID is randomly generated, and a version 5 is generated from the
|
||||
// hash of an input string.
|
||||
type Version uint8
|
||||
|
||||
var _ = (encoding.TextMarshaler)(GUID{})
|
||||
var _ = (encoding.TextUnmarshaler)(&GUID{})
|
||||
|
||||
// GUID represents a GUID/UUID. It has the same structure as
|
||||
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
|
||||
// that type. It is defined as its own type so that stringification and
|
||||
// marshaling can be supported. The representation matches that used by native
|
||||
// Windows code.
|
||||
type GUID windows.GUID
|
||||
|
||||
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
|
||||
func NewV4() (GUID, error) {
|
||||
var b [16]byte
|
||||
if _, err := rand.Read(b[:]); err != nil {
|
||||
return GUID{}, err
|
||||
}
|
||||
|
||||
g := FromArray(b)
|
||||
g.setVersion(4) // Version 4 means randomly generated.
|
||||
g.setVariant(VariantRFC4122)
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
|
||||
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
|
||||
// and the sample code treats it as a series of bytes, so we do the same here.
|
||||
//
|
||||
// Some implementations, such as those found on Windows, treat the name as a
|
||||
// big-endian UTF16 stream of bytes. If that is desired, the string can be
|
||||
// encoded as such before being passed to this function.
|
||||
func NewV5(namespace GUID, name []byte) (GUID, error) {
|
||||
b := sha1.New()
|
||||
namespaceBytes := namespace.ToArray()
|
||||
b.Write(namespaceBytes[:])
|
||||
b.Write(name)
|
||||
|
||||
a := [16]byte{}
|
||||
copy(a[:], b.Sum(nil))
|
||||
|
||||
g := FromArray(a)
|
||||
g.setVersion(5) // Version 5 means generated from a string.
|
||||
g.setVariant(VariantRFC4122)
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
func fromArray(b [16]byte, order binary.ByteOrder) GUID {
|
||||
var g GUID
|
||||
g.Data1 = order.Uint32(b[0:4])
|
||||
g.Data2 = order.Uint16(b[4:6])
|
||||
g.Data3 = order.Uint16(b[6:8])
|
||||
copy(g.Data4[:], b[8:16])
|
||||
return g
|
||||
}
|
||||
|
||||
func (g GUID) toArray(order binary.ByteOrder) [16]byte {
|
||||
b := [16]byte{}
|
||||
order.PutUint32(b[0:4], g.Data1)
|
||||
order.PutUint16(b[4:6], g.Data2)
|
||||
order.PutUint16(b[6:8], g.Data3)
|
||||
copy(b[8:16], g.Data4[:])
|
||||
return b
|
||||
}
|
||||
|
||||
// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
|
||||
func FromArray(b [16]byte) GUID {
|
||||
return fromArray(b, binary.BigEndian)
|
||||
}
|
||||
|
||||
// ToArray returns an array of 16 bytes representing the GUID in big-endian
|
||||
// encoding.
|
||||
func (g GUID) ToArray() [16]byte {
|
||||
return g.toArray(binary.BigEndian)
|
||||
}
|
||||
|
||||
// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
|
||||
func FromWindowsArray(b [16]byte) GUID {
|
||||
return fromArray(b, binary.LittleEndian)
|
||||
}
|
||||
|
||||
// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
|
||||
// encoding.
|
||||
func (g GUID) ToWindowsArray() [16]byte {
|
||||
return g.toArray(binary.LittleEndian)
|
||||
}
|
||||
|
||||
func (g GUID) String() string {
|
||||
return fmt.Sprintf(
|
||||
"%08x-%04x-%04x-%04x-%012x",
|
||||
g.Data1,
|
||||
g.Data2,
|
||||
g.Data3,
|
||||
g.Data4[:2],
|
||||
g.Data4[2:])
|
||||
}
|
||||
|
||||
// FromString parses a string containing a GUID and returns the GUID. The only
|
||||
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
|
||||
// format.
|
||||
func FromString(s string) (GUID, error) {
|
||||
if len(s) != 36 {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
|
||||
var g GUID
|
||||
|
||||
data1, err := strconv.ParseUint(s[0:8], 16, 32)
|
||||
if err != nil {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
g.Data1 = uint32(data1)
|
||||
|
||||
data2, err := strconv.ParseUint(s[9:13], 16, 16)
|
||||
if err != nil {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
g.Data2 = uint16(data2)
|
||||
|
||||
data3, err := strconv.ParseUint(s[14:18], 16, 16)
|
||||
if err != nil {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
g.Data3 = uint16(data3)
|
||||
|
||||
for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
|
||||
v, err := strconv.ParseUint(s[x:x+2], 16, 8)
|
||||
if err != nil {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
g.Data4[i] = uint8(v)
|
||||
}
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
func (g *GUID) setVariant(v Variant) {
|
||||
d := g.Data4[0]
|
||||
switch v {
|
||||
case VariantNCS:
|
||||
d = (d & 0x7f)
|
||||
case VariantRFC4122:
|
||||
d = (d & 0x3f) | 0x80
|
||||
case VariantMicrosoft:
|
||||
d = (d & 0x1f) | 0xc0
|
||||
case VariantFuture:
|
||||
d = (d & 0x0f) | 0xe0
|
||||
case VariantUnknown:
|
||||
fallthrough
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid variant: %d", v))
|
||||
}
|
||||
g.Data4[0] = d
|
||||
}
|
||||
|
||||
// Variant returns the GUID variant, as defined in RFC 4122.
|
||||
func (g GUID) Variant() Variant {
|
||||
b := g.Data4[0]
|
||||
if b&0x80 == 0 {
|
||||
return VariantNCS
|
||||
} else if b&0xc0 == 0x80 {
|
||||
return VariantRFC4122
|
||||
} else if b&0xe0 == 0xc0 {
|
||||
return VariantMicrosoft
|
||||
} else if b&0xe0 == 0xe0 {
|
||||
return VariantFuture
|
||||
}
|
||||
return VariantUnknown
|
||||
}
|
||||
|
||||
func (g *GUID) setVersion(v Version) {
|
||||
g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
|
||||
}
|
||||
|
||||
// Version returns the GUID version, as defined in RFC 4122.
|
||||
func (g GUID) Version() Version {
|
||||
return Version((g.Data3 & 0xF000) >> 12)
|
||||
}
|
||||
|
||||
// MarshalText returns the textual representation of the GUID.
|
||||
func (g GUID) MarshalText() ([]byte, error) {
|
||||
return []byte(g.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText takes the textual representation of a GUID, and unmarhals it
|
||||
// into this GUID.
|
||||
func (g *GUID) UnmarshalText(text []byte) error {
|
||||
g2, err := FromString(string(text))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*g = g2
|
||||
return nil
|
||||
}
|
||||
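A small usage sketch of the guid package added above (not part of this diff). It only uses identifiers defined in the file (FromString, NewV4, String, Variant, Version) and builds on Windows only, since the package depends on golang.org/x/sys/windows.

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Parse the hvsock VSOCK template GUID used elsewhere in this diff.
	g, err := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(g.String(), g.Variant(), g.Version())

	// A fresh random (version 4) GUID.
	v4, err := guid.NewV4()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v4)
}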
2 vendor/github.com/Microsoft/go-winio/syscall.go generated vendored
@ -1,3 +1,3 @@
package winio

//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
88 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go generated vendored
@ -1,4 +1,4 @@
|
|||
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package winio
|
||||
|
||||
|
|
@ -38,19 +38,25 @@ func errnoErr(e syscall.Errno) error {
|
|||
|
||||
var (
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
|
||||
modntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
|
||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
||||
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
|
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
||||
procCreateFileW = modkernel32.NewProc("CreateFileW")
|
||||
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
|
||||
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
|
||||
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
|
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
|
||||
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
|
||||
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
|
||||
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
|
||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
||||
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
|
||||
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
|
||||
|
|
@ -69,6 +75,7 @@ var (
|
|||
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
|
||||
procBackupRead = modkernel32.NewProc("BackupRead")
|
||||
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
||||
procbind = modws2_32.NewProc("bind")
|
||||
)
|
||||
|
||||
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
|
|
@ -120,6 +127,24 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
|
|||
return
|
||||
}
|
||||
|
||||
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||
var _p0 uint32
|
||||
if wait {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
|
||||
if r1 == 0 {
|
||||
|
|
@ -176,27 +201,6 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityA
|
|||
return
|
||||
}
|
||||
|
||||
func waitNamedPipe(name string, timeout uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _waitNamedPipe(_p0, timeout)
|
||||
}
|
||||
|
||||
func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
||||
if r1 == 0 {
|
||||
|
|
@ -227,6 +231,32 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
|
|||
return
|
||||
}
|
||||
|
||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(accountName)
|
||||
|
|
@ -518,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
|
||||
if r1 == socketError {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
|||
21 vendor/github.com/Microsoft/hcsshim/LICENSE generated vendored Normal file
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
57 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go generated vendored Normal file
@ -0,0 +1,57 @@
package osversion

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// OSVersion is a wrapper for Windows version information
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
type OSVersion struct {
	Version      uint32
	MajorVersion uint8
	MinorVersion uint8
	Build        uint16
}

// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
type osVersionInfoEx struct {
	OSVersionInfoSize uint32
	MajorVersion      uint32
	MinorVersion      uint32
	BuildNumber       uint32
	PlatformID        uint32
	CSDVersion        [128]uint16
	ServicePackMajor  uint16
	ServicePackMinor  uint16
	SuiteMask         uint16
	ProductType       byte
	Reserve           byte
}

// Get gets the operating system version on Windows.
// The calling application must be manifested to get the correct version information.
func Get() OSVersion {
	var err error
	osv := OSVersion{}
	osv.Version, err = windows.GetVersion()
	if err != nil {
		// GetVersion never fails.
		panic(err)
	}
	osv.MajorVersion = uint8(osv.Version & 0xFF)
	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
	osv.Build = uint16(osv.Version >> 16)
	return osv
}

// Build gets the build-number on Windows
// The calling application must be manifested to get the correct version information.
func Build() uint16 {
	return Get().Build
}

func (osv OSVersion) ToString() string {
	return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
}
27 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go generated vendored Normal file
@ -0,0 +1,27 @@
package osversion

const (
	// RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server
	// 2016 (ltsc2016) and Windows 10 (Anniversary Update).
	RS1 = 14393

	// RS2 (version 1703, codename "Redstone 2") was a client-only update, and
	// corresponds to Windows 10 (Creators Update).
	RS2 = 15063

	// RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server
	// 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update).
	RS3 = 16299

	// RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server
	// 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update).
	RS4 = 17134

	// RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server
	// 2019 (ltsc2019), and Windows 10 (October 2018 Update).
	RS5 = 17763

	// V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual
	// channel).
	V19H1 = 18362
)
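The two vendored osversion files above expose the running Windows build number and the well-known build constants. As a minimal illustration only (not part of this change), a caller might gate behaviour on the build like this; the import path is the vendored package, while the main package and printed strings are hypothetical:

// +build windows

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

func main() {
	// Compare the running build against the RS5 (Windows Server 2019 / 17763) constant.
	if osversion.Build() >= osversion.RS5 {
		fmt.Println("RS5 or newer:", osversion.Get().ToString())
	} else {
		fmt.Println("pre-RS5 build:", osversion.Get().ToString())
	}
}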
36 vendor/github.com/containerd/continuity/AUTHORS generated vendored Normal file
@ -0,0 +1,36 @@
Aaron Lehmann <aaron.lehmann@docker.com>
Akash Gupta <akagup@microsoft.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
Akihiro Suda <suda.kyoto@gmail.com>
Andrew Pennebaker <apennebaker@datapipe.com>
Brandon Philips <brandon.philips@coreos.com>
Brian Goff <cpuguy83@gmail.com>
Christopher Jones <tophj@linux.vnet.ibm.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Darren Stahl <darst@microsoft.com>
Derek McGowan <derek@mcg.dev>
Derek McGowan <derek@mcgstyle.net>
Edward Pilatowicz <edward.pilatowicz@oracle.com>
Ian Campbell <ijc@docker.com>
Ivan Markin <sw@nogoegst.net>
Justin Cormack <justin.cormack@docker.com>
Justin Cummins <sul3n3t@gmail.com>
Kasper Fabæch Brandt <poizan@poizan.dk>
Kir Kolyshkin <kolyshkin@gmail.com>
Michael Crosby <crosbymichael@gmail.com>
Michael Wan <zirenwan@gmail.com>
Niels de Vos <ndevos@redhat.com>
Phil Estes <estesp@gmail.com>
Phil Estes <estesp@linux.vnet.ibm.com>
Sam Whited <sam@samwhited.com>
Shengjing Zhu <zhsj@debian.org>
Stephen J Day <stephen.day@docker.com>
Tibor Vass <tibor@docker.com>
Tobias Klauser <tklauser@distanz.ch>
Tom Faulhaber <tffaulha@amazon.com>
Tonis Tiigi <tonistiigi@gmail.com>
Trevor Porter <trkporter@ucdavis.edu>
Wei Fu <fuweid89@gmail.com>
Wilbert van de Ridder <wilbert.ridder@gmail.com>
Xiaodong Ye <xiaodongy@vmware.com>
191 vendor/github.com/containerd/continuity/LICENSE generated vendored Normal file
@ -0,0 +1,191 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
176 vendor/github.com/containerd/continuity/fs/copy.go generated vendored Normal file
@ -0,0 +1,176 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var bufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
buffer := make([]byte, 32*1024)
|
||||
return &buffer
|
||||
},
|
||||
}
|
||||
|
||||
// XAttrErrorHandlers transform a non-nil xattr error.
|
||||
// Return nil to ignore an error.
|
||||
// xattrKey can be empty for listxattr operation.
|
||||
type XAttrErrorHandler func(dst, src, xattrKey string, err error) error
|
||||
|
||||
type copyDirOpts struct {
|
||||
xeh XAttrErrorHandler
|
||||
}
|
||||
|
||||
type CopyDirOpt func(*copyDirOpts) error
|
||||
|
||||
// WithXAttrErrorHandler allows specifying XAttrErrorHandler
|
||||
// If nil XAttrErrorHandler is specified (default), CopyDir stops
|
||||
// on a non-nil xattr error.
|
||||
func WithXAttrErrorHandler(xeh XAttrErrorHandler) CopyDirOpt {
|
||||
return func(o *copyDirOpts) error {
|
||||
o.xeh = xeh
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAllowXAttrErrors allows ignoring xattr errors.
|
||||
func WithAllowXAttrErrors() CopyDirOpt {
|
||||
xeh := func(dst, src, xattrKey string, err error) error {
|
||||
return nil
|
||||
}
|
||||
return WithXAttrErrorHandler(xeh)
|
||||
}
|
||||
|
||||
// CopyDir copies the directory from src to dst.
|
||||
// Most efficient copy of files is attempted.
|
||||
func CopyDir(dst, src string, opts ...CopyDirOpt) error {
|
||||
var o copyDirOpts
|
||||
for _, opt := range opts {
|
||||
if err := opt(&o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
inodes := map[uint64]string{}
|
||||
return copyDirectory(dst, src, inodes, &o)
|
||||
}
|
||||
|
||||
func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) error {
|
||||
stat, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to stat %s", src)
|
||||
}
|
||||
if !stat.IsDir() {
|
||||
return errors.Errorf("source %s is not directory", src)
|
||||
}
|
||||
|
||||
if st, err := os.Stat(dst); err != nil {
|
||||
if err := os.Mkdir(dst, stat.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to mkdir %s", dst)
|
||||
}
|
||||
} else if !st.IsDir() {
|
||||
return errors.Errorf("cannot copy to non-directory: %s", dst)
|
||||
} else {
|
||||
if err := os.Chmod(dst, stat.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod on %s", dst)
|
||||
}
|
||||
}
|
||||
|
||||
fis, err := ioutil.ReadDir(src)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to read %s", src)
|
||||
}
|
||||
|
||||
if err := copyFileInfo(stat, dst); err != nil {
|
||||
return errors.Wrapf(err, "failed to copy file info for %s", dst)
|
||||
}
|
||||
|
||||
if err := copyXAttrs(dst, src, o.xeh); err != nil {
|
||||
return errors.Wrap(err, "failed to copy xattrs")
|
||||
}
|
||||
|
||||
for _, fi := range fis {
|
||||
source := filepath.Join(src, fi.Name())
|
||||
target := filepath.Join(dst, fi.Name())
|
||||
|
||||
switch {
|
||||
case fi.IsDir():
|
||||
if err := copyDirectory(target, source, inodes, o); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
case (fi.Mode() & os.ModeType) == 0:
|
||||
link, err := getLinkSource(target, fi, inodes)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get hardlink")
|
||||
}
|
||||
if link != "" {
|
||||
if err := os.Link(link, target); err != nil {
|
||||
return errors.Wrap(err, "failed to create hard link")
|
||||
}
|
||||
} else if err := CopyFile(target, source); err != nil {
|
||||
return errors.Wrap(err, "failed to copy files")
|
||||
}
|
||||
case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink:
|
||||
link, err := os.Readlink(source)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to read link: %s", source)
|
||||
}
|
||||
if err := os.Symlink(link, target); err != nil {
|
||||
return errors.Wrapf(err, "failed to create symlink: %s", target)
|
||||
}
|
||||
case (fi.Mode() & os.ModeDevice) == os.ModeDevice:
|
||||
if err := copyDevice(target, fi); err != nil {
|
||||
return errors.Wrapf(err, "failed to create device")
|
||||
}
|
||||
default:
|
||||
// TODO: Support pipes and sockets
|
||||
return errors.Wrapf(err, "unsupported mode %s", fi.Mode())
|
||||
}
|
||||
if err := copyFileInfo(fi, target); err != nil {
|
||||
return errors.Wrap(err, "failed to copy file info")
|
||||
}
|
||||
|
||||
if err := copyXAttrs(target, source, o.xeh); err != nil {
|
||||
return errors.Wrap(err, "failed to copy xattrs")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyFile copies the source file to the target.
|
||||
// The most efficient means of copying is used for the platform.
|
||||
func CopyFile(target, source string) error {
|
||||
src, err := os.Open(source)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open source %s", source)
|
||||
}
|
||||
defer src.Close()
|
||||
tgt, err := os.Create(target)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open target %s", target)
|
||||
}
|
||||
defer tgt.Close()
|
||||
|
||||
return copyFileContent(tgt, src)
|
||||
}
|
||||
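For orientation only: CopyDir above accepts optional CopyDirOpt values, so a caller that wants to tolerate xattr failures can pass WithAllowXAttrErrors. This sketch is not part of the change; the main package and paths are placeholders:

package main

import (
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Recursively copy /tmp/src to /tmp/dst, ignoring xattr errors
	// (useful on filesystems without xattr support).
	if err := fs.CopyDir("/tmp/dst", "/tmp/src", fs.WithAllowXAttrErrors()); err != nil {
		log.Fatalf("copy failed: %v", err)
	}
}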
147 vendor/github.com/containerd/continuity/fs/copy_linux.go generated vendored Normal file
@ -0,0 +1,147 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||
st := fi.Sys().(*syscall.Stat_t)
|
||||
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
|
||||
if os.IsPermission(err) {
|
||||
// Normally if uid/gid are the same this would be a no-op, but some
|
||||
// filesystems may still return EPERM... for instance NFS does this.
|
||||
// In such a case, this is not an error.
|
||||
if dstStat, err2 := os.Lstat(name); err2 == nil {
|
||||
st2 := dstStat.Sys().(*syscall.Stat_t)
|
||||
if st.Uid == st2.Uid && st.Gid == st2.Gid {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to chown %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
|
||||
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
timespec := []unix.Timespec{
|
||||
unix.NsecToTimespec(syscall.TimespecToNsec(StatAtime(st))),
|
||||
unix.NsecToTimespec(syscall.TimespecToNsec(StatMtime(st))),
|
||||
}
|
||||
if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
|
||||
return errors.Wrapf(err, "failed to utime %s", name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const maxSSizeT = int64(^uint(0) >> 1)
|
||||
|
||||
func copyFileContent(dst, src *os.File) error {
|
||||
st, err := src.Stat()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to stat source")
|
||||
}
|
||||
|
||||
size := st.Size()
|
||||
first := true
|
||||
srcFd := int(src.Fd())
|
||||
dstFd := int(dst.Fd())
|
||||
|
||||
for size > 0 {
|
||||
// Ensure that we are never trying to copy more than SSIZE_MAX at a
|
||||
// time and at the same time avoids overflows when the file is larger
|
||||
// than 4GB on 32-bit systems.
|
||||
var copySize int
|
||||
if size > maxSSizeT {
|
||||
copySize = int(maxSSizeT)
|
||||
} else {
|
||||
copySize = int(size)
|
||||
}
|
||||
n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0)
|
||||
if err != nil {
|
||||
if (err != unix.ENOSYS && err != unix.EXDEV) || !first {
|
||||
return errors.Wrap(err, "copy file range failed")
|
||||
}
|
||||
|
||||
buf := bufferPool.Get().(*[]byte)
|
||||
_, err = io.CopyBuffer(dst, src, *buf)
|
||||
bufferPool.Put(buf)
|
||||
return errors.Wrap(err, "userspace copy failed")
|
||||
}
|
||||
|
||||
first = false
|
||||
size -= int64(n)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
|
||||
xattrKeys, err := sysx.LListxattr(src)
|
||||
if err != nil {
|
||||
e := errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||
if xeh != nil {
|
||||
e = xeh(dst, src, "", e)
|
||||
}
|
||||
return e
|
||||
}
|
||||
for _, xattr := range xattrKeys {
|
||||
data, err := sysx.LGetxattr(src, xattr)
|
||||
if err != nil {
|
||||
e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||
if xeh != nil {
|
||||
if e = xeh(dst, src, xattr, e); e == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
|
||||
e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||
if xeh != nil {
|
||||
if e = xeh(dst, src, xattr, e); e == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyDevice(dst string, fi os.FileInfo) error {
|
||||
st, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return errors.New("unsupported stat type")
|
||||
}
|
||||
return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
|
||||
}
|
||||
112 vendor/github.com/containerd/continuity/fs/copy_unix.go generated vendored Normal file
@ -0,0 +1,112 @@
// +build darwin freebsd openbsd solaris
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||
st := fi.Sys().(*syscall.Stat_t)
|
||||
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
|
||||
if os.IsPermission(err) {
|
||||
// Normally if uid/gid are the same this would be a no-op, but some
|
||||
// filesystems may still return EPERM... for instance NFS does this.
|
||||
// In such a case, this is not an error.
|
||||
if dstStat, err2 := os.Lstat(name); err2 == nil {
|
||||
st2 := dstStat.Sys().(*syscall.Stat_t)
|
||||
if st.Uid == st2.Uid && st.Gid == st2.Gid {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to chown %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
|
||||
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)}
|
||||
if err := syscall.UtimesNano(name, timespec); err != nil {
|
||||
return errors.Wrapf(err, "failed to utime %s", name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyFileContent(dst, src *os.File) error {
|
||||
buf := bufferPool.Get().(*[]byte)
|
||||
_, err := io.CopyBuffer(dst, src, *buf)
|
||||
bufferPool.Put(buf)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
|
||||
xattrKeys, err := sysx.LListxattr(src)
|
||||
if err != nil {
|
||||
e := errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||
if xeh != nil {
|
||||
e = xeh(dst, src, "", e)
|
||||
}
|
||||
return e
|
||||
}
|
||||
for _, xattr := range xattrKeys {
|
||||
data, err := sysx.LGetxattr(src, xattr)
|
||||
if err != nil {
|
||||
e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||
if xeh != nil {
|
||||
if e = xeh(dst, src, xattr, e); e == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
|
||||
e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||
if xeh != nil {
|
||||
if e = xeh(dst, src, xattr, e); e == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyDevice(dst string, fi os.FileInfo) error {
|
||||
st, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return errors.New("unsupported stat type")
|
||||
}
|
||||
return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
|
||||
}
|
||||
49 vendor/github.com/containerd/continuity/fs/copy_windows.go generated vendored Normal file
@ -0,0 +1,49 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||
}
|
||||
|
||||
// TODO: copy windows specific metadata
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyFileContent(dst, src *os.File) error {
|
||||
buf := bufferPool.Get().(*[]byte)
|
||||
_, err := io.CopyBuffer(dst, src, *buf)
|
||||
bufferPool.Put(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyDevice(dst string, fi os.FileInfo) error {
|
||||
return errors.New("device copy not supported")
|
||||
}
|
||||
326 vendor/github.com/containerd/continuity/fs/diff.go generated vendored Normal file
@ -0,0 +1,326 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ChangeKind is the type of modification that
|
||||
// a change is making.
|
||||
type ChangeKind int
|
||||
|
||||
const (
|
||||
// ChangeKindUnmodified represents an unmodified
|
||||
// file
|
||||
ChangeKindUnmodified = iota
|
||||
|
||||
// ChangeKindAdd represents an addition of
|
||||
// a file
|
||||
ChangeKindAdd
|
||||
|
||||
// ChangeKindModify represents a change to
|
||||
// an existing file
|
||||
ChangeKindModify
|
||||
|
||||
// ChangeKindDelete represents a delete of
|
||||
// a file
|
||||
ChangeKindDelete
|
||||
)
|
||||
|
||||
func (k ChangeKind) String() string {
|
||||
switch k {
|
||||
case ChangeKindUnmodified:
|
||||
return "unmodified"
|
||||
case ChangeKindAdd:
|
||||
return "add"
|
||||
case ChangeKindModify:
|
||||
return "modify"
|
||||
case ChangeKindDelete:
|
||||
return "delete"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// Change represents single change between a diff and its parent.
|
||||
type Change struct {
|
||||
Kind ChangeKind
|
||||
Path string
|
||||
}
|
||||
|
||||
// ChangeFunc is the type of function called for each change
|
||||
// computed during a directory changes calculation.
|
||||
type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error
|
||||
|
||||
// Changes computes changes between two directories calling the
|
||||
// given change function for each computed change. The first
|
||||
// directory is intended to the base directory and second
|
||||
// directory the changed directory.
|
||||
//
|
||||
// The change callback is called by the order of path names and
|
||||
// should be appliable in that order.
|
||||
// Due to this apply ordering, the following is true
|
||||
// - Removed directory trees only create a single change for the root
|
||||
// directory removed. Remaining changes are implied.
|
||||
// - A directory which is modified to become a file will not have
|
||||
// delete entries for sub-path items, their removal is implied
|
||||
// by the removal of the parent directory.
|
||||
//
|
||||
// Opaque directories will not be treated specially and each file
|
||||
// removed from the base directory will show up as a removal.
|
||||
//
|
||||
// File content comparisons will be done on files which have timestamps
|
||||
// which may have been truncated. If either of the files being compared
|
||||
// has a zero value nanosecond value, each byte will be compared for
|
||||
// differences. If 2 files have the same seconds value but different
|
||||
// nanosecond values where one of those values is zero, the files will
|
||||
// be considered unchanged if the content is the same. This behavior
|
||||
// is to account for timestamp truncation during archiving.
|
||||
func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error {
|
||||
if a == "" {
|
||||
logrus.Debugf("Using single walk diff for %s", b)
|
||||
return addDirChanges(ctx, changeFn, b)
|
||||
} else if diffOptions := detectDirDiff(b, a); diffOptions != nil {
|
||||
logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a)
|
||||
return diffDirChanges(ctx, changeFn, a, diffOptions)
|
||||
}
|
||||
|
||||
logrus.Debugf("Using double walk diff for %s from %s", b, a)
|
||||
return doubleWalkDiff(ctx, changeFn, a, b)
|
||||
}
|
||||
|
||||
func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error {
|
||||
return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
path, err = filepath.Rel(root, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path = filepath.Join(string(os.PathSeparator), path)
|
||||
|
||||
// Skip root
|
||||
if path == string(os.PathSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return changeFn(ChangeKindAdd, path, f, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// diffDirOptions is used when the diff can be directly calculated from
|
||||
// a diff directory to its base, without walking both trees.
|
||||
type diffDirOptions struct {
|
||||
diffDir string
|
||||
skipChange func(string) (bool, error)
|
||||
deleteChange func(string, string, os.FileInfo) (string, error)
|
||||
}
|
||||
|
||||
// diffDirChanges walks the diff directory and compares changes against the base.
|
||||
func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error {
|
||||
changedDirs := make(map[string]struct{})
|
||||
return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
path, err = filepath.Rel(o.diffDir, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path = filepath.Join(string(os.PathSeparator), path)
|
||||
|
||||
// Skip root
|
||||
if path == string(os.PathSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: handle opaqueness, start new double walker at this
|
||||
// location to get deletes, and skip tree in single walker
|
||||
|
||||
if o.skipChange != nil {
|
||||
if skip, err := o.skipChange(path); skip {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var kind ChangeKind
|
||||
|
||||
deletedFile, err := o.deleteChange(o.diffDir, path, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find out what kind of modification happened
|
||||
if deletedFile != "" {
|
||||
path = deletedFile
|
||||
kind = ChangeKindDelete
|
||||
f = nil
|
||||
} else {
|
||||
// Otherwise, the file was added
|
||||
kind = ChangeKindAdd
|
||||
|
||||
// ...Unless it already existed in a base, in which case, it's a modification
|
||||
stat, err := os.Stat(filepath.Join(base, path))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if err == nil {
|
||||
// The file existed in the base, so that's a modification
|
||||
|
||||
// However, if it's a directory, maybe it wasn't actually modified.
|
||||
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
|
||||
if stat.IsDir() && f.IsDir() {
|
||||
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
|
||||
// Both directories are the same, don't record the change
|
||||
return nil
|
||||
}
|
||||
}
|
||||
kind = ChangeKindModify
|
||||
}
|
||||
}
|
||||
|
||||
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
|
||||
// This block is here to ensure the change is recorded even if the
|
||||
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
|
||||
// Check https://github.com/docker/docker/pull/13590 for details.
|
||||
if f.IsDir() {
|
||||
changedDirs[path] = struct{}{}
|
||||
}
|
||||
if kind == ChangeKindAdd || kind == ChangeKindDelete {
|
||||
parent := filepath.Dir(path)
|
||||
if _, ok := changedDirs[parent]; !ok && parent != "/" {
|
||||
pi, err := os.Stat(filepath.Join(o.diffDir, parent))
|
||||
if err := changeFn(ChangeKindModify, parent, pi, err); err != nil {
|
||||
return err
|
||||
}
|
||||
changedDirs[parent] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return changeFn(kind, path, f, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// doubleWalkDiff walks both directories to create a diff
|
||||
func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) {
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
var (
|
||||
c1 = make(chan *currentPath)
|
||||
c2 = make(chan *currentPath)
|
||||
|
||||
f1, f2 *currentPath
|
||||
rmdir string
|
||||
)
|
||||
g.Go(func() error {
|
||||
defer close(c1)
|
||||
return pathWalk(ctx, a, c1)
|
||||
})
|
||||
g.Go(func() error {
|
||||
defer close(c2)
|
||||
return pathWalk(ctx, b, c2)
|
||||
})
|
||||
g.Go(func() error {
|
||||
for c1 != nil || c2 != nil {
|
||||
if f1 == nil && c1 != nil {
|
||||
f1, err = nextPath(ctx, c1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if f1 == nil {
|
||||
c1 = nil
|
||||
}
|
||||
}
|
||||
|
||||
if f2 == nil && c2 != nil {
|
||||
f2, err = nextPath(ctx, c2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if f2 == nil {
|
||||
c2 = nil
|
||||
}
|
||||
}
|
||||
if f1 == nil && f2 == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var f os.FileInfo
|
||||
k, p := pathChange(f1, f2)
|
||||
switch k {
|
||||
case ChangeKindAdd:
|
||||
if rmdir != "" {
|
||||
rmdir = ""
|
||||
}
|
||||
f = f2.f
|
||||
f2 = nil
|
||||
case ChangeKindDelete:
|
||||
// Check if this file is already removed by being
|
||||
// under of a removed directory
|
||||
if rmdir != "" && strings.HasPrefix(f1.path, rmdir) {
|
||||
f1 = nil
|
||||
continue
|
||||
} else if f1.f.IsDir() {
|
||||
rmdir = f1.path + string(os.PathSeparator)
|
||||
} else if rmdir != "" {
|
||||
rmdir = ""
|
||||
}
|
||||
f1 = nil
|
||||
case ChangeKindModify:
|
||||
same, err := sameFile(f1, f2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if f1.f.IsDir() && !f2.f.IsDir() {
|
||||
rmdir = f1.path + string(os.PathSeparator)
|
||||
} else if rmdir != "" {
|
||||
rmdir = ""
|
||||
}
|
||||
f = f2.f
|
||||
f1 = nil
|
||||
f2 = nil
|
||||
if same {
|
||||
if !isLinked(f) {
|
||||
continue
|
||||
}
|
||||
k = ChangeKindUnmodified
|
||||
}
|
||||
}
|
||||
if err := changeFn(k, p, f, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return g.Wait()
|
||||
}
|
||||
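As a sketch of how the Changes API above is driven (illustrative only; the two directory paths and the main package are placeholders, not part of this change):

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Print every add/modify/delete computed between a base and a changed directory.
	err := fs.Changes(context.Background(), "/tmp/base", "/tmp/changed",
		func(kind fs.ChangeKind, path string, fi os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			fmt.Printf("%-10s %s\n", kind, path)
			return nil
		})
	if err != nil {
		fmt.Fprintln(os.Stderr, "diff failed:", err)
	}
}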
74 vendor/github.com/containerd/continuity/fs/diff_unix.go generated vendored Normal file
@ -0,0 +1,74 @@
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// detectDirDiff returns diff dir options if a directory could
|
||||
// be found in the mount info for upper which is the direct
|
||||
// diff with the provided lower directory
|
||||
func detectDirDiff(upper, lower string) *diffDirOptions {
|
||||
// TODO: get mount options for upper
|
||||
// TODO: detect AUFS
|
||||
// TODO: detect overlay
|
||||
return nil
|
||||
}
|
||||
|
||||
// compareSysStat returns whether the stats are equivalent,
|
||||
// whether the files are considered the same file, and
|
||||
// an error
|
||||
func compareSysStat(s1, s2 interface{}) (bool, error) {
|
||||
ls1, ok := s1.(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
ls2, ok := s2.(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil
|
||||
}
|
||||
|
||||
func compareCapabilities(p1, p2 string) (bool, error) {
|
||||
c1, err := sysx.LGetxattr(p1, "security.capability")
|
||||
if err != nil && err != sysx.ENODATA {
|
||||
return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
|
||||
}
|
||||
c2, err := sysx.LGetxattr(p2, "security.capability")
|
||||
if err != nil && err != sysx.ENODATA {
|
||||
return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
|
||||
}
|
||||
return bytes.Equal(c1, c2), nil
|
||||
}
|
||||
|
||||
func isLinked(f os.FileInfo) bool {
|
||||
s, ok := f.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return !f.IsDir() && s.Nlink > 1
|
||||
}
|
||||
48 vendor/github.com/containerd/continuity/fs/diff_windows.go generated vendored Normal file
@ -0,0 +1,48 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func detectDirDiff(upper, lower string) *diffDirOptions {
|
||||
return nil
|
||||
}
|
||||
|
||||
func compareSysStat(s1, s2 interface{}) (bool, error) {
|
||||
f1, ok := s1.(windows.Win32FileAttributeData)
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
f2, ok := s2.(windows.Win32FileAttributeData)
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
return f1.FileAttributes == f2.FileAttributes, nil
|
||||
}
|
||||
|
||||
func compareCapabilities(p1, p2 string) (bool, error) {
|
||||
// TODO: Use windows equivalent
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func isLinked(os.FileInfo) bool {
|
||||
return false
|
||||
}
|
||||
103 vendor/github.com/containerd/continuity/fs/dtype_linux.go generated vendored Normal file
@ -0,0 +1,103 @@
// +build linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func locateDummyIfEmpty(path string) (string, error) {
|
||||
children, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(children) != 0 {
|
||||
return "", nil
|
||||
}
|
||||
dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
name := dummyFile.Name()
|
||||
err = dummyFile.Close()
|
||||
return name, err
|
||||
}
|
||||
|
||||
// SupportsDType returns whether the filesystem mounted on path supports d_type
|
||||
func SupportsDType(path string) (bool, error) {
|
||||
// locate dummy so that we have at least one dirent
|
||||
dummy, err := locateDummyIfEmpty(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if dummy != "" {
|
||||
defer os.Remove(dummy)
|
||||
}
|
||||
|
||||
visited := 0
|
||||
supportsDType := true
|
||||
fn := func(ent *syscall.Dirent) bool {
|
||||
visited++
|
||||
if ent.Type == syscall.DT_UNKNOWN {
|
||||
supportsDType = false
|
||||
// stop iteration
|
||||
return true
|
||||
}
|
||||
// continue iteration
|
||||
return false
|
||||
}
|
||||
if err = iterateReadDir(path, fn); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if visited == 0 {
|
||||
return false, fmt.Errorf("did not hit any dirent during iteration %s", path)
|
||||
}
|
||||
return supportsDType, nil
|
||||
}
|
||||
|
||||
func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error {
|
||||
d, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer d.Close()
|
||||
fd := int(d.Fd())
|
||||
buf := make([]byte, 4096)
|
||||
for {
|
||||
nbytes, err := syscall.ReadDirent(fd, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if nbytes == 0 {
|
||||
break
|
||||
}
|
||||
for off := 0; off < nbytes; {
|
||||
ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off]))
|
||||
if stop := fn(ent); stop {
|
||||
return nil
|
||||
}
|
||||
off += int(ent.Reclen)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
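For reference, SupportsDType above is the Linux-only probe that storage code uses to check d_type support on a filesystem. A hypothetical caller (the path is a placeholder and this snippet is not part of the change) would look like:

// +build linux

package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Probe whether the filesystem backing this directory reports d_type.
	ok, err := fs.SupportsDType("/var/lib/docker")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("d_type supported:", ok)
}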
38 vendor/github.com/containerd/continuity/fs/du.go generated vendored Normal file
@ -0,0 +1,38 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package fs

import "context"

// Usage of disk information
type Usage struct {
	Inodes int64
	Size   int64
}

// DiskUsage counts the number of inodes and disk usage for the resources under
// path.
func DiskUsage(ctx context.Context, roots ...string) (Usage, error) {
	return diskUsage(ctx, roots...)
}

// DiffUsage counts the numbers of inodes and disk usage in the
// diff between the 2 directories. The first path is intended
// as the base directory and the second as the changed directory.
func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
	return diffUsage(ctx, a, b)
}
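A minimal, illustrative caller of the DiskUsage helper above (the root paths and main package are placeholders, not part of this change):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Sum size and inode count across one or more directory roots.
	usage, err := fs.DiskUsage(context.Background(), "/tmp/layer1", "/tmp/layer2")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d inodes, %d bytes\n", usage.Inodes, usage.Size)
}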
110 vendor/github.com/containerd/continuity/fs/du_unix.go generated vendored Normal file
@ -0,0 +1,110 @@
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type inode struct {
|
||||
// TODO(stevvooe): Can probably reduce memory usage by not tracking
|
||||
// device, but we can leave this right for now.
|
||||
dev, ino uint64
|
||||
}
|
||||
|
||||
func newInode(stat *syscall.Stat_t) inode {
|
||||
return inode{
|
||||
// Dev is uint32 on darwin/bsd, uint64 on linux/solaris
|
||||
dev: uint64(stat.Dev), // nolint: unconvert
|
||||
// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
|
||||
ino: uint64(stat.Ino), // nolint: unconvert
|
||||
}
|
||||
}
|
||||
|
||||
func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
|
||||
|
||||
var (
|
||||
size int64
|
||||
inodes = map[inode]struct{}{} // expensive!
|
||||
)
|
||||
|
||||
for _, root := range roots {
|
||||
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
inoKey := newInode(fi.Sys().(*syscall.Stat_t))
|
||||
if _, ok := inodes[inoKey]; !ok {
|
||||
inodes[inoKey] = struct{}{}
|
||||
size += fi.Size()
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Inodes: int64(len(inodes)),
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func diffUsage(ctx context.Context, a, b string) (Usage, error) {
|
||||
var (
|
||||
size int64
|
||||
inodes = map[inode]struct{}{} // expensive!
|
||||
)
|
||||
|
||||
if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if kind == ChangeKindAdd || kind == ChangeKindModify {
|
||||
inoKey := newInode(fi.Sys().(*syscall.Stat_t))
|
||||
if _, ok := inodes[inoKey]; !ok {
|
||||
inodes[inoKey] = struct{}{}
|
||||
size += fi.Size()
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Inodes: int64(len(inodes)),
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
||||
82 vendor/github.com/containerd/continuity/fs/du_windows.go generated vendored Normal file
@ -0,0 +1,82 @@
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
|
||||
var (
|
||||
size int64
|
||||
)
|
||||
|
||||
// TODO(stevvooe): Support inodes (or equivalent) for windows.
|
||||
|
||||
for _, root := range roots {
|
||||
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
size += fi.Size()
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func diffUsage(ctx context.Context, a, b string) (Usage, error) {
|
||||
var (
|
||||
size int64
|
||||
)
|
||||
|
||||
if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if kind == ChangeKindAdd || kind == ChangeKindModify {
|
||||
size += fi.Size()
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
||||
43 vendor/github.com/containerd/continuity/fs/hardlink.go generated vendored Normal file
@ -0,0 +1,43 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package fs

import "os"

// GetLinkInfo returns an identifier representing the node a hardlink is pointing
// to. If the file is not hard linked then 0 will be returned.
func GetLinkInfo(fi os.FileInfo) (uint64, bool) {
	return getLinkInfo(fi)
}

// getLinkSource returns a path for the given name and
// file info to its link source in the provided inode
// map. If the given file name is not in the map and
// has other links, it is added to the inode map
// to be a source for other link locations.
func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
	inode, isHardlink := getLinkInfo(fi)
	if !isHardlink {
		return "", nil
	}

	path, ok := inodes[inode]
	if !ok {
		inodes[inode] = name
	}
	return path, nil
}
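GetLinkInfo is the exported entry point for the platform-specific helpers that follow. A usage sketch (illustration only, not part of this diff): grouping hardlinked paths by inode while walking a tree.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containerd/continuity/fs"
)

func main() {
	groups := map[uint64][]string{} // inode -> paths sharing it
	err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error {
		if err != nil || fi.IsDir() {
			return err
		}
		if ino, isHardlink := fs.GetLinkInfo(fi); isHardlink {
			groups[ino] = append(groups[ino], path)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for ino, paths := range groups {
		if len(paths) > 1 {
			fmt.Printf("inode %d: %v\n", ino, paths)
		}
	}
}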
34 vendor/github.com/containerd/continuity/fs/hardlink_unix.go generated vendored Normal file
@ -0,0 +1,34 @@
// +build !windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package fs

import (
	"os"
	"syscall"
)

func getLinkInfo(fi os.FileInfo) (uint64, bool) {
	s, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, false
	}

	// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
	return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert
}
23 vendor/github.com/containerd/continuity/fs/hardlink_windows.go generated vendored Normal file
@ -0,0 +1,23 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package fs

import "os"

func getLinkInfo(fi os.FileInfo) (uint64, bool) {
	return 0, false
}
311 vendor/github.com/containerd/continuity/fs/path.go generated vendored Normal file
@ -0,0 +1,311 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
errTooManyLinks = errors.New("too many links")
|
||||
)
|
||||
|
||||
type currentPath struct {
|
||||
path string
|
||||
f os.FileInfo
|
||||
fullPath string
|
||||
}
|
||||
|
||||
func pathChange(lower, upper *currentPath) (ChangeKind, string) {
|
||||
if lower == nil {
|
||||
if upper == nil {
|
||||
panic("cannot compare nil paths")
|
||||
}
|
||||
return ChangeKindAdd, upper.path
|
||||
}
|
||||
if upper == nil {
|
||||
return ChangeKindDelete, lower.path
|
||||
}
|
||||
|
||||
switch i := directoryCompare(lower.path, upper.path); {
|
||||
case i < 0:
|
||||
// File in lower that is not in upper
|
||||
return ChangeKindDelete, lower.path
|
||||
case i > 0:
|
||||
// File in upper that is not in lower
|
||||
return ChangeKindAdd, upper.path
|
||||
default:
|
||||
return ChangeKindModify, upper.path
|
||||
}
|
||||
}
|
||||
|
||||
func directoryCompare(a, b string) int {
|
||||
l := len(a)
|
||||
if len(b) < l {
|
||||
l = len(b)
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
c1, c2 := a[i], b[i]
|
||||
if c1 == filepath.Separator {
|
||||
c1 = byte(0)
|
||||
}
|
||||
if c2 == filepath.Separator {
|
||||
c2 = byte(0)
|
||||
}
|
||||
if c1 < c2 {
|
||||
return -1
|
||||
}
|
||||
if c1 > c2 {
|
||||
return +1
|
||||
}
|
||||
}
|
||||
if len(a) < len(b) {
|
||||
return -1
|
||||
}
|
||||
if len(a) > len(b) {
|
||||
return +1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func sameFile(f1, f2 *currentPath) (bool, error) {
|
||||
if os.SameFile(f1.f, f2.f) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys())
|
||||
if err != nil || !equalStat {
|
||||
return equalStat, err
|
||||
}
|
||||
|
||||
if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq {
|
||||
return eq, err
|
||||
}
|
||||
|
||||
// If not a directory also check size, modtime, and content
|
||||
if !f1.f.IsDir() {
|
||||
if f1.f.Size() != f2.f.Size() {
|
||||
return false, nil
|
||||
}
|
||||
t1 := f1.f.ModTime()
|
||||
t2 := f2.f.ModTime()
|
||||
|
||||
if t1.Unix() != t2.Unix() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If the timestamp may have been truncated in both of the
|
||||
// files, check content of file to determine difference
|
||||
if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 {
|
||||
if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink {
|
||||
return compareSymlinkTarget(f1.fullPath, f2.fullPath)
|
||||
}
|
||||
if f1.f.Size() == 0 { // if file sizes are zero length, the files are the same by definition
|
||||
return true, nil
|
||||
}
|
||||
return compareFileContent(f1.fullPath, f2.fullPath)
|
||||
} else if t1.Nanosecond() != t2.Nanosecond() {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func compareSymlinkTarget(p1, p2 string) (bool, error) {
|
||||
t1, err := os.Readlink(p1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
t2, err := os.Readlink(p2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return t1 == t2, nil
|
||||
}
|
||||
|
||||
const compareChuckSize = 32 * 1024
|
||||
|
||||
// compareFileContent compares the content of 2 same sized files
|
||||
// by comparing each byte.
|
||||
func compareFileContent(p1, p2 string) (bool, error) {
|
||||
f1, err := os.Open(p1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer f1.Close()
|
||||
f2, err := os.Open(p2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer f2.Close()
|
||||
|
||||
b1 := make([]byte, compareChuckSize)
|
||||
b2 := make([]byte, compareChuckSize)
|
||||
for {
|
||||
n1, err1 := f1.Read(b1)
|
||||
if err1 != nil && err1 != io.EOF {
|
||||
return false, err1
|
||||
}
|
||||
n2, err2 := f2.Read(b2)
|
||||
if err2 != nil && err2 != io.EOF {
|
||||
return false, err2
|
||||
}
|
||||
if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) {
|
||||
return false, nil
|
||||
}
|
||||
if err1 == io.EOF && err2 == io.EOF {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error {
|
||||
return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
path, err = filepath.Rel(root, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path = filepath.Join(string(os.PathSeparator), path)
|
||||
|
||||
// Skip root
|
||||
if path == string(os.PathSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
p := ¤tPath{
|
||||
path: path,
|
||||
f: f,
|
||||
fullPath: filepath.Join(root, path),
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case pathC <- p:
|
||||
return nil
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case p := <-pathC:
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
// RootPath joins a path with a root, evaluating and bounding any
|
||||
// symlink to the root directory.
|
||||
func RootPath(root, path string) (string, error) {
|
||||
if path == "" {
|
||||
return root, nil
|
||||
}
|
||||
var linksWalked int // to protect against cycles
|
||||
for {
|
||||
i := linksWalked
|
||||
newpath, err := walkLinks(root, path, &linksWalked)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
path = newpath
|
||||
if i == linksWalked {
|
||||
newpath = filepath.Join("/", newpath)
|
||||
if path == newpath {
|
||||
return filepath.Join(root, newpath), nil
|
||||
}
|
||||
path = newpath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) {
|
||||
if *linksWalked > 255 {
|
||||
return "", false, errTooManyLinks
|
||||
}
|
||||
|
||||
path = filepath.Join("/", path)
|
||||
if path == "/" {
|
||||
return path, false, nil
|
||||
}
|
||||
realPath := filepath.Join(root, path)
|
||||
|
||||
fi, err := os.Lstat(realPath)
|
||||
if err != nil {
|
||||
// If path does not yet exist, treat as non-symlink
|
||||
if os.IsNotExist(err) {
|
||||
return path, false, nil
|
||||
}
|
||||
return "", false, err
|
||||
}
|
||||
if fi.Mode()&os.ModeSymlink == 0 {
|
||||
return path, false, nil
|
||||
}
|
||||
newpath, err = os.Readlink(realPath)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
*linksWalked++
|
||||
return newpath, true, nil
|
||||
}
|
||||
|
||||
func walkLinks(root, path string, linksWalked *int) (string, error) {
|
||||
switch dir, file := filepath.Split(path); {
|
||||
case dir == "":
|
||||
newpath, _, err := walkLink(root, file, linksWalked)
|
||||
return newpath, err
|
||||
case file == "":
|
||||
if os.IsPathSeparator(dir[len(dir)-1]) {
|
||||
if dir == "/" {
|
||||
return dir, nil
|
||||
}
|
||||
return walkLinks(root, dir[:len(dir)-1], linksWalked)
|
||||
}
|
||||
newpath, _, err := walkLink(root, dir, linksWalked)
|
||||
return newpath, err
|
||||
default:
|
||||
newdir, err := walkLinks(root, dir, linksWalked)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !islink {
|
||||
return newpath, nil
|
||||
}
|
||||
if filepath.IsAbs(newpath) {
|
||||
return newpath, nil
|
||||
}
|
||||
return filepath.Join(newdir, newpath), nil
|
||||
}
|
||||
}
|
||||
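RootPath above is the symlink-bounded join used when resolving paths inside a root filesystem. A small usage sketch (illustration only, not part of this diff; the rootfs path is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// "/tmp/rootfs" is a placeholder; point it at any directory tree.
	// Even if "etc/passwd" were a symlink to "/etc/passwd", RootPath
	// keeps the resolved result under the given root.
	resolved, err := fs.RootPath("/tmp/rootfs", "etc/passwd")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved) // e.g. /tmp/rootfs/etc/passwd
}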
44 vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go generated vendored Normal file
@ -0,0 +1,44 @@
// +build darwin freebsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// StatAtime returns the access time from a stat struct
|
||||
func StatAtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Atimespec
|
||||
}
|
||||
|
||||
// StatCtime returns the created time from a stat struct
|
||||
func StatCtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Ctimespec
|
||||
}
|
||||
|
||||
// StatMtime returns the modified time from a stat struct
|
||||
func StatMtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Mtimespec
|
||||
}
|
||||
|
||||
// StatATimeAsTime returns the access time as a time.Time
|
||||
func StatATimeAsTime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert
|
||||
}
|
||||
45 vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go generated vendored Normal file
@ -0,0 +1,45 @@
// +build linux openbsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// StatAtime returns the Atim
|
||||
func StatAtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Atim
|
||||
}
|
||||
|
||||
// StatCtime returns the Ctim
|
||||
func StatCtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Ctim
|
||||
}
|
||||
|
||||
// StatMtime returns the Mtim
|
||||
func StatMtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Mtim
|
||||
}
|
||||
|
||||
// StatATimeAsTime returns st.Atim as a time.Time
|
||||
func StatATimeAsTime(st *syscall.Stat_t) time.Time {
|
||||
// The int64 conversions ensure the line compiles for 32-bit systems as well.
|
||||
return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert
|
||||
}
|
||||
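The Stat* helpers above exist because the underlying struct fields are named differently per platform (Atimespec vs. Atim). A brief sketch of reading timestamps portably through them (illustration only, not part of this diff; Unix-only, assuming fi.Sys() is a *syscall.Stat_t):

package main

import (
	"fmt"
	"log"
	"os"
	"syscall"

	"github.com/containerd/continuity/fs"
)

func main() {
	fi, err := os.Stat("/etc/hosts") // any existing file works
	if err != nil {
		log.Fatal(err)
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		log.Fatal("not a Unix stat")
	}
	fmt.Println("atime:", fs.StatATimeAsTime(st))
	fmt.Println("mtime (sec):", fs.StatMtime(st).Sec)
}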
29 vendor/github.com/containerd/continuity/fs/time.go generated vendored Normal file
@ -0,0 +1,29 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package fs

import "time"

// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files, we handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
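sameFsTime is unexported, but the tar timestamp truncation it tolerates is easy to reproduce. A short sketch of the same comparison rule (illustration only, not part of this diff; the helper is re-implemented here):

package main

import (
	"fmt"
	"time"
)

// sameFsTime mirrors the rule above: equal, or equal to the second when
// either side has lost its sub-second precision.
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	exact := time.Date(2020, 1, 2, 3, 4, 5, 123456789, time.UTC)
	viaTar := exact.Truncate(time.Second)  // tar round-trips drop nanoseconds
	fmt.Println(exact.Equal(viaTar))       // false
	fmt.Println(sameFsTime(exact, viaTar)) // true
}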
101 vendor/github.com/containerd/continuity/pathdriver/path_driver.go generated vendored Normal file
@ -0,0 +1,101 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pathdriver
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// PathDriver provides all of the path manipulation functions in a common
|
||||
// interface. The context should call these and never use the `filepath`
|
||||
// package or any other package to manipulate paths.
|
||||
type PathDriver interface {
|
||||
Join(paths ...string) string
|
||||
IsAbs(path string) bool
|
||||
Rel(base, target string) (string, error)
|
||||
Base(path string) string
|
||||
Dir(path string) string
|
||||
Clean(path string) string
|
||||
Split(path string) (dir, file string)
|
||||
Separator() byte
|
||||
Abs(path string) (string, error)
|
||||
Walk(string, filepath.WalkFunc) error
|
||||
FromSlash(path string) string
|
||||
ToSlash(path string) string
|
||||
Match(pattern, name string) (matched bool, err error)
|
||||
}
|
||||
|
||||
// pathDriver is a simple default implementation calls the filepath package.
|
||||
type pathDriver struct{}
|
||||
|
||||
// LocalPathDriver is the exported pathDriver struct for convenience.
|
||||
var LocalPathDriver PathDriver = &pathDriver{}
|
||||
|
||||
func (*pathDriver) Join(paths ...string) string {
|
||||
return filepath.Join(paths...)
|
||||
}
|
||||
|
||||
func (*pathDriver) IsAbs(path string) bool {
|
||||
return filepath.IsAbs(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Rel(base, target string) (string, error) {
|
||||
return filepath.Rel(base, target)
|
||||
}
|
||||
|
||||
func (*pathDriver) Base(path string) string {
|
||||
return filepath.Base(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Dir(path string) string {
|
||||
return filepath.Dir(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Clean(path string) string {
|
||||
return filepath.Clean(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Split(path string) (dir, file string) {
|
||||
return filepath.Split(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Separator() byte {
|
||||
return filepath.Separator
|
||||
}
|
||||
|
||||
func (*pathDriver) Abs(path string) (string, error) {
|
||||
return filepath.Abs(path)
|
||||
}
|
||||
|
||||
// Note that filepath.Walk calls os.Stat, so if the context wants to
|
||||
// to call Driver.Stat() for Walk, they need to create a new struct that
|
||||
// overrides this method.
|
||||
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
|
||||
return filepath.Walk(root, walkFn)
|
||||
}
|
||||
|
||||
func (*pathDriver) FromSlash(path string) string {
|
||||
return filepath.FromSlash(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) ToSlash(path string) string {
|
||||
return filepath.ToSlash(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Match(pattern, name string) (bool, error) {
|
||||
return filepath.Match(pattern, name)
|
||||
}
|
||||
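LocalPathDriver is the default PathDriver implementation, delegating every method to path/filepath. A usage sketch (illustration only, not part of this diff):

package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
)

func main() {
	var d pathdriver.PathDriver = pathdriver.LocalPathDriver

	joined := d.Join("/var", "lib", "docker")
	fmt.Println(joined, d.IsAbs(joined)) // /var/lib/docker true

	rel, err := d.Rel("/var/lib", joined)
	fmt.Println(rel, err) // docker <nil>
}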
3 vendor/github.com/containerd/continuity/sysx/README.md generated vendored Normal file
@ -0,0 +1,3 @@
This package is for internal use only. It is intended to only have
temporary changes before they are upstreamed to golang.org/x/sys/
(a.k.a. https://github.com/golang/sys).
52 vendor/github.com/containerd/continuity/sysx/generate.sh generated vendored Normal file
@ -0,0 +1,52 @@
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
set -e
|
||||
|
||||
mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl"
|
||||
|
||||
fix() {
|
||||
sed 's,^package syscall$,package sysx,' \
|
||||
| sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \
|
||||
| gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \
|
||||
| gofmt -r='Syscall6 -> syscall.Syscall6' \
|
||||
| gofmt -r='Syscall -> syscall.Syscall' \
|
||||
| gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \
|
||||
| gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \
|
||||
| gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \
|
||||
| gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \
|
||||
| gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \
|
||||
| gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \
|
||||
| gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \
|
||||
| gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR'
|
||||
}
|
||||
|
||||
if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then
|
||||
echo "Must specify \$GOARCH and \$GOOS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkargs=""
|
||||
|
||||
if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then
|
||||
mkargs="-l32"
|
||||
fi
|
||||
|
||||
for f in "$@"; do
|
||||
$mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go"
|
||||
done
|
||||
|
||||
23 vendor/github.com/containerd/continuity/sysx/nodata_linux.go generated vendored Normal file
@ -0,0 +1,23 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const ENODATA = syscall.ENODATA
|
||||
24 vendor/github.com/containerd/continuity/sysx/nodata_solaris.go generated vendored Normal file
@ -0,0 +1,24 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// This should actually be a set that contains ENOENT and EPERM
|
||||
const ENODATA = syscall.ENOENT
|
||||
25 vendor/github.com/containerd/continuity/sysx/nodata_unix.go generated vendored Normal file
@ -0,0 +1,25 @@
// +build darwin freebsd openbsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const ENODATA = syscall.ENOATTR
|
||||
117 vendor/github.com/containerd/continuity/sysx/xattr.go generated vendored Normal file
@ -0,0 +1,117 @@
// +build linux darwin
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Listxattr calls syscall listxattr and reads all content
|
||||
// and returns a string array
|
||||
func Listxattr(path string) ([]string, error) {
|
||||
return listxattrAll(path, unix.Listxattr)
|
||||
}
|
||||
|
||||
// Removexattr calls syscall removexattr
|
||||
func Removexattr(path string, attr string) (err error) {
|
||||
return unix.Removexattr(path, attr)
|
||||
}
|
||||
|
||||
// Setxattr calls syscall setxattr
|
||||
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return unix.Setxattr(path, attr, data, flags)
|
||||
}
|
||||
|
||||
// Getxattr calls syscall getxattr
|
||||
func Getxattr(path, attr string) ([]byte, error) {
|
||||
return getxattrAll(path, attr, unix.Getxattr)
|
||||
}
|
||||
|
||||
// LListxattr lists xattrs, not following symlinks
|
||||
func LListxattr(path string) ([]string, error) {
|
||||
return listxattrAll(path, unix.Llistxattr)
|
||||
}
|
||||
|
||||
// LRemovexattr removes an xattr, not following symlinks
|
||||
func LRemovexattr(path string, attr string) (err error) {
|
||||
return unix.Lremovexattr(path, attr)
|
||||
}
|
||||
|
||||
// LSetxattr sets an xattr, not following symlinks
|
||||
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return unix.Lsetxattr(path, attr, data, flags)
|
||||
}
|
||||
|
||||
// LGetxattr gets an xattr, not following symlinks
|
||||
func LGetxattr(path, attr string) ([]byte, error) {
|
||||
return getxattrAll(path, attr, unix.Lgetxattr)
|
||||
}
|
||||
|
||||
const defaultXattrBufferSize = 128
|
||||
|
||||
type listxattrFunc func(path string, dest []byte) (int, error)
|
||||
|
||||
func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) {
|
||||
buf := make([]byte, defaultXattrBufferSize)
|
||||
n, err := listFunc(path, buf)
|
||||
for err == unix.ERANGE {
|
||||
// Buffer too small, use zero-sized buffer to get the actual size
|
||||
n, err = listFunc(path, []byte{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf = make([]byte, n)
|
||||
n, err = listFunc(path, buf)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ps := bytes.Split(bytes.TrimSuffix(buf[:n], []byte{0}), []byte{0})
|
||||
var entries []string
|
||||
for _, p := range ps {
|
||||
if len(p) > 0 {
|
||||
entries = append(entries, string(p))
|
||||
}
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
type getxattrFunc func(string, string, []byte) (int, error)
|
||||
|
||||
func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) {
|
||||
buf := make([]byte, defaultXattrBufferSize)
|
||||
n, err := getFunc(path, attr, buf)
|
||||
for err == unix.ERANGE {
|
||||
// Buffer too small, use zero-sized buffer to get the actual size
|
||||
n, err = getFunc(path, attr, []byte{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf = make([]byte, n)
|
||||
n, err = getFunc(path, attr, buf)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf[:n], nil
|
||||
}
|
||||
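A usage sketch of the xattr helpers above (illustration only, not part of this diff; Linux/macOS, and the filesystem must support user extended attributes, so errors are possible):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containerd/continuity/sysx"
)

func main() {
	f, err := ioutil.TempFile("", "xattr-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Set, read back, and list a user attribute on the temp file.
	if err := sysx.Setxattr(f.Name(), "user.demo", []byte("hello"), 0); err != nil {
		log.Fatal(err) // the filesystem may not support user xattrs
	}
	val, err := sysx.Getxattr(f.Name(), "user.demo")
	if err != nil {
		log.Fatal(err)
	}
	names, _ := sysx.Listxattr(f.Name())
	fmt.Printf("%s %v\n", val, names) // hello [user.demo]
}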
67 vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go generated vendored Normal file
@ -0,0 +1,67 @@
// +build !linux,!darwin
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
var errUnsupported = errors.New("extended attributes unsupported on " + runtime.GOOS)
|
||||
|
||||
// Listxattr calls syscall listxattr and reads all content
|
||||
// and returns a string array
|
||||
func Listxattr(path string) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
// Removexattr calls syscall removexattr
|
||||
func Removexattr(path string, attr string) (err error) {
|
||||
return errUnsupported
|
||||
}
|
||||
|
||||
// Setxattr calls syscall setxattr
|
||||
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return errUnsupported
|
||||
}
|
||||
|
||||
// Getxattr calls syscall getxattr
|
||||
func Getxattr(path, attr string) ([]byte, error) {
|
||||
return []byte{}, errUnsupported
|
||||
}
|
||||
|
||||
// LListxattr lists xattrs, not following symlinks
|
||||
func LListxattr(path string) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
// LRemovexattr removes an xattr, not following symlinks
|
||||
func LRemovexattr(path string, attr string) (err error) {
|
||||
return errUnsupported
|
||||
}
|
||||
|
||||
// LSetxattr sets an xattr, not following symlinks
|
||||
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return errUnsupported
|
||||
}
|
||||
|
||||
// LGetxattr gets an xattr, not following symlinks
|
||||
func LGetxattr(path, attr string) ([]byte, error) {
|
||||
return []byte{}, nil
|
||||
}
|
||||
440 vendor/github.com/docker/cli/cli/command/image/build/context.go generated vendored Normal file
@ -0,0 +1,440 @@
package build
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/builder/remotecontext/git"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultDockerfileName is the Default filename with Docker commands, read by docker build
|
||||
DefaultDockerfileName string = "Dockerfile"
|
||||
// archiveHeaderSize is the number of bytes in an archive header
|
||||
archiveHeaderSize = 512
|
||||
)
|
||||
|
||||
// ValidateContextDirectory checks if all the contents of the directory
|
||||
// can be read and returns an error if some files can't be read
|
||||
// symlinks which point to non-existing files don't trigger an error
|
||||
func ValidateContextDirectory(srcPath string, excludes []string) error {
|
||||
contextRoot, err := getContextRoot(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm, err := fileutils.NewPatternMatcher(excludes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
return errors.Errorf("can't stat '%s'", filePath)
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return errors.Errorf("file ('%s') not found or excluded by .dockerignore", filePath)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// skip this directory/file if it's not in the path, it won't get added to the context
|
||||
if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil {
|
||||
return err
|
||||
} else if skip, err := filepathMatches(pm, relFilePath); err != nil {
|
||||
return err
|
||||
} else if skip {
|
||||
if f.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// skip checking if symlinks point to non-existing files, such symlinks can be useful
|
||||
// also skip named pipes, because they hanging on open
|
||||
if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !f.IsDir() {
|
||||
currentFile, err := os.Open(filePath)
|
||||
if err != nil && os.IsPermission(err) {
|
||||
return errors.Errorf("no permission to read from '%s'", filePath)
|
||||
}
|
||||
currentFile.Close()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func filepathMatches(matcher *fileutils.PatternMatcher, file string) (bool, error) {
|
||||
file = filepath.Clean(file)
|
||||
if file == "." {
|
||||
// Don't let them exclude everything, kind of silly.
|
||||
return false, nil
|
||||
}
|
||||
return matcher.Matches(file)
|
||||
}
|
||||
|
||||
// DetectArchiveReader detects whether the input stream is an archive or a
|
||||
// Dockerfile and returns a buffered version of input, safe to consume in lieu
|
||||
// of input. If an archive is detected, isArchive is set to true, and to false
|
||||
// otherwise, in which case it is safe to assume input represents the contents
|
||||
// of a Dockerfile.
|
||||
func DetectArchiveReader(input io.ReadCloser) (rc io.ReadCloser, isArchive bool, err error) {
|
||||
buf := bufio.NewReader(input)
|
||||
|
||||
magic, err := buf.Peek(archiveHeaderSize * 2)
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, false, errors.Errorf("failed to peek context header from STDIN: %v", err)
|
||||
}
|
||||
|
||||
return ioutils.NewReadCloserWrapper(buf, func() error { return input.Close() }), IsArchive(magic), nil
|
||||
}
|
||||
|
||||
// WriteTempDockerfile writes a Dockerfile stream to a temporary file with a
|
||||
// name specified by DefaultDockerfileName and returns the path to the
|
||||
// temporary directory containing the Dockerfile.
|
||||
func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) {
|
||||
// err is a named return value, due to the defer call below.
|
||||
dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-")
|
||||
if err != nil {
|
||||
return "", errors.Errorf("unable to create temporary context directory: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(dockerfileDir)
|
||||
}
|
||||
}()
|
||||
|
||||
f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err := io.Copy(f, rc); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return dockerfileDir, rc.Close()
|
||||
}
|
||||
|
||||
// GetContextFromReader will read the contents of the given reader as either a
|
||||
// Dockerfile or tar archive. Returns a tar archive used as a context and a
|
||||
// path to the Dockerfile inside the tar.
|
||||
func GetContextFromReader(rc io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
|
||||
rc, isArchive, err := DetectArchiveReader(rc)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
if isArchive {
|
||||
return rc, dockerfileName, nil
|
||||
}
|
||||
|
||||
// Input should be read as a Dockerfile.
|
||||
|
||||
if dockerfileName == "-" {
|
||||
return nil, "", errors.New("build context is not an archive")
|
||||
}
|
||||
|
||||
dockerfileDir, err := WriteTempDockerfile(rc)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
tar, err := archive.Tar(dockerfileDir, archive.Uncompressed)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return ioutils.NewReadCloserWrapper(tar, func() error {
|
||||
err := tar.Close()
|
||||
os.RemoveAll(dockerfileDir)
|
||||
return err
|
||||
}), DefaultDockerfileName, nil
|
||||
}
|
||||
|
||||
// IsArchive checks for the magic bytes of a tar or any supported compression
|
||||
// algorithm.
|
||||
func IsArchive(header []byte) bool {
|
||||
compression := archive.DetectCompression(header)
|
||||
if compression != archive.Uncompressed {
|
||||
return true
|
||||
}
|
||||
r := tar.NewReader(bytes.NewBuffer(header))
|
||||
_, err := r.Next()
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// GetContextFromGitURL uses a Git URL as context for a `docker build`. The
|
||||
// git repo is cloned into a temporary directory used as the context directory.
|
||||
// Returns the absolute path to the temporary context directory, the relative
|
||||
// path of the dockerfile in that context directory, and a non-nil error on
|
||||
// success.
|
||||
func GetContextFromGitURL(gitURL, dockerfileName string) (string, string, error) {
|
||||
if _, err := exec.LookPath("git"); err != nil {
|
||||
return "", "", errors.Wrapf(err, "unable to find 'git'")
|
||||
}
|
||||
absContextDir, err := git.Clone(gitURL)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "unable to 'git clone' to temporary context directory")
|
||||
}
|
||||
|
||||
absContextDir, err = ResolveAndValidateContextPath(absContextDir)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
relDockerfile, err := getDockerfileRelPath(absContextDir, dockerfileName)
|
||||
if err == nil && strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
|
||||
return "", "", errors.Errorf("the Dockerfile (%s) must be within the build context", dockerfileName)
|
||||
}
|
||||
|
||||
return absContextDir, relDockerfile, err
|
||||
}
|
||||
|
||||
// GetContextFromURL uses a remote URL as context for a `docker build`. The
|
||||
// remote resource is downloaded as either a Dockerfile or a tar archive.
|
||||
// Returns the tar archive used for the context and a path of the
|
||||
// dockerfile inside the tar.
|
||||
func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) {
|
||||
response, err := getWithStatusError(remoteURL)
|
||||
if err != nil {
|
||||
return nil, "", errors.Errorf("unable to download remote context %s: %v", remoteURL, err)
|
||||
}
|
||||
progressOutput := streamformatter.NewProgressOutput(out)
|
||||
|
||||
// Pass the response body through a progress reader.
|
||||
progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL))
|
||||
|
||||
return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName)
|
||||
}
|
||||
|
||||
// getWithStatusError does an http.Get() and returns an error if the
|
||||
// status code is 4xx or 5xx.
|
||||
func getWithStatusError(url string) (resp *http.Response, err error) {
|
||||
if resp, err = http.Get(url); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode < 400 {
|
||||
return resp, nil
|
||||
}
|
||||
msg := fmt.Sprintf("failed to GET %s with status %s", url, resp.Status)
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, msg+": error reading body")
|
||||
}
|
||||
return nil, errors.Errorf(msg+": %s", bytes.TrimSpace(body))
|
||||
}
|
||||
|
||||
// GetContextFromLocalDir uses the given local directory as context for a
|
||||
// `docker build`. Returns the absolute path to the local context directory,
|
||||
// the relative path of the dockerfile in that context directory, and a non-nil
|
||||
// error on success.
|
||||
func GetContextFromLocalDir(localDir, dockerfileName string) (string, string, error) {
|
||||
localDir, err := ResolveAndValidateContextPath(localDir)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// When using a local context directory, and the Dockerfile is specified
|
||||
// with the `-f/--file` option then it is considered relative to the
|
||||
// current directory and not the context directory.
|
||||
if dockerfileName != "" && dockerfileName != "-" {
|
||||
if dockerfileName, err = filepath.Abs(dockerfileName); err != nil {
|
||||
return "", "", errors.Errorf("unable to get absolute path to Dockerfile: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
relDockerfile, err := getDockerfileRelPath(localDir, dockerfileName)
|
||||
return localDir, relDockerfile, err
|
||||
}
|
||||
|
||||
// ResolveAndValidateContextPath uses the given context directory for a `docker build`
|
||||
// and returns the absolute path to the context directory.
|
||||
func ResolveAndValidateContextPath(givenContextDir string) (string, error) {
|
||||
absContextDir, err := filepath.Abs(givenContextDir)
|
||||
if err != nil {
|
||||
return "", errors.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err)
|
||||
}
|
||||
|
||||
// The context dir might be a symbolic link, so follow it to the actual
|
||||
// target directory.
|
||||
//
|
||||
// FIXME. We use isUNC (always false on non-Windows platforms) to workaround
|
||||
// an issue in golang. On Windows, EvalSymLinks does not work on UNC file
|
||||
// paths (those starting with \\). This hack means that when using links
|
||||
// on UNC paths, they will not be followed.
|
||||
if !isUNC(absContextDir) {
|
||||
absContextDir, err = filepath.EvalSymlinks(absContextDir)
|
||||
if err != nil {
|
||||
return "", errors.Errorf("unable to evaluate symlinks in context path: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
stat, err := os.Lstat(absContextDir)
|
||||
if err != nil {
|
||||
return "", errors.Errorf("unable to stat context directory %q: %v", absContextDir, err)
|
||||
}
|
||||
|
||||
if !stat.IsDir() {
|
||||
return "", errors.Errorf("context must be a directory: %s", absContextDir)
|
||||
}
|
||||
return absContextDir, err
|
||||
}
|
||||
|
||||
// getDockerfileRelPath returns the dockerfile path relative to the context
|
||||
// directory
|
||||
func getDockerfileRelPath(absContextDir, givenDockerfile string) (string, error) {
|
||||
var err error
|
||||
|
||||
if givenDockerfile == "-" {
|
||||
return givenDockerfile, nil
|
||||
}
|
||||
|
||||
absDockerfile := givenDockerfile
|
||||
if absDockerfile == "" {
|
||||
// No -f/--file was specified so use the default relative to the
|
||||
// context directory.
|
||||
absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName)
|
||||
|
||||
// Just to be nice ;-) look for 'dockerfile' too but only
|
||||
// use it if we found it, otherwise ignore this check
|
||||
if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) {
|
||||
altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName))
|
||||
if _, err = os.Lstat(altPath); err == nil {
|
||||
absDockerfile = altPath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If not already an absolute path, the Dockerfile path should be joined to
|
||||
// the base directory.
|
||||
if !filepath.IsAbs(absDockerfile) {
|
||||
absDockerfile = filepath.Join(absContextDir, absDockerfile)
|
||||
}
|
||||
|
||||
// Evaluate symlinks in the path to the Dockerfile too.
|
||||
//
|
||||
// FIXME. We use isUNC (always false on non-Windows platforms) to workaround
|
||||
// an issue in golang. On Windows, EvalSymLinks does not work on UNC file
|
||||
// paths (those starting with \\). This hack means that when using links
|
||||
// on UNC paths, they will not be followed.
|
||||
if !isUNC(absDockerfile) {
|
||||
absDockerfile, err = filepath.EvalSymlinks(absDockerfile)
|
||||
if err != nil {
|
||||
return "", errors.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Lstat(absDockerfile); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return "", errors.Errorf("Cannot locate Dockerfile: %q", absDockerfile)
|
||||
}
|
||||
return "", errors.Errorf("unable to stat Dockerfile: %v", err)
|
||||
}
|
||||
|
||||
relDockerfile, err := filepath.Rel(absContextDir, absDockerfile)
|
||||
if err != nil {
|
||||
return "", errors.Errorf("unable to get relative Dockerfile path: %v", err)
|
||||
}
|
||||
|
||||
return relDockerfile, nil
|
||||
}
|
||||
|
||||
// isUNC returns true if the path is UNC (one starting \\). It always returns
|
||||
// false on Linux.
|
||||
func isUNC(path string) bool {
|
||||
return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`)
|
||||
}
|
||||
|
||||
// AddDockerfileToBuildContext from a ReadCloser, returns a new archive and
|
||||
// the relative path to the dockerfile in the context.
|
||||
func AddDockerfileToBuildContext(dockerfileCtx io.ReadCloser, buildCtx io.ReadCloser) (io.ReadCloser, string, error) {
|
||||
file, err := ioutil.ReadAll(dockerfileCtx)
|
||||
dockerfileCtx.Close()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
now := time.Now()
|
||||
hdrTmpl := &tar.Header{
|
||||
Mode: 0600,
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
ModTime: now,
|
||||
Typeflag: tar.TypeReg,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
}
|
||||
randomName := ".dockerfile." + stringid.GenerateRandomID()[:20]
|
||||
|
||||
buildCtx = archive.ReplaceFileTarWrapper(buildCtx, map[string]archive.TarModifierFunc{
|
||||
// Add the dockerfile with a random filename
|
||||
randomName: func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
|
||||
return hdrTmpl, file, nil
|
||||
},
|
||||
// Update .dockerignore to include the random filename
|
||||
".dockerignore": func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
|
||||
if h == nil {
|
||||
h = hdrTmpl
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
if content != nil {
|
||||
if _, err := b.ReadFrom(content); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
} else {
|
||||
b.WriteString(".dockerignore")
|
||||
}
|
||||
b.WriteString("\n" + randomName + "\n")
|
||||
return h, b.Bytes(), nil
|
||||
},
|
||||
})
|
||||
return buildCtx, randomName, nil
|
||||
}
|
||||
|
||||
// Compress the build context for sending to the API
|
||||
func Compress(buildCtx io.ReadCloser) (io.ReadCloser, error) {
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
|
||||
go func() {
|
||||
compressWriter, err := archive.CompressStream(pipeWriter, archive.Gzip)
|
||||
if err != nil {
|
||||
pipeWriter.CloseWithError(err)
|
||||
}
|
||||
defer buildCtx.Close()
|
||||
|
||||
if _, err := pools.Copy(compressWriter, buildCtx); err != nil {
|
||||
pipeWriter.CloseWithError(
|
||||
errors.Wrap(err, "failed to compress context"))
|
||||
compressWriter.Close()
|
||||
return
|
||||
}
|
||||
compressWriter.Close()
|
||||
pipeWriter.Close()
|
||||
}()
|
||||
|
||||
return pipeReader, nil
|
||||
}
|
||||
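A sketch of wiring the exported context helpers above together for a local build context (illustration only, not part of this diff; the directory path is a placeholder and nothing is actually built):

package main

import (
	"fmt"
	"log"

	"github.com/docker/cli/cli/command/image/build"
)

func main() {
	// "./app" is a placeholder context directory containing a Dockerfile.
	contextDir, relDockerfile, err := build.GetContextFromLocalDir("./app", "")
	if err != nil {
		log.Fatal(err)
	}

	// Refuse unreadable files up front, the same check `docker build` performs.
	if err := build.ValidateContextDirectory(contextDir, nil); err != nil {
		log.Fatal(err)
	}
	fmt.Println(contextDir, relDockerfile) // e.g. /abs/path/app Dockerfile
}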
11 vendor/github.com/docker/cli/cli/command/image/build/context_unix.go generated vendored Normal file
@ -0,0 +1,11 @@
// +build !windows

package build

import (
	"path/filepath"
)

func getContextRoot(srcPath string) (string, error) {
	return filepath.Join(srcPath, "."), nil
}
17 vendor/github.com/docker/cli/cli/command/image/build/context_windows.go generated vendored Normal file
@ -0,0 +1,17 @@
// +build windows

package build

import (
	"path/filepath"

	"github.com/docker/docker/pkg/longpath"
)

func getContextRoot(srcPath string) (string, error) {
	cr, err := filepath.Abs(srcPath)
	if err != nil {
		return "", err
	}
	return longpath.AddPrefix(cr), nil
}
39 vendor/github.com/docker/cli/cli/command/image/build/dockerignore.go generated vendored Normal file
@ -0,0 +1,39 @@
package build

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/builder/dockerignore"
	"github.com/docker/docker/pkg/fileutils"
)

// ReadDockerignore reads the .dockerignore file in the context directory and
// returns the list of paths to exclude
func ReadDockerignore(contextDir string) ([]string, error) {
	var excludes []string

	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	switch {
	case os.IsNotExist(err):
		return excludes, nil
	case err != nil:
		return nil, err
	}
	defer f.Close()

	return dockerignore.ReadAll(f)
}

// TrimBuildFilesFromExcludes removes the named Dockerfile and .dockerignore from
// the list of excluded files. The daemon will remove them from the final context
// but they must be in available in the context when passed to the API.
func TrimBuildFilesFromExcludes(excludes []string, dockerfile string, dockerfileFromStdin bool) []string {
	if keep, _ := fileutils.Matches(".dockerignore", excludes); keep {
		excludes = append(excludes, "!.dockerignore")
	}
	if keep, _ := fileutils.Matches(dockerfile, excludes); keep && !dockerfileFromStdin {
		excludes = append(excludes, "!"+dockerfile)
	}
	return excludes
}
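ReadDockerignore and TrimBuildFilesFromExcludes are normally combined before tarring a context. A minimal sketch (illustration only, not part of this diff; the context directory is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/docker/cli/cli/command/image/build"
)

func main() {
	contextDir := "./app" // placeholder
	excludes, err := build.ReadDockerignore(contextDir)
	if err != nil {
		log.Fatal(err)
	}
	// Keep the Dockerfile and .dockerignore in the uploaded context even if
	// the ignore patterns would otherwise exclude them.
	excludes = build.TrimBuildFilesFromExcludes(excludes, "Dockerfile", false)
	fmt.Println(excludes)
}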
64 vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go generated vendored Normal file
@ -0,0 +1,64 @@
package dockerignore // import "github.com/docker/docker/builder/dockerignore"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ReadAll reads a .dockerignore file and returns the list of file patterns
|
||||
// to ignore. Note this will trim whitespace from each line as well
|
||||
// as use GO's "clean" func to get the shortest/cleanest path for each.
|
||||
func ReadAll(reader io.Reader) ([]string, error) {
|
||||
if reader == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(reader)
|
||||
var excludes []string
|
||||
currentLine := 0
|
||||
|
||||
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
||||
for scanner.Scan() {
|
||||
scannedBytes := scanner.Bytes()
|
||||
// We trim UTF8 BOM
|
||||
if currentLine == 0 {
|
||||
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
||||
}
|
||||
pattern := string(scannedBytes)
|
||||
currentLine++
|
||||
// Lines starting with # (comments) are ignored before processing
|
||||
if strings.HasPrefix(pattern, "#") {
|
||||
continue
|
||||
}
|
||||
pattern = strings.TrimSpace(pattern)
|
||||
if pattern == "" {
|
||||
continue
|
||||
}
|
||||
// normalize absolute paths to paths relative to the context
|
||||
// (taking care of '!' prefix)
|
||||
invert := pattern[0] == '!'
|
||||
if invert {
|
||||
pattern = strings.TrimSpace(pattern[1:])
|
||||
}
|
||||
if len(pattern) > 0 {
|
||||
pattern = filepath.Clean(pattern)
|
||||
pattern = filepath.ToSlash(pattern)
|
||||
if len(pattern) > 1 && pattern[0] == '/' {
|
||||
pattern = pattern[1:]
|
||||
}
|
||||
}
|
||||
if invert {
|
||||
pattern = "!" + pattern
|
||||
}
|
||||
|
||||
excludes = append(excludes, pattern)
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("Error reading .dockerignore: %v", err)
|
||||
}
|
||||
return excludes, nil
|
||||
}
|
||||
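ReadAll accepts any io.Reader, so its normalization rules are easy to observe in isolation. A short sketch (illustration only, not part of this diff):

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/docker/docker/builder/dockerignore"
)

func main() {
	ignore := strings.NewReader(`
# comments are dropped
node_modules
/dist
!dist/keep.txt
`)
	patterns, err := dockerignore.ReadAll(ignore)
	if err != nil {
		log.Fatal(err)
	}
	// Blank lines and comments are skipped, leading "/" is stripped,
	// and "!" negations are preserved after cleaning.
	fmt.Println(patterns) // [node_modules dist !dist/keep.txt]
}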
209 vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go generated vendored Normal file
@ -0,0 +1,209 @@
package git // import "github.com/docker/docker/builder/remotecontext/git"
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type gitRepo struct {
|
||||
remote string
|
||||
ref string
|
||||
subdir string
|
||||
}
|
||||
|
||||
// Clone clones a repository into a newly created directory which
|
||||
// will be under "docker-build-git"
|
||||
func Clone(remoteURL string) (string, error) {
|
||||
repo, err := parseRemoteURL(remoteURL)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return cloneGitRepo(repo)
|
||||
}
|
||||
|
||||
func cloneGitRepo(repo gitRepo) (checkoutDir string, err error) {
|
||||
fetch := fetchArgs(repo.remote, repo.ref)
|
||||
|
||||
root, err := ioutil.TempDir("", "docker-build-git")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(root)
|
||||
}
|
||||
}()
|
||||
|
||||
if out, err := gitWithinDir(root, "init"); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to init repo at %s: %s", root, out)
|
||||
}
|
||||
|
||||
// Add origin remote for compatibility with previous implementation that
|
||||
// used "git clone" and also to make sure local refs are created for branches
|
||||
if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil {
|
||||
return "", errors.Wrapf(err, "failed add origin repo at %s: %s", repo.remote, out)
|
||||
}
|
||||
|
||||
if output, err := gitWithinDir(root, fetch...); err != nil {
|
||||
return "", errors.Wrapf(err, "error fetching: %s", output)
|
||||
}
|
||||
|
||||
checkoutDir, err = checkoutGit(root, repo.ref, repo.subdir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
cmd := exec.Command("git", "submodule", "update", "--init", "--recursive", "--depth=1")
|
||||
cmd.Dir = root
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error initializing submodules: %s", output)
|
||||
}
|
||||
|
||||
return checkoutDir, nil
|
||||
}
|
||||
|
||||
func parseRemoteURL(remoteURL string) (gitRepo, error) {
|
||||
repo := gitRepo{}
|
||||
|
||||
if !isGitTransport(remoteURL) {
|
||||
remoteURL = "https://" + remoteURL
|
||||
}
|
||||
|
||||
var fragment string
|
||||
if strings.HasPrefix(remoteURL, "git@") {
|
||||
// git@.. is not an URL, so cannot be parsed as URL
|
||||
parts := strings.SplitN(remoteURL, "#", 2)
|
||||
|
||||
repo.remote = parts[0]
|
||||
if len(parts) == 2 {
|
||||
fragment = parts[1]
|
||||
}
|
||||
repo.ref, repo.subdir = getRefAndSubdir(fragment)
|
||||
} else {
|
||||
u, err := url.Parse(remoteURL)
|
||||
if err != nil {
|
||||
return repo, err
|
||||
}
|
||||
|
||||
repo.ref, repo.subdir = getRefAndSubdir(u.Fragment)
|
||||
u.Fragment = ""
|
||||
repo.remote = u.String()
|
||||
}
|
||||
|
||||
if strings.HasPrefix(repo.ref, "-") {
|
||||
return gitRepo{}, errors.Errorf("invalid refspec: %s", repo.ref)
|
||||
}
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
func getRefAndSubdir(fragment string) (ref string, subdir string) {
|
||||
refAndDir := strings.SplitN(fragment, ":", 2)
|
||||
ref = "master"
|
||||
if len(refAndDir[0]) != 0 {
|
||||
ref = refAndDir[0]
|
||||
}
|
||||
if len(refAndDir) > 1 && len(refAndDir[1]) != 0 {
|
||||
subdir = refAndDir[1]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fetchArgs(remoteURL string, ref string) []string {
|
||||
args := []string{"fetch"}
|
||||
|
||||
if supportsShallowClone(remoteURL) {
|
||||
args = append(args, "--depth", "1")
|
||||
}
|
||||
|
||||
return append(args, "origin", "--", ref)
|
||||
}
|
||||
|
||||
// Check if a given git URL supports a shallow git clone,
|
||||
// i.e. it is a non-HTTP server or a smart HTTP server.
|
||||
func supportsShallowClone(remoteURL string) bool {
|
||||
if urlutil.IsURL(remoteURL) {
|
||||
// Check if the HTTP server is smart
|
||||
|
||||
// Smart servers must correctly respond to a query for the git-upload-pack service
|
||||
serviceURL := remoteURL + "/info/refs?service=git-upload-pack"
|
||||
|
||||
// Try a HEAD request and fallback to a Get request on error
|
||||
res, err := http.Head(serviceURL)
|
||||
if err != nil || res.StatusCode != http.StatusOK {
|
||||
res, err = http.Get(serviceURL)
|
||||
if err == nil {
|
||||
res.Body.Close()
|
||||
}
|
||||
if err != nil || res.StatusCode != http.StatusOK {
|
||||
// request failed
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" {
|
||||
// Fallback, not a smart server
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
// Non-HTTP protocols always support shallow clones
|
||||
return true
|
||||
}
|
||||
|
||||
func checkoutGit(root, ref, subdir string) (string, error) {
|
||||
// Try checking out by ref name first. This will work on branches and sets
|
||||
// .git/HEAD to the current branch name
|
||||
if output, err := gitWithinDir(root, "checkout", ref); err != nil {
|
||||
// If checking out by branch name fails check out the last fetched ref
|
||||
if _, err2 := gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil {
|
||||
return "", errors.Wrapf(err, "error checking out %s: %s", ref, output)
|
||||
}
|
||||
}
|
||||
|
||||
if subdir != "" {
|
||||
newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, subdir), root)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error setting git context, %q not within git root", subdir)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(newCtx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return "", errors.Errorf("error setting git context, not a directory: %s", newCtx)
|
||||
}
|
||||
root = newCtx
|
||||
}
|
||||
|
||||
return root, nil
|
||||
}
|
||||
|
||||
func gitWithinDir(dir string, args ...string) ([]byte, error) {
|
||||
a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")}
|
||||
return git(append(a, args...)...)
|
||||
}
|
||||
|
||||
func git(args ...string) ([]byte, error) {
|
||||
return exec.Command("git", args...).CombinedOutput()
|
||||
}
|
||||
|
||||
// isGitTransport returns true if the provided str is a git transport by inspecting
|
||||
// the prefix of the string for known protocols used in git.
|
||||
func isGitTransport(str string) bool {
|
||||
return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
|
||||
}
|
||||
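git.Clone above is the entry point for remote build contexts: it fetches the repository into a temporary "docker-build-git" directory, honoring an optional "#ref:subdir" fragment via parseRemoteURL. The snippet below is a hedged usage sketch, not code from this commit; the example URL and fragment are placeholders.

// Hypothetical usage sketch (not part of this commit): materialize a remote
// git build context on disk, then clean it up when done.
package main

import (
    "fmt"
    "os"

    "github.com/docker/docker/builder/remotecontext/git"
)

func main() {
    // A "repo#ref:subdir" fragment selects a ref and a subdirectory, per parseRemoteURL.
    dir, err := git.Clone("https://github.com/docker/docker.git#master:contrib")
    if err != nil {
        fmt.Fprintln(os.Stderr, "clone failed:", err)
        os.Exit(1)
    }
    defer os.RemoveAll(dir) // the checkout lives under a temporary docker-build-git directory
    fmt.Println("build context checked out at", dir)
}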
1  vendor/github.com/docker/docker/pkg/archive/README.md (generated, vendored, new file)
@@ -0,0 +1 @@
This code provides helper functions for dealing with archive files.
1284  vendor/github.com/docker/docker/pkg/archive/archive.go (generated, vendored, new file)
File diff suppressed because it is too large.
261  vendor/github.com/docker/docker/pkg/archive/archive_linux.go (generated, vendored, new file)
@@ -0,0 +1,261 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
|
||||
if format == OverlayWhiteoutFormat {
|
||||
return overlayWhiteoutConverter{inUserNS: inUserNS}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type overlayWhiteoutConverter struct {
|
||||
inUserNS bool
|
||||
}
|
||||
|
||||
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
|
||||
// convert whiteouts to AUFS format
|
||||
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
|
||||
// we just rename the file and make it normal
|
||||
dir, filename := filepath.Split(hdr.Name)
|
||||
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
|
||||
hdr.Mode = 0600
|
||||
hdr.Typeflag = tar.TypeReg
|
||||
hdr.Size = 0
|
||||
}
|
||||
|
||||
if fi.Mode()&os.ModeDir != 0 {
|
||||
// convert opaque dirs to AUFS format by writing an empty file with the prefix
|
||||
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(opaque) == 1 && opaque[0] == 'y' {
|
||||
if hdr.Xattrs != nil {
|
||||
delete(hdr.Xattrs, "trusted.overlay.opaque")
|
||||
}
|
||||
|
||||
// create a header for the whiteout file
|
||||
// it should inherit some properties from the parent, but be a regular file
|
||||
wo = &tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: hdr.Mode & int64(os.ModePerm),
|
||||
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
|
||||
Size: 0,
|
||||
Uid: hdr.Uid,
|
||||
Uname: hdr.Uname,
|
||||
Gid: hdr.Gid,
|
||||
Gname: hdr.Gname,
|
||||
AccessTime: hdr.AccessTime,
|
||||
ChangeTime: hdr.ChangeTime,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
|
||||
base := filepath.Base(path)
|
||||
dir := filepath.Dir(path)
|
||||
|
||||
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
|
||||
if base == WhiteoutOpaqueDir {
|
||||
err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
|
||||
if err != nil {
|
||||
if c.inUserNS {
|
||||
if err = replaceDirWithOverlayOpaque(dir); err != nil {
|
||||
return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir)
|
||||
}
|
||||
} else {
|
||||
return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
|
||||
}
|
||||
}
|
||||
// don't write the file itself
|
||||
return false, err
|
||||
}
|
||||
|
||||
// if a file was deleted and we are using overlay, we need to create a character device
|
||||
if strings.HasPrefix(base, WhiteoutPrefix) {
|
||||
originalBase := base[len(WhiteoutPrefix):]
|
||||
originalPath := filepath.Join(dir, originalBase)
|
||||
|
||||
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
|
||||
if c.inUserNS {
|
||||
// Ubuntu and a few distros support overlayfs in userns.
|
||||
//
|
||||
// Although we can't call mknod directly in userns (at least on bionic kernel 4.15),
|
||||
// we can still create 0,0 char device using mknodChar0Overlay().
|
||||
//
|
||||
// NOTE: we don't need this hack for the containerd snapshotter+unpack model.
|
||||
if err := mknodChar0Overlay(originalPath); err != nil {
|
||||
return false, errors.Wrapf(err, "failed to mknodChar0UserNS(%q)", originalPath)
|
||||
}
|
||||
} else {
|
||||
return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
|
||||
}
|
||||
}
|
||||
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// don't write the file itself
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// mknodChar0Overlay creates 0,0 char device by mounting overlayfs and unlinking.
|
||||
// This function can be used for creating 0,0 char device in userns on Ubuntu.
|
||||
//
|
||||
// Steps:
|
||||
// * Mkdir lower,upper,merged,work
|
||||
// * Create lower/dummy
|
||||
// * Mount overlayfs
|
||||
// * Unlink merged/dummy
|
||||
// * Unmount overlayfs
|
||||
// * Make sure a 0,0 char device is created as upper/dummy
|
||||
// * Rename upper/dummy to cleansedOriginalPath
|
||||
func mknodChar0Overlay(cleansedOriginalPath string) error {
|
||||
dir := filepath.Dir(cleansedOriginalPath)
|
||||
tmp, err := ioutil.TempDir(dir, "mc0o")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
lower := filepath.Join(tmp, "l")
|
||||
upper := filepath.Join(tmp, "u")
|
||||
work := filepath.Join(tmp, "w")
|
||||
merged := filepath.Join(tmp, "m")
|
||||
for _, s := range []string{lower, upper, work, merged} {
|
||||
if err := os.MkdirAll(s, 0700); err != nil {
|
||||
return errors.Wrapf(err, "failed to mkdir %s", s)
|
||||
}
|
||||
}
|
||||
dummyBase := "d"
|
||||
lowerDummy := filepath.Join(lower, dummyBase)
|
||||
if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil {
|
||||
return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy)
|
||||
}
|
||||
mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
|
||||
// docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
|
||||
if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
|
||||
return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
|
||||
}
|
||||
mergedDummy := filepath.Join(merged, dummyBase)
|
||||
if err := os.Remove(mergedDummy); err != nil {
|
||||
syscall.Unmount(merged, 0)
|
||||
return errors.Wrapf(err, "failed to unlink %s", mergedDummy)
|
||||
}
|
||||
if err := syscall.Unmount(merged, 0); err != nil {
|
||||
return errors.Wrapf(err, "failed to unmount %s", merged)
|
||||
}
|
||||
upperDummy := filepath.Join(upper, dummyBase)
|
||||
if err := isChar0(upperDummy); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil {
|
||||
return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isChar0(path string) error {
|
||||
osStat, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to stat %s", path)
|
||||
}
|
||||
st, ok := osStat.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return errors.Errorf("got unsupported stat for %s", path)
|
||||
}
|
||||
if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR {
|
||||
return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode)
|
||||
}
|
||||
if st.Rdev != 0 {
|
||||
return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque
|
||||
// xattr. The contents of the directory are preserved.
|
||||
func replaceDirWithOverlayOpaque(path string) error {
|
||||
if path == "/" {
|
||||
return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"")
|
||||
}
|
||||
dir := filepath.Dir(path)
|
||||
tmp, err := ioutil.TempDir(dir, "rdwoo")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
// newPath is a new empty directory crafted with trusted.overlay.opaque xattr.
|
||||
// we copy the content of path into newPath, remove path, and rename newPath to path.
|
||||
newPath, err := createDirWithOverlayOpaque(tmp)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp)
|
||||
}
|
||||
if err := fs.CopyDir(newPath, path); err != nil {
|
||||
return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path)
|
||||
}
|
||||
if err := os.RemoveAll(path); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(newPath, path)
|
||||
}
|
||||
|
||||
// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr,
|
||||
// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu.
|
||||
func createDirWithOverlayOpaque(tmp string) (string, error) {
|
||||
lower := filepath.Join(tmp, "l")
|
||||
upper := filepath.Join(tmp, "u")
|
||||
work := filepath.Join(tmp, "w")
|
||||
merged := filepath.Join(tmp, "m")
|
||||
for _, s := range []string{lower, upper, work, merged} {
|
||||
if err := os.MkdirAll(s, 0700); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to mkdir %s", s)
|
||||
}
|
||||
}
|
||||
dummyBase := "d"
|
||||
lowerDummy := filepath.Join(lower, dummyBase)
|
||||
if err := os.MkdirAll(lowerDummy, 0700); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy)
|
||||
}
|
||||
mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
|
||||
// docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
|
||||
if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
|
||||
}
|
||||
mergedDummy := filepath.Join(merged, dummyBase)
|
||||
if err := os.Remove(mergedDummy); err != nil {
|
||||
syscall.Unmount(merged, 0)
|
||||
return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy)
|
||||
}
|
||||
// upperDummy becomes a 0,0-char device file here
|
||||
if err := os.Mkdir(mergedDummy, 0700); err != nil {
|
||||
syscall.Unmount(merged, 0)
|
||||
return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy)
|
||||
}
|
||||
// upperDummy becomes a directory with trusted.overlay.opaque xattr
|
||||
// (but can't be verified in userns)
|
||||
if err := syscall.Unmount(merged, 0); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to unmount %s", merged)
|
||||
}
|
||||
upperDummy := filepath.Join(upper, dummyBase)
|
||||
return upperDummy, nil
|
||||
}
|
||||
7  vendor/github.com/docker/docker/pkg/archive/archive_other.go (generated, vendored, new file)
@@ -0,0 +1,7 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
    return nil
}
114  vendor/github.com/docker/docker/pkg/archive/archive_unix.go (generated, vendored, new file)
@@ -0,0 +1,114 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
    "archive/tar"
    "errors"
    "os"
    "path/filepath"
    "syscall"

    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/system"
    rsystem "github.com/opencontainers/runc/libcontainer/system"
    "golang.org/x/sys/unix"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
    return srcPath
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
    return srcPath + string(filepath.Separator) + include
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
    return p // already unix-style
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.

func chmodTarEntry(perm os.FileMode) os.FileMode {
    return perm // noop for unix as golang APIs provide perm bits correctly
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
    s, ok := stat.(*syscall.Stat_t)

    if ok {
        // Currently go does not fill in the major/minors
        if s.Mode&unix.S_IFBLK != 0 ||
            s.Mode&unix.S_IFCHR != 0 {
            hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
            hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
        }
    }

    return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
    s, ok := stat.(*syscall.Stat_t)

    if ok {
        inode = s.Ino
    }

    return
}

func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
    s, ok := stat.(*syscall.Stat_t)

    if !ok {
        return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
    }
    return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
    if rsystem.RunningInUserNS() {
        // cannot create a device if running in user namespace
        return nil
    }

    mode := uint32(hdr.Mode & 07777)
    switch hdr.Typeflag {
    case tar.TypeBlock:
        mode |= unix.S_IFBLK
    case tar.TypeChar:
        mode |= unix.S_IFCHR
    case tar.TypeFifo:
        mode |= unix.S_IFIFO
    }

    return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
    if hdr.Typeflag == tar.TypeLink {
        if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
            if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
                return err
            }
        }
    } else if hdr.Typeflag != tar.TypeSymlink {
        if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
            return err
        }
    }
    return nil
}
67  vendor/github.com/docker/docker/pkg/archive/archive_windows.go (generated, vendored, new file)
@@ -0,0 +1,67 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "archive/tar"
    "os"
    "path/filepath"

    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/longpath"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
    return longpath.AddPrefix(srcPath)
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
    return filepath.Join(srcPath, include)
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
    return filepath.ToSlash(p)
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
    //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
    permPart := perm & os.ModePerm
    noPermPart := perm &^ os.ModePerm
    // Add the x bit: make everything +x from windows
    permPart |= 0111
    permPart &= 0755

    return noPermPart | permPart
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
    // do nothing. no notion of Rdev, Nlink in stat on Windows
    return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
    // do nothing. no notion of Inode in stat on Windows
    return
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
    return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
    return nil
}

func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
    // no notion of file ownership mapping yet on Windows
    return idtools.Identity{UID: 0, GID: 0}, nil
}
445  vendor/github.com/docker/docker/pkg/archive/changes.go (generated, vendored, new file)
@@ -0,0 +1,445 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ChangeType represents the change type.
|
||||
type ChangeType int
|
||||
|
||||
const (
|
||||
// ChangeModify represents the modify operation.
|
||||
ChangeModify = iota
|
||||
// ChangeAdd represents the add operation.
|
||||
ChangeAdd
|
||||
// ChangeDelete represents the delete operation.
|
||||
ChangeDelete
|
||||
)
|
||||
|
||||
func (c ChangeType) String() string {
|
||||
switch c {
|
||||
case ChangeModify:
|
||||
return "C"
|
||||
case ChangeAdd:
|
||||
return "A"
|
||||
case ChangeDelete:
|
||||
return "D"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Change represents a change, it wraps the change type and path.
|
||||
// It describes changes of the files in the path respect to the
|
||||
// parent layers. The change could be modify, add, delete.
|
||||
// This is used for layer diff.
|
||||
type Change struct {
|
||||
Path string
|
||||
Kind ChangeType
|
||||
}
|
||||
|
||||
func (change *Change) String() string {
|
||||
return fmt.Sprintf("%s %s", change.Kind, change.Path)
|
||||
}
|
||||
|
||||
// for sort.Sort
|
||||
type changesByPath []Change
|
||||
|
||||
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
|
||||
func (c changesByPath) Len() int { return len(c) }
|
||||
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
|
||||
|
||||
// Gnu tar doesn't have sub-second mtime precision. The go tar
|
||||
// writer (1.10+) does when using PAX format, but we round times to seconds
|
||||
// to ensure archives have the same hashes for backwards compatibility.
|
||||
// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
|
||||
//
|
||||
// Non-sub-second is problematic when we apply changes via tar
|
||||
// files. We handle this by comparing for exact times, *or* same
|
||||
// second count and either a or b having exactly 0 nanoseconds
|
||||
func sameFsTime(a, b time.Time) bool {
|
||||
return a.Equal(b) ||
|
||||
(a.Unix() == b.Unix() &&
|
||||
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
|
||||
}
|
||||
|
||||
func sameFsTimeSpec(a, b syscall.Timespec) bool {
|
||||
return a.Sec == b.Sec &&
|
||||
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
|
||||
}
|
||||
|
||||
// Changes walks the path rw and determines changes for the files in the path,
|
||||
// with respect to the parent layers
|
||||
func Changes(layers []string, rw string) ([]Change, error) {
|
||||
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
|
||||
}
|
||||
|
||||
func aufsMetadataSkip(path string) (skip bool, err error) {
|
||||
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
|
||||
if err != nil {
|
||||
skip = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
|
||||
f := filepath.Base(path)
|
||||
|
||||
// If there is a whiteout, then the file was removed
|
||||
if strings.HasPrefix(f, WhiteoutPrefix) {
|
||||
originalFile := f[len(WhiteoutPrefix):]
|
||||
return filepath.Join(filepath.Dir(path), originalFile), nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
type skipChange func(string) (bool, error)
|
||||
type deleteChange func(string, string, os.FileInfo) (string, error)
|
||||
|
||||
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
|
||||
var (
|
||||
changes []Change
|
||||
changedDirs = make(map[string]struct{})
|
||||
)
|
||||
|
||||
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
path, err = filepath.Rel(rw, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
path = filepath.Join(string(os.PathSeparator), path)
|
||||
|
||||
// Skip root
|
||||
if path == string(os.PathSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if sc != nil {
|
||||
if skip, err := sc(path); skip {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
change := Change{
|
||||
Path: path,
|
||||
}
|
||||
|
||||
deletedFile, err := dc(rw, path, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find out what kind of modification happened
|
||||
if deletedFile != "" {
|
||||
change.Path = deletedFile
|
||||
change.Kind = ChangeDelete
|
||||
} else {
|
||||
// Otherwise, the file was added
|
||||
change.Kind = ChangeAdd
|
||||
|
||||
// ...Unless it already existed in a top layer, in which case, it's a modification
|
||||
for _, layer := range layers {
|
||||
stat, err := os.Stat(filepath.Join(layer, path))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if err == nil {
|
||||
// The file existed in the top layer, so that's a modification
|
||||
|
||||
// However, if it's a directory, maybe it wasn't actually modified.
|
||||
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
|
||||
if stat.IsDir() && f.IsDir() {
|
||||
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
|
||||
// Both directories are the same, don't record the change
|
||||
return nil
|
||||
}
|
||||
}
|
||||
change.Kind = ChangeModify
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
|
||||
// This block is here to ensure the change is recorded even if the
|
||||
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
|
||||
// Check https://github.com/docker/docker/pull/13590 for details.
|
||||
if f.IsDir() {
|
||||
changedDirs[path] = struct{}{}
|
||||
}
|
||||
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
|
||||
parent := filepath.Dir(path)
|
||||
if _, ok := changedDirs[parent]; !ok && parent != "/" {
|
||||
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
|
||||
changedDirs[parent] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Record change
|
||||
changes = append(changes, change)
|
||||
return nil
|
||||
})
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// FileInfo describes the information of a file.
|
||||
type FileInfo struct {
|
||||
parent *FileInfo
|
||||
name string
|
||||
stat *system.StatT
|
||||
children map[string]*FileInfo
|
||||
capability []byte
|
||||
added bool
|
||||
}
|
||||
|
||||
// LookUp looks up the file information of a file.
|
||||
func (info *FileInfo) LookUp(path string) *FileInfo {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
parent := info
|
||||
if path == string(os.PathSeparator) {
|
||||
return info
|
||||
}
|
||||
|
||||
pathElements := strings.Split(path, string(os.PathSeparator))
|
||||
for _, elem := range pathElements {
|
||||
if elem != "" {
|
||||
child := parent.children[elem]
|
||||
if child == nil {
|
||||
return nil
|
||||
}
|
||||
parent = child
|
||||
}
|
||||
}
|
||||
return parent
|
||||
}
|
||||
|
||||
func (info *FileInfo) path() string {
|
||||
if info.parent == nil {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
return string(os.PathSeparator)
|
||||
}
|
||||
return filepath.Join(info.parent.path(), info.name)
|
||||
}
|
||||
|
||||
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||
|
||||
sizeAtEntry := len(*changes)
|
||||
|
||||
if oldInfo == nil {
|
||||
// add
|
||||
change := Change{
|
||||
Path: info.path(),
|
||||
Kind: ChangeAdd,
|
||||
}
|
||||
*changes = append(*changes, change)
|
||||
info.added = true
|
||||
}
|
||||
|
||||
// We make a copy so we can modify it to detect additions
|
||||
// also, we only recurse on the old dir if the new info is a directory
|
||||
// otherwise any previous delete/change is considered recursive
|
||||
oldChildren := make(map[string]*FileInfo)
|
||||
if oldInfo != nil && info.isDir() {
|
||||
for k, v := range oldInfo.children {
|
||||
oldChildren[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
for name, newChild := range info.children {
|
||||
oldChild := oldChildren[name]
|
||||
if oldChild != nil {
|
||||
// change?
|
||||
oldStat := oldChild.stat
|
||||
newStat := newChild.stat
|
||||
// Note: We can't compare inode or ctime or blocksize here, because these change
|
||||
// when copying a file into a container. However, that is not generally a problem
|
||||
// because any content change will change mtime, and any status change should
|
||||
// be visible when actually comparing the stat fields. The only time this
|
||||
// breaks down is if some code intentionally hides a change by setting
|
||||
// back mtime
|
||||
if statDifferent(oldStat, newStat) ||
|
||||
!bytes.Equal(oldChild.capability, newChild.capability) {
|
||||
change := Change{
|
||||
Path: newChild.path(),
|
||||
Kind: ChangeModify,
|
||||
}
|
||||
*changes = append(*changes, change)
|
||||
newChild.added = true
|
||||
}
|
||||
|
||||
// Remove from copy so we can detect deletions
|
||||
delete(oldChildren, name)
|
||||
}
|
||||
|
||||
newChild.addChanges(oldChild, changes)
|
||||
}
|
||||
for _, oldChild := range oldChildren {
|
||||
// delete
|
||||
change := Change{
|
||||
Path: oldChild.path(),
|
||||
Kind: ChangeDelete,
|
||||
}
|
||||
*changes = append(*changes, change)
|
||||
}
|
||||
|
||||
// If there were changes inside this directory, we need to add it, even if the directory
|
||||
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
|
||||
change := Change{
|
||||
Path: info.path(),
|
||||
Kind: ChangeModify,
|
||||
}
|
||||
// Let's insert the directory entry before the recently added entries located inside this dir
|
||||
*changes = append(*changes, change) // just to resize the slice, will be overwritten
|
||||
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
|
||||
(*changes)[sizeAtEntry] = change
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Changes add changes to file information.
|
||||
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
|
||||
var changes []Change
|
||||
|
||||
info.addChanges(oldInfo, &changes)
|
||||
|
||||
return changes
|
||||
}
|
||||
|
||||
func newRootFileInfo() *FileInfo {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
root := &FileInfo{
|
||||
name: string(os.PathSeparator),
|
||||
children: make(map[string]*FileInfo),
|
||||
}
|
||||
return root
|
||||
}
|
||||
|
||||
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
|
||||
// If oldDir is "", then all files in newDir will be Add-Changes.
|
||||
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
|
||||
var (
|
||||
oldRoot, newRoot *FileInfo
|
||||
)
|
||||
if oldDir == "" {
|
||||
emptyDir, err := ioutil.TempDir("", "empty")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.Remove(emptyDir)
|
||||
oldDir = emptyDir
|
||||
}
|
||||
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newRoot.Changes(oldRoot), nil
|
||||
}
|
||||
|
||||
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
|
||||
func ChangesSize(newDir string, changes []Change) int64 {
|
||||
var (
|
||||
size int64
|
||||
sf = make(map[uint64]struct{})
|
||||
)
|
||||
for _, change := range changes {
|
||||
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
|
||||
file := filepath.Join(newDir, change.Path)
|
||||
fileInfo, err := os.Lstat(file)
|
||||
if err != nil {
|
||||
logrus.Errorf("Can not stat %q: %s", file, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if fileInfo != nil && !fileInfo.IsDir() {
|
||||
if hasHardlinks(fileInfo) {
|
||||
inode := getIno(fileInfo)
|
||||
if _, ok := sf[inode]; !ok {
|
||||
size += fileInfo.Size()
|
||||
sf[inode] = struct{}{}
|
||||
}
|
||||
} else {
|
||||
size += fileInfo.Size()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
// ExportChanges produces an Archive from the provided changes, relative to dir.
|
||||
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
|
||||
reader, writer := io.Pipe()
|
||||
go func() {
|
||||
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
|
||||
|
||||
// this buffer is needed for the duration of this piped stream
|
||||
defer pools.BufioWriter32KPool.Put(ta.Buffer)
|
||||
|
||||
sort.Sort(changesByPath(changes))
|
||||
|
||||
// In general we log errors here but ignore them because
|
||||
// during e.g. a diff operation the container can continue
|
||||
// mutating the filesystem and we can see transient errors
|
||||
// from this
|
||||
for _, change := range changes {
|
||||
if change.Kind == ChangeDelete {
|
||||
whiteOutDir := filepath.Dir(change.Path)
|
||||
whiteOutBase := filepath.Base(change.Path)
|
||||
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
|
||||
timestamp := time.Now()
|
||||
hdr := &tar.Header{
|
||||
Name: whiteOut[1:],
|
||||
Size: 0,
|
||||
ModTime: timestamp,
|
||||
AccessTime: timestamp,
|
||||
ChangeTime: timestamp,
|
||||
}
|
||||
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
|
||||
logrus.Debugf("Can't write whiteout header: %s", err)
|
||||
}
|
||||
} else {
|
||||
path := filepath.Join(dir, change.Path)
|
||||
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
|
||||
logrus.Debugf("Can't add file %s to tar: %s", path, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure to check the error on Close.
|
||||
if err := ta.TarWriter.Close(); err != nil {
|
||||
logrus.Debugf("Can't close layer: %s", err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
logrus.Debugf("failed close Changes writer: %s", err)
|
||||
}
|
||||
}()
|
||||
return reader, nil
|
||||
}
|
||||
286  vendor/github.com/docker/docker/pkg/archive/changes_linux.go (generated, vendored, new file)
@@ -0,0 +1,286 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// walker is used to implement collectFileInfoForChanges on linux. Where this
|
||||
// method in general returns the entire contents of two directory trees, we
|
||||
// optimize some FS calls out on linux. In particular, we take advantage of the
|
||||
// fact that getdents(2) returns the inode of each file in the directory being
|
||||
// walked, which, when walking two trees in parallel to generate a list of
|
||||
// changes, can be used to prune subtrees without ever having to lstat(2) them
|
||||
// directly. Eliminating stat calls in this way can save up to seconds on large
|
||||
// images.
|
||||
type walker struct {
|
||||
dir1 string
|
||||
dir2 string
|
||||
root1 *FileInfo
|
||||
root2 *FileInfo
|
||||
}
|
||||
|
||||
// collectFileInfoForChanges returns a complete representation of the trees
|
||||
// rooted at dir1 and dir2, with one important exception: any subtree or
|
||||
// leaf where the inode and device numbers are an exact match between dir1
|
||||
// and dir2 will be pruned from the results. This method is *only* to be used
|
||||
// to generating a list of changes between the two directories, as it does not
|
||||
// reflect the full contents.
|
||||
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
|
||||
w := &walker{
|
||||
dir1: dir1,
|
||||
dir2: dir2,
|
||||
root1: newRootFileInfo(),
|
||||
root2: newRootFileInfo(),
|
||||
}
|
||||
|
||||
i1, err := os.Lstat(w.dir1)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
i2, err := os.Lstat(w.dir2)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if err := w.walk("/", i1, i2); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return w.root1, w.root2, nil
|
||||
}
|
||||
|
||||
// Given a FileInfo, its path info, and a reference to the root of the tree
|
||||
// being constructed, register this file with the tree.
|
||||
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
||||
if fi == nil {
|
||||
return nil
|
||||
}
|
||||
parent := root.LookUp(filepath.Dir(path))
|
||||
if parent == nil {
|
||||
return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
|
||||
}
|
||||
info := &FileInfo{
|
||||
name: filepath.Base(path),
|
||||
children: make(map[string]*FileInfo),
|
||||
parent: parent,
|
||||
}
|
||||
cpath := filepath.Join(dir, path)
|
||||
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info.stat = stat
|
||||
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
|
||||
parent.children[info.name] = info
|
||||
return nil
|
||||
}
|
||||
|
||||
// Walk a subtree rooted at the same path in both trees being iterated. For
|
||||
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
|
||||
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
|
||||
// Register these nodes with the return trees, unless we're still at the
|
||||
// (already-created) roots:
|
||||
if path != "/" {
|
||||
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
is1Dir := i1 != nil && i1.IsDir()
|
||||
is2Dir := i2 != nil && i2.IsDir()
|
||||
|
||||
sameDevice := false
|
||||
if i1 != nil && i2 != nil {
|
||||
si1 := i1.Sys().(*syscall.Stat_t)
|
||||
si2 := i2.Sys().(*syscall.Stat_t)
|
||||
if si1.Dev == si2.Dev {
|
||||
sameDevice = true
|
||||
}
|
||||
}
|
||||
|
||||
// If these files are both non-existent, or leaves (non-dirs), we are done.
|
||||
if !is1Dir && !is2Dir {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fetch the names of all the files contained in both directories being walked:
|
||||
var names1, names2 []nameIno
|
||||
if is1Dir {
|
||||
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if is2Dir {
|
||||
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// We have lists of the files contained in both parallel directories, sorted
|
||||
// in the same order. Walk them in parallel, generating a unique merged list
|
||||
// of all items present in either or both directories.
|
||||
var names []string
|
||||
ix1 := 0
|
||||
ix2 := 0
|
||||
|
||||
for {
|
||||
if ix1 >= len(names1) {
|
||||
break
|
||||
}
|
||||
if ix2 >= len(names2) {
|
||||
break
|
||||
}
|
||||
|
||||
ni1 := names1[ix1]
|
||||
ni2 := names2[ix2]
|
||||
|
||||
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
|
||||
case -1: // ni1 < ni2 -- advance ni1
|
||||
// we will not encounter ni1 in names2
|
||||
names = append(names, ni1.name)
|
||||
ix1++
|
||||
case 0: // ni1 == ni2
|
||||
if ni1.ino != ni2.ino || !sameDevice {
|
||||
names = append(names, ni1.name)
|
||||
}
|
||||
ix1++
|
||||
ix2++
|
||||
case 1: // ni1 > ni2 -- advance ni2
|
||||
// we will not encounter ni2 in names1
|
||||
names = append(names, ni2.name)
|
||||
ix2++
|
||||
}
|
||||
}
|
||||
for ix1 < len(names1) {
|
||||
names = append(names, names1[ix1].name)
|
||||
ix1++
|
||||
}
|
||||
for ix2 < len(names2) {
|
||||
names = append(names, names2[ix2].name)
|
||||
ix2++
|
||||
}
|
||||
|
||||
// For each of the names present in either or both of the directories being
|
||||
// iterated, stat the name under each root, and recurse the pair of them:
|
||||
for _, name := range names {
|
||||
fname := filepath.Join(path, name)
|
||||
var cInfo1, cInfo2 os.FileInfo
|
||||
if is1Dir {
|
||||
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if is2Dir {
|
||||
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// {name,inode} pairs used to support the early-pruning logic of the walker type
|
||||
type nameIno struct {
|
||||
name string
|
||||
ino uint64
|
||||
}
|
||||
|
||||
type nameInoSlice []nameIno
|
||||
|
||||
func (s nameInoSlice) Len() int { return len(s) }
|
||||
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
|
||||
|
||||
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
|
||||
// numbers further up the stack when reading directory contents. Unlike
|
||||
// os.Readdirnames, which returns a list of filenames, this function returns a
|
||||
// list of {filename,inode} pairs.
|
||||
func readdirnames(dirname string) (names []nameIno, err error) {
|
||||
var (
|
||||
size = 100
|
||||
buf = make([]byte, 4096)
|
||||
nbuf int
|
||||
bufp int
|
||||
nb int
|
||||
)
|
||||
|
||||
f, err := os.Open(dirname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
names = make([]nameIno, 0, size) // Empty with room to grow.
|
||||
for {
|
||||
// Refill the buffer if necessary
|
||||
if bufp >= nbuf {
|
||||
bufp = 0
|
||||
nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
|
||||
if nbuf < 0 {
|
||||
nbuf = 0
|
||||
}
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("readdirent", err)
|
||||
}
|
||||
if nbuf <= 0 {
|
||||
break // EOF
|
||||
}
|
||||
}
|
||||
|
||||
// Drain the buffer
|
||||
nb, names = parseDirent(buf[bufp:nbuf], names)
|
||||
bufp += nb
|
||||
}
|
||||
|
||||
sl := nameInoSlice(names)
|
||||
sort.Sort(sl)
|
||||
return sl, nil
|
||||
}
|
||||
|
||||
// parseDirent is a minor modification of unix.ParseDirent (linux version)
|
||||
// which returns {name,inode} pairs instead of just names.
|
||||
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
|
||||
origlen := len(buf)
|
||||
for len(buf) > 0 {
|
||||
dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
|
||||
buf = buf[dirent.Reclen:]
|
||||
if dirent.Ino == 0 { // File absent in directory.
|
||||
continue
|
||||
}
|
||||
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
|
||||
var name = string(bytes[0:clen(bytes[:])])
|
||||
if name == "." || name == ".." { // Useless names
|
||||
continue
|
||||
}
|
||||
names = append(names, nameIno{name, dirent.Ino})
|
||||
}
|
||||
return origlen - len(buf), names
|
||||
}
|
||||
|
||||
func clen(n []byte) int {
|
||||
for i := 0; i < len(n); i++ {
|
||||
if n[i] == 0 {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(n)
|
||||
}
|
||||
97  vendor/github.com/docker/docker/pkg/archive/changes_other.go (generated, vendored, new file)
@@ -0,0 +1,97 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

import (
    "fmt"
    "os"
    "path/filepath"
    "runtime"
    "strings"

    "github.com/docker/docker/pkg/system"
)

func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
    var (
        oldRoot, newRoot *FileInfo
        err1, err2       error
        errs             = make(chan error, 2)
    )
    go func() {
        oldRoot, err1 = collectFileInfo(oldDir)
        errs <- err1
    }()
    go func() {
        newRoot, err2 = collectFileInfo(newDir)
        errs <- err2
    }()

    // block until both routines have returned
    for i := 0; i < 2; i++ {
        if err := <-errs; err != nil {
            return nil, nil, err
        }
    }

    return oldRoot, newRoot, nil
}

func collectFileInfo(sourceDir string) (*FileInfo, error) {
    root := newRootFileInfo()

    err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
        if err != nil {
            return err
        }

        // Rebase path
        relPath, err := filepath.Rel(sourceDir, path)
        if err != nil {
            return err
        }

        // As this runs on the daemon side, file paths are OS specific.
        relPath = filepath.Join(string(os.PathSeparator), relPath)

        // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
        // Temporary workaround. If the returned path starts with two backslashes,
        // trim it down to a single backslash. Only relevant on Windows.
        if runtime.GOOS == "windows" {
            if strings.HasPrefix(relPath, `\\`) {
                relPath = relPath[1:]
            }
        }

        if relPath == string(os.PathSeparator) {
            return nil
        }

        parent := root.LookUp(filepath.Dir(relPath))
        if parent == nil {
            return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
        }

        info := &FileInfo{
            name:     filepath.Base(relPath),
            children: make(map[string]*FileInfo),
            parent:   parent,
        }

        s, err := system.Lstat(path)
        if err != nil {
            return err
        }
        info.stat = s

        info.capability, _ = system.Lgetxattr(path, "security.capability")

        parent.children[info.name] = info

        return nil
    })
    if err != nil {
        return nil, err
    }
    return root, nil
}
43  vendor/github.com/docker/docker/pkg/archive/changes_unix.go (generated, vendored, new file)
@@ -0,0 +1,43 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
    "os"
    "syscall"

    "github.com/docker/docker/pkg/system"
    "golang.org/x/sys/unix"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
    // Don't look at size for dirs, its not a good measure of change
    if oldStat.Mode() != newStat.Mode() ||
        oldStat.UID() != newStat.UID() ||
        oldStat.GID() != newStat.GID() ||
        oldStat.Rdev() != newStat.Rdev() ||
        // Don't look at size or modification time for dirs, its not a good
        // measure of change. See https://github.com/moby/moby/issues/9874
        // for a description of the issue with modification time, and
        // https://github.com/moby/moby/pull/11422 for the change.
        // (Note that in the Windows implementation of this function,
        // modification time IS taken as a change). See
        // https://github.com/moby/moby/pull/37982 for more information.
        (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
            (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
        return true
    }
    return false
}

func (info *FileInfo) isDir() bool {
    return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
}

func getIno(fi os.FileInfo) uint64 {
    return fi.Sys().(*syscall.Stat_t).Ino
}

func hasHardlinks(fi os.FileInfo) bool {
    return fi.Sys().(*syscall.Stat_t).Nlink > 1
}
34  vendor/github.com/docker/docker/pkg/archive/changes_windows.go (generated, vendored, new file)
@@ -0,0 +1,34 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "os"

    "github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
    // Note there is slight difference between the Linux and Windows
    // implementations here. Due to https://github.com/moby/moby/issues/9874,
    // and the fix at https://github.com/moby/moby/pull/11422, Linux does not
    // consider a change to the directory time as a change. Windows on NTFS
    // does. See https://github.com/moby/moby/pull/37982 for more information.

    if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
        oldStat.Mode() != newStat.Mode() ||
        oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
        return true
    }
    return false
}

func (info *FileInfo) isDir() bool {
    return info.parent == nil || info.stat.Mode().IsDir()
}

func getIno(fi os.FileInfo) (inode uint64) {
    return
}

func hasHardlinks(fi os.FileInfo) bool {
    return false
}
480  vendor/github.com/docker/docker/pkg/archive/copy.go (generated, vendored, new file)
@@ -0,0 +1,480 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Errors used or returned by this file.
|
||||
var (
|
||||
ErrNotDirectory = errors.New("not a directory")
|
||||
ErrDirNotExists = errors.New("no such directory")
|
||||
ErrCannotCopyDir = errors.New("cannot copy directory")
|
||||
ErrInvalidCopySource = errors.New("invalid copy source content")
|
||||
)
|
||||
|
||||
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
|
||||
// processing using any utility functions from the path or filepath stdlib
|
||||
// packages) and appends a trailing `/.` or `/` if its corresponding original
|
||||
// path (from before being processed by utility functions from the path or
|
||||
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
|
||||
// path already ends in a `.` path segment, then another is not added. If the
|
||||
// clean path already ends in the separator, then another is not added.
|
||||
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
|
||||
// Ensure paths are in platform semantics
|
||||
cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
|
||||
originalPath = strings.Replace(originalPath, "/", string(sep), -1)
|
||||
|
||||
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
|
||||
if !hasTrailingPathSeparator(cleanedPath, sep) {
|
||||
// Add a separator if it doesn't already end with one (a cleaned
|
||||
// path would only end in a separator if it is the root).
|
||||
cleanedPath += string(sep)
|
||||
}
|
||||
cleanedPath += "."
|
||||
}
|
||||
|
||||
if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
|
||||
cleanedPath += string(sep)
|
||||
}
|
||||
|
||||
return cleanedPath
|
||||
}
|
||||
|
||||
// assertsDirectory returns whether the given path is
|
||||
// asserted to be a directory, i.e., the path ends with
|
||||
// a trailing '/' or `/.`, assuming a path separator of `/`.
|
||||
func assertsDirectory(path string, sep byte) bool {
|
||||
return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
|
||||
}
|
||||
|
||||
// hasTrailingPathSeparator returns whether the given
|
||||
// path ends with the system's path separator character.
|
||||
func hasTrailingPathSeparator(path string, sep byte) bool {
|
||||
return len(path) > 0 && path[len(path)-1] == sep
|
||||
}
|
||||
|
||||
// specifiesCurrentDir returns whether the given path specifies
|
||||
// a "current directory", i.e., the last path segment is `.`.
|
||||
func specifiesCurrentDir(path string) bool {
|
||||
return filepath.Base(path) == "."
|
||||
}
|
||||
|
||||
// SplitPathDirEntry splits the given path between its directory name and its
|
||||
// basename by first cleaning the path but preserves a trailing "." if the
|
||||
// original path specified the current directory.
|
||||
func SplitPathDirEntry(path string) (dir, base string) {
|
||||
cleanedPath := filepath.Clean(filepath.FromSlash(path))
|
||||
|
||||
if specifiesCurrentDir(path) {
|
||||
cleanedPath += string(os.PathSeparator) + "."
|
||||
}
|
||||
|
||||
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
|
||||
}
|
||||
|
||||
// TarResource archives the resource described by the given CopyInfo to a Tar
|
||||
// archive. A non-nil error is returned if sourcePath does not exist or is
|
||||
// asserted to be a directory but exists as another type of file.
|
||||
//
|
||||
// This function acts as a convenient wrapper around TarWithOptions, which
|
||||
// requires a directory as the source path. TarResource accepts either a
|
||||
// directory or a file path and correctly sets the Tar options.
|
||||
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
|
||||
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
|
||||
}
|
||||
|
||||
// TarResourceRebase is like TarResource but renames the first path element of
|
||||
// items in the resulting tar archive to match the given rebaseName if not "".
|
||||
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
|
||||
sourcePath = normalizePath(sourcePath)
|
||||
if _, err = os.Lstat(sourcePath); err != nil {
|
||||
// Catches the case where the source does not exist or is not a
|
||||
// directory if asserted to be a directory, as this also causes an
|
||||
// error.
|
||||
return
|
||||
}
|
||||
|
||||
// Separate the source path between its directory and
|
||||
// the entry in that directory which we are archiving.
|
||||
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
||||
opts := TarResourceRebaseOpts(sourceBase, rebaseName)
|
||||
|
||||
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
|
||||
return TarWithOptions(sourceDir, opts)
|
||||
}
|
||||
|
||||
// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase
|
||||
// parameters to be sent to TarWithOptions (the TarOptions struct)
|
||||
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
|
||||
filter := []string{sourceBase}
|
||||
return &TarOptions{
|
||||
Compression: Uncompressed,
|
||||
IncludeFiles: filter,
|
||||
IncludeSourceDir: true,
|
||||
RebaseNames: map[string]string{
|
||||
sourceBase: rebaseName,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CopyInfo holds basic info about the source
|
||||
// or destination path of a copy operation.
|
||||
type CopyInfo struct {
|
||||
Path string
|
||||
Exists bool
|
||||
IsDir bool
|
||||
RebaseName string
|
||||
}
|
||||
|
||||
// CopyInfoSourcePath stats the given path to create a CopyInfo
|
||||
// struct representing that resource for the source of an archive copy
|
||||
// operation. The given path should be an absolute local path. A source path
|
||||
// has all symlinks evaluated that appear before the last path separator ("/"
|
||||
// on Unix). As it is to be a copy source, the path must exist.
|
||||
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
|
||||
// normalize the file path and then evaluate the symbol link
|
||||
// we will use the target file instead of the symbol link if
|
||||
// followLink is set
|
||||
path = normalizePath(path)
|
||||
|
||||
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
stat, err := os.Lstat(resolvedPath)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
return CopyInfo{
|
||||
Path: resolvedPath,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
RebaseName: rebaseName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CopyInfoDestinationPath stats the given path to create a CopyInfo
|
||||
// struct representing that resource for the destination of an archive copy
|
||||
// operation. The given path should be an absolute local path.
|
||||
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
|
||||
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
|
||||
path = normalizePath(path)
|
||||
originalPath := path
|
||||
|
||||
stat, err := os.Lstat(path)
|
||||
|
||||
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
|
||||
// The path exists and is not a symlink.
|
||||
return CopyInfo{
|
||||
Path: path,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// While the path is a symlink.
|
||||
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
|
||||
if n > maxSymlinkIter {
|
||||
// Don't follow symlinks more than this arbitrary number of times.
|
||||
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
|
||||
}
|
||||
|
||||
// The path is a symbolic link. We need to evaluate it so that the
|
||||
// destination of the copy operation is the link target and not the
|
||||
// link itself. This is notably different than CopyInfoSourcePath which
|
||||
// only evaluates symlinks before the last appearing path separator.
|
||||
// Also note that it is okay if the last path element is a broken
|
||||
// symlink as the copy operation should create the target.
|
||||
var linkTarget string
|
||||
|
||||
linkTarget, err = os.Readlink(path)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
if !system.IsAbs(linkTarget) {
|
||||
// Join with the parent directory.
|
||||
dstParent, _ := SplitPathDirEntry(path)
|
||||
linkTarget = filepath.Join(dstParent, linkTarget)
|
||||
}
|
||||
|
||||
path = linkTarget
|
||||
stat, err = os.Lstat(path)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// It's okay if the destination path doesn't exist. We can still
|
||||
// continue the copy operation if the parent directory exists.
|
||||
if !os.IsNotExist(err) {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
// Ensure destination parent dir exists.
|
||||
dstParent, _ := SplitPathDirEntry(path)
|
||||
|
||||
parentDirStat, err := os.Stat(dstParent)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
if !parentDirStat.IsDir() {
|
||||
return CopyInfo{}, ErrNotDirectory
|
||||
}
|
||||
|
||||
return CopyInfo{Path: path}, nil
|
||||
}
|
||||
|
||||
// The path exists after resolving symlinks.
|
||||
return CopyInfo{
|
||||
Path: path,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PrepareArchiveCopy prepares the given srcContent archive, which should
|
||||
// contain the archived resource described by srcInfo, to the destination
|
||||
// described by dstInfo. Returns the possibly modified content archive along
|
||||
// with the path to the destination directory which it should be extracted to.
|
||||
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
|
||||
// Ensure in platform semantics
|
||||
srcInfo.Path = normalizePath(srcInfo.Path)
|
||||
dstInfo.Path = normalizePath(dstInfo.Path)
|
||||
|
||||
// Separate the destination path between its directory and base
|
||||
// components in case the source archive contents need to be rebased.
|
||||
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
|
||||
_, srcBase := SplitPathDirEntry(srcInfo.Path)
|
||||
|
||||
switch {
|
||||
case dstInfo.Exists && dstInfo.IsDir:
|
||||
// The destination exists as a directory. No alteration
|
||||
// to srcContent is needed as its contents can be
|
||||
// simply extracted to the destination directory.
|
||||
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
|
||||
case dstInfo.Exists && srcInfo.IsDir:
|
||||
// The destination exists as some type of file and the source
|
||||
// content is a directory. This is an error condition since
|
||||
// you cannot copy a directory to an existing file location.
|
||||
return "", nil, ErrCannotCopyDir
|
||||
case dstInfo.Exists:
|
||||
// The destination exists as some type of file and the source content
|
||||
// is also a file. The source content entry will have to be renamed to
|
||||
// have a basename which matches the destination path's basename.
|
||||
if len(srcInfo.RebaseName) != 0 {
|
||||
srcBase = srcInfo.RebaseName
|
||||
}
|
||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
case srcInfo.IsDir:
|
||||
// The destination does not exist and the source content is an archive
|
||||
// of a directory. The archive should be extracted to the parent of
|
||||
// the destination path instead, and when it is, the directory that is
|
||||
// created as a result should take the name of the destination path.
|
||||
// The source content entries will have to be renamed to have a
|
||||
// basename which matches the destination path's basename.
|
||||
if len(srcInfo.RebaseName) != 0 {
|
||||
srcBase = srcInfo.RebaseName
|
||||
}
|
||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
case assertsDirectory(dstInfo.Path, os.PathSeparator):
|
||||
// The destination does not exist and is asserted to be created as a
|
||||
// directory, but the source content is not a directory. This is an
|
||||
// error condition since you cannot create a directory from a file
|
||||
// source.
|
||||
return "", nil, ErrDirNotExists
|
||||
default:
|
||||
// The last remaining case is when the destination does not exist, is
|
||||
// not asserted to be a directory, and the source content is not an
|
||||
// archive of a directory. It this case, the destination file will need
|
||||
// to be created when the archive is extracted and the source content
|
||||
// entry will have to be renamed to have a basename which matches the
|
||||
// destination path's basename.
|
||||
if len(srcInfo.RebaseName) != 0 {
|
||||
srcBase = srcInfo.RebaseName
|
||||
}
|
||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// RebaseArchiveEntries rewrites the given srcContent archive replacing
|
||||
// an occurrence of oldBase with newBase at the beginning of entry names.
|
||||
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
|
||||
if oldBase == string(os.PathSeparator) {
|
||||
// If oldBase specifies the root directory, use an empty string as
|
||||
// oldBase instead so that newBase doesn't replace the path separator
|
||||
// that all paths will start with.
|
||||
oldBase = ""
|
||||
}
|
||||
|
||||
rebased, w := io.Pipe()
|
||||
|
||||
go func() {
|
||||
srcTar := tar.NewReader(srcContent)
|
||||
rebasedTar := tar.NewWriter(w)
|
||||
|
||||
for {
|
||||
hdr, err := srcTar.Next()
|
||||
if err == io.EOF {
|
||||
// Signals end of archive.
|
||||
rebasedTar.Close()
|
||||
w.Close()
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
|
||||
// srcContent tar stream, as served by TarWithOptions(), is
|
||||
// definitely in PAX format, but tar.Next() mistakenly guesses it
|
||||
// as USTAR, which creates a problem: if the newBase is >100
|
||||
// characters long, WriteHeader() returns an error like
|
||||
// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
|
||||
//
|
||||
// To fix, set the format to PAX here. See docker/for-linux issue #484.
|
||||
hdr.Format = tar.FormatPAX
|
||||
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
|
||||
if hdr.Typeflag == tar.TypeLink {
|
||||
hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
|
||||
}
|
||||
|
||||
if err = rebasedTar.WriteHeader(hdr); err != nil {
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return rebased
|
||||
}
|
||||
|
||||
// TODO @gupta-ak. These might have to be changed in the future to be
|
||||
// continuity driver aware as well to support LCOW.
|
||||
|
||||
// CopyResource performs an archive copy from the given source path to the
|
||||
// given destination path. The source path MUST exist and the destination
|
||||
// path's parent directory must exist.
|
||||
func CopyResource(srcPath, dstPath string, followLink bool) error {
|
||||
var (
|
||||
srcInfo CopyInfo
|
||||
err error
|
||||
)
|
||||
|
||||
// Ensure in platform semantics
|
||||
srcPath = normalizePath(srcPath)
|
||||
dstPath = normalizePath(dstPath)
|
||||
|
||||
// Clean the source and destination paths.
|
||||
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
|
||||
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
|
||||
|
||||
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
content, err := TarResource(srcInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
return CopyTo(content, srcInfo, dstPath)
|
||||
}
|
||||
|
||||
// CopyTo handles extracting the given content whose
|
||||
// entries should be sourced from srcInfo to dstPath.
|
||||
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
|
||||
// The destination path need not exist, but CopyInfoDestinationPath will
|
||||
// ensure that at least the parent directory exists.
|
||||
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer copyArchive.Close()
|
||||
|
||||
options := &TarOptions{
|
||||
NoLchown: true,
|
||||
NoOverwriteDirNonDir: true,
|
||||
}
|
||||
|
||||
return Untar(copyArchive, dstDir, options)
|
||||
}
|
||||
|
||||
// ResolveHostSourcePath decides real path need to be copied with parameters such as
|
||||
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
|
||||
// link target of any symbol link file, else it will only resolve symlink of directory
|
||||
// but return symbol link file itself without resolving.
|
||||
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
|
||||
if followLink {
|
||||
resolvedPath, err = filepath.EvalSymlinks(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
|
||||
} else {
|
||||
dirPath, basePath := filepath.Split(path)
|
||||
|
||||
// if not follow symbol link, then resolve symbol link of parent dir
|
||||
var resolvedDirPath string
|
||||
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// resolvedDirPath will have been cleaned (no trailing path separators) so
|
||||
// we can manually join it with the base path element.
|
||||
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
|
||||
if hasTrailingPathSeparator(path, os.PathSeparator) &&
|
||||
filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||
rebaseName = filepath.Base(path)
|
||||
}
|
||||
}
|
||||
return resolvedPath, rebaseName, nil
|
||||
}
|
||||
|
||||
// GetRebaseName normalizes and compares path and resolvedPath,
|
||||
// return completed resolved path and rebased file name
|
||||
func GetRebaseName(path, resolvedPath string) (string, string) {
|
||||
// linkTarget will have been cleaned (no trailing path separators and dot) so
|
||||
// we can manually join it with them
|
||||
var rebaseName string
|
||||
if specifiesCurrentDir(path) &&
|
||||
!specifiesCurrentDir(resolvedPath) {
|
||||
resolvedPath += string(filepath.Separator) + "."
|
||||
}
|
||||
|
||||
if hasTrailingPathSeparator(path, os.PathSeparator) &&
|
||||
!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
|
||||
resolvedPath += string(filepath.Separator)
|
||||
}
|
||||
|
||||
if filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||
// In the case where the path had a trailing separator and a symlink
|
||||
// evaluation has changed the last path component, we will need to
|
||||
// rebase the name in the archive that is being copied to match the
|
||||
// originally requested name.
|
||||
rebaseName = filepath.Base(path)
|
||||
}
|
||||
return resolvedPath, rebaseName
|
||||
}
|
||||
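Aside (not part of the vendored file): a minimal usage sketch of the CopyResource helper defined above; the source and destination paths are hypothetical.

// Illustrative only: copies /tmp/src into /tmp/dst, following symlinks on the source side.
package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	if err := archive.CopyResource("/tmp/src", "/tmp/dst", true); err != nil {
		log.Fatal(err)
	}
}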
vendor/github.com/docker/docker/pkg/archive/copy_unix.go (generated, vendored, normal file, 11 lines added)
@@ -0,0 +1,11 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.ToSlash(path)
}
vendor/github.com/docker/docker/pkg/archive/copy_windows.go (generated, vendored, normal file, 9 lines added)
@@ -0,0 +1,9 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.FromSlash(path)
}
vendor/github.com/docker/docker/pkg/archive/diff.go (generated, vendored, normal file, 260 lines added)
@@ -0,0 +1,260 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	unpackedPaths := make(map[string]struct{})

	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)

	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return 0, err
		}

		size += hdr.Size

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		// Windows does not support filenames with colons in them. Ignore
		// these files. This is not a problem though (although it might
		// appear that it is). Let's suppose a client is running docker pull.
		// The daemon it points to is Windows. Would it make sense for the
		// client to be doing a docker pull Ubuntu for example (which has files
		// with colons in the name under /usr/share/man/man3)? No, absolutely
		// not as it would really only make sense that they were pulling a
		// Windows image. However, for development, it is necessary to be able
		// to pull Linux images which are in the repository.
		//
		// TODO Windows. Once the registry is aware of what images are Windows-
		// specific or Linux-specific, this warning should be changed to an error
		// to cater for the situation where someone does manage to upload a Linux
		// image but have it tagged as Windows inadvertently.
		if runtime.GOOS == "windows" {
			if strings.Contains(hdr.Name, ":") {
				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
				continue
			}
		}

		// Note as these operations are platform specific, so must the slash be.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)

			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0600, "")
				if err != nil {
					return 0, err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
			// We don't want this directory, but we need the files in them so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
					return 0, err
				}
			}

			if hdr.Name != WhiteoutOpaqueDir {
				continue
			}
		}
		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return 0, err
		}

		// Note as these operations are platform specific, so must the slash be.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}
		base := filepath.Base(path)

		if strings.HasPrefix(base, WhiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == WhiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
			} else {
				originalBase := base[len(WhiteoutPrefix):]
				originalPath := filepath.Join(dir, originalBase)
				if err := os.RemoveAll(originalPath); err != nil {
					return 0, err
				}
			}
		} else {
			// If path exits we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return 0, err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return 0, fmt.Errorf("Invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return 0, err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			if err := remapIDs(idMapping, srcHdr); err != nil {
				return 0, err
			}

			if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
				return 0, err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation in them to modify the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
			unpackedPaths[path] = struct{}{}
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return 0, err
		}
	}

	return size, nil
}

// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
	return applyLayerHandler(dest, layer, &TarOptions{}, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
	return applyLayerHandler(dest, layer, options, false)
}

// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
	dest = filepath.Clean(dest)

	// We need to be able to set any perms
	if runtime.GOOS != "windows" {
		oldmask, err := system.Umask(0)
		if err != nil {
			return 0, err
		}
		defer system.Umask(oldmask)
	}

	if decompress {
		decompLayer, err := DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompLayer.Close()
		layer = decompLayer
	}
	return UnpackLayer(dest, layer, options)
}
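Aside (not part of the vendored file): a minimal sketch of applying a layer diff with the ApplyLayer function defined above; the layer archive and rootfs paths are hypothetical.

// Illustrative only: unpacks a (possibly compressed) layer tar into a directory.
package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	layer, err := os.Open("/tmp/layer.tar.gz") // hypothetical layer archive
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	size, err := archive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("unpacked %d bytes", size)
}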
vendor/github.com/docker/docker/pkg/archive/time_linux.go (generated, vendored, normal file, 16 lines added)
@@ -0,0 +1,16 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	if time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = (1 << 30) - 2
		return
	}
	return syscall.NsecToTimespec(time.UnixNano())
}
vendor/github.com/docker/docker/pkg/archive/time_unsupported.go (generated, vendored, normal file, 16 lines added)
@@ -0,0 +1,16 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	nsec := int64(0)
	if !time.IsZero() {
		nsec = time.UnixNano()
	}
	return syscall.NsecToTimespec(nsec)
}
vendor/github.com/docker/docker/pkg/archive/whiteouts.go (generated, vendored, normal file, 23 lines added)
@@ -0,0 +1,23 @@
package archive // import "github.com/docker/docker/pkg/archive"

// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.

// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."

// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
vendor/github.com/docker/docker/pkg/archive/wrap.go (generated, vendored, normal file, 59 lines added)
@@ -0,0 +1,59 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"bytes"
	"io"
)

// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
//  * ./foo.txt with content "hello world"
//  * ./empty with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for i := 0; i < len(input); i += 2 {
		var pair [2]string
		pair[0] = input[i]
		if i+1 < len(input) {
			pair[1] = input[i+1]
		}
		output = append(output, pair)
	}
	return
}
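Aside (not part of the vendored file): a minimal sketch of the Generate helper documented above, using the same path/content pairs as its doc comment.

// Illustrative only: builds a small in-memory tar stream and dumps it to stdout.
package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	// The returned io.Reader yields the raw tar bytes.
	if _, err := io.Copy(os.Stdout, rdr); err != nil {
		log.Fatal(err)
	}
}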
vendor/github.com/docker/docker/pkg/fileutils/fileutils.go (generated, vendored, normal file, 298 lines added)
@@ -0,0 +1,298 @@
package fileutils // import "github.com/docker/docker/pkg/fileutils"

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"text/scanner"

	"github.com/sirupsen/logrus"
)

// PatternMatcher allows checking paths against a list of patterns
type PatternMatcher struct {
	patterns   []*Pattern
	exclusions bool
}

// NewPatternMatcher creates a new matcher object for specific patterns that can
// be used later to match against patterns against paths
func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
	pm := &PatternMatcher{
		patterns: make([]*Pattern, 0, len(patterns)),
	}
	for _, p := range patterns {
		// Eliminate leading and trailing whitespace.
		p = strings.TrimSpace(p)
		if p == "" {
			continue
		}
		p = filepath.Clean(p)
		newp := &Pattern{}
		if p[0] == '!' {
			if len(p) == 1 {
				return nil, errors.New("illegal exclusion pattern: \"!\"")
			}
			newp.exclusion = true
			p = p[1:]
			pm.exclusions = true
		}
		// Do some syntax checking on the pattern.
		// filepath's Match() has some really weird rules that are inconsistent
		// so instead of trying to dup their logic, just call Match() for its
		// error state and if there is an error in the pattern return it.
		// If this becomes an issue we can remove this since its really only
		// needed in the error (syntax) case - which isn't really critical.
		if _, err := filepath.Match(p, "."); err != nil {
			return nil, err
		}
		newp.cleanedPattern = p
		newp.dirs = strings.Split(p, string(os.PathSeparator))
		pm.patterns = append(pm.patterns, newp)
	}
	return pm, nil
}

// Matches matches path against all the patterns. Matches is not safe to be
// called concurrently
func (pm *PatternMatcher) Matches(file string) (bool, error) {
	matched := false
	file = filepath.FromSlash(file)
	parentPath := filepath.Dir(file)
	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))

	for _, pattern := range pm.patterns {
		negative := false

		if pattern.exclusion {
			negative = true
		}

		match, err := pattern.match(file)
		if err != nil {
			return false, err
		}

		if !match && parentPath != "." {
			// Check to see if the pattern matches one of our parent dirs.
			if len(pattern.dirs) <= len(parentPathDirs) {
				match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
			}
		}

		if match {
			matched = !negative
		}
	}

	if matched {
		logrus.Debugf("Skipping excluded path: %s", file)
	}

	return matched, nil
}

// Exclusions returns true if any of the patterns define exclusions
func (pm *PatternMatcher) Exclusions() bool {
	return pm.exclusions
}

// Patterns returns array of active patterns
func (pm *PatternMatcher) Patterns() []*Pattern {
	return pm.patterns
}

// Pattern defines a single regexp used to filter file paths.
type Pattern struct {
	cleanedPattern string
	dirs           []string
	regexp         *regexp.Regexp
	exclusion      bool
}

func (p *Pattern) String() string {
	return p.cleanedPattern
}

// Exclusion returns true if this pattern defines exclusion
func (p *Pattern) Exclusion() bool {
	return p.exclusion
}

func (p *Pattern) match(path string) (bool, error) {

	if p.regexp == nil {
		if err := p.compile(); err != nil {
			return false, filepath.ErrBadPattern
		}
	}

	b := p.regexp.MatchString(path)

	return b, nil
}

func (p *Pattern) compile() error {
	regStr := "^"
	pattern := p.cleanedPattern
	// Go through the pattern and convert it to a regexp.
	// We use a scanner so we can support utf-8 chars.
	var scan scanner.Scanner
	scan.Init(strings.NewReader(pattern))

	sl := string(os.PathSeparator)
	escSL := sl
	if sl == `\` {
		escSL += `\`
	}

	for scan.Peek() != scanner.EOF {
		ch := scan.Next()

		if ch == '*' {
			if scan.Peek() == '*' {
				// is some flavor of "**"
				scan.Next()

				// Treat **/ as ** so eat the "/"
				if string(scan.Peek()) == sl {
					scan.Next()
				}

				if scan.Peek() == scanner.EOF {
					// is "**EOF" - to align with .gitignore just accept all
					regStr += ".*"
				} else {
					// is "**"
					// Note that this allows for any # of /'s (even 0) because
					// the .* will eat everything, even /'s
					regStr += "(.*" + escSL + ")?"
				}
			} else {
				// is "*" so map it to anything but "/"
				regStr += "[^" + escSL + "]*"
			}
		} else if ch == '?' {
			// "?" is any char except "/"
			regStr += "[^" + escSL + "]"
		} else if ch == '.' || ch == '$' {
			// Escape some regexp special chars that have no meaning
			// in golang's filepath.Match
			regStr += `\` + string(ch)
		} else if ch == '\\' {
			// escape next char. Note that a trailing \ in the pattern
			// will be left alone (but need to escape it)
			if sl == `\` {
				// On windows map "\" to "\\", meaning an escaped backslash,
				// and then just continue because filepath.Match on
				// Windows doesn't allow escaping at all
				regStr += escSL
				continue
			}
			if scan.Peek() != scanner.EOF {
				regStr += `\` + string(scan.Next())
			} else {
				regStr += `\`
			}
		} else {
			regStr += string(ch)
		}
	}

	regStr += "$"

	re, err := regexp.Compile(regStr)
	if err != nil {
		return err
	}

	p.regexp = re
	return nil
}

// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
	pm, err := NewPatternMatcher(patterns)
	if err != nil {
		return false, err
	}
	file = filepath.Clean(file)

	if file == "." {
		// Don't let them exclude everything, kind of silly.
		return false, nil
	}

	return pm.Matches(file)
}

// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
	cleanSrc := filepath.Clean(src)
	cleanDst := filepath.Clean(dst)
	if cleanSrc == cleanDst {
		return 0, nil
	}
	sf, err := os.Open(cleanSrc)
	if err != nil {
		return 0, err
	}
	defer sf.Close()
	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
		return 0, err
	}
	df, err := os.Create(cleanDst)
	if err != nil {
		return 0, err
	}
	defer df.Close()
	return io.Copy(df, sf)
}

// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link may not be a file.
func ReadSymlinkedDirectory(path string) (string, error) {
	var realPath string
	var err error
	if realPath, err = filepath.Abs(path); err != nil {
		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
	}
	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
	}
	realPathInfo, err := os.Stat(realPath)
	if err != nil {
		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
	}
	if !realPathInfo.Mode().IsDir() {
		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
	}
	return realPath, nil
}

// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			if isDir {
				return os.MkdirAll(path, 0755)
			}
			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
				return err
			}
			f, err := os.OpenFile(path, os.O_CREATE, 0755)
			if err != nil {
				return err
			}
			f.Close()
		}
	}
	return nil
}
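Aside (not part of the vendored file): a minimal sketch of checking a path against .dockerignore-style patterns with the Matches function defined above; the patterns and path are examples only.

// Illustrative only: "vendor/foo/bar.go" is matched by "vendor/**" and is not
// re-included by the negated pattern, so Matches reports true (excluded).
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	patterns := []string{"vendor/**", "!vendor/keep/*.go"}

	excluded, err := fileutils.Matches("vendor/foo/bar.go", patterns)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(excluded) // true
}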
vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go (generated, vendored, normal file, 27 lines added)
@@ -0,0 +1,27 @@
package fileutils // import "github.com/docker/docker/pkg/fileutils"

import (
	"os"
	"os/exec"
	"strconv"
	"strings"
)

// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
	pid := os.Getpid()

	cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))

	output, err := cmd.CombinedOutput()
	if err != nil {
		return -1
	}

	outputStr := strings.TrimSpace(string(output))

	fds := strings.Split(outputStr, "\n")

	return len(fds) - 1
}
vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go (generated, vendored, normal file, 22 lines added)
@@ -0,0 +1,22 @@
// +build linux freebsd

package fileutils // import "github.com/docker/docker/pkg/fileutils"

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/sirupsen/logrus"
)

// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
	} else {
		return len(fds)
	}
	return -1
}
vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go (generated, vendored, normal file, 7 lines added)
@@ -0,0 +1,7 @@
package fileutils // import "github.com/docker/docker/pkg/fileutils"

// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
	return -1
}
vendor/github.com/docker/docker/pkg/idtools/idtools.go (generated, vendored, normal file, 267 lines added)
@@ -0,0 +1,267 @@
package idtools // import "github.com/docker/docker/pkg/idtools"

import (
	"bufio"
	"fmt"
	"os"
	"sort"
	"strconv"
	"strings"
)

// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
	ContainerID int `json:"container_id"`
	HostID      int `json:"host_id"`
	Size        int `json:"size"`
}

type subIDRange struct {
	Start  int
	Length int
}

type ranges []subIDRange

func (e ranges) Len() int           { return len(e) }
func (e ranges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }

const (
	subuidFileName = "/etc/subuid"
	subgidFileName = "/etc/subgid"
)

// MkdirAllAndChown creates a directory (include any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
	return mkdirAs(path, mode, owner, true, true)
}

// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership.
// Note that unlike os.Mkdir(), this function does not return IsExist error
// in case path already exists.
func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
	return mkdirAs(path, mode, owner, false, true)
}

// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed
func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
	return mkdirAs(path, mode, owner, true, false)
}

// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
	uid, err := toHost(0, uidMap)
	if err != nil {
		return -1, -1, err
	}
	gid, err := toHost(0, gidMap)
	if err != nil {
		return -1, -1, err
	}
	return uid, gid, nil
}

// toContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func toContainer(hostID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return hostID, nil
	}
	for _, m := range idMap {
		if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
			contID := m.ContainerID + (hostID - m.HostID)
			return contID, nil
		}
	}
	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}

// toHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id #
func toHost(contID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return contID, nil
	}
	for _, m := range idMap {
		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
			hostID := m.HostID + (contID - m.ContainerID)
			return hostID, nil
		}
	}
	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
}

// Identity is either a UID and GID pair or a SID (but not both)
type Identity struct {
	UID int
	GID int
	SID string
}

// IdentityMapping contains a mappings of UIDs and GIDs
type IdentityMapping struct {
	uids []IDMap
	gids []IDMap
}

// NewIdentityMapping takes a requested user and group name and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) {
	subuidRanges, err := parseSubuid(username)
	if err != nil {
		return nil, err
	}
	subgidRanges, err := parseSubgid(groupname)
	if err != nil {
		return nil, err
	}
	if len(subuidRanges) == 0 {
		return nil, fmt.Errorf("No subuid ranges found for user %q", username)
	}
	if len(subgidRanges) == 0 {
		return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
	}

	return &IdentityMapping{
		uids: createIDMap(subuidRanges),
		gids: createIDMap(subgidRanges),
	}, nil
}

// NewIDMappingsFromMaps creates a new mapping from two slices
// Deprecated: this is a temporary shim while transitioning to IDMapping
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping {
	return &IdentityMapping{uids: uids, gids: gids}
}

// RootPair returns a uid and gid pair for the root user. The error is ignored
// because a root user always exists, and the defaults are correct when the uid
// and gid maps are empty.
func (i *IdentityMapping) RootPair() Identity {
	uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
	return Identity{UID: uid, GID: gid}
}

// ToHost returns the host UID and GID for the container uid, gid.
// Remapping is only performed if the ids aren't already the remapped root ids
func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) {
	var err error
	target := i.RootPair()

	if pair.UID != target.UID {
		target.UID, err = toHost(pair.UID, i.uids)
		if err != nil {
			return target, err
		}
	}

	if pair.GID != target.GID {
		target.GID, err = toHost(pair.GID, i.gids)
	}
	return target, err
}

// ToContainer returns the container UID and GID for the host uid and gid
func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) {
	uid, err := toContainer(pair.UID, i.uids)
	if err != nil {
		return -1, -1, err
	}
	gid, err := toContainer(pair.GID, i.gids)
	return uid, gid, err
}

// Empty returns true if there are no id mappings
func (i *IdentityMapping) Empty() bool {
	return len(i.uids) == 0 && len(i.gids) == 0
}

// UIDs return the UID mapping
// TODO: remove this once everything has been refactored to use pairs
func (i *IdentityMapping) UIDs() []IDMap {
	return i.uids
}

// GIDs return the UID mapping
// TODO: remove this once everything has been refactored to use pairs
func (i *IdentityMapping) GIDs() []IDMap {
	return i.gids
}

func createIDMap(subidRanges ranges) []IDMap {
	idMap := []IDMap{}

	// sort the ranges by lowest ID first
	sort.Sort(subidRanges)
	containerID := 0
	for _, idrange := range subidRanges {
		idMap = append(idMap, IDMap{
			ContainerID: containerID,
			HostID:      idrange.Start,
			Size:        idrange.Length,
		})
		containerID = containerID + idrange.Length
	}
	return idMap
}

func parseSubuid(username string) (ranges, error) {
	return parseSubidFile(subuidFileName, username)
}

func parseSubgid(username string) (ranges, error) {
	return parseSubidFile(subgidFileName, username)
}

// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
	var rangeList ranges

	subidFile, err := os.Open(path)
	if err != nil {
		return rangeList, err
	}
	defer subidFile.Close()

	s := bufio.NewScanner(subidFile)
	for s.Scan() {
		if err := s.Err(); err != nil {
			return rangeList, err
		}

		text := strings.TrimSpace(s.Text())
		if text == "" || strings.HasPrefix(text, "#") {
			continue
		}
		parts := strings.Split(text, ":")
		if len(parts) != 3 {
			return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
		}
		if parts[0] == username || username == "ALL" {
			startid, err := strconv.Atoi(parts[1])
			if err != nil {
				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
			}
			length, err := strconv.Atoi(parts[2])
			if err != nil {
				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
			}
			rangeList = append(rangeList, subIDRange{startid, length})
		}
	}
	return rangeList, nil
}
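Aside (not part of the vendored file): a minimal sketch of remapping container root (0/0) to a host uid/gid range with the idtools types defined above; the 100000-based range values are made up for illustration.

// Illustrative only: container 0/0 maps to host 100000/100000 under this mapping.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	uids := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gids := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	mapping := idtools.NewIDMappingsFromMaps(uids, gids)

	host, err := mapping.ToHost(idtools.Identity{UID: 0, GID: 0})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("container 0/0 -> host %d/%d\n", host.UID, host.GID)
}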
231
vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
generated
vendored
Normal file
231
vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
generated
vendored
Normal file
|
|
@ -0,0 +1,231 @@
|
|||
// +build !windows
|
||||
|
||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/opencontainers/runc/libcontainer/user"
|
||||
)
|
||||
|
||||
var (
|
||||
entOnce sync.Once
|
||||
getentCmd string
|
||||
)
|
||||
|
||||
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
|
||||
// make an array containing the original path asked for, plus (for mkAll == true)
|
||||
// all path components leading up to the complete path that don't exist before we MkdirAll
|
||||
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
|
||||
// chown the full directory path if it exists
|
||||
|
||||
var paths []string
|
||||
|
||||
stat, err := system.Stat(path)
|
||||
if err == nil {
|
||||
if !stat.IsDir() {
|
||||
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
|
||||
}
|
||||
if !chownExisting {
|
||||
return nil
|
||||
}
|
||||
|
||||
// short-circuit--we were called with an existing directory and chown was requested
|
||||
return lazyChown(path, owner.UID, owner.GID, stat)
|
||||
}
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
paths = []string{path}
|
||||
}
|
||||
|
||||
if mkAll {
|
||||
// walk back to "/" looking for directories which do not exist
|
||||
// and add them to the paths array for chown after creation
|
||||
dirPath := path
|
||||
for {
|
||||
dirPath = filepath.Dir(dirPath)
|
||||
if dirPath == "/" {
|
||||
break
|
||||
}
|
||||
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
|
||||
paths = append(paths, dirPath)
|
||||
}
|
||||
}
|
||||
if err := system.MkdirAll(path, mode, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// even if it existed, we will chown the requested path + any subpaths that
|
||||
// didn't exist when we called MkdirAll
|
||||
for _, pathComponent := range paths {
|
||||
if err := lazyChown(pathComponent, owner.UID, owner.GID, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
func CanAccess(path string, pair Identity) bool {
	statInfo, err := system.Stat(path)
	if err != nil {
		return false
	}
	fileMode := os.FileMode(statInfo.Mode())
	permBits := fileMode.Perm()
	return accessible(statInfo.UID() == uint32(pair.UID),
		statInfo.GID() == uint32(pair.GID), permBits)
}

func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
	if isOwner && (perms&0100 == 0100) {
		return true
	}
	if isGroup && (perms&0010 == 0010) {
		return true
	}
	if perms&0001 == 0001 {
		return true
	}
	return false
}
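The check above is plain permission-bit masking on the execute bits. An illustrative standalone version of the same rule (names are mine, not the vendored API):

package main

import (
	"fmt"
	"os"
)

// canTraverse applies the same owner/group/other execute-bit rule as the
// vendored accessible() helper.
func canTraverse(isOwner, isGroup bool, perms os.FileMode) bool {
	return (isOwner && perms&0100 != 0) ||
		(isGroup && perms&0010 != 0) ||
		perms&0001 != 0
}

func main() {
	// 0750: owner and group may traverse, others may not
	fmt.Println(canTraverse(true, false, 0750))  // true
	fmt.Println(canTraverse(false, true, 0750))  // true
	fmt.Println(canTraverse(false, false, 0750)) // false
}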
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUser(username string) (user.User, error) {
	// first try a local system files lookup using existing capabilities
	usr, err := user.LookupUser(username)
	if err == nil {
		return usr, nil
	}
	// local files lookup failed; attempt to call `getent` to query configured passwd dbs
	usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
	if err != nil {
		return user.User{}, err
	}
	return usr, nil
}

// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUID(uid int) (user.User, error) {
	// first try a local system files lookup using existing capabilities
	usr, err := user.LookupUid(uid)
	if err == nil {
		return usr, nil
	}
	// local files lookup failed; attempt to call `getent` to query configured passwd dbs
	return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
}

func getentUser(args string) (user.User, error) {
	reader, err := callGetent(args)
	if err != nil {
		return user.User{}, err
	}
	users, err := user.ParsePasswd(reader)
	if err != nil {
		return user.User{}, err
	}
	if len(users) == 0 {
		return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
	}
	return users[0], nil
}
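A usage sketch for the exported lookup helpers above (the username is only an example; results depend on the host's passwd databases):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// falls back to `getent passwd root` if the local files lookup fails
	usr, err := idtools.LookupUser("root")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(usr.Uid, usr.Gid, usr.Home)

	// same two-step lookup, keyed by numeric UID
	byUID, err := idtools.LookupUID(0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(byUID.Name)
}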
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGroup(groupname string) (user.Group, error) {
	// first try a local system files lookup using existing capabilities
	group, err := user.LookupGroup(groupname)
	if err == nil {
		return group, nil
	}
	// local files lookup failed; attempt to call `getent` to query configured group dbs
	return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
}

// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGID(gid int) (user.Group, error) {
	// first try a local system files lookup using existing capabilities
	group, err := user.LookupGid(gid)
	if err == nil {
		return group, nil
	}
	// local files lookup failed; attempt to call `getent` to query configured group dbs
	return getentGroup(fmt.Sprintf("%s %d", "group", gid))
}

func getentGroup(args string) (user.Group, error) {
	reader, err := callGetent(args)
	if err != nil {
		return user.Group{}, err
	}
	groups, err := user.ParseGroup(reader)
	if err != nil {
		return user.Group{}, err
	}
	if len(groups) == 0 {
		return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
	}
	return groups[0], nil
}
func callGetent(args string) (io.Reader, error) {
	entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
	// if no `getent` command on host, can't do anything else
	if getentCmd == "" {
		return nil, fmt.Errorf("unable to find getent command")
	}
	out, err := execCmd(getentCmd, args)
	if err != nil {
		exitCode, errC := system.GetExitCode(err)
		if errC != nil {
			return nil, err
		}
		switch exitCode {
		case 1:
			return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
		case 2:
			terms := strings.Split(args, " ")
			return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
		case 3:
			return nil, fmt.Errorf("getent database doesn't support enumeration")
		default:
			return nil, err
		}
	}
	return bytes.NewReader(out), nil
}
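For reference, callGetent hands raw getent output to libcontainer's parsers. A standalone sketch parsing a canned passwd line the way getentUser does (the sample line is illustrative):

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// typical output of `getent passwd root`
	out := "root:x:0:0:root:/root:/bin/bash\n"
	users, err := user.ParsePasswd(strings.NewReader(out))
	if err != nil {
		log.Fatal(err)
	}
	// users[0].Name == "root", users[0].Uid == 0
	fmt.Println(users[0].Name, users[0].Uid, users[0].Home)
}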
// lazyChown performs a chown only if the uid/gid don't match what's requested
// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
// dir is on an NFS share, so don't call chown unless we absolutely must.
func lazyChown(p string, uid, gid int, stat *system.StatT) error {
	if stat == nil {
		var err error
		stat, err = system.Stat(p)
		if err != nil {
			return err
		}
	}
	if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
		return nil
	}
	return os.Chown(p, uid, gid)
}
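The stat-before-chown guard above matters on filesystems such as NFS with root squashing, where even a no-op chown can fail. An illustrative standard-library-only version of the same guard (helper name is mine):

package main

import (
	"log"
	"os"
	"syscall"
)

// chownIfNeeded skips the chown when ownership already matches,
// mirroring the intent of the vendored lazyChown.
func chownIfNeeded(path string, uid, gid int) error {
	fi, err := os.Stat(path)
	if err != nil {
		return err
	}
	st := fi.Sys().(*syscall.Stat_t)
	if int(st.Uid) == uid && int(st.Gid) == gid {
		return nil // ownership already correct; avoid a pointless chown
	}
	return os.Chown(path, uid, gid)
}

func main() {
	if err := os.MkdirAll("/tmp/example-dir", 0755); err != nil {
		log.Fatal(err)
	}
	if err := chownIfNeeded("/tmp/example-dir", os.Getuid(), os.Getgid()); err != nil {
		log.Fatal(err)
	}
}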
Some files were not shown because too many files have changed in this diff.