Mirror of https://github.com/kreuzwerker/terraform-provider-docker.git, synced 2025-12-20 22:59:42 -05:00

Commit ff3adce2f9 (parent 2eed46629c): Updated via: `go get github.com/hashicorp/terraform@sdk-v0.11-with-go-modules` and `go mod tidy`

1052 changed files with 343158 additions and 6889 deletions
go.mod (27 lines changed)
@@ -2,45 +2,20 @@ module github.com/terraform-providers/terraform-provider-docker

require (
	github.com/Microsoft/go-winio v0.4.11 // indirect
	github.com/apparentlymart/go-cidr v0.0.0-20170418151526-7e4b007599d4 // indirect
	github.com/aws/aws-sdk-go v1.8.34 // indirect
	github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
	github.com/blang/semver v3.5.1+incompatible // indirect
	github.com/docker/distribution v0.0.0-20180522175653-f0cc92778478 // indirect
	github.com/docker/docker v0.0.0-20180530012807-65bd038fc5e4
	github.com/docker/go-connections v0.4.0
	github.com/docker/go-units v0.0.0-20171221200356-d59758554a3d
	github.com/go-ini/ini v1.23.1 // indirect
	github.com/gogo/protobuf v0.0.0-20180121160031-26de2f9a7d3b // indirect
	github.com/google/go-cmp v0.2.0 // indirect
	github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
	github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect
	github.com/hashicorp/go-cleanhttp v0.0.0-20170211013415-3573b8b52aa7 // indirect
	github.com/hashicorp/go-getter v0.0.0-20170207215532-c3d66e76678d // indirect
	github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19 // indirect
	github.com/hashicorp/go-plugin v0.0.0-20170217162722-f72692aebca2 // indirect
	github.com/hashicorp/go-uuid v0.0.0-20160120003506-36289988d83c // indirect
	github.com/hashicorp/go-version v0.0.0-20161031182605-e96d38404026 // indirect
	github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f // indirect
	github.com/hashicorp/hil v0.0.0-20170512213305-fac2259da677 // indirect
	github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3 // indirect
	github.com/hashicorp/terraform v0.10.0
	github.com/hashicorp/yamux v0.0.0-20160720233140-d1caa6c97c9f // indirect
	github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect
	github.com/mitchellh/copystructure v0.0.0-20161013195342-5af94aef99f5 // indirect
	github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 // indirect
	github.com/mitchellh/hashstructure v0.0.0-20160209213820-6b17d669fac5 // indirect
	github.com/mitchellh/mapstructure v0.0.0-20170307201123-53818660ed49 // indirect
	github.com/mitchellh/reflectwalk v0.0.0-20161003174516-92573fe8d000 // indirect
	github.com/hashicorp/terraform v0.11.12-beta1.0.20190227065421-fc531f54a878
	github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
	github.com/opencontainers/image-spec v0.0.0-20171125024018-577479e4dc27 // indirect
	github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e // indirect
	github.com/satori/go.uuid v0.0.0-20160927100844-b061729afc07 // indirect
	github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa // indirect
	github.com/stretchr/testify v1.3.0 // indirect
	golang.org/x/crypto v0.0.0-20180119165957-a66000089151 // indirect
	golang.org/x/net v0.0.0-20180530034148-89e543239a64 // indirect
	golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b // indirect
	golang.org/x/text v0.3.0 // indirect
	gotest.tools v2.2.0+incompatible // indirect
)
go.sum (260 lines changed)
@@ -1,15 +1,63 @@
cloud.google.com/go v0.15.0 h1:/e2wXYguItvFu4fJCvhMRPIwwrimuUxI+aCVx/ahLjg=
|
||||
cloud.google.com/go v0.15.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/azure-sdk-for-go v10.3.0-beta+incompatible h1:TP+nmGmOP7psi7CvIq/1pCliRBRj73vmMTDjaPrTnr8=
|
||||
github.com/Azure/azure-sdk-for-go v10.3.0-beta+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-autorest v9.10.0+incompatible h1:bsri0JnC11oSNMWYkx5tCcfZziOjp8wKoAFaH9xI5Mc=
|
||||
github.com/Azure/go-autorest v9.10.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20170803034930-c92175d54006 h1:dVyNL14dq1500JomYVzJTVi0XEcZFCYwwiNpDeCfoes=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20170803034930-c92175d54006/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290 h1:K9I21XUHNbYD3GNMmJBN0UKJCpdP+glftwNZ7Bo8kqY=
|
||||
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
|
||||
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/apparentlymart/go-cidr v0.0.0-20170418151526-7e4b007599d4 h1:bpmA3CCh0K829XIR5kfcV+YDt+Gwi7SEYPCcYEVKWUo=
|
||||
github.com/apparentlymart/go-cidr v0.0.0-20170418151526-7e4b007599d4/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
|
||||
github.com/aws/aws-sdk-go v1.8.34 h1:zDNBtR25rYYUi7aEh2HRa2f2WTm6LSHWSAMCNY2WdXA=
|
||||
github.com/aws/aws-sdk-go v1.8.34/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
|
||||
github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292 h1:tuQ7w+my8a8mkwN7x2TSd7OzTjkZ7rAeSyH4xncuAMI=
|
||||
github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no=
|
||||
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
|
||||
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
|
||||
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6 h1:LoeFxdq5zUCBQPhbQKE6zvoGwHMxCBlqwbH9+9kHoHA=
|
||||
github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
|
||||
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
|
||||
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
|
||||
github.com/apparentlymart/go-cidr v0.0.0-20170616213631-2bd8b58cf427 h1:2P/DTyNDU+7qJOB6E5KeIpdc3qcT9IYjyA8hZ9HGz50=
|
||||
github.com/apparentlymart/go-cidr v0.0.0-20170616213631-2bd8b58cf427/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
|
||||
github.com/apparentlymart/go-textseg v0.0.0-20170531203952-b836f5c4d331 h1:AIKxo1t7QE7MAqADwrmzMiaFC+QfHfXOk8lrmibN5Lk=
|
||||
github.com/apparentlymart/go-textseg v0.0.0-20170531203952-b836f5c4d331/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20160115234725-4239b77079c7 h1:MBXhrxjNkjdqJysfNbKMMPFNXlz6EzpOnPcsoYBeD3E=
|
||||
github.com/armon/go-radix v0.0.0-20160115234725-4239b77079c7/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aws/aws-sdk-go v1.14.31 h1:amhorvKh1zNxo9YCntvA5uDmgw+pCYXOp4xO8WS1oDg=
|
||||
github.com/aws/aws-sdk-go v1.14.31/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
|
||||
github.com/beevik/etree v0.0.0-20171015221209-af219c0c7ea1 h1:6fqkBkx5cRbd8Pq0UEMxyteIAPoE1KiPptnx1yEzJLU=
|
||||
github.com/beevik/etree v0.0.0-20171015221209-af219c0c7ea1/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
|
||||
github.com/bgentry/speakeasy v0.0.0-20161015143505-675b82c74c0e h1:giZ2nnSSH4ntzmoNPwdncPXXA2nWdlO7NiebK0gozNI=
|
||||
github.com/bgentry/speakeasy v0.0.0-20161015143505-675b82c74c0e/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/blang/semver v0.0.0-20170202183821-4a1e882c79dc/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/chzyer/logex v1.1.11-0.20160617073814-96a4d311aa9b/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d h1:aG5FcWiZTOhPQzYIxwxSR1zEOxzL32fwr1CsaCfhO6w=
|
||||
github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20160617131543-bea8f082b6fd/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/coreos/bbolt v1.3.1-coreos.1/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.2.0-rc.1.0.20170908195435-80aa810309d4+incompatible h1:VLCxgrfBsnJtqTy0WFP0GsjjwWZQiuQiNgiWnY6g6Gc=
|
||||
github.com/coreos/etcd v3.2.0-rc.1.0.20170908195435-80aa810309d4+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20160617170158-f0777076321a h1:pzKxqfSfp4kqrm6jfyVYYkWhf+e1hPRt3rX+Yj/3UBU=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20160617170158-f0777076321a/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dnaeon/go-vcr v0.0.0-20170218072653-87d4990451a8/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/distribution v0.0.0-20180522175653-f0cc92778478 h1:yCzgNaIN+6iGPBy5h8LVw9ULFerVIiCdPMKiH2A6I/g=
|
||||
github.com/docker/distribution v0.0.0-20180522175653-f0cc92778478/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.0.0-20180530012807-65bd038fc5e4 h1:l3X/U8oT8xvht8bB/kHUntALLPIYSzmUf4Wyr6UnGWU=
|
||||
|
|
@@ -18,78 +66,234 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.0.0-20171221200356-d59758554a3d h1:QEYTOUa4JROW5CSAHU8qc0rD8uhx1tsUkBsSDk8P5eM=
|
||||
github.com/docker/go-units v0.0.0-20171221200356-d59758554a3d/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/go-ini/ini v1.23.1 h1:amNPHl+tCb4BolL2NAIQaKLY+ZiL1Ju7OqZ9Fx6PTBQ=
|
||||
github.com/go-ini/ini v1.23.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/dylanmei/iso8601 v0.1.0 h1:812NGQDBcqquTfH5Yeo7lwR0nzx/cKdsmf3qMjPURUI=
|
||||
github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ=
|
||||
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08/go.mod h1:VBVDFSBXCIW8JaHQpI8lldSKfYaLMzP9oyq6IJ4fhzY=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
|
||||
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
|
||||
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/gogo/protobuf v0.0.0-20170307180453-100ba4e88506/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v0.0.0-20180121160031-26de2f9a7d3b h1:Ni9Ctfwt7JmOFxuB2/Nhs9Ai8LdSTOAmT7mDG9ftKHQ=
|
||||
github.com/gogo/protobuf v0.0.0-20180121160031-26de2f9a7d3b/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.1.1-0.20171002171727-8ebdfab36c66/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e h1:CYRpN206UTHUinz3VJoLaBdy1gEGeJNsqT0mvswDcMw=
|
||||
github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968 h1:Pu+HW4kcQozw0QyrTTgLE+3RXNqFhQNNzhbnoLFL83c=
|
||||
github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
|
||||
github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01 h1:OgCNGSnEalfkRpn//WGJHhpo7fkP+LhTpvEITZ7CkK4=
|
||||
github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20160910222444-6b7015e65d36/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.2.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/hashicorp/atlas-go v0.0.0-20161107204910-1792bd8de119 h1:6w2v93RpNf+cK7V6I5g3S2uCsR55tSv6L+WK4V8j9nI=
|
||||
github.com/hashicorp/atlas-go v0.0.0-20161107204910-1792bd8de119/go.mod h1:ckHDuH0pxfnmXZkq1niVSguIIV0pA65gifQv3so9llw=
|
||||
github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089 h1:1eDpXAxTh0iPv+1kc9/gfSI2pxRERDsTk/lNGolwHn8=
|
||||
github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.0.0-20170211013415-3573b8b52aa7 h1:67fHcS+inUoiIqWCKIqeDuq2AlPHNHPiTqp97LdQ+bc=
|
||||
github.com/hashicorp/go-cleanhttp v0.0.0-20170211013415-3573b8b52aa7/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-getter v0.0.0-20170207215532-c3d66e76678d h1:gYDVHhIS6JMuuWr/k6SYGR3//pr5IOiNRzM/Hr4UKjk=
|
||||
github.com/hashicorp/go-getter v0.0.0-20170207215532-c3d66e76678d/go.mod h1:6rdJFnhkXnzGOJbvkrdv4t9nLwKcVA+tmbQeUlkIzrU=
|
||||
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8=
|
||||
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-getter v0.0.0-20180327010114-90bb99a48d86 h1:hLYM35twiyKH44g36g+GFYODcrZQetEAY4+zrJtGea0=
|
||||
github.com/hashicorp/go-getter v0.0.0-20180327010114-90bb99a48d86/go.mod h1:6rdJFnhkXnzGOJbvkrdv4t9nLwKcVA+tmbQeUlkIzrU=
|
||||
github.com/hashicorp/go-hclog v0.0.0-20170716174523-b4e5765d1e5f h1:5onjUM14Pu2IrXp+iFQJYxswZoCn8PmSvVQ6IOic8uE=
|
||||
github.com/hashicorp/go-hclog v0.0.0-20170716174523-b4e5765d1e5f/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19 h1:gb61U/o4ZJ6TRYvZqJUKYidIhJOEAvNyVMesryROxAY=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
|
||||
github.com/hashicorp/go-plugin v0.0.0-20170217162722-f72692aebca2 h1:C979il8uGAI9k9qFbaDE355/0UXqPZ8gyrwZDXSlTRk=
|
||||
github.com/hashicorp/go-plugin v0.0.0-20170217162722-f72692aebca2/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ=
|
||||
github.com/hashicorp/go-uuid v0.0.0-20160120003506-36289988d83c h1:74JBd5YKJzNH8YVYfXI2LVmLdRj2udNzzysg9Y0x2dw=
|
||||
github.com/hashicorp/go-uuid v0.0.0-20160120003506-36289988d83c/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v0.0.0-20161031182605-e96d38404026 h1:qWx/DcC6l4ZzuS+JBAzI5XjtLFDCc08zYeZ0kLnaH2g=
|
||||
github.com/hashicorp/go-version v0.0.0-20161031182605-e96d38404026/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go-plugin v0.0.0-20180125190438-e53f54cbf51e h1:v7Pi8dJoDS0h0BAyFll8mfbrBrXg2vtfPg+J0XnIibM=
|
||||
github.com/hashicorp/go-plugin v0.0.0-20180125190438-e53f54cbf51e/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.1 h1:Vsx5XKPqPs3M6sM4U4GWyUqFS8aBiL9U5gkgvpkg4SE=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.1/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E=
|
||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg=
|
||||
github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc h1:wAa9fGALVHfjYxZuXRnmuJG2CnwRpJYOTvY6YdErAh0=
|
||||
github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
|
||||
github.com/hashicorp/go-slug v0.2.0 h1:MVdZAkTmDsUi1AT+3NQDsn8n3ssnVSIHwiM6RcUHvE8=
|
||||
github.com/hashicorp/go-slug v0.2.0/go.mod h1:+zDycQOzGqOqMW7Kn2fp9vz/NtqpMLQlgb9JUF+0km4=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-tfe v0.3.10 h1:6uPnPHNPxXDe3k/Vt6fovygYTaWJ8f/7zdHc++f7NJU=
|
||||
github.com/hashicorp/go-tfe v0.3.10/go.mod h1:LHLchj07PCYgQqcyE5Sz+g4zrMNW+nALKbiSNTZedEs=
|
||||
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8=
|
||||
github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws=
|
||||
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
|
||||
github.com/hashicorp/hil v0.0.0-20170512213305-fac2259da677 h1:Yj0RcrbLT/5z2k1UdRW+2r0nDUgrKjUNLQiXzMU4a5k=
|
||||
github.com/hashicorp/hil v0.0.0-20170512213305-fac2259da677/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts=
|
||||
github.com/hashicorp/hcl2 v0.0.0-20180308163058-5f8ed954abd8 h1:laCE8EKBOUVN6LwBt7Be9IX7i2RQ2cnfbt+Z5a+0PRI=
|
||||
github.com/hashicorp/hcl2 v0.0.0-20180308163058-5f8ed954abd8/go.mod h1:xp1eMAxqhQKBxz+yQUTsig9bBMRRWRWw+rK3FJmHf/A=
|
||||
github.com/hashicorp/hil v0.0.0-20170627220502-fa9f258a9250 h1:fooK5IvDL/KIsi4LxF/JH68nVdrBSiGNPhS2JAQjtjo=
|
||||
github.com/hashicorp/hil v0.0.0-20170627220502-fa9f258a9250/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts=
|
||||
github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3 h1:oD64EFjELI9RY9yoWlfua58r+etdnoIC871z+rr6lkA=
|
||||
github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/terraform v0.10.0 h1:qDay454qAPcAThGMoWfO7snCwbtsr6O2DwJiFqfbMSM=
|
||||
github.com/hashicorp/terraform v0.10.0/go.mod h1:uN1KUiT7Wdg61fPwsGXQwK3c8PmpIVZrt5Vcb1VrSoM=
|
||||
github.com/hashicorp/memberlist v0.0.0-20170208211506-23ad4b7d7b38/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE=
|
||||
github.com/hashicorp/serf v0.8.2-0.20171022020050-c20a0b1b1ea9 h1:SYvpFFTluyu7KQoR1vFbk72jMMeXa8TedEo9VihOiI4=
|
||||
github.com/hashicorp/serf v0.8.2-0.20171022020050-c20a0b1b1ea9/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE=
|
||||
github.com/hashicorp/terraform v0.11.12-beta1.0.20190227065421-fc531f54a878 h1:BEtsPcwrOE8gFSU/SPbY9mdr1CHsKAvEeZqh2PWNVqo=
|
||||
github.com/hashicorp/terraform v0.11.12-beta1.0.20190227065421-fc531f54a878/go.mod h1:pR/Ri/puIH6hcylTZLaCDDS/16fCkjGr+/VGfHdb9zk=
|
||||
github.com/hashicorp/vault v0.0.0-20161029210149-9a60bf2a50e4 h1:SGDekHLK2IRoVS7Fb4olLyWvc2VmwKgyFC05j6X3NII=
|
||||
github.com/hashicorp/vault v0.0.0-20161029210149-9a60bf2a50e4/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0=
|
||||
github.com/hashicorp/yamux v0.0.0-20160720233140-d1caa6c97c9f h1:K4RDeor/qhbs5ETM85SN8xekXkk+KkOBclNXXM8+UR0=
|
||||
github.com/hashicorp/yamux v0.0.0-20160720233140-d1caa6c97c9f/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/jen20/awspolicyequivalence v0.0.0-20170831201602-3d48364a137a h1:FyS/ubzBR5xJlnJGRTwe7GUHpJOR4ukYK3y+LFNffuA=
|
||||
github.com/jen20/awspolicyequivalence v0.0.0-20170831201602-3d48364a137a/go.mod h1:uoIMjNxUfXi48Ci40IXkPRbghZ1vbti6v9LCbNqRgHY=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926 h1:kie3qOosvRKqwij2HGzXWffwpXvcqfPPXRUw8I4F/mg=
|
||||
github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA=
|
||||
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/mitchellh/copystructure v0.0.0-20161013195342-5af94aef99f5 h1:zpaRAYp00glucts07iugR0W6zvhvLPp+9oR20M8ULM0=
|
||||
github.com/mitchellh/copystructure v0.0.0-20161013195342-5af94aef99f5/go.mod h1:eOsF2yLPlBBJPvD+nhl5QMTBSOBbOph6N7j/IDUw7PY=
|
||||
github.com/kardianos/osext v0.0.0-20160811001526-c2c54e542fb7 h1:pKv4oHt3kat9yf1jofmaRv3KxGaY5B7VV55GrfXFa74=
|
||||
github.com/kardianos/osext v0.0.0-20160811001526-c2c54e542fb7/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba h1:NARVGAAgEXvoMeNPHhPFt1SBt1VMznA3Gnz9d0qj+co=
|
||||
github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
|
||||
github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82 h1:wnfcqULT+N2seWf6y4yHzmi7GD2kNx4Ute0qArktD48=
|
||||
github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84=
|
||||
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c h1:FMUOnVGy8nWk1cvlMCAoftRItQGMxI0vzJ3dQjeZTCE=
|
||||
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c/go.mod h1:mf8fjOu33zCqxUjuiU3I8S1lJMyEAlH+0F2+M5xl3hE=
|
||||
github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9 h1:SmVbOZFWAlyQshuMfOkiAx1f5oUTsOGG5IXplAEYeeM=
|
||||
github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
|
||||
github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939 h1:cRFHA33ER97Xy5jmjS519OXCS/yE3AT3zdbQAg0Z53g=
|
||||
github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939/go.mod h1:CfZSN7zwz5gJiFhZJz49Uzk7mEBHIceWmbFmYx7Hf7E=
|
||||
github.com/mattn/go-colorable v0.0.0-20160220075935-9cbef7c35391 h1:x4vT4RoTH2BNkPx0LgrBKeFuPQPviK1aSAIWG6ruc+Y=
|
||||
github.com/mattn/go-colorable v0.0.0-20160220075935-9cbef7c35391/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.0-20161123143637-30a891c33c7c h1:YHHK/dEmr2Jo1cWD1VMB2waEeHJhHFp3CEylwWy/VcY=
|
||||
github.com/mattn/go-isatty v0.0.0-20161123143637-30a891c33c7c/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-shellwords v1.0.1 h1:2/mQs/EosKUge1MHnAavnrNwa0wLnWDjG4dTYMGf/kI=
|
||||
github.com/mattn/go-shellwords v1.0.1/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v0.0.0-20171129193617-33edc47170b5 h1:OYr3N2fY3e3kP/x/d81CJXlcZrIV2hH8gPnuRLpiME4=
|
||||
github.com/mitchellh/cli v0.0.0-20171129193617-33edc47170b5/go.mod h1:oGumspjLm2kTyiT1QMGpFqRlmxnKHfCvhZEVnx+5UeE=
|
||||
github.com/mitchellh/colorstring v0.0.0-20150917214807-8631ce90f286 h1:KHyL+3mQOF9sPfs26lsefckcFNDcIZtiACQiECzIUkw=
|
||||
github.com/mitchellh/colorstring v0.0.0-20150917214807-8631ce90f286/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
|
||||
github.com/mitchellh/copystructure v0.0.0-20170525013902-d23ffcb85de3 h1:dECZqiJYhKdj9QlLpiQaRDXHDXRTdiyZI3owdDGhlYY=
|
||||
github.com/mitchellh/copystructure v0.0.0-20170525013902-d23ffcb85de3/go.mod h1:eOsF2yLPlBBJPvD+nhl5QMTBSOBbOph6N7j/IDUw7PY=
|
||||
github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 h1:eQox4Rh4ewJF+mqYPxCkmBAirRnPaHEB26UkNuPyjlk=
|
||||
github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-linereader v0.0.0-20141013185533-07bab5fdd958 h1:wN+5lV34eSnVSZgBLWRGHr6L4giR3/wI2B9DLmVnlfI=
|
||||
github.com/mitchellh/go-linereader v0.0.0-20141013185533-07bab5fdd958/go.mod h1:OaY7UOoTkkrX3wRwjpYRKafIkkyeD0UtweSHAWWiqQM=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20170730050907-9a441910b168 h1:FW/lWFII8EehRx+hVNy5OkkIhWXz9NC69vO5Zr2RExY=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20170730050907-9a441910b168/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/hashstructure v0.0.0-20160209213820-6b17d669fac5 h1:h+4fp6yIoLPf/K2egDK3kvYM2zqb28gJIWWMiDzBdKM=
|
||||
github.com/mitchellh/hashstructure v0.0.0-20160209213820-6b17d669fac5/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20170307201123-53818660ed49 h1:kaWdlw4YogwkDl8CG+/VxhXkrL9uz3n1D9QBC2pEGLE=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20170307201123-53818660ed49/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/reflectwalk v0.0.0-20161003174516-92573fe8d000 h1:ajR9C+BMu/Q7NN8pmQQAchGkMwlLivNVWFC06dH+K/4=
|
||||
github.com/mitchellh/reflectwalk v0.0.0-20161003174516-92573fe8d000/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/mitchellh/panicwrap v0.0.0-20161208170302-ba9e1a65e0f7 h1:+PBI9A4rLQJlch3eQI/RkTY2HzX+bl2lPUf3goenBxs=
|
||||
github.com/mitchellh/panicwrap v0.0.0-20161208170302-ba9e1a65e0f7/go.mod h1:QuAqW7/z+iv6aWFJdrA8kCbsF0OOJVKCICqTcYBexuY=
|
||||
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784 h1:+DAetXqxv/mSyCkE9KBIYOZs9b68y7SUaDCxQMRjA68=
|
||||
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo=
|
||||
github.com/mitchellh/reflectwalk v0.0.0-20170726202117-63d60e9d0dbc h1:gqYjvctjtX4GHzgfutJxZpvZ7XhGwQLGR5BASwhpO2o=
|
||||
github.com/mitchellh/reflectwalk v0.0.0-20170726202117-63d60e9d0dbc/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
|
||||
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
|
||||
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v0.0.0-20171125024018-577479e4dc27 h1:eNXDpuTnIrrQmG7HYGA2YljYjhFPQF10c1vV3iutuyY=
|
||||
github.com/opencontainers/image-spec v0.0.0-20171125024018-577479e4dc27/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58 h1:m3CEgv3ah1Rhy82L+c0QG/U3VyY1UsvsIdkh0/rU97Y=
|
||||
github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e h1:+RHxT/gm0O3UF7nLJbdNzAmULvCFt4XfXHWzh3XI/zs=
|
||||
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v0.0.0-20171219111128-6bee943216c8 h1:lcb1zvdlaZyEbl2OXifN3uOYYyIvllofUbmp9bwbL+0=
|
||||
github.com/posener/complete v0.0.0-20171219111128-6bee943216c8/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/ryanuber/columnize v0.0.0-20161220214920-0fbbb3f0e3fb h1:/im8B/AMa1Yj8MuHvva4/KoXXWG+QR2rv8PtyKlqpVQ=
|
||||
github.com/ryanuber/columnize v0.0.0-20161220214920-0fbbb3f0e3fb/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/satori/go.uuid v0.0.0-20160927100844-b061729afc07 h1:DEZDfcCVq3xDJrjqdCgyN/dHYVoqR92MCsdqCdxmnhM=
|
||||
github.com/satori/go.uuid v0.0.0-20160927100844-b061729afc07/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/satori/uuid v0.0.0-20160927100844-b061729afc07 h1:81vvGlnI/AZ1/TxGDirw3ofUoS64TyjmPQt5C9XODTw=
|
||||
github.com/satori/uuid v0.0.0-20160927100844-b061729afc07/go.mod h1:B8HLsPLik/YNn6KKWVMDJ8nzCL8RP5WyfsnmvnAEwIU=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
|
||||
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spf13/afero v1.0.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
golang.org/x/crypto v0.0.0-20180119165957-a66000089151 h1:du7dTgoJooIwkyksVuOCfUbQDgZE9oIUdVk/v/tqou4=
|
||||
golang.org/x/crypto v0.0.0-20180119165957-a66000089151/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI=
|
||||
github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw=
|
||||
github.com/terraform-providers/terraform-provider-aws v1.29.0 h1:gvi87HjR5Q1YSD2ihBadB+gd/DNk+6F8Ki4vGxIVnlM=
|
||||
github.com/terraform-providers/terraform-provider-aws v1.29.0/go.mod h1:uvqaeKnm2ydZ2LuKuW1NDNBu6heC/7IDGXWm36/6oKs=
|
||||
github.com/terraform-providers/terraform-provider-openstack v1.15.0 h1:adpjqej+F8BAX9dHmuPF47sUIkgifeqBu6p7iCsyj0Y=
|
||||
github.com/terraform-providers/terraform-provider-openstack v1.15.0/go.mod h1:2aQ6n/BtChAl1y2S60vebhyJyZXBsuAI5G4+lHrT1Ew=
|
||||
github.com/terraform-providers/terraform-provider-template v1.0.0/go.mod h1:/J+B8me5DCMa0rEBH5ic2aKPjhtpWNeScmxFJWxB1EU=
|
||||
github.com/terraform-providers/terraform-provider-tls v1.2.0/go.mod h1:Mxe/v5u31LDW4m32O1z6Ursdh95dpc9Puq6otkYg7tU=
|
||||
github.com/ugorji/go v0.0.0-20170107133203-ded73eae5db7 h1:BPPUhSq7uU6E9lFzyb81vjwVOhiWwMXp0EpKL75NX+8=
|
||||
github.com/ugorji/go v0.0.0-20170107133203-ded73eae5db7/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
||||
github.com/ulikunitz/xz v0.5.4 h1:zATC2OoZ8H1TZll3FpbX+ikwmadbO699PE06cIkm9oU=
|
||||
github.com/ulikunitz/xz v0.5.4/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
|
||||
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
|
||||
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
|
||||
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557 h1:Jpn2j6wHkC9wJv5iMfJhKqrZJx3TahFx+7sbZ7zQdxs=
|
||||
github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||
github.com/zclconf/go-cty v0.0.0-20180302160414-49fa5e03c418 h1:uZKhc0PzQtIg+6+BqQU1m0zzcIgY2hHJk/Xwf00QUNw=
|
||||
github.com/zclconf/go-cty v0.0.0-20180302160414-49fa5e03c418/go.mod h1:LnDKxj8gN4aatfXUqmUNooaDjvmDcLPbAN3hYBIVoJE=
|
||||
golang.org/x/crypto v0.0.0-20180211211603-9de5f2eaf759 h1:6W75OzsrwJByqag5GxxtYVTVEyP+Sy+aLDUsJ9CD8OU=
|
||||
golang.org/x/crypto v0.0.0-20180211211603-9de5f2eaf759/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/net v0.0.0-20171004034648-a04bdaca5b32/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180530034148-89e543239a64 h1:otRUcJnCzmletog6NvNtimZZStU31VhmAuuno53i0Nk=
|
||||
golang.org/x/net v0.0.0-20180530034148-89e543239a64/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b h1:mxo/dXmtEd5rXc/ZzMKg0qDhMT+51+LvV65S9dP6nh4=
|
||||
golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/oauth2 v0.0.0-20170928010508-bb50c06baba3 h1:YGx0PRKSN/2n/OcdFycCC0JUA/Ln+i5lPcN8VoNDus0=
|
||||
golang.org/x/oauth2 v0.0.0-20170928010508-bb50c06baba3/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.0.0-20171013141220-c01e4764d870/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
google.golang.org/api v0.0.0-20171005000305-7a7376eff6a5 h1:PDkJGYjSvxJyevtZRGmBSO+HjbIKuqYEEc8gB51or4o=
|
||||
google.golang.org/api v0.0.0-20171005000305-7a7376eff6a5/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/appengine v0.0.0-20150527042145-b667a5000b08/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63 h1:yNBw5bwywOTguAu+h6SkCUaWdEZ7ZXgfiwb2YTN1eQw=
|
||||
google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v0.0.0-20170809211603-7657092a1303 h1:C5gwSQxZkG33JZoP+ZjEclrLu6DIRLVw743KKZfIXP4=
|
||||
google.golang.org/grpc v0.0.0-20170809211603-7657092a1303/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170407172122-cd8b52f8269e h1:o/mfNjxpTLivuKEfxzzwrJ8PmulH2wEp7t713uMwKAA=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170407172122-cd8b52f8269e/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
vendor/github.com/agext/levenshtein/.gitignore (new file, generated, vendored, 2 lines)
@@ -0,0 +1,2 @@
README.html
coverage.out
vendor/github.com/agext/levenshtein/.travis.yml (new file, generated, vendored, 70 lines)
@@ -0,0 +1,70 @@
language: go
sudo: false
go:
  - 1.8
  - 1.7.5
  - 1.7.4
  - 1.7.3
  - 1.7.2
  - 1.7.1
  - 1.7
  - tip
  - 1.6.4
  - 1.6.3
  - 1.6.2
  - 1.6.1
  - 1.6
  - 1.5.4
  - 1.5.3
  - 1.5.2
  - 1.5.1
  - 1.5
  - 1.4.3
  - 1.4.2
  - 1.4.1
  - 1.4
  - 1.3.3
  - 1.3.2
  - 1.3.1
  - 1.3
  - 1.2.2
  - 1.2.1
  - 1.2
  - 1.1.2
  - 1.1.1
  - 1.1
before_install:
  - go get github.com/mattn/goveralls
script:
  - $HOME/gopath/bin/goveralls -service=travis-ci
notifications:
  email:
    on_success: never
matrix:
  fast_finish: true
  allow_failures:
    - go: tip
    - go: 1.6.4
    - go: 1.6.3
    - go: 1.6.2
    - go: 1.6.1
    - go: 1.6
    - go: 1.5.4
    - go: 1.5.3
    - go: 1.5.2
    - go: 1.5.1
    - go: 1.5
    - go: 1.4.3
    - go: 1.4.2
    - go: 1.4.1
    - go: 1.4
    - go: 1.3.3
    - go: 1.3.2
    - go: 1.3.1
    - go: 1.3
    - go: 1.2.2
    - go: 1.2.1
    - go: 1.2
    - go: 1.1.2
    - go: 1.1.1
    - go: 1.1
vendor/github.com/agext/levenshtein/DCO (new file, generated, vendored, 36 lines)
@@ -0,0 +1,36 @@
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or

(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or

(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.

(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
vendor/github.com/agext/levenshtein/LICENSE (new file, generated, vendored, 201 lines)
@@ -0,0 +1,201 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
vendor/github.com/agext/levenshtein/MAINTAINERS (new file, generated, vendored, 1 line)
@@ -0,0 +1 @@
Alex Bucataru <alex@alrux.com> (@AlexBucataru)
5
vendor/github.com/agext/levenshtein/NOTICE
generated
vendored
Normal file
5
vendor/github.com/agext/levenshtein/NOTICE
generated
vendored
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
Alrux Go EXTensions (AGExt) - package levenshtein
|
||||
Copyright 2016 ALRUX Inc.
|
||||
|
||||
This product includes software developed at ALRUX Inc.
|
||||
(http://www.alrux.com/).
|
||||
38 vendor/github.com/agext/levenshtein/README.md (generated, vendored, new file)
@@ -0,0 +1,38 @@
# A Go package for calculating the Levenshtein distance between two strings

[](https://github.com/agext/levenshtein/releases/latest)
[](https://godoc.org/github.com/agext/levenshtein)
[](https://travis-ci.org/agext/levenshtein)
[](https://coveralls.io/github/agext/levenshtein)
[](https://goreportcard.com/report/github.com/agext/levenshtein)

This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org).

## Project Status

v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis.

This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.

## Overview

The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.

A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.

The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.

The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.

The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.

## Installation

```
go get github.com/agext/levenshtein
```

## License

Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
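For quick orientation, a minimal sketch (editor-added, not part of the vendored files) of how the API described in the README above is typically called; the sample strings are arbitrary and only the functions documented in this diff are used.

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// A nil *Params uses the default cost of 1 for insert, delete and substitute.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// Similarity normalizes the distance into the 0..1 range.
	fmt.Printf("%.2f\n", levenshtein.Similarity("kitten", "sitting", nil))

	// Match adds a Winkler-style bonus for string pairs sharing a prefix.
	fmt.Printf("%.2f\n", levenshtein.Match("terraform", "terrafrom", nil))
}
```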
290 vendor/github.com/agext/levenshtein/levenshtein.go (generated, vendored, new file)
@@ -0,0 +1,290 @@
// Copyright 2016 ALRUX Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure.
|
||||
|
||||
The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
|
||||
|
||||
A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
|
||||
|
||||
The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
|
||||
|
||||
The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
|
||||
|
||||
The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
|
||||
*/
|
||||
package levenshtein
|
||||
|
||||
// Calculate determines the Levenshtein distance between two strings, using
|
||||
// the given costs for each edit operation. It returns the distance along with
|
||||
// the lengths of the longest common prefix and suffix.
|
||||
//
|
||||
// If maxCost is non-zero, the calculation stops as soon as the distance is determined
|
||||
// to be greater than maxCost. Therefore, any return value higher than maxCost is a
|
||||
// lower bound for the actual distance.
|
||||
func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) {
|
||||
l1, l2 := len(str1), len(str2)
|
||||
// trim common prefix, if any, as it doesn't affect the distance
|
||||
for ; prefixLen < l1 && prefixLen < l2; prefixLen++ {
|
||||
if str1[prefixLen] != str2[prefixLen] {
|
||||
break
|
||||
}
|
||||
}
|
||||
str1, str2 = str1[prefixLen:], str2[prefixLen:]
|
||||
l1 -= prefixLen
|
||||
l2 -= prefixLen
|
||||
// trim common suffix, if any, as it doesn't affect the distance
|
||||
for 0 < l1 && 0 < l2 {
|
||||
if str1[l1-1] != str2[l2-1] {
|
||||
str1, str2 = str1[:l1], str2[:l2]
|
||||
break
|
||||
}
|
||||
l1--
|
||||
l2--
|
||||
suffixLen++
|
||||
}
|
||||
// if the first string is empty, the distance is the length of the second string times the cost of insertion
|
||||
if l1 == 0 {
|
||||
dist = l2 * insCost
|
||||
return
|
||||
}
|
||||
// if the second string is empty, the distance is the length of the first string times the cost of deletion
|
||||
if l2 == 0 {
|
||||
dist = l1 * delCost
|
||||
return
|
||||
}
|
||||
|
||||
// variables used in inner "for" loops
|
||||
var y, dy, c, l int
|
||||
|
||||
// if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited'
|
||||
if maxCost > 0 {
|
||||
if subCost < delCost+insCost {
|
||||
if maxCost >= l1*subCost+(l2-l1)*insCost {
|
||||
maxCost = 0
|
||||
}
|
||||
} else {
|
||||
if maxCost >= l1*delCost+l2*insCost {
|
||||
maxCost = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if maxCost > 0 {
|
||||
// prefer the longer string first, to minimize time;
|
||||
// a swap also transposes the meanings of insertion and deletion.
|
||||
if l1 < l2 {
|
||||
str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
|
||||
}
|
||||
|
||||
// the length differential times cost of deletion is a lower bound for the cost;
|
||||
// if it is higher than the maxCost, there is no point going into the main calculation.
|
||||
if dist = (l1 - l2) * delCost; dist > maxCost {
|
||||
return
|
||||
}
|
||||
|
||||
d := make([]int, l1+1)
|
||||
|
||||
// offset and length of d in the current row
|
||||
doff, dlen := 0, 1
|
||||
for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ {
|
||||
d[y] = dy
|
||||
y++
|
||||
dy = y * delCost
|
||||
}
|
||||
// fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
|
||||
|
||||
for x := 0; x < l2; x++ {
|
||||
dy, d[doff] = d[doff], d[doff]+insCost
|
||||
for d[doff] > maxCost && dlen > 0 {
|
||||
if str1[doff] != str2[x] {
|
||||
dy += subCost
|
||||
}
|
||||
doff++
|
||||
dlen--
|
||||
if c = d[doff] + insCost; c < dy {
|
||||
dy = c
|
||||
}
|
||||
dy, d[doff] = d[doff], dy
|
||||
}
|
||||
for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy {
|
||||
if str1[y] != str2[x] {
|
||||
dy += subCost
|
||||
}
|
||||
if c = d[y] + delCost; c < dy {
|
||||
dy = c
|
||||
}
|
||||
y++
|
||||
if c = d[y] + insCost; c < dy {
|
||||
dy = c
|
||||
}
|
||||
}
|
||||
if y < l1 {
|
||||
if str1[y] != str2[x] {
|
||||
dy += subCost
|
||||
}
|
||||
if c = d[y] + delCost; c < dy {
|
||||
dy = c
|
||||
}
|
||||
for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy {
|
||||
y++
|
||||
dlen++
|
||||
}
|
||||
}
|
||||
// fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
|
||||
if dlen == 0 {
|
||||
dist = maxCost + 1
|
||||
return
|
||||
}
|
||||
}
|
||||
if doff+dlen-1 < l1 {
|
||||
dist = maxCost + 1
|
||||
return
|
||||
}
|
||||
dist = d[l1]
|
||||
} else {
|
||||
// ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is
|
||||
// worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space
|
||||
// http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html
|
||||
|
||||
// prefer the shorter string first, to minimize space; time is O(l1*l2) anyway;
|
||||
// a swap also transposes the meanings of insertion and deletion.
|
||||
if l1 > l2 {
|
||||
str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
|
||||
}
|
||||
d := make([]int, l1+1)
|
||||
|
||||
for y = 1; y <= l1; y++ {
|
||||
d[y] = y * delCost
|
||||
}
|
||||
for x := 0; x < l2; x++ {
|
||||
dy, d[0] = d[0], d[0]+insCost
|
||||
for y = 0; y < l1; dy, d[y] = d[y], dy {
|
||||
if str1[y] != str2[x] {
|
||||
dy += subCost
|
||||
}
|
||||
if c = d[y] + delCost; c < dy {
|
||||
dy = c
|
||||
}
|
||||
y++
|
||||
if c = d[y] + insCost; c < dy {
|
||||
dy = c
|
||||
}
|
||||
}
|
||||
}
|
||||
dist = d[l1]
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Distance returns the Levenshtein distance between str1 and str2, using the
|
||||
// default or provided cost values. Pass nil for the third argument to use the
|
||||
// default cost of 1 for all three operations, with no maximum.
|
||||
func Distance(str1, str2 string, p *Params) int {
|
||||
if p == nil {
|
||||
p = defaultParams
|
||||
}
|
||||
dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost)
|
||||
return dist
|
||||
}
|
||||
|
||||
// Similarity returns a score in the range of 0..1 for how similar the two strings are.
|
||||
// A score of 1 means the strings are identical, and 0 means they have nothing in common.
|
||||
//
|
||||
// A nil third argument uses the default cost of 1 for all three operations.
|
||||
//
|
||||
// If a non-zero MinScore value is provided in the parameters, scores lower than it
|
||||
// will be returned as 0.
|
||||
func Similarity(str1, str2 string, p *Params) float64 {
|
||||
return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus
|
||||
}
|
||||
|
||||
// Match returns a similarity score adjusted by the same method as proposed by Winkler for
|
||||
// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their
|
||||
// similarity score is already over a threshold.
|
||||
//
|
||||
// The score is in the range of 0..1, with 1 meaning the strings are identical,
|
||||
// and 0 meaning they have nothing in common.
|
||||
//
|
||||
// A nil third argument uses the default cost of 1 for all three operations, maximum length of
|
||||
// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7.
|
||||
//
|
||||
// If a non-zero MinScore value is provided in the parameters, scores lower than it
|
||||
// will be returned as 0.
|
||||
func Match(str1, str2 string, p *Params) float64 {
|
||||
s1, s2 := []rune(str1), []rune(str2)
|
||||
l1, l2 := len(s1), len(s2)
|
||||
// two empty strings are identical; shortcut also avoids divByZero issues later on.
|
||||
if l1 == 0 && l2 == 0 {
|
||||
return 1
|
||||
}
|
||||
|
||||
if p == nil {
|
||||
p = defaultParams
|
||||
}
|
||||
|
||||
// a min over 1 can never be satisfied, so the score is 0.
|
||||
if p.minScore > 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0
|
||||
if l1 > l2 {
|
||||
l1, l2, insCost, delCost = l2, l1, delCost, insCost
|
||||
}
|
||||
|
||||
if p.subCost < delCost+insCost {
|
||||
maxDist = l1*p.subCost + (l2-l1)*insCost
|
||||
} else {
|
||||
maxDist = l1*delCost + l2*insCost
|
||||
}
|
||||
|
||||
// a zero min is always satisfied, so no need to set a max cost.
|
||||
if p.minScore > 0 {
|
||||
// if p.minScore is lower than p.bonusThreshold, we can use a simplified formula
|
||||
// for the max cost, because a sim score below min cannot receive a bonus.
|
||||
if p.minScore < p.bonusThreshold {
|
||||
// round down the max - a cost equal to a rounded up max would already be under min.
|
||||
max = int((1 - p.minScore) * float64(maxDist))
|
||||
} else {
|
||||
// p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim)
|
||||
// p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist))
|
||||
// p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist
|
||||
// 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist
|
||||
// (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist
|
||||
max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale))
|
||||
}
|
||||
}
|
||||
|
||||
dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost)
|
||||
if max > 0 && dist > max {
|
||||
return 0
|
||||
}
|
||||
sim := 1 - float64(dist)/float64(maxDist)
|
||||
|
||||
if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 {
|
||||
if pl > p.bonusPrefix {
|
||||
pl = p.bonusPrefix
|
||||
}
|
||||
sim += float64(pl) * p.bonusScale * (1 - sim)
|
||||
}
|
||||
|
||||
if sim < p.minScore {
|
||||
return 0
|
||||
}
|
||||
|
||||
return sim
|
||||
}
|
||||
152 vendor/github.com/agext/levenshtein/params.go (generated, vendored, new file)
@@ -0,0 +1,152 @@
// Copyright 2016 ALRUX Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package levenshtein
|
||||
|
||||
// Params represents a set of parameter values for the various formulas involved
|
||||
// in the calculation of the Levenshtein string metrics.
|
||||
type Params struct {
|
||||
insCost int
|
||||
subCost int
|
||||
delCost int
|
||||
maxCost int
|
||||
minScore float64
|
||||
bonusPrefix int
|
||||
bonusScale float64
|
||||
bonusThreshold float64
|
||||
}
|
||||
|
||||
var (
|
||||
defaultParams = NewParams()
|
||||
)
|
||||
|
||||
// NewParams creates a new set of parameters and initializes it with the default values.
|
||||
func NewParams() *Params {
|
||||
return &Params{
|
||||
insCost: 1,
|
||||
subCost: 1,
|
||||
delCost: 1,
|
||||
maxCost: 0,
|
||||
minScore: 0,
|
||||
bonusPrefix: 4,
|
||||
bonusScale: .1,
|
||||
bonusThreshold: .7,
|
||||
}
|
||||
}
|
||||
|
||||
// Clone returns a pointer to a copy of the receiver parameter set, or of a new
|
||||
// default parameter set if the receiver is nil.
|
||||
func (p *Params) Clone() *Params {
|
||||
if p == nil {
|
||||
return NewParams()
|
||||
}
|
||||
return &Params{
|
||||
insCost: p.insCost,
|
||||
subCost: p.subCost,
|
||||
delCost: p.delCost,
|
||||
maxCost: p.maxCost,
|
||||
minScore: p.minScore,
|
||||
bonusPrefix: p.bonusPrefix,
|
||||
bonusScale: p.bonusScale,
|
||||
bonusThreshold: p.bonusThreshold,
|
||||
}
|
||||
}
|
||||
|
||||
// InsCost overrides the default value of 1 for the cost of insertion.
|
||||
// The new value must be zero or positive.
|
||||
func (p *Params) InsCost(v int) *Params {
|
||||
if v >= 0 {
|
||||
p.insCost = v
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// SubCost overrides the default value of 1 for the cost of substitution.
|
||||
// The new value must be zero or positive.
|
||||
func (p *Params) SubCost(v int) *Params {
|
||||
if v >= 0 {
|
||||
p.subCost = v
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// DelCost overrides the default value of 1 for the cost of deletion.
|
||||
// The new value must be zero or positive.
|
||||
func (p *Params) DelCost(v int) *Params {
|
||||
if v >= 0 {
|
||||
p.delCost = v
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost.
|
||||
// The calculation of Distance() stops when the result is guaranteed to exceed
|
||||
// this maximum, returning a lower-bound rather than exact value.
|
||||
// The new value must be zero or positive.
|
||||
func (p *Params) MaxCost(v int) *Params {
|
||||
if v >= 0 {
|
||||
p.maxCost = v
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// MinScore overrides the default value of 0 for the minimum similarity score.
|
||||
// Scores below this threshold are returned as 0 by Similarity() and Match().
|
||||
// The new value must be zero or positive. Note that a minimum greater than 1
|
||||
// can never be satisfied, resulting in a score of 0 for any pair of strings.
|
||||
func (p *Params) MinScore(v float64) *Params {
|
||||
if v >= 0 {
|
||||
p.minScore = v
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// BonusPrefix overrides the default value for the maximum length of
|
||||
// common prefix to be considered for bonus by Match().
|
||||
// The new value must be zero or positive.
|
||||
func (p *Params) BonusPrefix(v int) *Params {
|
||||
if v >= 0 {
|
||||
p.bonusPrefix = v
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// BonusScale overrides the default value for the scaling factor used by Match()
|
||||
// in calculating the bonus.
|
||||
// The new value must be zero or positive. To guarantee that the similarity score
|
||||
// remains in the interval 0..1, this scaling factor is not allowed to exceed
|
||||
// 1 / BonusPrefix.
|
||||
func (p *Params) BonusScale(v float64) *Params {
|
||||
if v >= 0 {
|
||||
p.bonusScale = v
|
||||
}
|
||||
|
||||
// the bonus cannot exceed (1-sim), or the score may become greater than 1.
|
||||
if float64(p.bonusPrefix)*p.bonusScale > 1 {
|
||||
p.bonusScale = 1 / float64(p.bonusPrefix)
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// BonusThreshold overrides the default value for the minimum similarity score
|
||||
// for which Match() can assign a bonus.
|
||||
// The new value must be zero or positive. Note that a threshold greater than 1
|
||||
// effectively makes Match() become the equivalent of Similarity().
|
||||
func (p *Params) BonusThreshold(v float64) *Params {
|
||||
if v >= 0 {
|
||||
p.bonusThreshold = v
|
||||
}
|
||||
return p
|
||||
}
|
||||
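The setter methods above are designed to be chained. A small illustrative sketch (editor-added, not part of the diff; the concrete cost values are arbitrary assumptions) of building a Params value and passing it to Distance and Match:

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// Cost values here are arbitrary and only illustrate the chaining.
	p := levenshtein.NewParams().
		InsCost(1).
		DelCost(1).
		SubCost(2). // count a substitution as a delete plus an insert
		MaxCost(5)  // stop early once the distance must exceed 5
	fmt.Println(levenshtein.Distance("levenshtein", "levenstein", p))

	// MinScore and the bonus settings only influence Similarity and Match.
	q := levenshtein.NewParams().MinScore(0.8).BonusPrefix(4).BonusScale(0.1)
	fmt.Printf("%.2f\n", levenshtein.Match("terraform", "teraform", q))
}
```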
102 vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go (generated, vendored)
@@ -71,8 +71,13 @@ func Host(base *net.IPNet, num int) (net.IP, error) {
if numUint64 > maxHostNum {
|
||||
return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
|
||||
}
|
||||
|
||||
return insertNumIntoIP(ip, num, 32), nil
|
||||
var bitlength int
|
||||
if ip.To4() != nil {
|
||||
bitlength = 32
|
||||
} else {
|
||||
bitlength = 128
|
||||
}
|
||||
return insertNumIntoIP(ip, num, bitlength), nil
|
||||
}
|
||||
|
||||
// AddressRange returns the first and last addresses in the given CIDR range.
|
||||
@@ -110,3 +115,96 @@ func AddressCount(network *net.IPNet) uint64 {
prefixLen, bits := network.Mask.Size()
|
||||
return 1 << (uint64(bits) - uint64(prefixLen))
|
||||
}
|
||||
|
||||
//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies
|
||||
//none of the subnets overlap and all subnets are in the supernet
|
||||
//it returns an error if any of those conditions are not satisfied
|
||||
func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error {
|
||||
firstLastIP := make([][]net.IP, len(subnets))
|
||||
for i, s := range subnets {
|
||||
first, last := AddressRange(s)
|
||||
firstLastIP[i] = []net.IP{first, last}
|
||||
}
|
||||
for i, s := range subnets {
|
||||
if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) {
|
||||
return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String())
|
||||
}
|
||||
for j := i + 1; j < len(subnets); j++ {
|
||||
first := firstLastIP[j][0]
|
||||
last := firstLastIP[j][1]
|
||||
if s.Contains(first) || s.Contains(last) {
|
||||
return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PreviousSubnet returns the subnet of the desired mask in the IP space
|
||||
// just lower than the start of IPNet provided. If the IP space rolls over
|
||||
// then the second return value is true
|
||||
func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
|
||||
startIP := checkIPv4(network.IP)
|
||||
previousIP := make(net.IP, len(startIP))
|
||||
copy(previousIP, startIP)
|
||||
cMask := net.CIDRMask(prefixLen, 8*len(previousIP))
|
||||
previousIP = Dec(previousIP)
|
||||
previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask}
|
||||
if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) {
|
||||
return previous, true
|
||||
}
|
||||
return previous, false
|
||||
}
|
||||
|
||||
// NextSubnet returns the next available subnet of the desired mask size
|
||||
// starting for the maximum IP of the offset subnet
|
||||
// If the IP exceeds the maxium IP then the second return value is true
|
||||
func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
|
||||
_, currentLast := AddressRange(network)
|
||||
mask := net.CIDRMask(prefixLen, 8*len(currentLast))
|
||||
currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask}
|
||||
_, last := AddressRange(currentSubnet)
|
||||
last = Inc(last)
|
||||
next := &net.IPNet{IP: last.Mask(mask), Mask: mask}
|
||||
if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) {
|
||||
return next, true
|
||||
}
|
||||
return next, false
|
||||
}
|
||||
|
||||
//Inc increases the IP by one this returns a new []byte for the IP
|
||||
func Inc(IP net.IP) net.IP {
|
||||
IP = checkIPv4(IP)
|
||||
incIP := make([]byte, len(IP))
|
||||
copy(incIP, IP)
|
||||
for j := len(incIP) - 1; j >= 0; j-- {
|
||||
incIP[j]++
|
||||
if incIP[j] > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return incIP
|
||||
}
|
||||
|
||||
//Dec decreases the IP by one this returns a new []byte for the IP
|
||||
func Dec(IP net.IP) net.IP {
|
||||
IP = checkIPv4(IP)
|
||||
decIP := make([]byte, len(IP))
|
||||
copy(decIP, IP)
|
||||
decIP = checkIPv4(decIP)
|
||||
for j := len(decIP) - 1; j >= 0; j-- {
|
||||
decIP[j]--
|
||||
if decIP[j] < 255 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return decIP
|
||||
}
|
||||
|
||||
func checkIPv4(ip net.IP) net.IP {
|
||||
// Go for some reason allocs IPv6len for IPv4 so we have to correct it
|
||||
if v4 := ip.To4(); v4 != nil {
|
||||
return v4
|
||||
}
|
||||
return ip
|
||||
}
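To make the intent of the Host change and the new helpers easier to follow, a brief usage sketch (editor-added, with made-up CIDR blocks) against the functions shown in this hunk:

```go
package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	_, ipv4Net, _ := net.ParseCIDR("10.0.0.0/24")
	_, ipv6Net, _ := net.ParseCIDR("2001:db8::/64")

	// Host now picks the bit length from the address family (32 vs 128).
	v4Host, _ := cidr.Host(ipv4Net, 5)
	v6Host, _ := cidr.Host(ipv6Net, 5)
	fmt.Println(v4Host, v6Host) // 10.0.0.5 2001:db8::5

	fmt.Println(cidr.AddressCount(ipv4Net)) // 256

	// NextSubnet returns the adjacent /24 and whether the space rolled over.
	next, rollover := cidr.NextSubnet(ipv4Net, 24)
	fmt.Println(next, rollover) // 10.0.1.0/24 false
}
```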
95 vendor/github.com/apparentlymart/go-textseg/LICENSE (generated, vendored, new file)
@@ -0,0 +1,95 @@
Copyright (c) 2017 Martin Atkins
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
---------
|
||||
|
||||
Unicode table generation programs are under a separate copyright and license:
|
||||
|
||||
Copyright (c) 2014 Couchbase, Inc.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
|
||||
except in compliance with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
either express or implied. See the License for the specific language governing permissions
|
||||
and limitations under the License.
|
||||
|
||||
---------
|
||||
|
||||
Grapheme break data is provided as part of the Unicode character database,
|
||||
copright 2016 Unicode, Inc, which is provided with the following license:
|
||||
|
||||
Unicode Data Files include all data files under the directories
|
||||
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
|
||||
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
|
||||
http://www.unicode.org/utility/trac/browser/.
|
||||
|
||||
Unicode Data Files do not include PDF online code charts under the
|
||||
directory http://www.unicode.org/Public/.
|
||||
|
||||
Software includes any source code published in the Unicode Standard
|
||||
or under the directories
|
||||
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
|
||||
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
|
||||
http://www.unicode.org/utility/trac/browser/.
|
||||
|
||||
NOTICE TO USER: Carefully read the following legal agreement.
|
||||
BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
|
||||
DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
|
||||
YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
|
||||
TERMS AND CONDITIONS OF THIS AGREEMENT.
|
||||
IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
|
||||
THE DATA FILES OR SOFTWARE.
|
||||
|
||||
COPYRIGHT AND PERMISSION NOTICE
|
||||
|
||||
Copyright © 1991-2017 Unicode, Inc. All rights reserved.
|
||||
Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of the Unicode data files and any associated documentation
|
||||
(the "Data Files") or Unicode software and any associated documentation
|
||||
(the "Software") to deal in the Data Files or Software
|
||||
without restriction, including without limitation the rights to use,
|
||||
copy, modify, merge, publish, distribute, and/or sell copies of
|
||||
the Data Files or Software, and to permit persons to whom the Data Files
|
||||
or Software are furnished to do so, provided that either
|
||||
(a) this copyright and permission notice appear with all copies
|
||||
of the Data Files or Software, or
|
||||
(b) this copyright and permission notice appear in associated
|
||||
Documentation.
|
||||
|
||||
THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
|
||||
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT OF THIRD PARTY RIGHTS.
|
||||
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
|
||||
NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
|
||||
DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
|
||||
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THE DATA FILES OR SOFTWARE.
|
||||
|
||||
Except as contained in this notice, the name of a copyright holder
|
||||
shall not be used in advertising or otherwise to promote the sale,
|
||||
use or other dealings in these Data Files or Software without prior
|
||||
written authorization of the copyright holder.
|
||||
30 vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go (generated, vendored, new file)
@@ -0,0 +1,30 @@
package textseg

import (
	"bufio"
	"bytes"
)

// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of
// all of the recognized tokens in the given buffer.
func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) {
	scanner := bufio.NewScanner(bytes.NewReader(buf))
	scanner.Split(splitFunc)
	var ret [][]byte
	for scanner.Scan() {
		ret = append(ret, scanner.Bytes())
	}
	return ret, scanner.Err()
}

// TokenCount is a utility that uses a bufio.SplitFunc to count the number of
// recognized tokens in the given buffer.
func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) {
	scanner := bufio.NewScanner(bytes.NewReader(buf))
	scanner.Split(splitFunc)
	var ret int
	for scanner.Scan() {
		ret++
	}
	return ret, scanner.Err()
}
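A short sketch (editor-added, not part of the vendored package) of how these helpers are typically paired with the ScanGraphemeClusters split function that appears later in this diff:

```go
package main

import (
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	input := []byte("héllo")

	clusters, err := textseg.AllTokens(input, textseg.ScanGraphemeClusters)
	if err != nil {
		panic(err)
	}
	for _, c := range clusters {
		fmt.Printf("%q\n", c) // one grapheme cluster per line
	}

	n, _ := textseg.TokenCount(input, textseg.ScanGraphemeClusters)
	fmt.Println(n) // 5 clusters in "héllo"
}
```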
7 vendor/github.com/apparentlymart/go-textseg/textseg/generate.go (generated, vendored, new file)
@@ -0,0 +1,7 @@
package textseg

//go:generate go run make_tables.go -output tables.go
//go:generate go run make_test_tables.go -output tables_test.go
//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl
//go:generate ragel -Z grapheme_clusters.rl
//go:generate gofmt -w grapheme_clusters.go
5276 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go (generated, vendored, new file)
File diff suppressed because it is too large
132 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl (generated, vendored, new file)
@@ -0,0 +1,132 @@
package textseg
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Generated from grapheme_clusters.rl. DO NOT EDIT
|
||||
%%{
|
||||
# (except you are actually in grapheme_clusters.rl here, so edit away!)
|
||||
|
||||
machine graphclust;
|
||||
write data;
|
||||
}%%
|
||||
|
||||
var Error = errors.New("invalid UTF8 text")
|
||||
|
||||
// ScanGraphemeClusters is a split function for bufio.Scanner that splits
|
||||
// on grapheme cluster boundaries.
|
||||
func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
|
||||
if len(data) == 0 {
|
||||
return 0, nil, nil
|
||||
}
|
||||
|
||||
// Ragel state
|
||||
cs := 0 // Current State
|
||||
p := 0 // "Pointer" into data
|
||||
pe := len(data) // End-of-data "pointer"
|
||||
ts := 0
|
||||
te := 0
|
||||
act := 0
|
||||
eof := pe
|
||||
|
||||
// Make Go compiler happy
|
||||
_ = ts
|
||||
_ = te
|
||||
_ = act
|
||||
_ = eof
|
||||
|
||||
startPos := 0
|
||||
endPos := 0
|
||||
|
||||
%%{
|
||||
include GraphemeCluster "grapheme_clusters_table.rl";
|
||||
|
||||
action start {
|
||||
startPos = p
|
||||
}
|
||||
|
||||
action end {
|
||||
endPos = p
|
||||
}
|
||||
|
||||
action emit {
|
||||
return endPos+1, data[startPos:endPos+1], nil
|
||||
}
|
||||
|
||||
ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?;
|
||||
AnyExtender = Extend | ZWJGlue | SpacingMark;
|
||||
Extension = AnyExtender*;
|
||||
ReplacementChar = (0xEF 0xBF 0xBD);
|
||||
|
||||
CRLFSeq = CR LF;
|
||||
ControlSeq = Control | ReplacementChar;
|
||||
HangulSeq = (
|
||||
L+ (((LV? V+ | LVT) T*)?|LV?) |
|
||||
LV V* T* |
|
||||
V+ T* |
|
||||
LVT T* |
|
||||
T+
|
||||
) Extension;
|
||||
EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension;
|
||||
ZWJSeq = ZWJGlue Extension;
|
||||
EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension;
|
||||
|
||||
UTF8Cont = 0x80 .. 0xBF;
|
||||
AnyUTF8 = (
|
||||
0x00..0x7F |
|
||||
0xC0..0xDF . UTF8Cont |
|
||||
0xE0..0xEF . UTF8Cont . UTF8Cont |
|
||||
0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
|
||||
);
|
||||
|
||||
# OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension
|
||||
OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension;
|
||||
|
||||
# PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break
|
||||
PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?;
|
||||
|
||||
CRLFTok = CRLFSeq >start @end;
|
||||
ControlTok = ControlSeq >start @end;
|
||||
HangulTok = HangulSeq >start @end;
|
||||
EmojiTok = EmojiSeq >start @end;
|
||||
ZWJTok = ZWJSeq >start @end;
|
||||
EmojiFlagTok = EmojiFlagSeq >start @end;
|
||||
OtherTok = OtherSeq >start @end;
|
||||
PrependTok = PrependSeq >start @end;
|
||||
|
||||
main := |*
|
||||
CRLFTok => emit;
|
||||
ControlTok => emit;
|
||||
HangulTok => emit;
|
||||
EmojiTok => emit;
|
||||
ZWJTok => emit;
|
||||
EmojiFlagTok => emit;
|
||||
PrependTok => emit;
|
||||
OtherTok => emit;
|
||||
|
||||
# any single valid UTF-8 character would also be valid per spec,
|
||||
# but we'll handle that separately after the loop so we can deal
|
||||
# with requesting more bytes if we're not at EOF.
|
||||
*|;
|
||||
|
||||
write init;
|
||||
write exec;
|
||||
}%%
|
||||
|
||||
// If we fall out here then we were unable to complete a sequence.
|
||||
// If we weren't able to complete a sequence then either we've
|
||||
// reached the end of a partial buffer (so there's more data to come)
|
||||
// or we have an isolated symbol that would normally be part of a
|
||||
// grapheme cluster but has appeared in isolation here.
|
||||
|
||||
if !atEOF {
|
||||
// Request more
|
||||
return 0, nil, nil
|
||||
}
|
||||
|
||||
// Just take the first UTF-8 sequence and return that.
|
||||
_, seqLen := utf8.DecodeRune(data)
|
||||
return seqLen, data[:seqLen], nil
|
||||
}
|
||||
1583 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl (generated, vendored, new file)
File diff suppressed because it is too large
307 vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go (generated, vendored, new file)
@@ -0,0 +1,307 @@
// Copyright (c) 2014 Couchbase, Inc.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
|
||||
// except in compliance with the License. You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed under the
|
||||
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
// either express or implied. See the License for the specific language governing permissions
|
||||
// and limitations under the License.
|
||||
|
||||
// Modified by Martin Atkins to serve the needs of package textseg.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var url = flag.String("url",
|
||||
"http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/",
|
||||
"URL of Unicode database directory")
|
||||
var verbose = flag.Bool("verbose",
|
||||
false,
|
||||
"write data to stdout as it is parsed")
|
||||
var localFiles = flag.Bool("local",
|
||||
false,
|
||||
"data files have been copied to the current directory; for debugging only")
|
||||
var outputFile = flag.String("output",
|
||||
"",
|
||||
"output file for generated tables; default stdout")
|
||||
|
||||
var output *bufio.Writer
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
setupOutput()
|
||||
|
||||
graphemePropertyRanges := make(map[string]*unicode.RangeTable)
|
||||
loadUnicodeData("GraphemeBreakProperty.txt", graphemePropertyRanges)
|
||||
wordPropertyRanges := make(map[string]*unicode.RangeTable)
|
||||
loadUnicodeData("WordBreakProperty.txt", wordPropertyRanges)
|
||||
sentencePropertyRanges := make(map[string]*unicode.RangeTable)
|
||||
loadUnicodeData("SentenceBreakProperty.txt", sentencePropertyRanges)
|
||||
|
||||
fmt.Fprintf(output, fileHeader, *url)
|
||||
generateTables("Grapheme", graphemePropertyRanges)
|
||||
generateTables("Word", wordPropertyRanges)
|
||||
generateTables("Sentence", sentencePropertyRanges)
|
||||
|
||||
flushOutput()
|
||||
}
|
||||
|
||||
// WordBreakProperty.txt has the form:
|
||||
// 05F0..05F2 ; Hebrew_Letter # Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW LIGATURE YIDDISH DOUBLE YOD
|
||||
// FB1D ; Hebrew_Letter # Lo HEBREW LETTER YOD WITH HIRIQ
|
||||
func openReader(file string) (input io.ReadCloser) {
|
||||
if *localFiles {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
input = f
|
||||
} else {
|
||||
path := *url + file
|
||||
resp, err := http.Get(path)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
log.Fatal("bad GET status for "+file, resp.Status)
|
||||
}
|
||||
input = resp.Body
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func loadUnicodeData(filename string, propertyRanges map[string]*unicode.RangeTable) {
|
||||
f := openReader(filename)
|
||||
defer f.Close()
|
||||
bufioReader := bufio.NewReader(f)
|
||||
line, err := bufioReader.ReadString('\n')
|
||||
for err == nil {
|
||||
parseLine(line, propertyRanges)
|
||||
line, err = bufioReader.ReadString('\n')
|
||||
}
|
||||
// if the err was EOF still need to process last value
|
||||
if err == io.EOF {
|
||||
parseLine(line, propertyRanges)
|
||||
}
|
||||
}
|
||||
|
||||
const comment = "#"
|
||||
const sep = ";"
|
||||
const rnge = ".."
|
||||
|
||||
func parseLine(line string, propertyRanges map[string]*unicode.RangeTable) {
|
||||
if strings.HasPrefix(line, comment) {
|
||||
return
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
if len(line) == 0 {
|
||||
return
|
||||
}
|
||||
commentStart := strings.Index(line, comment)
|
||||
if commentStart > 0 {
|
||||
line = line[0:commentStart]
|
||||
}
|
||||
pieces := strings.Split(line, sep)
|
||||
if len(pieces) != 2 {
|
||||
log.Printf("unexpected %d pieces in %s", len(pieces), line)
|
||||
return
|
||||
}
|
||||
|
||||
propertyName := strings.TrimSpace(pieces[1])
|
||||
|
||||
rangeTable, ok := propertyRanges[propertyName]
|
||||
if !ok {
|
||||
rangeTable = &unicode.RangeTable{
|
||||
LatinOffset: 0,
|
||||
}
|
||||
propertyRanges[propertyName] = rangeTable
|
||||
}
|
||||
|
||||
codepointRange := strings.TrimSpace(pieces[0])
|
||||
rngeIndex := strings.Index(codepointRange, rnge)
|
||||
|
||||
if rngeIndex < 0 {
|
||||
// single codepoint, not range
|
||||
codepointInt, err := strconv.ParseUint(codepointRange, 16, 64)
|
||||
if err != nil {
|
||||
log.Printf("error parsing int: %v", err)
|
||||
return
|
||||
}
|
||||
if codepointInt < 0x10000 {
|
||||
r16 := unicode.Range16{
|
||||
Lo: uint16(codepointInt),
|
||||
Hi: uint16(codepointInt),
|
||||
Stride: 1,
|
||||
}
|
||||
addR16ToTable(rangeTable, r16)
|
||||
} else {
|
||||
r32 := unicode.Range32{
|
||||
Lo: uint32(codepointInt),
|
||||
Hi: uint32(codepointInt),
|
||||
Stride: 1,
|
||||
}
|
||||
addR32ToTable(rangeTable, r32)
|
||||
}
|
||||
} else {
|
||||
rngeStart := codepointRange[0:rngeIndex]
|
||||
rngeEnd := codepointRange[rngeIndex+2:]
|
||||
rngeStartInt, err := strconv.ParseUint(rngeStart, 16, 64)
|
||||
if err != nil {
|
||||
log.Printf("error parsing int: %v", err)
|
||||
return
|
||||
}
|
||||
rngeEndInt, err := strconv.ParseUint(rngeEnd, 16, 64)
|
||||
if err != nil {
|
||||
log.Printf("error parsing int: %v", err)
|
||||
return
|
||||
}
|
||||
if rngeStartInt < 0x10000 && rngeEndInt < 0x10000 {
|
||||
r16 := unicode.Range16{
|
||||
Lo: uint16(rngeStartInt),
|
||||
Hi: uint16(rngeEndInt),
|
||||
Stride: 1,
|
||||
}
|
||||
addR16ToTable(rangeTable, r16)
|
||||
} else if rngeStartInt >= 0x10000 && rngeEndInt >= 0x10000 {
|
||||
r32 := unicode.Range32{
|
||||
Lo: uint32(rngeStartInt),
|
||||
Hi: uint32(rngeEndInt),
|
||||
Stride: 1,
|
||||
}
|
||||
addR32ToTable(rangeTable, r32)
|
||||
} else {
|
||||
log.Printf("unexpected range")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addR16ToTable(r *unicode.RangeTable, r16 unicode.Range16) {
|
||||
if r.R16 == nil {
|
||||
r.R16 = make([]unicode.Range16, 0, 1)
|
||||
}
|
||||
r.R16 = append(r.R16, r16)
|
||||
if r16.Hi <= unicode.MaxLatin1 {
|
||||
r.LatinOffset++
|
||||
}
|
||||
}
|
||||
|
||||
func addR32ToTable(r *unicode.RangeTable, r32 unicode.Range32) {
|
||||
if r.R32 == nil {
|
||||
r.R32 = make([]unicode.Range32, 0, 1)
|
||||
}
|
||||
r.R32 = append(r.R32, r32)
|
||||
}
|
||||
|
||||
func generateTables(prefix string, propertyRanges map[string]*unicode.RangeTable) {
|
||||
prNames := make([]string, 0, len(propertyRanges))
|
||||
for k := range propertyRanges {
|
||||
prNames = append(prNames, k)
|
||||
}
|
||||
sort.Strings(prNames)
|
||||
for _, key := range prNames {
|
||||
rt := propertyRanges[key]
|
||||
fmt.Fprintf(output, "var _%s%s = %s\n", prefix, key, generateRangeTable(rt))
|
||||
}
|
||||
fmt.Fprintf(output, "type _%sRuneRange unicode.RangeTable\n", prefix)
|
||||
|
||||
fmt.Fprintf(output, "func _%sRuneType(r rune) *_%sRuneRange {\n", prefix, prefix)
|
||||
fmt.Fprintf(output, "\tswitch {\n")
|
||||
for _, key := range prNames {
|
||||
fmt.Fprintf(output, "\tcase unicode.Is(_%s%s, r):\n\t\treturn (*_%sRuneRange)(_%s%s)\n", prefix, key, prefix, prefix, key)
|
||||
}
|
||||
fmt.Fprintf(output, "\tdefault:\n\t\treturn nil\n")
|
||||
fmt.Fprintf(output, "\t}\n")
|
||||
fmt.Fprintf(output, "}\n")
|
||||
|
||||
fmt.Fprintf(output, "func (rng *_%sRuneRange) String() string {\n", prefix)
|
||||
fmt.Fprintf(output, "\tswitch (*unicode.RangeTable)(rng) {\n")
|
||||
for _, key := range prNames {
|
||||
fmt.Fprintf(output, "\tcase _%s%s:\n\t\treturn %q\n", prefix, key, key)
|
||||
}
|
||||
fmt.Fprintf(output, "\tdefault:\n\t\treturn \"Other\"\n")
|
||||
fmt.Fprintf(output, "\t}\n")
|
||||
fmt.Fprintf(output, "}\n")
|
||||
}
|
||||
|
||||
func generateRangeTable(rt *unicode.RangeTable) string {
|
||||
rv := "&unicode.RangeTable{\n"
|
||||
if rt.R16 != nil {
|
||||
rv += "\tR16: []unicode.Range16{\n"
|
||||
for _, r16 := range rt.R16 {
|
||||
rv += fmt.Sprintf("\t\t%#v,\n", r16)
|
||||
}
|
||||
rv += "\t},\n"
|
||||
}
|
||||
if rt.R32 != nil {
|
||||
rv += "\tR32: []unicode.Range32{\n"
|
||||
for _, r32 := range rt.R32 {
|
||||
rv += fmt.Sprintf("\t\t%#v,\n", r32)
|
||||
}
|
||||
rv += "\t},\n"
|
||||
}
|
||||
rv += fmt.Sprintf("\t\tLatinOffset: %d,\n", rt.LatinOffset)
|
||||
rv += "}\n"
|
||||
return rv
|
||||
}
|
||||
|
||||
const fileHeader = `// Generated by running
|
||||
// maketables --url=%s
|
||||
// DO NOT EDIT
|
||||
|
||||
package textseg
|
||||
|
||||
import(
|
||||
"unicode"
|
||||
)
|
||||
`
|
||||
|
||||
func setupOutput() {
|
||||
output = bufio.NewWriter(startGofmt())
|
||||
}
|
||||
|
||||
// startGofmt connects output to a gofmt process if -output is set.
|
||||
func startGofmt() io.Writer {
|
||||
if *outputFile == "" {
|
||||
return os.Stdout
|
||||
}
|
||||
stdout, err := os.Create(*outputFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Pipe output to gofmt.
|
||||
gofmt := exec.Command("gofmt")
|
||||
fd, err := gofmt.StdinPipe()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
gofmt.Stdout = stdout
|
||||
gofmt.Stderr = os.Stderr
|
||||
err = gofmt.Start()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return fd
|
||||
}
|
||||
|
||||
func flushOutput() {
|
||||
err := output.Flush()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
212 vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go (generated, vendored, new file)
@@ -0,0 +1,212 @@
// Copyright (c) 2014 Couchbase, Inc.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
|
||||
// except in compliance with the License. You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed under the
|
||||
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
// either express or implied. See the License for the specific language governing permissions
|
||||
// and limitations under the License.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var url = flag.String("url",
|
||||
"http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/",
|
||||
"URL of Unicode database directory")
|
||||
var verbose = flag.Bool("verbose",
|
||||
false,
|
||||
"write data to stdout as it is parsed")
|
||||
var localFiles = flag.Bool("local",
|
||||
false,
|
||||
"data files have been copied to the current directory; for debugging only")
|
||||
|
||||
var outputFile = flag.String("output",
|
||||
"",
|
||||
"output file for generated tables; default stdout")
|
||||
|
||||
var output *bufio.Writer
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
setupOutput()
|
||||
|
||||
graphemeTests := make([]test, 0)
|
||||
graphemeTests = loadUnicodeData("GraphemeBreakTest.txt", graphemeTests)
|
||||
wordTests := make([]test, 0)
|
||||
wordTests = loadUnicodeData("WordBreakTest.txt", wordTests)
|
||||
sentenceTests := make([]test, 0)
|
||||
sentenceTests = loadUnicodeData("SentenceBreakTest.txt", sentenceTests)
|
||||
|
||||
fmt.Fprintf(output, fileHeader, *url)
|
||||
generateTestTables("Grapheme", graphemeTests)
|
||||
generateTestTables("Word", wordTests)
|
||||
generateTestTables("Sentence", sentenceTests)
|
||||
|
||||
flushOutput()
|
||||
}
|
||||
|
||||
// WordBreakProperty.txt has the form:
|
||||
// 05F0..05F2 ; Hebrew_Letter # Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW LIGATURE YIDDISH DOUBLE YOD
|
||||
// FB1D ; Hebrew_Letter # Lo HEBREW LETTER YOD WITH HIRIQ
|
||||
func openReader(file string) (input io.ReadCloser) {
|
||||
if *localFiles {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
input = f
|
||||
} else {
|
||||
path := *url + file
|
||||
resp, err := http.Get(path)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
log.Fatal("bad GET status for "+file, resp.Status)
|
||||
}
|
||||
input = resp.Body
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func loadUnicodeData(filename string, tests []test) []test {
|
||||
f := openReader(filename)
|
||||
defer f.Close()
|
||||
bufioReader := bufio.NewReader(f)
|
||||
line, err := bufioReader.ReadString('\n')
|
||||
for err == nil {
|
||||
tests = parseLine(line, tests)
|
||||
line, err = bufioReader.ReadString('\n')
|
||||
}
|
||||
// if the err was EOF still need to process last value
|
||||
if err == io.EOF {
|
||||
tests = parseLine(line, tests)
|
||||
}
|
||||
return tests
|
||||
}
|
||||
|
||||
const comment = "#"
|
||||
const brk = "÷"
|
||||
const nbrk = "×"
|
||||
|
||||
type test [][]byte
|
||||
|
||||
func parseLine(line string, tests []test) []test {
|
||||
if strings.HasPrefix(line, comment) {
|
||||
return tests
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
if len(line) == 0 {
|
||||
return tests
|
||||
}
|
||||
commentStart := strings.Index(line, comment)
|
||||
if commentStart > 0 {
|
||||
line = line[0:commentStart]
|
||||
}
|
||||
pieces := strings.Split(line, brk)
|
||||
t := make(test, 0)
|
||||
for _, piece := range pieces {
|
||||
piece = strings.TrimSpace(piece)
|
||||
if len(piece) > 0 {
|
||||
codePoints := strings.Split(piece, nbrk)
|
||||
word := ""
|
||||
for _, codePoint := range codePoints {
|
||||
codePoint = strings.TrimSpace(codePoint)
|
||||
r, err := strconv.ParseInt(codePoint, 16, 64)
|
||||
if err != nil {
|
||||
log.Printf("err: %v for '%s'", err, string(r))
|
||||
return tests
|
||||
}
|
||||
|
||||
word += string(r)
|
||||
}
|
||||
t = append(t, []byte(word))
|
||||
}
|
||||
}
|
||||
tests = append(tests, t)
|
||||
return tests
|
||||
}
|
||||
|
||||
func generateTestTables(prefix string, tests []test) {
|
||||
fmt.Fprintf(output, testHeader, prefix)
|
||||
for _, t := range tests {
|
||||
fmt.Fprintf(output, "\t\t{\n")
|
||||
fmt.Fprintf(output, "\t\t\tinput: %#v,\n", bytes.Join(t, []byte{}))
|
||||
fmt.Fprintf(output, "\t\t\toutput: %s,\n", generateTest(t))
|
||||
fmt.Fprintf(output, "\t\t},\n")
|
||||
}
|
||||
fmt.Fprintf(output, "}\n")
|
||||
}
|
||||
|
||||
func generateTest(t test) string {
|
||||
rv := "[][]byte{"
|
||||
for _, te := range t {
|
||||
rv += fmt.Sprintf("%#v,", te)
|
||||
}
|
||||
rv += "}"
|
||||
return rv
|
||||
}
|
||||
|
||||
const fileHeader = `// Generated by running
|
||||
// maketesttables --url=%s
|
||||
// DO NOT EDIT
|
||||
|
||||
package textseg
|
||||
`
|
||||
|
||||
const testHeader = `var unicode%sTests = []struct {
|
||||
input []byte
|
||||
output [][]byte
|
||||
}{
|
||||
`
|
||||
|
||||
func setupOutput() {
|
||||
output = bufio.NewWriter(startGofmt())
|
||||
}
|
||||
|
||||
// startGofmt connects output to a gofmt process if -output is set.
|
||||
func startGofmt() io.Writer {
|
||||
if *outputFile == "" {
|
||||
return os.Stdout
|
||||
}
|
||||
stdout, err := os.Create(*outputFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Pipe output to gofmt.
|
||||
gofmt := exec.Command("gofmt")
|
||||
fd, err := gofmt.StdinPipe()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
gofmt.Stdout = stdout
|
||||
gofmt.Stderr = os.Stderr
|
||||
err = gofmt.Start()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return fd
|
||||
}
|
||||
|
||||
func flushOutput() {
|
||||
err := output.Flush()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
5700 vendor/github.com/apparentlymart/go-textseg/textseg/tables.go (generated, vendored, new file)
File diff suppressed because it is too large
335 vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb (generated, vendored, new file)
@@ -0,0 +1,335 @@
#!/usr/bin/env ruby
|
||||
#
|
||||
# This scripted has been updated to accept more command-line arguments:
|
||||
#
|
||||
# -u, --url URL to process
|
||||
# -m, --machine Machine name
|
||||
# -p, --properties Properties to add to the machine
|
||||
# -o, --output Write output to file
|
||||
#
|
||||
# Updated by: Marty Schoch <marty.schoch@gmail.com>
|
||||
#
|
||||
# This script uses the unicode spec to generate a Ragel state machine
|
||||
# that recognizes unicode alphanumeric characters. It generates 5
|
||||
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
|
||||
# Currently supported encodings are UTF-8 [default] and UCS-4.
|
||||
#
|
||||
# Usage: unicode2ragel.rb [options]
|
||||
# -e, --encoding [ucs4 | utf8] Data encoding
|
||||
# -h, --help Show this message
|
||||
#
|
||||
# This script was originally written as part of the Ferret search
|
||||
# engine library.
|
||||
#
|
||||
# Author: Rakan El-Khalil <rakan@well.com>
|
||||
|
||||
require 'optparse'
|
||||
require 'open-uri'
|
||||
|
||||
ENCODINGS = [ :utf8, :ucs4 ]
|
||||
ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
|
||||
DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
|
||||
DEFAULT_MACHINE_NAME= "WChar"
|
||||
|
||||
###
|
||||
# Display vars & default option
|
||||
|
||||
TOTAL_WIDTH = 80
|
||||
RANGE_WIDTH = 23
|
||||
@encoding = :utf8
|
||||
@chart_url = DEFAULT_CHART_URL
|
||||
machine_name = DEFAULT_MACHINE_NAME
|
||||
properties = []
|
||||
@output = $stdout
|
||||
|
||||
###
|
||||
# Option parsing
|
||||
|
||||
cli_opts = OptionParser.new do |opts|
|
||||
opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
|
||||
@encoding = o.downcase.to_sym
|
||||
end
|
||||
opts.on("-h", "--help", "Show this message") do
|
||||
puts opts
|
||||
exit
|
||||
end
|
||||
opts.on("-u", "--url URL", "URL to process") do |o|
|
||||
@chart_url = o
|
||||
end
|
||||
opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
|
||||
machine_name = o
|
||||
end
|
||||
opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
|
||||
properties = o
|
||||
end
|
||||
opts.on("-o", "--output FILE", "output file") do |o|
|
||||
@output = File.new(o, "w+")
|
||||
end
|
||||
end
|
||||
|
||||
cli_opts.parse(ARGV)
|
||||
unless ENCODINGS.member? @encoding
|
||||
puts "Invalid encoding: #{@encoding}"
|
||||
puts cli_opts
|
||||
exit
|
||||
end
|
||||
|
||||
##
|
||||
# Downloads the document at url and yields every alpha line's hex
|
||||
# range and description.
|
||||
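# For example (illustrative, not part of the original script), a line of
# DerivedCoreProperties.txt such as
#   0041..005A    ; Alphabetic # L&  [26] LATIN CAPITAL LETTER A..LATIN CAPITAL LETTER Z
# is yielded as the range 0x41..0x5A together with its trailing description.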
|
||||
def each_alpha( url, property )
|
||||
open( url ) do |file|
|
||||
file.each_line do |line|
|
||||
next if line =~ /^#/;
|
||||
next if line !~ /; #{property} #/;
|
||||
|
||||
range, description = line.split(/;/)
|
||||
range.strip!
|
||||
description.gsub!(/.*#/, '').strip!
|
||||
|
||||
if range =~ /\.\./
|
||||
start, stop = range.split '..'
|
||||
else start = stop = range
|
||||
end
|
||||
|
||||
yield start.hex .. stop.hex, description
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
###
|
||||
# Formats n as hex, zero-padded to an even number of digits
|
||||
|
||||
def to_hex( n )
|
||||
r = "%0X" % n
|
||||
r = "0#{r}" unless (r.length % 2).zero?
|
||||
r
|
||||
end
|
||||
|
||||
###
|
||||
# UCS4 is just a straight hex conversion of the unicode codepoint.
|
||||
|
||||
def to_ucs4( range )
|
||||
rangestr = "0x" + to_hex(range.begin)
|
||||
rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
|
||||
[ rangestr ]
|
||||
end
|
||||
|
||||
##
|
||||
# 0x00 - 0x7f -> 0zzzzzzz[7]
|
||||
# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
|
||||
# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
|
||||
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
|
||||
|
||||
UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
|
||||
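# Worked example (illustrative, not part of the original script): the codepoint
# 0xE9 sits in the 0x80..0x7ff band, so to_utf8_enc(0xE9) computes
# y = 0xc0 | (0xE9 >> 6) = 0xC3 and z = 0x80 | (0xE9 & 0x3f) = 0xA9, packing
# them into 0xC3A9, which is exactly the two UTF-8 bytes of U+00E9.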
|
||||
def to_utf8_enc( n )
|
||||
r = 0
|
||||
if n <= 0x7f
|
||||
r = n
|
||||
elsif n <= 0x7ff
|
||||
y = 0xc0 | (n >> 6)
|
||||
z = 0x80 | (n & 0x3f)
|
||||
r = y << 8 | z
|
||||
elsif n <= 0xffff
|
||||
x = 0xe0 | (n >> 12)
|
||||
y = 0x80 | (n >> 6) & 0x3f
|
||||
z = 0x80 | n & 0x3f
|
||||
r = x << 16 | y << 8 | z
|
||||
elsif n <= 0x10ffff
|
||||
w = 0xf0 | (n >> 18)
|
||||
x = 0x80 | (n >> 12) & 0x3f
|
||||
y = 0x80 | (n >> 6) & 0x3f
|
||||
z = 0x80 | n & 0x3f
|
||||
r = w << 24 | x << 16 | y << 8 | z
|
||||
end
|
||||
|
||||
to_hex(r)
|
||||
end
|
||||
|
||||
def from_utf8_enc( n )
|
||||
n = n.hex
|
||||
r = 0
|
||||
if n <= 0x7f
|
||||
r = n
|
||||
elsif n <= 0xdfff
|
||||
y = (n >> 8) & 0x1f
|
||||
z = n & 0x3f
|
||||
r = y << 6 | z
|
||||
elsif n <= 0xefffff
|
||||
x = (n >> 16) & 0x0f
|
||||
y = (n >> 8) & 0x3f
|
||||
z = n & 0x3f
|
||||
r = x << 10 | y << 6 | z
|
||||
elsif n <= 0xf7ffffff
|
||||
w = (n >> 24) & 0x07
|
||||
x = (n >> 16) & 0x3f
|
||||
y = (n >> 8) & 0x3f
|
||||
z = n & 0x3f
|
||||
r = w << 18 | x << 12 | y << 6 | z
|
||||
end
|
||||
r
|
||||
end
|
||||
|
||||
###
|
||||
# Given a range, splits it up into ranges that can be continuously
|
||||
# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
|
||||
# This is not strictly needed since the current [5.1] unicode standard
|
||||
# doesn't have ranges that straddle utf8 boundaries. This is included
|
||||
# for completeness as there is no telling if that will ever change.
|
||||
|
||||
def utf8_ranges( range )
|
||||
ranges = []
|
||||
UTF8_BOUNDARIES.each do |max|
|
||||
if range.begin <= max
|
||||
if range.end <= max
|
||||
ranges << range
|
||||
return ranges
|
||||
end
|
||||
|
||||
ranges << (range.begin .. max)
|
||||
range = (max + 1) .. range.end
|
||||
end
|
||||
end
|
||||
ranges
|
||||
end
|
||||
|
||||
def build_range( start, stop )
|
||||
size = start.size/2
|
||||
left = size - 1
|
||||
return [""] if size < 1
|
||||
|
||||
a = start[0..1]
|
||||
b = stop[0..1]
|
||||
|
||||
###
|
||||
# Shared prefix
|
||||
|
||||
if a == b
|
||||
return build_range(start[2..-1], stop[2..-1]).map do |elt|
|
||||
"0x#{a} " + elt
|
||||
end
|
||||
end
|
||||
|
||||
###
|
||||
# Unshared prefix, end of run
|
||||
|
||||
return ["0x#{a}..0x#{b} "] if left.zero?
|
||||
|
||||
###
|
||||
# Unshared prefix, not end of run
|
||||
# Range can be 0x123456..0x56789A
|
||||
# Which is equivalent to:
|
||||
# 0x123456 .. 0x12FFFF
|
||||
# 0x130000 .. 0x55FFFF
|
||||
# 0x560000 .. 0x56789A
|
||||
|
||||
ret = []
|
||||
ret << build_range(start, a + "FF" * left)
|
||||
|
||||
###
|
||||
# Only generate middle range if need be.
|
||||
|
||||
if a.hex+1 != b.hex
|
||||
max = to_hex(b.hex - 1)
|
||||
max = "FF" if b == "FF"
|
||||
ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
|
||||
end
|
||||
|
||||
###
|
||||
# Don't generate last range if it is covered by first range
|
||||
|
||||
ret << build_range(b + "00" * left, stop) unless b == "FF"
|
||||
ret.flatten!
|
||||
end
|
||||
|
||||
def to_utf8( range )
|
||||
utf8_ranges( range ).map do |r|
|
||||
begin_enc = to_utf8_enc(r.begin)
|
||||
end_enc = to_utf8_enc(r.end)
|
||||
build_range begin_enc, end_enc
|
||||
end.flatten!
|
||||
end
|
||||
|
||||
##
|
||||
# Perform a 3-way comparison of the number of codepoints advertised by
|
||||
# the unicode spec for the given range, the originally parsed range,
|
||||
# and the resulting utf8 encoded range.
|
||||
|
||||
def count_codepoints( code )
|
||||
code.split(' ').inject(1) do |acc, elt|
|
||||
if elt =~ /0x(.+)\.\.0x(.+)/
|
||||
if @encoding == :utf8
|
||||
acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
|
||||
else
|
||||
acc * ($2.hex - $1.hex + 1)
|
||||
end
|
||||
else
|
||||
acc
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def is_valid?( range, desc, codes )
|
||||
spec_count = 1
|
||||
spec_count = $1.to_i if desc =~ /\[(\d+)\]/
|
||||
range_count = range.end - range.begin + 1
|
||||
|
||||
sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
|
||||
sum == spec_count and sum == range_count
|
||||
end
|
||||
|
||||
##
|
||||
# Generate the state machine and write it to @output
|
||||
|
||||
def generate_machine( name, property )
|
||||
pipe = " "
|
||||
@output.puts " #{name} = "
|
||||
each_alpha( @chart_url, property ) do |range, desc|
|
||||
|
||||
codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
|
||||
|
||||
#raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
|
||||
# is_valid? range, desc, codes
|
||||
|
||||
range_width = codes.map { |a| a.size }.max
|
||||
range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
|
||||
|
||||
desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
|
||||
desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
|
||||
|
||||
if desc.size > desc_width
|
||||
desc = desc[0..desc_width - 4] + "..."
|
||||
end
|
||||
|
||||
codes.each_with_index do |r, idx|
|
||||
desc = "" unless idx.zero?
|
||||
code = "%-#{range_width}s" % r
|
||||
@output.puts " #{pipe} #{code} ##{desc}"
|
||||
pipe = "|"
|
||||
end
|
||||
end
|
||||
@output.puts " ;"
|
||||
@output.puts ""
|
||||
end
|
||||
|
||||
@output.puts <<EOF
|
||||
# The following Ragel file was autogenerated with #{$0}
|
||||
# from: #{@chart_url}
|
||||
#
|
||||
# It defines #{properties}.
|
||||
#
|
||||
# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
|
||||
# and that your input is in #{@encoding}.
|
||||
|
||||
%%{
|
||||
machine #{machine_name};
|
||||
|
||||
EOF
|
||||
|
||||
properties.each { |x| generate_machine( x, x ) }
|
||||
|
||||
@output.puts <<EOF
|
||||
}%%
|
||||
EOF
|
||||
19 vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go (generated, vendored, new file) @@ -0,0 +1,19 @@
|
|||
package textseg
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// ScanUTF8Sequences is a split function for bufio.Scanner that splits
|
||||
// on UTF8 sequence boundaries.
|
||||
//
|
||||
// This is included largely for completeness, since this behavior is already
|
||||
// built in to Go when ranging over a string.
|
||||
func ScanUTF8Sequences(data []byte, atEOF bool) (int, []byte, error) {
|
||||
if len(data) == 0 {
|
||||
return 0, nil, nil
|
||||
}
|
||||
r, seqLen := utf8.DecodeRune(data)
|
||||
if r == utf8.RuneError && !atEOF {
|
||||
return 0, nil, nil
|
||||
}
|
||||
return seqLen, data[:seqLen], nil
|
||||
}
|
||||
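As a quick illustration of how a split function like ScanUTF8Sequences is wired into a bufio.Scanner (a minimal sketch, not part of the vendored file):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	// Feed a short string through the scanner one UTF-8 sequence at a time.
	sc := bufio.NewScanner(strings.NewReader("héllo"))
	sc.Split(textseg.ScanUTF8Sequences)
	for sc.Scan() {
		fmt.Printf("%q\n", sc.Text())
	}
}
```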
22 vendor/github.com/armon/go-radix/.gitignore (generated, vendored, new file) @@ -0,0 +1,22 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
3 vendor/github.com/armon/go-radix/.travis.yml (generated, vendored, new file) @@ -0,0 +1,3 @@
|
|||
language: go
|
||||
go:
|
||||
- tip
|
||||
20 vendor/github.com/armon/go-radix/LICENSE (generated, vendored, new file) @@ -0,0 +1,20 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Armon Dadgar
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
38 vendor/github.com/armon/go-radix/README.md (generated, vendored, new file) @@ -0,0 +1,38 @@
|
|||
go-radix [](https://travis-ci.org/armon/go-radix)
|
||||
=========
|
||||
|
||||
Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
|
||||
The package only provides a single `Tree` implementation, optimized for sparse nodes.
|
||||
|
||||
As a radix tree, it provides the following:
|
||||
* O(k) operations. In many cases, this can be faster than a hash table since
|
||||
the hash function is an O(k) operation, and hash tables have very poor cache locality.
|
||||
* Minimum / Maximum value lookups
|
||||
* Ordered iteration
|
||||
|
||||
For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
|
||||
|
||||
Documentation
|
||||
=============
|
||||
|
||||
The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix).
|
||||
|
||||
Example
|
||||
=======
|
||||
|
||||
Below is a simple example of usage
|
||||
|
||||
```go
|
||||
// Create a tree
|
||||
r := radix.New()
|
||||
r.Insert("foo", 1)
|
||||
r.Insert("bar", 2)
|
||||
r.Insert("foobar", 2)
|
||||
|
||||
// Find the longest prefix match
|
||||
m, _, _ := r.LongestPrefix("foozip")
|
||||
if m != "foo" {
|
||||
panic("should be foo")
|
||||
}
|
||||
```
|
||||
|
||||
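The tree also supports prefix-scoped iteration; a minimal sketch building on the example above (not part of the upstream README):

```go
// Walk every key stored under the "foo" prefix.
r.WalkPrefix("foo", func(k string, v interface{}) bool {
	fmt.Println(k, v)
	return false // returning false keeps the walk going
})
```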
496 vendor/github.com/armon/go-radix/radix.go (generated, vendored, new file) @@ -0,0 +1,496 @@
|
|||
package radix
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// WalkFn is used when walking the tree. Takes a
|
||||
// key and value, returning if iteration should
|
||||
// be terminated.
|
||||
type WalkFn func(s string, v interface{}) bool
|
||||
|
||||
// leafNode is used to represent a value
|
||||
type leafNode struct {
|
||||
key string
|
||||
val interface{}
|
||||
}
|
||||
|
||||
// edge is used to represent an edge node
|
||||
type edge struct {
|
||||
label byte
|
||||
node *node
|
||||
}
|
||||
|
||||
type node struct {
|
||||
// leaf is used to store possible leaf
|
||||
leaf *leafNode
|
||||
|
||||
// prefix is the common prefix we ignore
|
||||
prefix string
|
||||
|
||||
// Edges should be stored in-order for iteration.
|
||||
// We avoid a fully materialized slice to save memory,
|
||||
// since in most cases we expect to be sparse
|
||||
edges edges
|
||||
}
|
||||
|
||||
func (n *node) isLeaf() bool {
|
||||
return n.leaf != nil
|
||||
}
|
||||
|
||||
func (n *node) addEdge(e edge) {
|
||||
n.edges = append(n.edges, e)
|
||||
n.edges.Sort()
|
||||
}
|
||||
|
||||
func (n *node) replaceEdge(e edge) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= e.label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == e.label {
|
||||
n.edges[idx].node = e.node
|
||||
return
|
||||
}
|
||||
panic("replacing missing edge")
|
||||
}
|
||||
|
||||
func (n *node) getEdge(label byte) *node {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
return n.edges[idx].node
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *node) delEdge(label byte) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
copy(n.edges[idx:], n.edges[idx+1:])
|
||||
n.edges[len(n.edges)-1] = edge{}
|
||||
n.edges = n.edges[:len(n.edges)-1]
|
||||
}
|
||||
}
|
||||
|
||||
type edges []edge
|
||||
|
||||
func (e edges) Len() int {
|
||||
return len(e)
|
||||
}
|
||||
|
||||
func (e edges) Less(i, j int) bool {
|
||||
return e[i].label < e[j].label
|
||||
}
|
||||
|
||||
func (e edges) Swap(i, j int) {
|
||||
e[i], e[j] = e[j], e[i]
|
||||
}
|
||||
|
||||
func (e edges) Sort() {
|
||||
sort.Sort(e)
|
||||
}
|
||||
|
||||
// Tree implements a radix tree. This can be treated as a
|
||||
// Dictionary abstract data type. The main advantage over
|
||||
// a standard hash map is prefix-based lookups and
|
||||
// ordered iteration.
|
||||
type Tree struct {
|
||||
root *node
|
||||
size int
|
||||
}
|
||||
|
||||
// New returns an empty Tree
|
||||
func New() *Tree {
|
||||
return NewFromMap(nil)
|
||||
}
|
||||
|
||||
// NewFromMap returns a new tree containing the keys
|
||||
// from an existing map
|
||||
func NewFromMap(m map[string]interface{}) *Tree {
|
||||
t := &Tree{root: &node{}}
|
||||
for k, v := range m {
|
||||
t.Insert(k, v)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Len is used to return the number of elements in the tree
|
||||
func (t *Tree) Len() int {
|
||||
return t.size
|
||||
}
|
||||
|
||||
// longestPrefix finds the length of the shared prefix
|
||||
// of two strings
|
||||
func longestPrefix(k1, k2 string) int {
|
||||
max := len(k1)
|
||||
if l := len(k2); l < max {
|
||||
max = l
|
||||
}
|
||||
var i int
|
||||
for i = 0; i < max; i++ {
|
||||
if k1[i] != k2[i] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// Insert is used to add a new entry or update
|
||||
// an existing entry. Returns if updated.
|
||||
func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
|
||||
var parent *node
|
||||
n := t.root
|
||||
search := s
|
||||
for {
|
||||
// Handle key exhaustion
|
||||
if len(search) == 0 {
|
||||
if n.isLeaf() {
|
||||
old := n.leaf.val
|
||||
n.leaf.val = v
|
||||
return old, true
|
||||
}
|
||||
|
||||
n.leaf = &leafNode{
|
||||
key: s,
|
||||
val: v,
|
||||
}
|
||||
t.size++
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Look for the edge
|
||||
parent = n
|
||||
n = n.getEdge(search[0])
|
||||
|
||||
// No edge, create one
|
||||
if n == nil {
|
||||
e := edge{
|
||||
label: search[0],
|
||||
node: &node{
|
||||
leaf: &leafNode{
|
||||
key: s,
|
||||
val: v,
|
||||
},
|
||||
prefix: search,
|
||||
},
|
||||
}
|
||||
parent.addEdge(e)
|
||||
t.size++
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Determine longest prefix of the search key on match
|
||||
commonPrefix := longestPrefix(search, n.prefix)
|
||||
if commonPrefix == len(n.prefix) {
|
||||
search = search[commonPrefix:]
|
||||
continue
|
||||
}
|
||||
|
||||
// Split the node
|
||||
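// (For example, if the node's prefix is "team" and the remaining search
// text is "test", the shared prefix "te" becomes the new child node, with
// "am" and "st" hanging off it as separate edges.)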
t.size++
|
||||
child := &node{
|
||||
prefix: search[:commonPrefix],
|
||||
}
|
||||
parent.replaceEdge(edge{
|
||||
label: search[0],
|
||||
node: child,
|
||||
})
|
||||
|
||||
// Restore the existing node
|
||||
child.addEdge(edge{
|
||||
label: n.prefix[commonPrefix],
|
||||
node: n,
|
||||
})
|
||||
n.prefix = n.prefix[commonPrefix:]
|
||||
|
||||
// Create a new leaf node
|
||||
leaf := &leafNode{
|
||||
key: s,
|
||||
val: v,
|
||||
}
|
||||
|
||||
// If the new key is a subset, add it to this node
|
||||
search = search[commonPrefix:]
|
||||
if len(search) == 0 {
|
||||
child.leaf = leaf
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Create a new edge for the node
|
||||
child.addEdge(edge{
|
||||
label: search[0],
|
||||
node: &node{
|
||||
leaf: leaf,
|
||||
prefix: search,
|
||||
},
|
||||
})
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
// Delete is used to delete a key, returning the previous
|
||||
// value and if it was deleted
|
||||
func (t *Tree) Delete(s string) (interface{}, bool) {
|
||||
var parent *node
|
||||
var label byte
|
||||
n := t.root
|
||||
search := s
|
||||
for {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
if !n.isLeaf() {
|
||||
break
|
||||
}
|
||||
goto DELETE
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
parent = n
|
||||
label = search[0]
|
||||
n = n.getEdge(label)
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
|
||||
DELETE:
|
||||
// Delete the leaf
|
||||
leaf := n.leaf
|
||||
n.leaf = nil
|
||||
t.size--
|
||||
|
||||
// Check if we should delete this node from the parent
|
||||
if parent != nil && len(n.edges) == 0 {
|
||||
parent.delEdge(label)
|
||||
}
|
||||
|
||||
// Check if we should merge this node
|
||||
if n != t.root && len(n.edges) == 1 {
|
||||
n.mergeChild()
|
||||
}
|
||||
|
||||
// Check if we should merge the parent's other child
|
||||
if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
|
||||
parent.mergeChild()
|
||||
}
|
||||
|
||||
return leaf.val, true
|
||||
}
|
||||
|
||||
func (n *node) mergeChild() {
|
||||
e := n.edges[0]
|
||||
child := e.node
|
||||
n.prefix = n.prefix + child.prefix
|
||||
n.leaf = child.leaf
|
||||
n.edges = child.edges
|
||||
}
|
||||
|
||||
// Get is used to lookup a specific key, returning
|
||||
// the value and if it was found
|
||||
func (t *Tree) Get(s string) (interface{}, bool) {
|
||||
n := t.root
|
||||
search := s
|
||||
for {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
if n.isLeaf() {
|
||||
return n.leaf.val, true
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// LongestPrefix is like Get, but instead of an
|
||||
// exact match, it will return the longest prefix match.
|
||||
func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
|
||||
var last *leafNode
|
||||
n := t.root
|
||||
search := s
|
||||
for {
|
||||
// Look for a leaf node
|
||||
if n.isLeaf() {
|
||||
last = n.leaf
|
||||
}
|
||||
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if last != nil {
|
||||
return last.key, last.val, true
|
||||
}
|
||||
return "", nil, false
|
||||
}
|
||||
|
||||
// Minimum is used to return the minimum value in the tree
|
||||
func (t *Tree) Minimum() (string, interface{}, bool) {
|
||||
n := t.root
|
||||
for {
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
}
|
||||
if len(n.edges) > 0 {
|
||||
n = n.edges[0].node
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return "", nil, false
|
||||
}
|
||||
|
||||
// Maximum is used to return the maximum value in the tree
|
||||
func (t *Tree) Maximum() (string, interface{}, bool) {
|
||||
n := t.root
|
||||
for {
|
||||
if num := len(n.edges); num > 0 {
|
||||
n = n.edges[num-1].node
|
||||
continue
|
||||
}
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
}
|
||||
break
|
||||
}
|
||||
return "", nil, false
|
||||
}
|
||||
|
||||
// Walk is used to walk the tree
|
||||
func (t *Tree) Walk(fn WalkFn) {
|
||||
recursiveWalk(t.root, fn)
|
||||
}
|
||||
|
||||
// WalkPrefix is used to walk the tree under a prefix
|
||||
func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
|
||||
n := t.root
|
||||
search := prefix
|
||||
for {
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
recursiveWalk(n, fn)
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
|
||||
} else if strings.HasPrefix(n.prefix, search) {
|
||||
// Child may be under our search prefix
|
||||
recursiveWalk(n, fn)
|
||||
return
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// WalkPath is used to walk the tree, but only visiting nodes
|
||||
// from the root down to a given leaf. Where WalkPrefix walks
|
||||
// all the entries *under* the given prefix, this walks the
|
||||
// entries *above* the given prefix.
|
||||
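// For example, with keys "f", "fo", "foo" and "foobar" in the tree,
// WalkPath("foobar") visits "f", "fo", "foo" and "foobar", whereas
// WalkPrefix("foo") visits "foo" and "foobar".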
func (t *Tree) WalkPath(path string, fn WalkFn) {
|
||||
n := t.root
|
||||
search := path
|
||||
for {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return
|
||||
}
|
||||
|
||||
// Check for key exhaustion
|
||||
if len(search) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
n = n.getEdge(search[0])
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// recursiveWalk is used to do a pre-order walk of a node
|
||||
// recursively. Returns true if the walk should be aborted
|
||||
func recursiveWalk(n *node, fn WalkFn) bool {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
for _, e := range n.edges {
|
||||
if recursiveWalk(e.node, fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ToMap is used to walk the tree and convert it into a map
|
||||
func (t *Tree) ToMap() map[string]interface{} {
|
||||
out := make(map[string]interface{}, t.size)
|
||||
t.Walk(func(k string, v interface{}) bool {
|
||||
out[k] = v
|
||||
return false
|
||||
})
|
||||
return out
|
||||
}
|
||||
10 vendor/github.com/aws/aws-sdk-go/aws/client/client.go (generated, vendored) @@ -15,6 +15,12 @@ type Config struct {
|
|||
Endpoint string
|
||||
SigningRegion string
|
||||
SigningName string
|
||||
|
||||
// States that the signing name did not come from a modeled source but
|
||||
// was derived based on other data. Used by service client constructors
|
||||
// to determine if the signing name can be overridden based on metadata the
|
||||
// service has.
|
||||
SigningNameDerived bool
|
||||
}
|
||||
|
||||
// ConfigProvider provides a generic way for a service client to receive
|
||||
|
|
@ -85,6 +91,6 @@ func (c *Client) AddDebugHandlers() {
|
|||
return
|
||||
}
|
||||
|
||||
c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
|
||||
c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
|
||||
c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
|
||||
c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
|
||||
}
|
||||
|
|
|
|||
80 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go (generated, vendored) @@ -1,11 +1,11 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkrand"
|
||||
)
|
||||
|
||||
// DefaultRetryer implements basic retry logic using exponential backoff for
|
||||
|
|
@ -15,11 +15,11 @@ import (
|
|||
// the MaxRetries method:
|
||||
//
|
||||
// type retryer struct {
|
||||
// service.DefaultRetryer
|
||||
// client.DefaultRetryer
|
||||
// }
|
||||
//
|
||||
// // This implementation always has 100 max retries
|
||||
// func (d retryer) MaxRetries() uint { return 100 }
|
||||
// func (d retryer) MaxRetries() int { return 100 }
|
||||
type DefaultRetryer struct {
|
||||
NumMaxRetries int
|
||||
}
|
||||
|
|
@ -30,25 +30,27 @@ func (d DefaultRetryer) MaxRetries() int {
|
|||
return d.NumMaxRetries
|
||||
}
|
||||
|
||||
var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
|
||||
|
||||
// RetryRules returns the delay duration before retrying this request again
|
||||
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
||||
// Set the upper limit of delay in retrying at ~five minutes
|
||||
minTime := 30
|
||||
throttle := d.shouldThrottle(r)
|
||||
if throttle {
|
||||
if delay, ok := getRetryDelay(r); ok {
|
||||
return delay
|
||||
}
|
||||
|
||||
minTime = 500
|
||||
}
|
||||
|
||||
retryCount := r.RetryCount
|
||||
if retryCount > 13 {
|
||||
retryCount = 13
|
||||
} else if throttle && retryCount > 8 {
|
||||
if throttle && retryCount > 8 {
|
||||
retryCount = 8
|
||||
} else if retryCount > 13 {
|
||||
retryCount = 13
|
||||
}
|
||||
|
||||
delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
|
||||
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
|
||||
return time.Duration(delay) * time.Millisecond
|
||||
}
|
||||
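// To make the backoff concrete (illustrative arithmetic): with the default
// minTime of 30 and a RetryCount of 3, the delay above is
// (1 << 3) * (Intn(30) + 30) milliseconds, i.e. between 240ms and 472ms;
// with throttling (minTime = 500) the same attempt waits between 4s and roughly 8s.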
|
||||
|
|
@ -60,7 +62,7 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
|||
return *r.Retryable
|
||||
}
|
||||
|
||||
if r.HTTPResponse.StatusCode >= 500 {
|
||||
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
|
||||
return true
|
||||
}
|
||||
return r.IsErrorRetryable() || d.shouldThrottle(r)
|
||||
|
|
@ -68,29 +70,47 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
|||
|
||||
// ShouldThrottle returns true if the request should be throttled.
|
||||
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
|
||||
if r.HTTPResponse.StatusCode == 502 ||
|
||||
r.HTTPResponse.StatusCode == 503 ||
|
||||
r.HTTPResponse.StatusCode == 504 {
|
||||
return true
|
||||
switch r.HTTPResponse.StatusCode {
|
||||
case 429:
|
||||
case 502:
|
||||
case 503:
|
||||
case 504:
|
||||
default:
|
||||
return r.IsErrorThrottle()
|
||||
}
|
||||
return r.IsErrorThrottle()
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// lockedSource is a thread-safe implementation of rand.Source
|
||||
type lockedSource struct {
|
||||
lk sync.Mutex
|
||||
src rand.Source
|
||||
// This will look in the Retry-After header, RFC 7231, for how long
|
||||
// it will wait before attempting another request
|
||||
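// (For example, a response carrying "Retry-After: 120" yields a two minute delay.)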
func getRetryDelay(r *request.Request) (time.Duration, bool) {
|
||||
if !canUseRetryAfterHeader(r) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
delayStr := r.HTTPResponse.Header.Get("Retry-After")
|
||||
if len(delayStr) == 0 {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
delay, err := strconv.Atoi(delayStr)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
return time.Duration(delay) * time.Second, true
|
||||
}
|
||||
|
||||
func (r *lockedSource) Int63() (n int64) {
|
||||
r.lk.Lock()
|
||||
n = r.src.Int63()
|
||||
r.lk.Unlock()
|
||||
return
|
||||
}
|
||||
// canUseRetryAfterHeader reports whether the Retry-After header pertains to
|
||||
// the status code.
|
||||
func canUseRetryAfterHeader(r *request.Request) bool {
|
||||
switch r.HTTPResponse.StatusCode {
|
||||
case 429:
|
||||
case 503:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *lockedSource) Seed(seed int64) {
|
||||
r.lk.Lock()
|
||||
r.src.Seed(seed)
|
||||
r.lk.Unlock()
|
||||
return true
|
||||
}
|
||||
|
|
|
|||
106 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go (generated, vendored) @@ -44,22 +44,57 @@ func (reader *teeReaderCloser) Close() error {
|
|||
return reader.Source.Close()
|
||||
}
|
||||
|
||||
// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
|
||||
// to a service. Will include the HTTP request body if the LogLevel of the
|
||||
// request matches LogDebugWithHTTPBody.
|
||||
var LogHTTPRequestHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogRequest",
|
||||
Fn: logRequest,
|
||||
}
|
||||
|
||||
func logRequest(r *request.Request) {
|
||||
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||
dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
|
||||
bodySeekable := aws.IsReaderSeekable(r.Body)
|
||||
|
||||
b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
if logBody {
|
||||
if !bodySeekable {
|
||||
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
|
||||
}
|
||||
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
|
||||
// Body as a NoOpCloser and will not be reset after read by the HTTP
|
||||
// client reader.
|
||||
r.ResetBody()
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
||||
|
||||
// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
|
||||
// to a service. Will only log the HTTP request's headers. The request payload
|
||||
// will not be read.
|
||||
var LogHTTPRequestHeaderHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogRequestHeader",
|
||||
Fn: logRequestHeader,
|
||||
}
|
||||
|
||||
func logRequestHeader(r *request.Request) {
|
||||
b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
||||
|
||||
const logRespMsg = `DEBUG: Response %s/%s Details:
|
||||
|
|
@ -72,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
|
|||
%s
|
||||
-----------------------------------------------------`
|
||||
|
||||
// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
|
||||
// received from a service. Will include the HTTP response body if the LogLevel
|
||||
// of the request matches LogDebugWithHTTPBody.
|
||||
var LogHTTPResponseHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogResponse",
|
||||
Fn: logResponse,
|
||||
}
|
||||
|
||||
func logResponse(r *request.Request) {
|
||||
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
|
||||
r.HTTPResponse.Body = &teeReaderCloser{
|
||||
Reader: io.TeeReader(r.HTTPResponse.Body, lw),
|
||||
Source: r.HTTPResponse.Body,
|
||||
|
||||
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||
if logBody {
|
||||
r.HTTPResponse.Body = &teeReaderCloser{
|
||||
Reader: io.TeeReader(r.HTTPResponse.Body, lw),
|
||||
Source: r.HTTPResponse.Body,
|
||||
}
|
||||
}
|
||||
|
||||
handlerFn := func(req *request.Request) {
|
||||
body, err := httputil.DumpResponse(req.HTTPResponse, false)
|
||||
b, err := httputil.DumpResponse(req.HTTPResponse, false)
|
||||
if err != nil {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadAll(lw.buf)
|
||||
if err != nil {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body)))
|
||||
if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespMsg,
|
||||
req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
|
||||
|
||||
if logBody {
|
||||
b, err := ioutil.ReadAll(lw.buf)
|
||||
if err != nil {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
lw.Logger.Log(string(b))
|
||||
}
|
||||
}
|
||||
|
|
@ -106,3 +158,27 @@ func logResponse(r *request.Request) {
|
|||
Name: handlerName, Fn: handlerFn,
|
||||
})
|
||||
}
|
||||
|
||||
// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
|
||||
// response received from a service. Will only log the HTTP response's headers.
|
||||
// The response payload will not be read.
|
||||
var LogHTTPResponseHeaderHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogResponseHeader",
|
||||
Fn: logResponseHeader,
|
||||
}
|
||||
|
||||
func logResponseHeader(r *request.Request) {
|
||||
if r.Config.Logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
b, err := httputil.DumpResponse(r.HTTPResponse, false)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
||||
|
|
|
|||
1 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go (generated, vendored) @@ -3,6 +3,7 @@ package metadata
|
|||
// ClientInfo wraps immutable data from the client.Client structure.
|
||||
type ClientInfo struct {
|
||||
ServiceName string
|
||||
ServiceID string
|
||||
APIVersion string
|
||||
Endpoint string
|
||||
SigningName string
|
||||
|
|
|
|||
26 vendor/github.com/aws/aws-sdk-go/aws/config.go (generated, vendored) @@ -95,7 +95,7 @@ type Config struct {
|
|||
// recoverable failures.
|
||||
//
|
||||
// When nil or the value does not implement the request.Retryer interface,
|
||||
// the request.DefaultRetryer will be used.
|
||||
// the client.DefaultRetryer will be used.
|
||||
//
|
||||
// When both Retryer and MaxRetries are non-nil, the former is used and
|
||||
// the latter ignored.
|
||||
|
|
@ -151,6 +151,15 @@ type Config struct {
|
|||
// with accelerate.
|
||||
S3UseAccelerate *bool
|
||||
|
||||
// S3DisableContentMD5Validation config option is temporarily disabled,
|
||||
// for S3 GetObject API calls (#1837).
|
||||
//
|
||||
// Set this to `true` to disable the S3 service client from automatically
|
||||
// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
|
||||
// will also disable the SDK from performing object ContentMD5 validation
|
||||
// on GetObject API calls.
|
||||
S3DisableContentMD5Validation *bool
|
||||
|
||||
// Set this to `true` to disable the EC2Metadata client from overriding the
|
||||
// default http.Client's Timeout. This is helpful if you do not want the
|
||||
// EC2Metadata client to create a new http.Client. This options is only
|
||||
|
|
@ -168,7 +177,7 @@ type Config struct {
|
|||
//
|
||||
EC2MetadataDisableTimeoutOverride *bool
|
||||
|
||||
// Instructs the endpiont to be generated for a service client to
|
||||
// Instructs the endpoint to be generated for a service client to
|
||||
// be the dual stack endpoint. The dual stack endpoint will support
|
||||
// both IPv4 and IPv6 addressing.
|
||||
//
|
||||
|
|
@ -336,6 +345,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config {
|
|||
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
|
||||
c.S3UseAccelerate = &enable
|
||||
return c
|
||||
|
||||
}
|
||||
|
||||
// WithS3DisableContentMD5Validation sets a config
|
||||
// S3DisableContentMD5Validation value returning a Config pointer for chaining.
|
||||
func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
|
||||
c.S3DisableContentMD5Validation = &enable
|
||||
return c
|
||||
|
||||
}
|
||||
|
||||
// WithUseDualStack sets a config UseDualStack value returning a Config
|
||||
|
|
@ -435,6 +453,10 @@ func mergeInConfig(dst *Config, other *Config) {
|
|||
dst.S3UseAccelerate = other.S3UseAccelerate
|
||||
}
|
||||
|
||||
if other.S3DisableContentMD5Validation != nil {
|
||||
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
|
||||
}
|
||||
|
||||
if other.UseDualStack != nil {
|
||||
dst.UseDualStack = other.UseDualStack
|
||||
}
|
||||
|
|
|
|||
6 vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go (generated, vendored) @@ -4,9 +4,9 @@ package aws
|
|||
|
||||
import "time"
|
||||
|
||||
// An emptyCtx is a copy of the the Go 1.7 context.emptyCtx type. This
|
||||
// is copied to provide a 1.6 and 1.5 safe version of context that is compatible
|
||||
// with Go 1.7's Context.
|
||||
// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
|
||||
// provide a 1.6 and 1.5 safe version of context that is compatible with Go
|
||||
// 1.7's Context.
|
||||
//
|
||||
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
|
||||
// struct{}, since vars of this type must have distinct addresses.
|
||||
|
|
|
|||
18 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go (generated, vendored) @@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time {
|
|||
return time.Time{}
|
||||
}
|
||||
|
||||
// SecondsTimeValue converts an int64 pointer to a time.Time value
|
||||
// representing seconds since Epoch or time.Time{} if the pointer is nil.
|
||||
func SecondsTimeValue(v *int64) time.Time {
|
||||
if v != nil {
|
||||
return time.Unix((*v / 1000), 0)
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// MillisecondsTimeValue converts an int64 pointer to a time.Time value
|
||||
// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
|
||||
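// (For example, a pointer to the value 1500 converts to 1.5 seconds after the Unix epoch.)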
func MillisecondsTimeValue(v *int64) time.Time {
|
||||
if v != nil {
|
||||
return time.Unix(0, (*v * 1000000))
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
|
||||
// The result is undefined if the Unix time cannot be represented by an int64.
|
||||
// This includes calling TimeUnixMilli on a zero Time, which is undefined.
|
||||
|
|
|
|||
28 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go (generated, vendored) @@ -3,12 +3,10 @@ package corehandlers
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
|
|
@ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
|
|||
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
|
||||
length, _ = strconv.ParseInt(slength, 10, 64)
|
||||
} else {
|
||||
switch body := r.Body.(type) {
|
||||
case nil:
|
||||
length = 0
|
||||
case lener:
|
||||
length = int64(body.Len())
|
||||
case io.Seeker:
|
||||
r.BodyStart, _ = body.Seek(0, 1)
|
||||
end, _ := body.Seek(0, 2)
|
||||
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
|
||||
length = end - r.BodyStart
|
||||
default:
|
||||
panic("Cannot get length of body, must provide `ContentLength`")
|
||||
if r.Body != nil {
|
||||
var err error
|
||||
length, err = aws.SeekerLen(r.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -60,13 +53,6 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
|
|||
}
|
||||
}}
|
||||
|
||||
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
|
||||
var SDKVersionUserAgentHandler = request.NamedHandler{
|
||||
Name: "core.SDKVersionUserAgentHandler",
|
||||
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
|
||||
runtime.Version(), runtime.GOOS, runtime.GOARCH),
|
||||
}
|
||||
|
||||
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
|
||||
|
||||
// ValidateReqSigHandler is a request handler to ensure that the request's
|
||||
|
|
|
|||
37 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go (generated, vendored, new file) @@ -0,0 +1,37 @@
|
|||
package corehandlers
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
|
||||
// to the user agent.
|
||||
var SDKVersionUserAgentHandler = request.NamedHandler{
|
||||
Name: "core.SDKVersionUserAgentHandler",
|
||||
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
|
||||
runtime.Version(), runtime.GOOS, runtime.GOARCH),
|
||||
}
|
||||
|
||||
const execEnvVar = `AWS_EXECUTION_ENV`
|
||||
const execEnvUAKey = `exec_env`
|
||||
|
||||
// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
|
||||
// execution environment to the user agent.
|
||||
//
|
||||
// If the environment variable AWS_EXECUTION_ENV is set, its value will be
|
||||
// appended to the user agent string.
|
||||
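// For example (illustrative value), with AWS_EXECUTION_ENV=AWS_Lambda_go1.x the
// handler appends "exec_env/AWS_Lambda_go1.x" to the User-Agent string.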
var AddHostExecEnvUserAgentHander = request.NamedHandler{
|
||||
Name: "core.AddHostExecEnvUserAgentHander",
|
||||
Fn: func(r *request.Request) {
|
||||
v := os.Getenv(execEnvVar)
|
||||
if len(v) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
request.AddToUserAgent(r, execEnvUAKey+"/"+v)
|
||||
},
|
||||
}
|
||||
18 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go (generated, vendored) @@ -178,7 +178,8 @@ func (e *Expiry) IsExpired() bool {
|
|||
type Credentials struct {
|
||||
creds Value
|
||||
forceRefresh bool
|
||||
m sync.Mutex
|
||||
|
||||
m sync.RWMutex
|
||||
|
||||
provider Provider
|
||||
}
|
||||
|
|
@ -201,6 +202,17 @@ func NewCredentials(provider Provider) *Credentials {
|
|||
// If Credentials.Expire() was called the credentials Value will be force
|
||||
// expired, and the next call to Get() will cause them to be refreshed.
|
||||
func (c *Credentials) Get() (Value, error) {
|
||||
// Check the cached credentials first with just the read lock.
|
||||
c.m.RLock()
|
||||
if !c.isExpired() {
|
||||
creds := c.creds
|
||||
c.m.RUnlock()
|
||||
return creds, nil
|
||||
}
|
||||
c.m.RUnlock()
|
||||
|
||||
// Credentials are expired need to retrieve the credentials taking the full
|
||||
// lock.
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
|
||||
|
|
@ -234,8 +246,8 @@ func (c *Credentials) Expire() {
|
|||
// If the Credentials were forced to be expired with Expire() this will
|
||||
// reflect that override.
|
||||
func (c *Credentials) IsExpired() bool {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
|
||||
return c.isExpired()
|
||||
}
|
||||
|
|
|
|||
6 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go (generated, vendored) @@ -4,7 +4,6 @@ import (
|
|||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
|
@ -12,6 +11,7 @@ import (
|
|||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkuri"
|
||||
)
|
||||
|
||||
// ProviderName provides a name of EC2Role provider
|
||||
|
|
@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct {
|
|||
Message string
|
||||
}
|
||||
|
||||
const iamSecurityCredsPath = "/iam/security-credentials"
|
||||
const iamSecurityCredsPath = "iam/security-credentials/"
|
||||
|
||||
// requestCredList requests a list of credentials from the EC2 service.
|
||||
// If there are no credentials, or there is an error making or receiving the request
|
||||
|
|
@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
|
|||
// If the credentials cannot be found, or there is an error reading the response
|
||||
// and error will be returned.
|
||||
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
|
||||
resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
|
||||
resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
|
||||
if err != nil {
|
||||
return ec2RoleCredRespBody{},
|
||||
awserr.New("EC2RoleRequestError",
|
||||
|
|
|
|||
46 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go (generated, vendored, new file) @@ -0,0 +1,46 @@
|
|||
// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
|
||||
// via UDP connection. Using the Start function will enable the reporting of
|
||||
// metrics on a given port. If Start is called again with different parameters,
|
||||
// a panic will occur.
|
||||
//
|
||||
// Pause can be called to pause any metrics publishing on a given port. Sessions
|
||||
// that have had their handlers modified via InjectHandlers may still be used.
|
||||
// However, the handlers will act as a no-op meaning no metrics will be published.
|
||||
//
|
||||
// Example:
|
||||
// r, err := csm.Start("clientID", ":31000")
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("failed starting CSM: %v", err))
|
||||
// }
|
||||
//
|
||||
// sess, err := session.NewSession(&aws.Config{})
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("failed loading session: %v", err))
|
||||
// }
|
||||
//
|
||||
// r.InjectHandlers(&sess.Handlers)
|
||||
//
|
||||
// client := s3.New(sess)
|
||||
// resp, err := client.GetObject(&s3.GetObjectInput{
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("key"),
|
||||
// })
|
||||
//
|
||||
// // Will pause monitoring
|
||||
// r.Pause()
|
||||
// resp, err = client.GetObject(&s3.GetObjectInput{
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("key"),
|
||||
// })
|
||||
//
|
||||
// // Resume monitoring
|
||||
// r.Continue()
|
||||
//
|
||||
// Start returns a Reporter that is used to enable or disable monitoring. If
|
||||
// access to the Reporter is required later, calling Get will return the Reporter
|
||||
// singleton.
|
||||
//
|
||||
// Example:
|
||||
// r := csm.Get()
|
||||
// r.Continue()
|
||||
package csm
|
||||
67
vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
generated
vendored
Normal file
67
vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
generated
vendored
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
package csm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
lock sync.Mutex
|
||||
)
|
||||
|
||||
// Client side metric handler names
|
||||
const (
|
||||
APICallMetricHandlerName = "awscsm.SendAPICallMetric"
|
||||
APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
|
||||
)
|
||||
|
||||
// Start will start a long-running goroutine to capture
|
||||
// client side metrics. Calling Start multiple times will only
|
||||
// start the metric listener once and will panic if a different
|
||||
// client ID or port is passed in.
|
||||
//
|
||||
// Example:
|
||||
// r, err := csm.Start("clientID", "127.0.0.1:8094")
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("expected no error, but received %v", err))
|
||||
// }
|
||||
// sess := session.NewSession()
|
||||
// r.InjectHandlers(sess.Handlers)
|
||||
//
|
||||
// svc := s3.New(sess)
|
||||
// out, err := svc.GetObject(&s3.GetObjectInput{
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("key"),
|
||||
// })
|
||||
func Start(clientID string, url string) (*Reporter, error) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
if sender == nil {
|
||||
sender = newReporter(clientID, url)
|
||||
} else {
|
||||
if sender.clientID != clientID {
|
||||
panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
|
||||
}
|
||||
|
||||
if sender.url != url {
|
||||
panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
|
||||
}
|
||||
}
|
||||
|
||||
if err := connect(url); err != nil {
|
||||
sender = nil
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sender, nil
|
||||
}
|
||||
|
||||
// Get will return a reporter if one exists, if one does not exist, nil will
|
||||
// be returned.
|
||||
func Get() *Reporter {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
return sender
|
||||
}
|
||||
51
vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
generated
vendored
Normal file
51
vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
generated
vendored
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
package csm
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type metricTime time.Time
|
||||
|
||||
func (t metricTime) MarshalJSON() ([]byte, error) {
|
||||
ns := time.Duration(time.Time(t).UnixNano())
|
||||
return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
|
||||
}
|
||||
|
||||
type metric struct {
|
||||
ClientID *string `json:"ClientId,omitempty"`
|
||||
API *string `json:"Api,omitempty"`
|
||||
Service *string `json:"Service,omitempty"`
|
||||
Timestamp *metricTime `json:"Timestamp,omitempty"`
|
||||
Type *string `json:"Type,omitempty"`
|
||||
Version *int `json:"Version,omitempty"`
|
||||
|
||||
AttemptCount *int `json:"AttemptCount,omitempty"`
|
||||
Latency *int `json:"Latency,omitempty"`
|
||||
|
||||
Fqdn *string `json:"Fqdn,omitempty"`
|
||||
UserAgent *string `json:"UserAgent,omitempty"`
|
||||
AttemptLatency *int `json:"AttemptLatency,omitempty"`
|
||||
|
||||
SessionToken *string `json:"SessionToken,omitempty"`
|
||||
Region *string `json:"Region,omitempty"`
|
||||
AccessKey *string `json:"AccessKey,omitempty"`
|
||||
HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
|
||||
XAmzID2 *string `json:"XAmzId2,omitempty"`
|
||||
XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
|
||||
|
||||
AWSException *string `json:"AwsException,omitempty"`
|
||||
AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
|
||||
SDKException *string `json:"SdkException,omitempty"`
|
||||
SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
|
||||
|
||||
DestinationIP *string `json:"DestinationIp,omitempty"`
|
||||
ConnectionReused *int `json:"ConnectionReused,omitempty"`
|
||||
|
||||
AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
|
||||
ConnectLatency *int `json:"ConnectLatency,omitempty"`
|
||||
RequestLatency *int `json:"RequestLatency,omitempty"`
|
||||
DNSLatency *int `json:"DnsLatency,omitempty"`
|
||||
TCPLatency *int `json:"TcpLatency,omitempty"`
|
||||
SSLLatency *int `json:"SslLatency,omitempty"`
|
||||
}
|
||||
54
vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
generated
vendored
Normal file
54
vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
generated
vendored
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
package csm
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
const (
|
||||
runningEnum = iota
|
||||
pausedEnum
|
||||
)
|
||||
|
||||
var (
|
||||
// MetricsChannelSize of metrics to hold in the channel
|
||||
MetricsChannelSize = 100
|
||||
)
|
||||
|
||||
type metricChan struct {
|
||||
ch chan metric
|
||||
paused int64
|
||||
}
|
||||
|
||||
func newMetricChan(size int) metricChan {
|
||||
return metricChan{
|
||||
ch: make(chan metric, size),
|
||||
}
|
||||
}
|
||||
|
||||
func (ch *metricChan) Pause() {
|
||||
atomic.StoreInt64(&ch.paused, pausedEnum)
|
||||
}
|
||||
|
||||
func (ch *metricChan) Continue() {
|
||||
atomic.StoreInt64(&ch.paused, runningEnum)
|
||||
}
|
||||
|
||||
func (ch *metricChan) IsPaused() bool {
|
||||
v := atomic.LoadInt64(&ch.paused)
|
||||
return v == pausedEnum
|
||||
}
|
||||
|
||||
// Push will push metrics to the metric channel if the channel
|
||||
// is not paused
|
||||
func (ch *metricChan) Push(m metric) bool {
|
||||
if ch.IsPaused() {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case ch.ch <- m:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
231 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go (generated, vendored, new file) @@ -0,0 +1,231 @@
|
|||
package csm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultPort is used when no port is specified
|
||||
DefaultPort = "31000"
|
||||
)
|
||||
|
||||
// Reporter will gather metrics of API requests made and
|
||||
// send those metrics to the CSM endpoint.
|
||||
type Reporter struct {
|
||||
clientID string
|
||||
url string
|
||||
conn net.Conn
|
||||
metricsCh metricChan
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
var (
|
||||
sender *Reporter
|
||||
)
|
||||
|
||||
func connect(url string) error {
|
||||
const network = "udp"
|
||||
if err := sender.connect(network, url); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sender.done == nil {
|
||||
sender.done = make(chan struct{})
|
||||
go sender.start()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newReporter(clientID, url string) *Reporter {
|
||||
return &Reporter{
|
||||
clientID: clientID,
|
||||
url: url,
|
||||
metricsCh: newMetricChan(MetricsChannelSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
creds, _ := r.Config.Credentials.Get()
|
||||
|
||||
m := metric{
|
||||
ClientID: aws.String(rep.clientID),
|
||||
API: aws.String(r.Operation.Name),
|
||||
Service: aws.String(r.ClientInfo.ServiceID),
|
||||
Timestamp: (*metricTime)(&now),
|
||||
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
|
||||
Region: r.Config.Region,
|
||||
Type: aws.String("ApiCallAttempt"),
|
||||
Version: aws.Int(1),
|
||||
|
||||
XAmzRequestID: aws.String(r.RequestID),
|
||||
|
||||
AttemptCount: aws.Int(r.RetryCount + 1),
|
||||
AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
|
||||
AccessKey: aws.String(creds.AccessKeyID),
|
||||
}
|
||||
|
||||
if r.HTTPResponse != nil {
|
||||
m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
|
||||
}
|
||||
|
||||
if r.Error != nil {
|
||||
if awserr, ok := r.Error.(awserr.Error); ok {
|
||||
setError(&m, awserr)
|
||||
}
|
||||
}
|
||||
|
||||
rep.metricsCh.Push(m)
|
||||
}
|
||||
|
||||
func setError(m *metric, err awserr.Error) {
|
||||
msg := err.Error()
|
||||
code := err.Code()
|
||||
|
||||
switch code {
|
||||
case "RequestError",
|
||||
"SerializationError",
|
||||
request.CanceledErrorCode:
|
||||
m.SDKException = &code
|
||||
m.SDKExceptionMessage = &msg
|
||||
default:
|
||||
m.AWSException = &code
|
||||
m.AWSExceptionMessage = &msg
|
||||
}
|
||||
}
|
||||
|
||||
func (rep *Reporter) sendAPICallMetric(r *request.Request) {
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
m := metric{
|
||||
ClientID: aws.String(rep.clientID),
|
||||
API: aws.String(r.Operation.Name),
|
||||
Service: aws.String(r.ClientInfo.ServiceID),
|
||||
Timestamp: (*metricTime)(&now),
|
||||
Type: aws.String("ApiCall"),
|
||||
AttemptCount: aws.Int(r.RetryCount + 1),
|
||||
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
|
||||
XAmzRequestID: aws.String(r.RequestID),
|
||||
}
|
||||
|
||||
// TODO: Probably want to figure something out for logging dropped
|
||||
// metrics
|
||||
rep.metricsCh.Push(m)
|
||||
}
|
||||
|
||||
func (rep *Reporter) connect(network, url string) error {
|
||||
if rep.conn != nil {
|
||||
rep.conn.Close()
|
||||
}
|
||||
|
||||
conn, err := net.Dial(network, url)
|
||||
if err != nil {
|
||||
return awserr.New("UDPError", "Could not connect", err)
|
||||
}
|
||||
|
||||
rep.conn = conn
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rep *Reporter) close() {
|
||||
if rep.done != nil {
|
||||
close(rep.done)
|
||||
}
|
||||
|
||||
rep.metricsCh.Pause()
|
||||
}
|
||||
|
||||
func (rep *Reporter) start() {
|
||||
defer func() {
|
||||
rep.metricsCh.Pause()
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-rep.done:
|
||||
rep.done = nil
|
||||
return
|
||||
case m := <-rep.metricsCh.ch:
|
||||
// TODO: What to do with this error? Probably should just log
|
||||
b, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
rep.conn.Write(b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Pause will pause the metric channel preventing any new metrics from
|
||||
// being added.
|
||||
func (rep *Reporter) Pause() {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
rep.close()
|
||||
}
|
||||
|
||||
// Continue will reopen the metric channel and allow for monitoring
|
||||
// to be resumed.
|
||||
func (rep *Reporter) Continue() {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !rep.metricsCh.IsPaused() {
|
||||
return
|
||||
}
|
||||
|
||||
rep.metricsCh.Continue()
|
||||
}
|
||||
|
||||
// InjectHandlers will will enable client side metrics and inject the proper
|
||||
// handlers to handle how metrics are sent.
|
||||
//
|
||||
// Example:
|
||||
// // Start must be called in order to inject the correct handlers
|
||||
// r, err := csm.Start("clientID", "127.0.0.1:8094")
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("expected no error, but received %v", err))
|
||||
// }
|
||||
//
|
||||
// sess := session.NewSession()
|
||||
// r.InjectHandlers(&sess.Handlers)
|
||||
//
|
||||
// // create a new service client with our client side metric session
|
||||
// svc := s3.New(sess)
|
||||
func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
|
||||
apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}
|
||||
|
||||
handlers.Complete.PushFrontNamed(apiCallHandler)
|
||||
handlers.Complete.PushFrontNamed(apiCallAttemptHandler)
|
||||
|
||||
handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
|
||||
}
|
||||
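For context only (not part of the vendored diff): a minimal sketch of how the CSM reporter above is wired into a session, following the InjectHandlers doc comment. The client ID and agent address are illustrative values.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Start the client-side metrics reporter; it publishes metrics over UDP
	// to the CSM agent at the given address (DefaultPort is "31000").
	r, err := csm.Start("clientID", "127.0.0.1:31000") // illustrative client ID/address
	if err != nil {
		log.Fatal(err)
	}

	sess := session.Must(session.NewSession())
	// Inject the ApiCall / ApiCallAttempt handlers so every service client
	// built from this session reports metrics.
	r.InjectHandlers(&sess.Handlers)

	_ = s3.New(sess) // clients created from sess now emit CSM metrics
}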
56  vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go generated vendored
@@ -9,6 +9,7 @@ package defaults
import (
	"fmt"
	"net"
	"net/http"
	"net/url"
	"os"
@@ -72,6 +73,7 @@ func Handlers() request.Handlers {
	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
	handlers.Build.AfterEachFn = request.HandlerListStopOnError
	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
@@ -90,14 +92,25 @@ func Handlers() request.Handlers {
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
	return credentials.NewCredentials(&credentials.ChainProvider{
		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
		Providers: []credentials.Provider{
			&credentials.EnvProvider{},
			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
			RemoteCredProvider(*cfg, handlers),
		},
		Providers: CredProviders(cfg, handlers),
	})
}

// CredProviders returns the slice of providers used in
// the default credential chain.
//
// For applications that need to use some other provider (for example use
// different environment variables for legacy reasons) but still fall back
// on the default chain of providers. This allows that default chaint to be
// automatically updated
func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
	return []credentials.Provider{
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
		RemoteCredProvider(*cfg, handlers),
	}
}

const (
	httpProviderEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
	ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
@@ -118,14 +131,43 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
	return ec2RoleProvider(cfg, handlers)
}

var lookupHostFn = net.LookupHost

func isLoopbackHost(host string) (bool, error) {
	ip := net.ParseIP(host)
	if ip != nil {
		return ip.IsLoopback(), nil
	}

	// Host is not an ip, perform lookup
	addrs, err := lookupHostFn(host)
	if err != nil {
		return false, err
	}
	for _, addr := range addrs {
		if !net.ParseIP(addr).IsLoopback() {
			return false, nil
		}
	}

	return true, nil
}

func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
	var errMsg string

	parsed, err := url.Parse(u)
	if err != nil {
		errMsg = fmt.Sprintf("invalid URL, %v", err)
	} else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
		errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
	} else {
		host := aws.URLHostname(parsed)
		if len(host) == 0 {
			errMsg = "unable to parse host from local HTTP cred provider URL"
		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
			errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
		} else if !isLoopback {
			errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
		}
	}

	if len(errMsg) > 0 {
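For context only (not part of the diff): a sketch of how the new defaults.CredProviders helper can be used to prepend an application-specific provider while still falling back on the default chain. The legacyEnvProvider type is hypothetical; the defaults and credentials calls are the ones shown above.

package awsconfig

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

// legacyEnvProvider is a hypothetical application-specific provider; embedding
// credentials.EnvProvider gives it the Provider interface for this sketch.
type legacyEnvProvider struct{ credentials.EnvProvider }

func customCredChain() *credentials.Credentials {
	cfg := defaults.Config()
	handlers := defaults.Handlers()

	// Custom provider first, then the SDK's default chain from CredProviders,
	// so the default chain keeps updating with the SDK.
	providers := append(
		[]credentials.Provider{&legacyEnvProvider{}},
		defaults.CredProviders(cfg, handlers)...,
	)

	return credentials.NewCredentials(&credentials.ChainProvider{
		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
		Providers:     providers,
	})
}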
8  vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go generated vendored
@@ -4,12 +4,12 @@ import (
	"encoding/json"
	"fmt"
	"net/http"
	"path"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/internal/sdkuri"
)

// GetMetadata uses the path provided to request information from the EC2
@@ -19,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
	op := &request.Operation{
		Name:       "GetMetadata",
		HTTPMethod: "GET",
		HTTPPath:   path.Join("/", "meta-data", p),
		HTTPPath:   sdkuri.PathJoin("/meta-data", p),
	}

	output := &metadataOutput{}
@@ -35,7 +35,7 @@ func (c *EC2Metadata) GetUserData() (string, error) {
	op := &request.Operation{
		Name:       "GetUserData",
		HTTPMethod: "GET",
		HTTPPath:   path.Join("/", "user-data"),
		HTTPPath:   "/user-data",
	}

	output := &metadataOutput{}
@@ -56,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
	op := &request.Operation{
		Name:       "GetDynamicData",
		HTTPMethod: "GET",
		HTTPPath:   path.Join("/", "dynamic", p),
		HTTPPath:   sdkuri.PathJoin("/dynamic", p),
	}

	output := &metadataOutput{}
24  vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go generated vendored
@@ -1,5 +1,10 @@
// Package ec2metadata provides the client for making API calls to the
// EC2 Metadata service.
//
// This package's client can be disabled completely by setting the environment
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
// be used while the environemnt variable is set to true, (case insensitive).
package ec2metadata

import (
@@ -7,17 +12,21 @@ import (
	"errors"
	"io"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/request"
)

// ServiceName is the name of the service.
const ServiceName = "ec2metadata"
const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"

// A EC2Metadata is an EC2 Metadata service Client.
type EC2Metadata struct {
@@ -75,6 +84,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
	svc.Handlers.Validate.Clear()
	svc.Handlers.Validate.PushBack(validateEndpointHandler)

	// Disable the EC2 Metadata service if the environment variable is set.
	// This shortcirctes the service's functionality to always fail to send
	// requests.
	if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
		svc.Handlers.Send.SwapNamed(request.NamedHandler{
			Name: corehandlers.SendHandler.Name,
			Fn: func(r *request.Request) {
				r.Error = awserr.New(
					request.CanceledErrorCode,
					"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
					nil)
			},
		})
	}

	// Add additional options to the service config
	for _, option := range opts {
		option(svc.Client)
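For context only (not part of the diff): a small sketch of the new AWS_EC2_METADATA_DISABLED behavior, assuming the client is constructed after the variable is set so NewClient sees it.

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// With the variable set (case insensitive), IMDS requests fail immediately
	// with request.CanceledErrorCode instead of hitting the network.
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	_, err := svc.GetMetadata("instance-id")
	fmt.Println(err) // expected: RequestCanceled: EC2 IMDS access disabled via AWS_EC2_METADATA_DISABLED env var
}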
1073  vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go generated vendored
(File diff suppressed because it is too large.)
4  vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go generated vendored
@@ -21,12 +21,12 @@
//     partitions := resolver.(endpoints.EnumPartitions).Partitions()
//
//     for _, p := range partitions {
//         fmt.Println("Regions for", p.Name)
//         fmt.Println("Regions for", p.ID())
//         for id, _ := range p.Regions() {
//             fmt.Println("*", id)
//         }
//
//         fmt.Println("Services for", p.Name)
//         fmt.Println("Services for", p.ID())
//         for id, _ := range p.Services() {
//             fmt.Println("*", id)
//         }
22  vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go generated vendored
@@ -206,10 +206,11 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (
// enumerating over the regions in a partition.
func (p Partition) Regions() map[string]Region {
	rs := map[string]Region{}
	for id := range p.p.Regions {
	for id, r := range p.p.Regions {
		rs[id] = Region{
			id: id,
			p:  p.p,
			id:   id,
			desc: r.Description,
			p:    p.p,
		}
	}

@@ -240,6 +241,10 @@ type Region struct {
// ID returns the region's identifier.
func (r Region) ID() string { return r.id }

// Description returns the region's description. The region description
// is free text, it can be empty, and it may change between SDK releases.
func (r Region) Description() string { return r.desc }

// ResolveEndpoint resolves an endpoint from the context of the region given
// a service. See Partition.EndpointFor for usage and errors that can be returned.
func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
@@ -284,10 +289,11 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve
func (s Service) Regions() map[string]Region {
	rs := map[string]Region{}
	for id := range s.p.Services[s.id].Endpoints {
		if _, ok := s.p.Regions[id]; ok {
		if r, ok := s.p.Regions[id]; ok {
			rs[id] = Region{
				id: id,
				p:  s.p,
				id:   id,
				desc: r.Description,
				p:    s.p,
			}
		}
	}
@@ -347,6 +353,10 @@ type ResolvedEndpoint struct {
	// The service name that should be used for signing requests.
	SigningName string

	// States that the signing name for this endpoint was derived from metadata
	// passed in, but was not explicitly modeled.
	SigningNameDerived bool

	// The signing method that should be used for signing requests.
	SigningMethod string
}
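For context only (not part of the diff): a short sketch of the new Region.Description accessor, using the partition-enumeration pattern from the endpoints doc comment above.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()
	for _, p := range resolver.(endpoints.EnumPartitions).Partitions() {
		for id, r := range p.Regions() {
			// Description is free text; it may be empty and may change between releases.
			fmt.Println(p.ID(), id, r.Description())
		}
	}
}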
12  vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go generated vendored
@@ -226,16 +226,20 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op
	if len(signingRegion) == 0 {
		signingRegion = region
	}

	signingName := e.CredentialScope.Service
	var signingNameDerived bool
	if len(signingName) == 0 {
		signingName = service
		signingNameDerived = true
	}

	return ResolvedEndpoint{
		URL:           u,
		SigningRegion: signingRegion,
		SigningName:   signingName,
		SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
		URL:                u,
		SigningRegion:      signingRegion,
		SigningName:        signingName,
		SigningNameDerived: signingNameDerived,
		SigningMethod:      getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
	}
}

10  vendor/github.com/aws/aws-sdk-go/aws/logger.go generated vendored
@@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType {

// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
// LogLevel is nill, will default to LogOff comparison.
// LogLevel is nil, will default to LogOff comparison.
func (l *LogLevelType) Matches(v LogLevelType) bool {
	c := l.Value()
	return c&v == v
}

// AtLeast returns true if this LogLevel is at least high enough to satisfies v.
// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
// to LogOff comparison.
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
	c := l.Value()
@@ -71,6 +71,12 @@ const (
	// LogDebugWithRequestErrors states the SDK should log when service requests fail
	// to build, send, validate, or unmarshal.
	LogDebugWithRequestErrors

	// LogDebugWithEventStreamBody states the SDK should log EventStream
	// request and response bodys. This should be used to log the EventStream
	// wire unmarshaled message content of requests and responses made while
	// using the SDK Will also enable LogDebug.
	LogDebugWithEventStreamBody
)

// A Logger is a minimalistic interface for the SDK to log messages to. Should
18  vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go generated vendored
@@ -14,6 +14,7 @@ type Handlers struct {
	Send             HandlerList
	ValidateResponse HandlerList
	Unmarshal        HandlerList
	UnmarshalStream  HandlerList
	UnmarshalMeta    HandlerList
	UnmarshalError   HandlerList
	Retry            HandlerList
@@ -30,6 +31,7 @@ func (h *Handlers) Copy() Handlers {
		Send:             h.Send.copy(),
		ValidateResponse: h.ValidateResponse.copy(),
		Unmarshal:        h.Unmarshal.copy(),
		UnmarshalStream:  h.UnmarshalStream.copy(),
		UnmarshalError:   h.UnmarshalError.copy(),
		UnmarshalMeta:    h.UnmarshalMeta.copy(),
		Retry:            h.Retry.copy(),
@@ -45,6 +47,7 @@ func (h *Handlers) Clear() {
	h.Send.Clear()
	h.Sign.Clear()
	h.Unmarshal.Clear()
	h.UnmarshalStream.Clear()
	h.UnmarshalMeta.Clear()
	h.UnmarshalError.Clear()
	h.ValidateResponse.Clear()
@@ -172,6 +175,21 @@ func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
	return swapped
}

// Swap will swap out all handlers matching the name passed in. The matched
// handlers will be swapped in. True is returned if the handlers were swapped.
func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
	var swapped bool

	for i := 0; i < len(l.list); i++ {
		if l.list[i].Name == name {
			l.list[i] = replace
			swapped = true
		}
	}

	return swapped
}

// SetBackNamed will replace the named handler if it exists in the handler list.
// If the handler does not exist the handler will be added to the end of the list.
func (l *HandlerList) SetBackNamed(n NamedHandler) {
4  vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go generated vendored
@@ -3,6 +3,8 @@ package request
import (
	"io"
	"sync"

	"github.com/aws/aws-sdk-go/internal/sdkio"
)

// offsetReader is a thread-safe io.ReadCloser to prevent racing
@@ -15,7 +17,7 @@ type offsetReader struct {

func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
	reader := &offsetReader{}
	buf.Seek(offset, 0)
	buf.Seek(offset, sdkio.SeekStart)

	reader.buf = buf
	return reader
210  vendor/github.com/aws/aws-sdk-go/aws/request/request.go generated vendored
@@ -14,6 +14,7 @@ import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/internal/sdkio"
)

const (
@@ -24,10 +25,14 @@ const (
	// ErrCodeRead is an error that is returned during HTTP reads.
	ErrCodeRead = "ReadError"

	// ErrCodeResponseTimeout is the connection timeout error that is recieved
	// ErrCodeResponseTimeout is the connection timeout error that is received
	// during body reads.
	ErrCodeResponseTimeout = "ResponseTimeout"

	// ErrCodeInvalidPresignExpire is returned when the expire time provided to
	// presign is invalid
	ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"

	// CanceledErrorCode is the error code that will be returned by an
	// API request that was canceled. Requests given a aws.Context may
	// return this error when canceled.
@@ -41,8 +46,8 @@ type Request struct {
	Handlers Handlers

	Retryer
	AttemptTime  time.Time
	Time         time.Time
	ExpireTime   time.Duration
	Operation    *Operation
	HTTPRequest  *http.Request
	HTTPResponse *http.Response
@@ -60,6 +65,11 @@ type Request struct {
	LastSignedAt           time.Time
	DisableFollowRedirects bool

	// A value greater than 0 instructs the request to be signed as Presigned URL
	// You should not set this field directly. Instead use Request's
	// Presign or PresignRequest methods.
	ExpireTime time.Duration

	context aws.Context

	built bool
@@ -104,12 +114,15 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
	}

	SanitizeHostForHeader(httpReq)

	r := &Request{
		Config:     cfg,
		ClientInfo: clientInfo,
		Handlers:   handlers.Copy(),

		Retryer:     retryer,
		AttemptTime: time.Now(),
		Time:        time.Now(),
		ExpireTime:  0,
		Operation:   operation,
@@ -214,6 +227,9 @@ func (r *Request) SetContext(ctx aws.Context) {

// WillRetry returns if the request's can be retried.
func (r *Request) WillRetry() bool {
	if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
		return false
	}
	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
}

@@ -245,39 +261,70 @@ func (r *Request) SetStringBody(s string) {
// SetReaderBody will set the request's body reader.
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
	r.Body = reader
	r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset.
	r.ResetBody()
}

// Presign returns the request's signed URL. Error will be returned
// if the signing fails.
func (r *Request) Presign(expireTime time.Duration) (string, error) {
	r.ExpireTime = expireTime
//
// It is invalid to create a presigned URL with a expire duration 0 or less. An
// error is returned if expire duration is 0 or less.
func (r *Request) Presign(expire time.Duration) (string, error) {
	r = r.copy()

	// Presign requires all headers be hoisted. There is no way to retrieve
	// the signed headers not hoisted without this. Making the presigned URL
	// useless.
	r.NotHoist = false

	u, _, err := getPresignedURL(r, expire)
	return u, err
}

// PresignRequest behaves just like presign, with the addition of returning a
// set of headers that were signed.
//
// It is invalid to create a presigned URL with a expire duration 0 or less. An
// error is returned if expire duration is 0 or less.
//
// Returns the URL string for the API operation with signature in the query string,
// and the HTTP headers that were included in the signature. These headers must
// be included in any HTTP request made with the presigned URL.
//
// To prevent hoisting any headers to the query string set NotHoist to true on
// this Request value prior to calling PresignRequest.
func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
	r = r.copy()
	return getPresignedURL(r, expire)
}

// IsPresigned returns true if the request represents a presigned API url.
func (r *Request) IsPresigned() bool {
	return r.ExpireTime != 0
}

func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
	if expire <= 0 {
		return "", nil, awserr.New(
			ErrCodeInvalidPresignExpire,
			"presigned URL requires an expire duration greater than 0",
			nil,
		)
	}

	r.ExpireTime = expire

	if r.Operation.BeforePresignFn != nil {
		r = r.copy()
		err := r.Operation.BeforePresignFn(r)
		if err != nil {
			return "", err
		if err := r.Operation.BeforePresignFn(r); err != nil {
			return "", nil, err
		}
	}

	r.Sign()
	if r.Error != nil {
		return "", r.Error
	if err := r.Sign(); err != nil {
		return "", nil, err
	}
	return r.HTTPRequest.URL.String(), nil
}

// PresignRequest behaves just like presign, but hoists all headers and signs them.
// Also returns the signed hash back to the user
func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
	r.ExpireTime = expireTime
	r.NotHoist = true
	r.Sign()
	if r.Error != nil {
		return "", nil, r.Error
	}
	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
}

@@ -297,7 +344,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) {

// Build will build the request's object so it can be signed and sent
// to the service. Build will also validate all the request's parameters.
// Anny additional build Handlers set on this request will be run
// Any additional build Handlers set on this request will be run
// in the order they were set.
//
// The request will only be built once. Multiple calls to build will have
@@ -323,9 +370,9 @@ func (r *Request) Build() error {
	return r.Error
}

// Sign will sign the request returning error if errors are encountered.
// Sign will sign the request, returning error if errors are encountered.
//
// Send will build the request prior to signing. All Sign Handlers will
// Sign will build the request prior to signing. All Sign Handlers will
// be executed in the order they were set.
func (r *Request) Sign() error {
	r.Build()
@@ -358,7 +405,7 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
	// of the SDK if they used that field.
	//
	// Related golang/go#18257
	l, err := computeBodyLength(r.Body)
	l, err := aws.SeekerLen(r.Body)
	if err != nil {
		return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
	}
@@ -376,7 +423,8 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
		// Transfer-Encoding: chunked bodies for these methods.
		//
		// This would only happen if a aws.ReaderSeekerCloser was used with
		// a io.Reader that was not also an io.Seeker.
		// a io.Reader that was not also an io.Seeker, or did not implement
		// Len() method.
		switch r.Operation.HTTPMethod {
		case "GET", "HEAD", "DELETE":
			body = NoBody
@@ -388,49 +436,13 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
	return body, nil
}

// Attempts to compute the length of the body of the reader using the
// io.Seeker interface. If the value is not seekable because of being
// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned.
// If no error occurs the length of the body will be returned.
func computeBodyLength(r io.ReadSeeker) (int64, error) {
	seekable := true
	// Determine if the seeker is actually seekable. ReaderSeekerCloser
	// hides the fact that a io.Readers might not actually be seekable.
	switch v := r.(type) {
	case aws.ReaderSeekerCloser:
		seekable = v.IsSeeker()
	case *aws.ReaderSeekerCloser:
		seekable = v.IsSeeker()
	}
	if !seekable {
		return -1, nil
	}

	curOffset, err := r.Seek(0, 1)
	if err != nil {
		return 0, err
	}

	endOffset, err := r.Seek(0, 2)
	if err != nil {
		return 0, err
	}

	_, err = r.Seek(curOffset, 0)
	if err != nil {
		return 0, err
	}

	return endOffset - curOffset, nil
}

// GetBody will return an io.ReadSeeker of the Request's underlying
// input body with a concurrency safe wrapper.
func (r *Request) GetBody() io.ReadSeeker {
	return r.safeBody
}

// Send will send the request returning error if errors are encountered.
// Send will send the request, returning error if errors are encountered.
//
// Send will sign the request prior to sending. All Send Handlers will
// be executed in the order they were set.
@@ -451,6 +463,7 @@ func (r *Request) Send() error {
	}()

	for {
		r.AttemptTime = time.Now()
		if aws.BoolValue(r.Retryable) {
			if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
				r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
@@ -573,3 +586,72 @@ func shouldRetryCancel(r *Request) bool {
		errStr != "net/http: request canceled while waiting for connection")

}

// SanitizeHostForHeader removes default port from host and updates request.Host
func SanitizeHostForHeader(r *http.Request) {
	host := getHost(r)
	port := portOnly(host)
	if port != "" && isDefaultPort(r.URL.Scheme, port) {
		r.Host = stripPort(host)
	}
}

// Returns host from request
func getHost(r *http.Request) string {
	if r.Host != "" {
		return r.Host
	}

	return r.URL.Host
}

// Hostname returns u.Host, without any port number.
//
// If Host is an IPv6 literal with a port number, Hostname returns the
// IPv6 literal without the square brackets. IPv6 literals may include
// a zone identifier.
//
// Copied from the Go 1.8 standard library (net/url)
func stripPort(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon == -1 {
		return hostport
	}
	if i := strings.IndexByte(hostport, ']'); i != -1 {
		return strings.TrimPrefix(hostport[:i], "[")
	}
	return hostport[:colon]
}

// Port returns the port part of u.Host, without the leading colon.
// If u.Host doesn't contain a port, Port returns an empty string.
//
// Copied from the Go 1.8 standard library (net/url)
func portOnly(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon == -1 {
		return ""
	}
	if i := strings.Index(hostport, "]:"); i != -1 {
		return hostport[i+len("]:"):]
	}
	if strings.Contains(hostport, "]") {
		return ""
	}
	return hostport[colon+len(":"):]
}

// Returns true if the specified URI is using the standard port
// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
func isDefaultPort(scheme, port string) bool {
	if port == "" {
		return true
	}

	lowerCaseScheme := strings.ToLower(scheme)
	if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
		return true
	}

	return false
}
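For context only (not part of the diff): a short sketch of the reworked Presign behavior above, which now rejects a zero or negative expire duration with ErrCodeInvalidPresignExpire. The bucket and key names are hypothetical.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Key:    aws.String("example-key"),    // hypothetical key
	})

	// A zero (or negative) duration is now rejected up front.
	if _, err := req.Presign(0); err != nil {
		fmt.Println("expected error:", err)
	}

	// A positive duration returns the presigned URL as before.
	url, err := req.Presign(15 * time.Minute)
	fmt.Println(url, err)
}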
2  vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go generated vendored
@@ -21,7 +21,7 @@ func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
var NoBody = noBody{}

// ResetBody rewinds the request body back to its starting position, and
// set's the HTTP Request body reference. When the body is read prior
// sets the HTTP Request body reference. When the body is read prior
// to being sent in the HTTP request it will need to be rewound.
//
// ResetBody will automatically be called by the SDK's build handler, but if
2  vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go generated vendored
@@ -11,7 +11,7 @@ import (
var NoBody = http.NoBody

// ResetBody rewinds the request body back to its starting position, and
// set's the HTTP Request body reference. When the body is read prior
// sets the HTTP Request body reference. When the body is read prior
// to being sent in the HTTP request it will need to be rewound.
//
// ResetBody will automatically be called by the SDK's build handler, but if
40  vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go generated vendored
@@ -35,8 +35,12 @@ type Pagination struct {
	// NewRequest should always be built from the same API operations. It is
	// undefined if different API operations are returned on subsequent calls.
	NewRequest func() (*Request, error)
	// EndPageOnSameToken, when enabled, will allow the paginator to stop on
	// token that are the same as its previous tokens.
	EndPageOnSameToken bool

	started    bool
	prevTokens []interface{}
	nextTokens []interface{}

	err error
@@ -49,7 +53,15 @@ type Pagination struct {
//
// Will always return true if Next has not been called yet.
func (p *Pagination) HasNextPage() bool {
	return !(p.started && len(p.nextTokens) == 0)
	if !p.started {
		return true
	}

	hasNextPage := len(p.nextTokens) != 0
	if p.EndPageOnSameToken {
		return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
	}
	return hasNextPage
}

// Err returns the error Pagination encountered when retrieving the next page.
@@ -96,6 +108,7 @@ func (p *Pagination) Next() bool {
		return false
	}

	p.prevTokens = p.nextTokens
	p.nextTokens = req.nextPageTokens()
	p.curPage = req.Data

@@ -142,13 +155,28 @@ func (r *Request) nextPageTokens() []interface{} {
	tokens := []interface{}{}
	tokenAdded := false
	for _, outToken := range r.Operation.OutputTokens {
		v, _ := awsutil.ValuesAtPath(r.Data, outToken)
		if len(v) > 0 {
			tokens = append(tokens, v[0])
			tokenAdded = true
		} else {
		vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
		if len(vs) == 0 {
			tokens = append(tokens, nil)
			continue
		}
		v := vs[0]

		switch tv := v.(type) {
		case *string:
			if len(aws.StringValue(tv)) == 0 {
				tokens = append(tokens, nil)
				continue
			}
		case string:
			if len(tv) == 0 {
				tokens = append(tokens, nil)
				continue
			}
		}

		tokenAdded = true
		tokens = append(tokens, v)
	}
	if !tokenAdded {
		return nil
8  vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go generated vendored
@@ -8,7 +8,7 @@ import (
)

// Retryer is an interface to control retry logic for a given service.
// The default implementation used by most services is the service.DefaultRetryer
// The default implementation used by most services is the client.DefaultRetryer
// structure, which contains basic retry logic using exponential backoff.
type Retryer interface {
	RetryRules(*Request) time.Duration
@@ -70,8 +70,8 @@ func isCodeExpiredCreds(code string) bool {
}

var validParentCodes = map[string]struct{}{
	ErrCodeSerialization: struct{}{},
	ErrCodeRead:          struct{}{},
	ErrCodeSerialization: {},
	ErrCodeRead:          {},
}

type temporaryError interface {
@@ -97,7 +97,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
	}

	if t, ok := err.(temporaryError); ok {
		return t.Temporary()
		return t.Temporary() || isErrConnectionReset(err)
	}

	return isErrConnectionReset(err)
2  vendor/github.com/aws/aws-sdk-go/aws/request/validation.go generated vendored
@@ -220,7 +220,7 @@ type ErrParamMinLen struct {
func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
	return &ErrParamMinLen{
		errInvalidParam: errInvalidParam{
			code:  ParamMinValueErrCode,
			code:  ParamMinLenErrCode,
			field: field,
			msg:   fmt.Sprintf("minimum field size of %v", min),
		},
16  vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go generated vendored
@@ -79,8 +79,9 @@ type Waiter struct {
	MaxAttempts int
	Delay       WaiterDelay

	RequestOptions []Option
	NewRequest     func([]Option) (*Request, error)
	RequestOptions   []Option
	NewRequest       func([]Option) (*Request, error)
	SleepWithContext func(aws.Context, time.Duration) error
}

// ApplyOptions updates the waiter with the list of waiter options provided.
@@ -195,8 +196,15 @@ func (w Waiter) WaitWithContext(ctx aws.Context) error {
		if sleepFn := req.Config.SleepDelay; sleepFn != nil {
			// Support SleepDelay for backwards compatibility and testing
			sleepFn(delay)
		} else if err := aws.SleepWithContext(ctx, delay); err != nil {
			return awserr.New(CanceledErrorCode, "waiter context canceled", err)
		} else {
			sleepCtxFn := w.SleepWithContext
			if sleepCtxFn == nil {
				sleepCtxFn = aws.SleepWithContext
			}

			if err := sleepCtxFn(ctx, delay); err != nil {
				return awserr.New(CanceledErrorCode, "waiter context canceled", err)
			}
		}
	}

2  vendor/github.com/aws/aws-sdk-go/aws/session/doc.go generated vendored
@@ -128,7 +128,7 @@ read. The Session will be created from configuration values from the shared
credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).

Credentials are the values the SDK should use for authenticating requests with
AWS Services. They arfrom a configuration file will need to include both
AWS Services. They are from a configuration file will need to include both
aws_access_key_id and aws_secret_access_key must be provided together in the
same file to be considered valid. The values will be ignored if not a complete
group. aws_session_token is an optional field that can be provided if both of
35  vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go generated vendored
@@ -5,8 +5,12 @@ import (
	"strconv"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

// EnvProviderName provides a name of the provider when config is loaded from environment.
const EnvProviderName = "EnvConfigCredentials"

// envConfig is a collection of environment values the SDK will read
// setup config from. All environment values are optional. But some values
// such as credentials require multiple values to be complete or the values
@@ -76,7 +80,7 @@ type envConfig struct {
	SharedConfigFile string

	// Sets the path to a custom Credentials Authroity (CA) Bundle PEM file
	// that the SDK will use instead of the the system's root CA bundle.
	// that the SDK will use instead of the system's root CA bundle.
	// Only use this if you want to configure the SDK to use a custom set
	// of CAs.
	//
@@ -92,9 +96,23 @@ type envConfig struct {
	//
	//  AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
	CustomCABundle string

	csmEnabled  string
	CSMEnabled  bool
	CSMPort     string
	CSMClientID string
}

var (
	csmEnabledEnvKey = []string{
		"AWS_CSM_ENABLED",
	}
	csmPortEnvKey = []string{
		"AWS_CSM_PORT",
	}
	csmClientIDEnvKey = []string{
		"AWS_CSM_CLIENT_ID",
	}
	credAccessEnvKey = []string{
		"AWS_ACCESS_KEY_ID",
		"AWS_ACCESS_KEY",
@@ -153,11 +171,17 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
	setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
	setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)

	// CSM environment variables
	setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
	setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
	setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
	cfg.CSMEnabled = len(cfg.csmEnabled) > 0

	// Require logical grouping of credentials
	if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
		cfg.Creds = credentials.Value{}
	} else {
		cfg.Creds.ProviderName = "EnvConfigCredentials"
		cfg.Creds.ProviderName = EnvProviderName
	}

	regionKeys := regionEnvKeys
@@ -173,6 +197,13 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
	setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
	setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)

	if len(cfg.SharedCredentialsFile) == 0 {
		cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
	}
	if len(cfg.SharedConfigFile) == 0 {
		cfg.SharedConfigFile = defaults.SharedConfigFilename()
	}

	cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")

	return cfg
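For context only (not part of the diff): a sketch of enabling client-side metrics through the environment variables added above; the session.go changes that follow wire these values into enableCSM. The values shown are illustrative.

package main

import (
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Any non-empty AWS_CSM_ENABLED value turns the feature on (CSMEnabled is
	// derived from the variable being set at all).
	os.Setenv("AWS_CSM_ENABLED", "true")
	os.Setenv("AWS_CSM_PORT", "31000")       // optional; csm.DefaultPort is used when empty
	os.Setenv("AWS_CSM_CLIENT_ID", "my-app") // hypothetical client ID

	// Sessions created after the variables are set pick them up in envConfigLoad,
	// and enableCSM starts the reporter and injects its handlers.
	sess := session.Must(session.NewSession())
	_ = sess
}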
64  vendor/github.com/aws/aws-sdk-go/aws/session/session.go generated vendored
@@ -15,6 +15,7 @@ import (
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/request"
@@ -26,7 +27,7 @@ import (
// Sessions are safe to create service clients concurrently, but it is not safe
// to mutate the Session concurrently.
//
// The Session satisfies the service client's client.ClientConfigProvider.
// The Session satisfies the service client's client.ConfigProvider.
type Session struct {
	Config   *aws.Config
	Handlers request.Handlers
@@ -58,7 +59,12 @@ func New(cfgs ...*aws.Config) *Session {
	envCfg := loadEnvConfig()

	if envCfg.EnableSharedConfig {
		s, err := newSession(Options{}, envCfg, cfgs...)
		var cfg aws.Config
		cfg.MergeIn(cfgs...)
		s, err := NewSessionWithOptions(Options{
			Config:            cfg,
			SharedConfigState: SharedConfigEnable,
		})
		if err != nil {
			// Old session.New expected all errors to be discovered when
			// a request is made, and would report the errors then. This
@@ -76,10 +82,16 @@ func New(cfgs ...*aws.Config) *Session {
				r.Error = err
			})
		}

		return s
	}

	return deprecatedNewSession(cfgs...)
	s := deprecatedNewSession(cfgs...)
	if envCfg.CSMEnabled {
		enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
	}

	return s
}

// NewSession returns a new Session created from SDK defaults, config files,
@@ -243,13 +255,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
		envCfg.EnableSharedConfig = true
	}

	if len(envCfg.SharedCredentialsFile) == 0 {
		envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
	}
	if len(envCfg.SharedConfigFile) == 0 {
		envCfg.SharedConfigFile = defaults.SharedConfigFilename()
	}

	// Only use AWS_CA_BUNDLE if session option is not provided.
	if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
		f, err := os.Open(envCfg.CustomCABundle)
@@ -302,10 +307,22 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
	}

	initHandlers(s)

	return s
}

func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
	logger.Log("Enabling CSM")
	if len(port) == 0 {
		port = csm.DefaultPort
	}

	r, err := csm.Start(clientID, "127.0.0.1:"+port)
	if err != nil {
		return
	}
	r.InjectHandlers(handlers)
}

func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
	cfg := defaults.Config()
	handlers := defaults.Handlers()
@@ -345,6 +362,9 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
	}

	initHandlers(s)
	if envCfg.CSMEnabled {
		enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
	}

	// Setup HTTP client with custom cert bundle if enabled
	if opts.CustomCABundle != nil {
@@ -573,11 +593,12 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (
	}

	return client.Config{
		Config:        s.Config,
		Handlers:      s.Handlers,
		Endpoint:      resolved.URL,
		SigningRegion: resolved.SigningRegion,
		SigningName:   resolved.SigningName,
		Config:             s.Config,
		Handlers:           s.Handlers,
		Endpoint:           resolved.URL,
		SigningRegion:      resolved.SigningRegion,
		SigningNameDerived: resolved.SigningNameDerived,
		SigningName:        resolved.SigningName,
	}, err
}

@@ -597,10 +618,11 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
	}

	return client.Config{
		Config:        s.Config,
		Handlers:      s.Handlers,
		Endpoint:      resolved.URL,
		SigningRegion: resolved.SigningRegion,
		SigningName:   resolved.SigningName,
		Config:             s.Config,
		Handlers:           s.Handlers,
		Endpoint:           resolved.URL,
		SigningRegion:      resolved.SigningRegion,
		SigningNameDerived: resolved.SigningNameDerived,
		SigningName:        resolved.SigningName,
	}
}

173
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
generated
vendored
173
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
generated
vendored
|
|
@ -45,7 +45,7 @@
|
|||
// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
|
||||
// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
|
||||
// request URL. https://github.com/golang/go/issues/16847 points to a bug in
|
||||
// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP
|
||||
// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
|
||||
// message. URL.Opaque generally will force Go to make requests with absolute URL.
|
||||
// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
|
||||
// or url.EscapedPath will ignore the RawPath escaping.
|
||||
|
|
@ -55,7 +55,6 @@
|
|||
package v4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
|
|
@ -72,6 +71,7 @@ import (
|
|||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||
)
|
||||
|
||||
|
|
@ -98,25 +98,25 @@ var ignoredHeaders = rules{
|
|||
var requiredSignedHeaders = rules{
|
||||
whitelist{
|
||||
mapRule{
|
||||
"Cache-Control": struct{}{},
|
||||
"Content-Disposition": struct{}{},
|
||||
"Content-Encoding": struct{}{},
|
||||
"Content-Language": struct{}{},
|
||||
"Content-Md5": struct{}{},
|
||||
"Content-Type": struct{}{},
|
||||
"Expires": struct{}{},
|
||||
"If-Match": struct{}{},
|
||||
"If-Modified-Since": struct{}{},
|
||||
"If-None-Match": struct{}{},
|
||||
"If-Unmodified-Since": struct{}{},
|
||||
"Range": struct{}{},
|
||||
"X-Amz-Acl": struct{}{},
|
||||
"X-Amz-Copy-Source": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-If-None-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-Range": struct{}{},
|
||||
"Cache-Control": struct{}{},
|
||||
"Content-Disposition": struct{}{},
|
||||
"Content-Encoding": struct{}{},
|
||||
"Content-Language": struct{}{},
|
||||
"Content-Md5": struct{}{},
|
||||
"Content-Type": struct{}{},
|
||||
"Expires": struct{}{},
|
||||
"If-Match": struct{}{},
|
||||
"If-Modified-Since": struct{}{},
|
||||
"If-None-Match": struct{}{},
|
||||
"If-Unmodified-Since": struct{}{},
|
||||
"Range": struct{}{},
|
||||
"X-Amz-Acl": struct{}{},
|
||||
"X-Amz-Copy-Source": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-If-None-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-Range": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||
|
|
@ -135,6 +135,7 @@ var requiredSignedHeaders = rules{
|
|||
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||
"X-Amz-Storage-Class": struct{}{},
|
||||
"X-Amz-Website-Redirect-Location": struct{}{},
|
||||
"X-Amz-Content-Sha256": struct{}{},
|
||||
},
|
||||
},
|
||||
patterns{"X-Amz-Meta-"},
|
||||
|
|
@ -269,7 +270,7 @@ type signingCtx struct {
|
|||
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
|
||||
// only compute the hash if the request header value is empty.
|
||||
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
|
||||
return v4.signWithBody(r, body, service, region, 0, signTime)
|
||||
return v4.signWithBody(r, body, service, region, 0, false, signTime)
|
||||
}
|
||||
|
||||
// Presign signs AWS v4 requests with the provided body, service name, region
|
||||
|
|
@ -303,10 +304,10 @@ func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region strin
|
|||
// presigned request's signature you can set the "X-Amz-Content-Sha256"
|
||||
// HTTP header and that will be included in the request's signature.
|
||||
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
|
||||
return v4.signWithBody(r, body, service, region, exp, signTime)
|
||||
return v4.signWithBody(r, body, service, region, exp, true, signTime)
|
||||
}
|
||||
|
||||
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
|
||||
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
|
||||
currentTimeFn := v4.currentTimeFn
|
||||
if currentTimeFn == nil {
|
||||
currentTimeFn = time.Now
|
||||
|
|
@ -318,7 +319,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
|
|||
Query: r.URL.Query(),
|
||||
Time: signTime,
|
||||
ExpireTime: exp,
|
||||
isPresign: exp != 0,
|
||||
isPresign: isPresign,
|
||||
ServiceName: service,
|
||||
Region: region,
|
||||
DisableURIPathEscaping: v4.DisableURIPathEscaping,
|
||||
|
|
@ -340,8 +341,11 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
|
|||
return http.Header{}, err
|
||||
}
|
||||
|
||||
ctx.sanitizeHostForHeader()
|
||||
ctx.assignAmzQueryValues()
|
||||
ctx.build(v4.DisableHeaderHoisting)
|
||||
if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the request is not presigned the body should be attached to it. This
|
||||
// prevents the confusion of wanting to send a signed request without
|
||||
|
|
@ -364,6 +368,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
|
|||
return ctx.SignedHeaderVals, nil
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) sanitizeHostForHeader() {
|
||||
request.SanitizeHostForHeader(ctx.Request)
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) handlePresignRemoval() {
|
||||
if !ctx.isPresign {
|
||||
return
|
||||
|
|
@ -402,7 +410,7 @@ var SignRequestHandler = request.NamedHandler{
 }
 
 // SignSDKRequest signs an AWS request with the V4 signature. This
-// request handler is bested used only with the SDK's built in service client's
+// request handler should only be used with the SDK's built in service client's
 // API operation requests.
 //
 // This function should not be used on its on its own, but in conjunction with
@ -468,7 +476,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
     }
 
     signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
-        name, region, req.ExpireTime, signingTime,
+        name, region, req.ExpireTime, req.ExpireTime > 0, signingTime,
     )
     if err != nil {
         req.Error = err
@ -499,10 +507,14 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
     v4.Logger.Log(msg)
 }
 
-func (ctx *signingCtx) build(disableHeaderHoisting bool) {
+func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
     ctx.buildTime()             // no depends
     ctx.buildCredentialString() // no depends
 
+    if err := ctx.buildBodyDigest(); err != nil {
+        return err
+    }
+
     unsignedHeaders := ctx.Request.Header
     if ctx.isPresign {
         if !disableHeaderHoisting {
@ -514,7 +526,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
         }
     }
 
-    ctx.buildBodyDigest()
     ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
     ctx.buildCanonicalString() // depends on canon headers / signed headers
     ctx.buildStringToSign()    // depends on canon string
@ -530,6 +541,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
         }
         ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
     }
+
+    return nil
 }
 
 func (ctx *signingCtx) buildTime() {
@ -604,14 +617,18 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
     headerValues := make([]string, len(headers))
     for i, k := range headers {
         if k == "host" {
-            headerValues[i] = "host:" + ctx.Request.URL.Host
+            if ctx.Request.Host != "" {
+                headerValues[i] = "host:" + ctx.Request.Host
+            } else {
+                headerValues[i] = "host:" + ctx.Request.URL.Host
+            }
         } else {
             headerValues[i] = k + ":" +
                 strings.Join(ctx.SignedHeaderVals[k], ",")
         }
     }
 
-    ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
+    stripExcessSpaces(headerValues)
+    ctx.canonicalHeaders = strings.Join(headerValues, "\n")
 }
 
 func (ctx *signingCtx) buildCanonicalString() {
@ -652,21 +669,34 @@ func (ctx *signingCtx) buildSignature() {
     ctx.signature = hex.EncodeToString(signature)
 }
 
-func (ctx *signingCtx) buildBodyDigest() {
+func (ctx *signingCtx) buildBodyDigest() error {
     hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
     if hash == "" {
-        if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
+        includeSHA256Header := ctx.unsignedPayload ||
+            ctx.ServiceName == "s3" ||
+            ctx.ServiceName == "glacier"
+
+        s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
+
+        if ctx.unsignedPayload || s3Presign {
             hash = "UNSIGNED-PAYLOAD"
+            includeSHA256Header = !s3Presign
         } else if ctx.Body == nil {
             hash = emptyStringSHA256
         } else {
+            if !aws.IsReaderSeekable(ctx.Body) {
+                return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+            }
             hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
         }
-        if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
+
+        if includeSHA256Header {
             ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
         }
     }
     ctx.bodyDigest = hash
+
+    return nil
 }
 
 // isRequestSigned returns if the request is currently signed or presigned
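The rewritten buildBodyDigest above now refuses to hash a body it cannot rewind, instead of silently producing a wrong digest. A minimal sketch of how that surfaces through the public signer API; the static test credentials and the SQS endpoint are illustrative assumptions, not part of the change:

package main

import (
    "bytes"
    "fmt"
    "net/http"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
    signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))

    // A seekable body (strings.Reader) can be hashed into X-Amz-Content-Sha256.
    req1, _ := http.NewRequest("POST", "https://sqs.us-east-1.amazonaws.com/", nil)
    _, err := signer.Sign(req1, strings.NewReader("Action=ListQueues"), "sqs", "us-east-1", time.Now())
    fmt.Println(err) // <nil>

    // aws.ReadSeekCloser can wrap a reader that is not really seekable; the new
    // IsReaderSeekable check makes the signer return an error for such a body.
    req2, _ := http.NewRequest("POST", "https://sqs.us-east-1.amazonaws.com/", nil)
    unseekable := aws.ReadSeekCloser(bytes.NewBufferString("Action=ListQueues"))
    _, err = signer.Sign(req2, unseekable, "sqs", "us-east-1", time.Now())
    fmt.Println(err) // cannot use unseekable request body ...
}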
@ -706,56 +736,53 @@ func makeSha256(data []byte) []byte {
 
 func makeSha256Reader(reader io.ReadSeeker) []byte {
     hash := sha256.New()
-    start, _ := reader.Seek(0, 1)
-    defer reader.Seek(start, 0)
+    start, _ := reader.Seek(0, sdkio.SeekCurrent)
+    defer reader.Seek(start, sdkio.SeekStart)
 
     io.Copy(hash, reader)
     return hash.Sum(nil)
 }
 
-const doubleSpaces = "  "
-
-var doubleSpaceBytes = []byte(doubleSpaces)
-
-func stripExcessSpaces(headerVals []string) []string {
-    vals := make([]string, len(headerVals))
-    for i, str := range headerVals {
-        // Trim leading and trailing spaces
-        trimmed := strings.TrimSpace(str)
-
-        idx := strings.Index(trimmed, doubleSpaces)
-        var buf []byte
-        for idx > -1 {
-            // Multiple adjacent spaces found
-            if buf == nil {
-                // first time create the buffer
-                buf = []byte(trimmed)
-            }
-
-            stripToIdx := -1
-            for j := idx + 1; j < len(buf); j++ {
-                if buf[j] != ' ' {
-                    buf = append(buf[:idx+1], buf[j:]...)
-                    stripToIdx = j
-                    break
-                }
-            }
-
-            if stripToIdx >= 0 {
-                idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
-                if idx >= 0 {
-                    idx += stripToIdx
-                }
-            } else {
-                idx = -1
-            }
-        }
-
-        if buf != nil {
-            vals[i] = string(buf)
-        } else {
-            vals[i] = trimmed
-        }
-    }
-    return vals
-}
+const doubleSpace = "  "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain muliple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+    var j, k, l, m, spaces int
+    for i, str := range vals {
+        // Trim trailing spaces
+        for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+        }
+
+        // Trim leading spaces
+        for k = 0; k < j && str[k] == ' '; k++ {
+        }
+        str = str[k : j+1]
+
+        // Strip multiple spaces.
+        j = strings.Index(str, doubleSpace)
+        if j < 0 {
+            vals[i] = str
+            continue
+        }
+
+        buf := []byte(str)
+        for k, m, l = j, j, len(buf); k < l; k++ {
+            if buf[k] == ' ' {
+                if spaces == 0 {
+                    // First space.
+                    buf[m] = buf[k]
+                    m++
+                }
+                spaces++
+            } else {
+                // End of multiple spaces.
+                spaces = 0
+                buf[m] = buf[k]
+                m++
+            }
+        }
+
+        vals[i] = string(buf[:m])
+    }
+}
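That closes the v4 signer changes. For reference, a small sketch of the public presigning entry point whose internals changed above: Presign now passes an explicit isPresign flag to signWithBody instead of inferring it from a non-zero expiry. Bucket name and credentials below are placeholders:

package main

import (
    "bytes"
    "fmt"
    "net/http"
    "time"

    "github.com/aws/aws-sdk-go/aws/credentials"
    v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
    signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))

    req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)

    // The signature parameters end up in the query string of req.URL.
    _, err := signer.Presign(req, bytes.NewReader(nil), "s3", "us-east-1", 15*time.Minute, time.Now())
    if err != nil {
        fmt.Println("presign failed:", err)
        return
    }
    fmt.Println(req.URL.String())
}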
83  vendor/github.com/aws/aws-sdk-go/aws/types.go generated vendored
@ -3,6 +3,8 @@ package aws
 import (
     "io"
     "sync"
+
+    "github.com/aws/aws-sdk-go/internal/sdkio"
 )
 
 // ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
@ -22,6 +24,22 @@ type ReaderSeekerCloser struct {
     r io.Reader
 }
 
+// IsReaderSeekable returns if the underlying reader type can be seeked. A
+// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
+// type.
+func IsReaderSeekable(r io.Reader) bool {
+    switch v := r.(type) {
+    case ReaderSeekerCloser:
+        return v.IsSeeker()
+    case *ReaderSeekerCloser:
+        return v.IsSeeker()
+    case io.ReadSeeker:
+        return true
+    default:
+        return false
+    }
+}
+
 // Read reads from the reader up to size of p. The number of bytes read, and
 // error if it occurred will be returned.
 //
@ -56,6 +74,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool {
     return ok
 }
 
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+    type lenner interface {
+        Len() int
+    }
+
+    if lr, ok := r.r.(lenner); ok {
+        return lr.Len(), true
+    }
+
+    return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+    if l, ok := r.HasLen(); ok {
+        return int64(l), nil
+    }
+
+    if s, ok := r.r.(io.Seeker); ok {
+        return seekerLen(s)
+    }
+
+    return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
+func SeekerLen(s io.Seeker) (int64, error) {
+    // Determine if the seeker is actually seekable. ReaderSeekerCloser
+    // hides the fact that a io.Readers might not actually be seekable.
+    switch v := s.(type) {
+    case ReaderSeekerCloser:
+        return v.GetLen()
+    case *ReaderSeekerCloser:
+        return v.GetLen()
+    }
+
+    return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+    curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+    if err != nil {
+        return 0, err
+    }
+
+    endOffset, err := s.Seek(0, sdkio.SeekEnd)
+    if err != nil {
+        return 0, err
+    }
+
+    _, err = s.Seek(curOffset, sdkio.SeekStart)
+    if err != nil {
+        return 0, err
+    }
+
+    return endOffset - curOffset, nil
+}
+
 // Close closes the ReaderSeekerCloser.
 //
 // If the ReaderSeekerCloser is not an io.Closer nothing will be done.
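The helpers added above are exported, so they can be exercised directly; a short sketch of IsReaderSeekable and SeekerLen behaviour with readers from the standard library:

package main

import (
    "bufio"
    "bytes"
    "fmt"
    "strings"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    // bytes.Reader is an io.Seeker, so the wrapper is reported as seekable and
    // SeekerLen can measure the remaining bytes without consuming them.
    seekable := aws.ReadSeekCloser(bytes.NewReader([]byte("hello world")))
    fmt.Println(aws.IsReaderSeekable(seekable)) // true
    n, _ := aws.SeekerLen(seekable)
    fmt.Println(n) // 11

    // bufio.Reader is not an io.Seeker; ReadSeekCloser hides that, which is
    // exactly the case IsReaderSeekable exists to detect.
    buffered := aws.ReadSeekCloser(bufio.NewReader(strings.NewReader("hello")))
    fmt.Println(aws.IsReaderSeekable(buffered)) // false
}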
2  vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored
@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.8.34"
+const SDKVersion = "1.14.31"

10  vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go generated vendored Normal file
@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+    SeekStart   = 0 // seek relative to the origin of the file
+    SeekCurrent = 1 // seek relative to the current offset
+    SeekEnd     = 2 // seek relative to the end
+)
12  vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go generated vendored Normal file
@ -0,0 +1,12 @@
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+    SeekStart   = io.SeekStart   // seek relative to the origin of the file
+    SeekCurrent = io.SeekCurrent // seek relative to the current offset
+    SeekEnd     = io.SeekEnd     // seek relative to the end
+)
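These internal constants exist so pre-Go-1.7 builds can avoid io.SeekStart and friends; the v4 signer above now uses them in place of the magic 0/1/2 arguments to Seek. The sdkio package is internal to the SDK, so the sketch below shows the same "measure remaining bytes, then restore the offset" pattern with the standard io constants it aliases:

package main

import (
    "bytes"
    "fmt"
    "io"
)

// remaining reports how many bytes are left between the current offset and the
// end, mirroring the SDK's seekerLen helper, and restores the original offset.
func remaining(s io.Seeker) (int64, error) {
    cur, err := s.Seek(0, io.SeekCurrent)
    if err != nil {
        return 0, err
    }
    end, err := s.Seek(0, io.SeekEnd)
    if err != nil {
        return 0, err
    }
    _, err = s.Seek(cur, io.SeekStart)
    return end - cur, err
}

func main() {
    r := bytes.NewReader([]byte("0123456789"))
    r.Seek(4, io.SeekStart)
    n, _ := remaining(r)
    fmt.Println(n) // 6
}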
29  vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go generated vendored Normal file
@ -0,0 +1,29 @@
|
|||
package sdkrand
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// lockedSource is a thread-safe implementation of rand.Source
|
||||
type lockedSource struct {
|
||||
lk sync.Mutex
|
||||
src rand.Source
|
||||
}
|
||||
|
||||
func (r *lockedSource) Int63() (n int64) {
|
||||
r.lk.Lock()
|
||||
n = r.src.Int63()
|
||||
r.lk.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (r *lockedSource) Seed(seed int64) {
|
||||
r.lk.Lock()
|
||||
r.src.Seed(seed)
|
||||
r.lk.Unlock()
|
||||
}
|
||||
|
||||
// SeededRand is a new RNG using a thread safe implementation of rand.Source
|
||||
var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
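sdkrand is likewise an internal package, so it cannot be imported by provider code; the sketch below reproduces the same locked-source pattern with the standard library to show why SeededRand is safe to share across goroutines:

package main

import (
    "fmt"
    "math/rand"
    "sync"
    "time"
)

// lockedSource mirrors the type above: a rand.Source guarded by a mutex so one
// *rand.Rand can be used concurrently.
type lockedSource struct {
    mu  sync.Mutex
    src rand.Source
}

func (s *lockedSource) Int63() int64 {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.src.Int63()
}

func (s *lockedSource) Seed(seed int64) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.src.Seed(seed)
}

func main() {
    r := rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            fmt.Println(r.Int63n(100)) // safe: every draw locks the shared source
        }()
    }
    wg.Wait()
}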
23  vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go generated vendored Normal file
@ -0,0 +1,23 @@
|
|||
package sdkuri
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PathJoin will join the elements of the path delimited by the "/"
|
||||
// character. Similar to path.Join with the exception the trailing "/"
|
||||
// character is preserved if present.
|
||||
func PathJoin(elems ...string) string {
|
||||
if len(elems) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
|
||||
str := path.Join(elems...)
|
||||
if hasTrailing && str != "/" {
|
||||
str += "/"
|
||||
}
|
||||
|
||||
return str
|
||||
}
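PathJoin differs from path.Join only in keeping a trailing slash, which matters for S3-style key prefixes. Since sdkuri is internal, the sketch uses a local mirror of the function to show the difference:

package main

import (
    "fmt"
    "path"
    "strings"
)

// pathJoin is a local mirror of sdkuri.PathJoin above: path.Join semantics,
// but a trailing "/" on the final element is preserved.
func pathJoin(elems ...string) string {
    if len(elems) == 0 {
        return ""
    }
    hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
    s := path.Join(elems...)
    if hasTrailing && s != "/" {
        s += "/"
    }
    return s
}

func main() {
    fmt.Println(path.Join("bucket", "prefix/")) // bucket/prefix   (slash dropped)
    fmt.Println(pathJoin("bucket", "prefix/"))  // bucket/prefix/  (slash kept)
}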
144  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go generated vendored Normal file
@ -0,0 +1,144 @@
|
|||
package eventstream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type decodedMessage struct {
|
||||
rawMessage
|
||||
Headers decodedHeaders `json:"headers"`
|
||||
}
|
||||
type jsonMessage struct {
|
||||
Length json.Number `json:"total_length"`
|
||||
HeadersLen json.Number `json:"headers_length"`
|
||||
PreludeCRC json.Number `json:"prelude_crc"`
|
||||
Headers decodedHeaders `json:"headers"`
|
||||
Payload []byte `json:"payload"`
|
||||
CRC json.Number `json:"message_crc"`
|
||||
}
|
||||
|
||||
func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) {
|
||||
var jsonMsg jsonMessage
|
||||
if err = json.Unmarshal(b, &jsonMsg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Length, err = numAsUint32(jsonMsg.Length)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.Headers = jsonMsg.Headers
|
||||
d.Payload = jsonMsg.Payload
|
||||
d.CRC, err = numAsUint32(jsonMsg.CRC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decodedMessage) MarshalJSON() ([]byte, error) {
|
||||
jsonMsg := jsonMessage{
|
||||
Length: json.Number(strconv.Itoa(int(d.Length))),
|
||||
HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))),
|
||||
PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))),
|
||||
Headers: d.Headers,
|
||||
Payload: d.Payload,
|
||||
CRC: json.Number(strconv.Itoa(int(d.CRC))),
|
||||
}
|
||||
|
||||
return json.Marshal(jsonMsg)
|
||||
}
|
||||
|
||||
func numAsUint32(n json.Number) (uint32, error) {
|
||||
v, err := n.Int64()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get int64 json number, %v", err)
|
||||
}
|
||||
|
||||
return uint32(v), nil
|
||||
}
|
||||
|
||||
func (d decodedMessage) Message() Message {
|
||||
return Message{
|
||||
Headers: Headers(d.Headers),
|
||||
Payload: d.Payload,
|
||||
}
|
||||
}
|
||||
|
||||
type decodedHeaders Headers
|
||||
|
||||
func (hs *decodedHeaders) UnmarshalJSON(b []byte) error {
|
||||
var jsonHeaders []struct {
|
||||
Name string `json:"name"`
|
||||
Type valueType `json:"type"`
|
||||
Value interface{} `json:"value"`
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(bytes.NewReader(b))
|
||||
decoder.UseNumber()
|
||||
if err := decoder.Decode(&jsonHeaders); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var headers Headers
|
||||
for _, h := range jsonHeaders {
|
||||
value, err := valueFromType(h.Type, h.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headers.Set(h.Name, value)
|
||||
}
|
||||
(*hs) = decodedHeaders(headers)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func valueFromType(typ valueType, val interface{}) (Value, error) {
|
||||
switch typ {
|
||||
case trueValueType:
|
||||
return BoolValue(true), nil
|
||||
case falseValueType:
|
||||
return BoolValue(false), nil
|
||||
case int8ValueType:
|
||||
v, err := val.(json.Number).Int64()
|
||||
return Int8Value(int8(v)), err
|
||||
case int16ValueType:
|
||||
v, err := val.(json.Number).Int64()
|
||||
return Int16Value(int16(v)), err
|
||||
case int32ValueType:
|
||||
v, err := val.(json.Number).Int64()
|
||||
return Int32Value(int32(v)), err
|
||||
case int64ValueType:
|
||||
v, err := val.(json.Number).Int64()
|
||||
return Int64Value(v), err
|
||||
case bytesValueType:
|
||||
v, err := base64.StdEncoding.DecodeString(val.(string))
|
||||
return BytesValue(v), err
|
||||
case stringValueType:
|
||||
v, err := base64.StdEncoding.DecodeString(val.(string))
|
||||
return StringValue(string(v)), err
|
||||
case timestampValueType:
|
||||
v, err := val.(json.Number).Int64()
|
||||
return TimestampValue(timeFromEpochMilli(v)), err
|
||||
case uuidValueType:
|
||||
v, err := base64.StdEncoding.DecodeString(val.(string))
|
||||
var tv UUIDValue
|
||||
copy(tv[:], v)
|
||||
return tv, err
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val))
|
||||
}
|
||||
}
|
||||
199  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go generated vendored Normal file
|
|
@ -0,0 +1,199 @@
|
|||
package eventstream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
)
|
||||
|
||||
// Decoder provides decoding of an Event Stream messages.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
logger aws.Logger
|
||||
}
|
||||
|
||||
// NewDecoder initializes and returns a Decoder for decoding event
|
||||
// stream messages from the reader provided.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
// Decode attempts to decode a single message from the event stream reader.
|
||||
// Will return the event stream message, or error if Decode fails to read
|
||||
// the message from the stream.
|
||||
func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) {
|
||||
reader := d.r
|
||||
if d.logger != nil {
|
||||
debugMsgBuf := bytes.NewBuffer(nil)
|
||||
reader = io.TeeReader(reader, debugMsgBuf)
|
||||
defer func() {
|
||||
logMessageDecode(d.logger, debugMsgBuf, m, err)
|
||||
}()
|
||||
}
|
||||
|
||||
crc := crc32.New(crc32IEEETable)
|
||||
hashReader := io.TeeReader(reader, crc)
|
||||
|
||||
prelude, err := decodePrelude(hashReader, crc)
|
||||
if err != nil {
|
||||
return Message{}, err
|
||||
}
|
||||
|
||||
if prelude.HeadersLen > 0 {
|
||||
lr := io.LimitReader(hashReader, int64(prelude.HeadersLen))
|
||||
m.Headers, err = decodeHeaders(lr)
|
||||
if err != nil {
|
||||
return Message{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if payloadLen := prelude.PayloadLen(); payloadLen > 0 {
|
||||
buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen)))
|
||||
if err != nil {
|
||||
return Message{}, err
|
||||
}
|
||||
m.Payload = buf
|
||||
}
|
||||
|
||||
msgCRC := crc.Sum32()
|
||||
if err := validateCRC(reader, msgCRC); err != nil {
|
||||
return Message{}, err
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// UseLogger specifies the Logger that that the decoder should use to log the
|
||||
// message decode to.
|
||||
func (d *Decoder) UseLogger(logger aws.Logger) {
|
||||
d.logger = logger
|
||||
}
|
||||
|
||||
func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) {
|
||||
w := bytes.NewBuffer(nil)
|
||||
defer func() { logger.Log(w.String()) }()
|
||||
|
||||
fmt.Fprintf(w, "Raw message:\n%s\n",
|
||||
hex.Dump(msgBuf.Bytes()))
|
||||
|
||||
if decodeErr != nil {
|
||||
fmt.Fprintf(w, "Decode error: %v\n", decodeErr)
|
||||
return
|
||||
}
|
||||
|
||||
rawMsg, err := msg.rawMessage()
|
||||
if err != nil {
|
||||
fmt.Fprintf(w, "failed to create raw message, %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
decodedMsg := decodedMessage{
|
||||
rawMessage: rawMsg,
|
||||
Headers: decodedHeaders(msg.Headers),
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "Decoded message:\n")
|
||||
encoder := json.NewEncoder(w)
|
||||
if err := encoder.Encode(decodedMsg); err != nil {
|
||||
fmt.Fprintf(w, "failed to generate decoded message, %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) {
|
||||
var p messagePrelude
|
||||
|
||||
var err error
|
||||
p.Length, err = decodeUint32(r)
|
||||
if err != nil {
|
||||
return messagePrelude{}, err
|
||||
}
|
||||
|
||||
p.HeadersLen, err = decodeUint32(r)
|
||||
if err != nil {
|
||||
return messagePrelude{}, err
|
||||
}
|
||||
|
||||
if err := p.ValidateLens(); err != nil {
|
||||
return messagePrelude{}, err
|
||||
}
|
||||
|
||||
preludeCRC := crc.Sum32()
|
||||
if err := validateCRC(r, preludeCRC); err != nil {
|
||||
return messagePrelude{}, err
|
||||
}
|
||||
|
||||
p.PreludeCRC = preludeCRC
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func decodePayload(buf []byte, r io.Reader) ([]byte, error) {
|
||||
w := bytes.NewBuffer(buf[0:0])
|
||||
|
||||
_, err := io.Copy(w, r)
|
||||
return w.Bytes(), err
|
||||
}
|
||||
|
||||
func decodeUint8(r io.Reader) (uint8, error) {
|
||||
type byteReader interface {
|
||||
ReadByte() (byte, error)
|
||||
}
|
||||
|
||||
if br, ok := r.(byteReader); ok {
|
||||
v, err := br.ReadByte()
|
||||
return uint8(v), err
|
||||
}
|
||||
|
||||
var b [1]byte
|
||||
_, err := io.ReadFull(r, b[:])
|
||||
return uint8(b[0]), err
|
||||
}
|
||||
func decodeUint16(r io.Reader) (uint16, error) {
|
||||
var b [2]byte
|
||||
bs := b[:]
|
||||
_, err := io.ReadFull(r, bs)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return binary.BigEndian.Uint16(bs), nil
|
||||
}
|
||||
func decodeUint32(r io.Reader) (uint32, error) {
|
||||
var b [4]byte
|
||||
bs := b[:]
|
||||
_, err := io.ReadFull(r, bs)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return binary.BigEndian.Uint32(bs), nil
|
||||
}
|
||||
func decodeUint64(r io.Reader) (uint64, error) {
|
||||
var b [8]byte
|
||||
bs := b[:]
|
||||
_, err := io.ReadFull(r, bs)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return binary.BigEndian.Uint64(bs), nil
|
||||
}
|
||||
|
||||
func validateCRC(r io.Reader, expect uint32) error {
|
||||
msgCRC, err := decodeUint32(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if msgCRC != expect {
|
||||
return ChecksumError{}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
114  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go generated vendored Normal file
|
|
@ -0,0 +1,114 @@
|
|||
package eventstream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Encoder provides EventStream message encoding.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
|
||||
headersBuf *bytes.Buffer
|
||||
}
|
||||
|
||||
// NewEncoder initializes and returns an Encoder to encode Event Stream
|
||||
// messages to an io.Writer.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: w,
|
||||
headersBuf: bytes.NewBuffer(nil),
|
||||
}
|
||||
}
|
||||
|
||||
// Encode encodes a single EventStream message to the io.Writer the Encoder
|
||||
// was created with. An error is returned if writing the message fails.
|
||||
func (e *Encoder) Encode(msg Message) error {
|
||||
e.headersBuf.Reset()
|
||||
|
||||
err := encodeHeaders(e.headersBuf, msg.Headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
crc := crc32.New(crc32IEEETable)
|
||||
hashWriter := io.MultiWriter(e.w, crc)
|
||||
|
||||
headersLen := uint32(e.headersBuf.Len())
|
||||
payloadLen := uint32(len(msg.Payload))
|
||||
|
||||
if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if headersLen > 0 {
|
||||
if _, err := io.Copy(hashWriter, e.headersBuf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if payloadLen > 0 {
|
||||
if _, err := hashWriter.Write(msg.Payload); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
msgCRC := crc.Sum32()
|
||||
return binary.Write(e.w, binary.BigEndian, msgCRC)
|
||||
}
|
||||
|
||||
func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error {
|
||||
p := messagePrelude{
|
||||
Length: minMsgLen + headersLen + payloadLen,
|
||||
HeadersLen: headersLen,
|
||||
}
|
||||
if err := p.ValidateLens(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := binaryWriteFields(w, binary.BigEndian,
|
||||
p.Length,
|
||||
p.HeadersLen,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.PreludeCRC = crc.Sum32()
|
||||
err = binary.Write(w, binary.BigEndian, p.PreludeCRC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func encodeHeaders(w io.Writer, headers Headers) error {
|
||||
for _, h := range headers {
|
||||
hn := headerName{
|
||||
Len: uint8(len(h.Name)),
|
||||
}
|
||||
copy(hn.Name[:hn.Len], h.Name)
|
||||
if err := hn.encode(w); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := h.Value.encode(w); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error {
|
||||
for _, v := range vs {
|
||||
if err := binary.Write(w, order, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
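The encoder and decoder added in these two files are symmetric: Encode writes the prelude, headers, payload and a trailing CRC; Decode reads them back and validates both checksums. A round-trip sketch; note the eventstream package lives under private/, so importing it directly outside the SDK works but is not part of its supported surface:

package main

import (
    "bytes"
    "fmt"

    "github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
    var buf bytes.Buffer

    msg := eventstream.Message{
        Headers: eventstream.Headers{
            {Name: ":event-type", Value: eventstream.StringValue("Stats")},
        },
        Payload: []byte(`{"ok":true}`),
    }

    // Encode: prelude (total and header lengths + CRC), headers, payload, message CRC.
    if err := eventstream.NewEncoder(&buf).Encode(msg); err != nil {
        panic(err)
    }

    // Decode validates the prelude CRC and the whole-message CRC while reading.
    out, err := eventstream.NewDecoder(&buf).Decode(nil)
    if err != nil {
        panic(err)
    }
    fmt.Println(out.Headers.Get(":event-type")) // Stats
    fmt.Println(string(out.Payload))            // {"ok":true}
}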
|
||||
23  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go generated vendored Normal file
|
|
@ -0,0 +1,23 @@
|
|||
package eventstream
|
||||
|
||||
import "fmt"
|
||||
|
||||
// LengthError provides the error for items being larger than a maximum length.
|
||||
type LengthError struct {
|
||||
Part string
|
||||
Want int
|
||||
Have int
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
func (e LengthError) Error() string {
|
||||
return fmt.Sprintf("%s length invalid, %d/%d, %v",
|
||||
e.Part, e.Want, e.Have, e.Value)
|
||||
}
|
||||
|
||||
// ChecksumError provides the error for message checksum invalidation errors.
|
||||
type ChecksumError struct{}
|
||||
|
||||
func (e ChecksumError) Error() string {
|
||||
return "message checksum mismatch"
|
||||
}
|
||||
196  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go generated vendored Normal file
|
|
@ -0,0 +1,196 @@
|
|||
package eventstreamapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/eventstream"
|
||||
)
|
||||
|
||||
// Unmarshaler provides the interface for unmarshaling a EventStream
|
||||
// message into a SDK type.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error
|
||||
}
|
||||
|
||||
// EventStream headers with specific meaning to async API functionality.
|
||||
const (
|
||||
MessageTypeHeader = `:message-type` // Identifies type of message.
|
||||
EventMessageType = `event`
|
||||
ErrorMessageType = `error`
|
||||
ExceptionMessageType = `exception`
|
||||
|
||||
// Message Events
|
||||
EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
|
||||
|
||||
// Message Error
|
||||
ErrorCodeHeader = `:error-code`
|
||||
ErrorMessageHeader = `:error-message`
|
||||
|
||||
// Message Exception
|
||||
ExceptionTypeHeader = `:exception-type`
|
||||
)
|
||||
|
||||
// EventReader provides reading from the EventStream of an reader.
|
||||
type EventReader struct {
|
||||
reader io.ReadCloser
|
||||
decoder *eventstream.Decoder
|
||||
|
||||
unmarshalerForEventType func(string) (Unmarshaler, error)
|
||||
payloadUnmarshaler protocol.PayloadUnmarshaler
|
||||
|
||||
payloadBuf []byte
|
||||
}
|
||||
|
||||
// NewEventReader returns a EventReader built from the reader and unmarshaler
|
||||
// provided. Use ReadStream method to start reading from the EventStream.
|
||||
func NewEventReader(
|
||||
reader io.ReadCloser,
|
||||
payloadUnmarshaler protocol.PayloadUnmarshaler,
|
||||
unmarshalerForEventType func(string) (Unmarshaler, error),
|
||||
) *EventReader {
|
||||
return &EventReader{
|
||||
reader: reader,
|
||||
decoder: eventstream.NewDecoder(reader),
|
||||
payloadUnmarshaler: payloadUnmarshaler,
|
||||
unmarshalerForEventType: unmarshalerForEventType,
|
||||
payloadBuf: make([]byte, 10*1024),
|
||||
}
|
||||
}
|
||||
|
||||
// UseLogger instructs the EventReader to use the logger and log level
|
||||
// specified.
|
||||
func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) {
|
||||
if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) {
|
||||
r.decoder.UseLogger(logger)
|
||||
}
|
||||
}
|
||||
|
||||
// ReadEvent attempts to read a message from the EventStream and return the
|
||||
// unmarshaled event value that the message is for.
|
||||
//
|
||||
// For EventStream API errors check if the returned error satisfies the
|
||||
// awserr.Error interface to get the error's Code and Message components.
|
||||
//
|
||||
// EventUnmarshalers called with EventStream messages must take copies of the
|
||||
// message's Payload. The payload will is reused between events read.
|
||||
func (r *EventReader) ReadEvent() (event interface{}, err error) {
|
||||
msg, err := r.decoder.Decode(r.payloadBuf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
// Reclaim payload buffer for next message read.
|
||||
r.payloadBuf = msg.Payload[0:0]
|
||||
}()
|
||||
|
||||
typ, err := GetHeaderString(msg, MessageTypeHeader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch typ {
|
||||
case EventMessageType:
|
||||
return r.unmarshalEventMessage(msg)
|
||||
case ExceptionMessageType:
|
||||
err = r.unmarshalEventException(msg)
|
||||
return nil, err
|
||||
case ErrorMessageType:
|
||||
return nil, r.unmarshalErrorMessage(msg)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown eventstream message type, %v", typ)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *EventReader) unmarshalEventMessage(
|
||||
msg eventstream.Message,
|
||||
) (event interface{}, err error) {
|
||||
eventType, err := GetHeaderString(msg, EventTypeHeader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ev, err := r.unmarshalerForEventType(eventType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
func (r *EventReader) unmarshalEventException(
|
||||
msg eventstream.Message,
|
||||
) (err error) {
|
||||
eventType, err := GetHeaderString(msg, ExceptionTypeHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ev, err := r.unmarshalerForEventType(eventType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
err, ok = ev.(error)
|
||||
if !ok {
|
||||
err = messageError{
|
||||
code: "SerializationError",
|
||||
msg: fmt.Sprintf(
|
||||
"event stream exception %s mapped to non-error %T, %v",
|
||||
eventType, ev, ev,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) {
|
||||
var msgErr messageError
|
||||
|
||||
msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return msgErr
|
||||
}
|
||||
|
||||
// Close closes the EventReader's EventStream reader.
|
||||
func (r *EventReader) Close() error {
|
||||
return r.reader.Close()
|
||||
}
|
||||
|
||||
// GetHeaderString returns the value of the header as a string. If the header
|
||||
// is not set or the value is not a string an error will be returned.
|
||||
func GetHeaderString(msg eventstream.Message, headerName string) (string, error) {
|
||||
headerVal := msg.Headers.Get(headerName)
|
||||
if headerVal == nil {
|
||||
return "", fmt.Errorf("error header %s not present", headerName)
|
||||
}
|
||||
|
||||
v, ok := headerVal.Get().(string)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("error header value is not a string, %T", headerVal)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
24  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go generated vendored Normal file
|
|
@ -0,0 +1,24 @@
|
|||
package eventstreamapi
|
||||
|
||||
import "fmt"
|
||||
|
||||
type messageError struct {
|
||||
code string
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e messageError) Code() string {
|
||||
return e.code
|
||||
}
|
||||
|
||||
func (e messageError) Message() string {
|
||||
return e.msg
|
||||
}
|
||||
|
||||
func (e messageError) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.code, e.msg)
|
||||
}
|
||||
|
||||
func (e messageError) OrigErr() error {
|
||||
return nil
|
||||
}
|
||||
166  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go generated vendored Normal file
|
|
@ -0,0 +1,166 @@
|
|||
package eventstream
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Headers are a collection of EventStream header values.
|
||||
type Headers []Header
|
||||
|
||||
// Header is a single EventStream Key Value header pair.
|
||||
type Header struct {
|
||||
Name string
|
||||
Value Value
|
||||
}
|
||||
|
||||
// Set associates the name with a value. If the header name already exists in
|
||||
// the Headers the value will be replaced with the new one.
|
||||
func (hs *Headers) Set(name string, value Value) {
|
||||
var i int
|
||||
for ; i < len(*hs); i++ {
|
||||
if (*hs)[i].Name == name {
|
||||
(*hs)[i].Value = value
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
*hs = append(*hs, Header{
|
||||
Name: name, Value: value,
|
||||
})
|
||||
}
|
||||
|
||||
// Get returns the Value associated with the header. Nil is returned if the
|
||||
// value does not exist.
|
||||
func (hs Headers) Get(name string) Value {
|
||||
for i := 0; i < len(hs); i++ {
|
||||
if h := hs[i]; h.Name == name {
|
||||
return h.Value
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Del deletes the value in the Headers if it exists.
|
||||
func (hs *Headers) Del(name string) {
|
||||
for i := 0; i < len(*hs); i++ {
|
||||
if (*hs)[i].Name == name {
|
||||
copy((*hs)[i:], (*hs)[i+1:])
|
||||
(*hs) = (*hs)[:len(*hs)-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decodeHeaders(r io.Reader) (Headers, error) {
|
||||
hs := Headers{}
|
||||
|
||||
for {
|
||||
name, err := decodeHeaderName(r)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
// EOF while getting header name means no more headers
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
value, err := decodeHeaderValue(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hs.Set(name, value)
|
||||
}
|
||||
|
||||
return hs, nil
|
||||
}
|
||||
|
||||
func decodeHeaderName(r io.Reader) (string, error) {
|
||||
var n headerName
|
||||
|
||||
var err error
|
||||
n.Len, err = decodeUint8(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
name := n.Name[:n.Len]
|
||||
if _, err := io.ReadFull(r, name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(name), nil
|
||||
}
|
||||
|
||||
func decodeHeaderValue(r io.Reader) (Value, error) {
|
||||
var raw rawValue
|
||||
|
||||
typ, err := decodeUint8(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
raw.Type = valueType(typ)
|
||||
|
||||
var v Value
|
||||
|
||||
switch raw.Type {
|
||||
case trueValueType:
|
||||
v = BoolValue(true)
|
||||
case falseValueType:
|
||||
v = BoolValue(false)
|
||||
case int8ValueType:
|
||||
var tv Int8Value
|
||||
err = tv.decode(r)
|
||||
v = tv
|
||||
case int16ValueType:
|
||||
var tv Int16Value
|
||||
err = tv.decode(r)
|
||||
v = tv
|
||||
case int32ValueType:
|
||||
var tv Int32Value
|
||||
err = tv.decode(r)
|
||||
v = tv
|
||||
case int64ValueType:
|
||||
var tv Int64Value
|
||||
err = tv.decode(r)
|
||||
v = tv
|
||||
case bytesValueType:
|
||||
var tv BytesValue
|
||||
err = tv.decode(r)
|
||||
v = tv
|
||||
case stringValueType:
|
||||
var tv StringValue
|
||||
err = tv.decode(r)
|
||||
v = tv
|
||||
case timestampValueType:
|
||||
var tv TimestampValue
|
||||
err = tv.decode(r)
|
||||
v = tv
|
||||
case uuidValueType:
|
||||
var tv UUIDValue
|
||||
err = tv.decode(r)
|
||||
v = tv
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown value type %d", raw.Type))
|
||||
}
|
||||
|
||||
// Error could be EOF, let caller deal with it
|
||||
return v, err
|
||||
}
|
||||
|
||||
const maxHeaderNameLen = 255
|
||||
|
||||
type headerName struct {
|
||||
Len uint8
|
||||
Name [maxHeaderNameLen]byte
|
||||
}
|
||||
|
||||
func (v headerName) encode(w io.Writer) error {
|
||||
if err := binary.Write(w, binary.BigEndian, v.Len); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err := w.Write(v.Name[:v.Len])
|
||||
return err
|
||||
}
|
||||
501  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go generated vendored Normal file
|
|
@ -0,0 +1,501 @@
|
|||
package eventstream
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1
|
||||
|
||||
// valueType is the EventStream header value type.
|
||||
type valueType uint8
|
||||
|
||||
// Header value types
|
||||
const (
|
||||
trueValueType valueType = iota
|
||||
falseValueType
|
||||
int8ValueType // Byte
|
||||
int16ValueType // Short
|
||||
int32ValueType // Integer
|
||||
int64ValueType // Long
|
||||
bytesValueType
|
||||
stringValueType
|
||||
timestampValueType
|
||||
uuidValueType
|
||||
)
|
||||
|
||||
func (t valueType) String() string {
|
||||
switch t {
|
||||
case trueValueType:
|
||||
return "bool"
|
||||
case falseValueType:
|
||||
return "bool"
|
||||
case int8ValueType:
|
||||
return "int8"
|
||||
case int16ValueType:
|
||||
return "int16"
|
||||
case int32ValueType:
|
||||
return "int32"
|
||||
case int64ValueType:
|
||||
return "int64"
|
||||
case bytesValueType:
|
||||
return "byte_array"
|
||||
case stringValueType:
|
||||
return "string"
|
||||
case timestampValueType:
|
||||
return "timestamp"
|
||||
case uuidValueType:
|
||||
return "uuid"
|
||||
default:
|
||||
return fmt.Sprintf("unknown value type %d", uint8(t))
|
||||
}
|
||||
}
|
||||
|
||||
type rawValue struct {
|
||||
Type valueType
|
||||
Len uint16 // Only set for variable length slices
|
||||
Value []byte // byte representation of value, BigEndian encoding.
|
||||
}
|
||||
|
||||
func (r rawValue) encodeScalar(w io.Writer, v interface{}) error {
|
||||
return binaryWriteFields(w, binary.BigEndian,
|
||||
r.Type,
|
||||
v,
|
||||
)
|
||||
}
|
||||
|
||||
func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error {
|
||||
binary.Write(w, binary.BigEndian, r.Type)
|
||||
|
||||
_, err := w.Write(v)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r rawValue) encodeBytes(w io.Writer, v []byte) error {
|
||||
if len(v) > maxHeaderValueLen {
|
||||
return LengthError{
|
||||
Part: "header value",
|
||||
Want: maxHeaderValueLen, Have: len(v),
|
||||
Value: v,
|
||||
}
|
||||
}
|
||||
r.Len = uint16(len(v))
|
||||
|
||||
err := binaryWriteFields(w, binary.BigEndian,
|
||||
r.Type,
|
||||
r.Len,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = w.Write(v)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r rawValue) encodeString(w io.Writer, v string) error {
|
||||
if len(v) > maxHeaderValueLen {
|
||||
return LengthError{
|
||||
Part: "header value",
|
||||
Want: maxHeaderValueLen, Have: len(v),
|
||||
Value: v,
|
||||
}
|
||||
}
|
||||
r.Len = uint16(len(v))
|
||||
|
||||
type stringWriter interface {
|
||||
WriteString(string) (int, error)
|
||||
}
|
||||
|
||||
err := binaryWriteFields(w, binary.BigEndian,
|
||||
r.Type,
|
||||
r.Len,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sw, ok := w.(stringWriter); ok {
|
||||
_, err = sw.WriteString(v)
|
||||
} else {
|
||||
_, err = w.Write([]byte(v))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func decodeFixedBytesValue(r io.Reader, buf []byte) error {
|
||||
_, err := io.ReadFull(r, buf)
|
||||
return err
|
||||
}
|
||||
|
||||
func decodeBytesValue(r io.Reader) ([]byte, error) {
|
||||
var raw rawValue
|
||||
var err error
|
||||
raw.Len, err = decodeUint16(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf := make([]byte, raw.Len)
|
||||
_, err = io.ReadFull(r, buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func decodeStringValue(r io.Reader) (string, error) {
|
||||
v, err := decodeBytesValue(r)
|
||||
return string(v), err
|
||||
}
|
||||
|
||||
// Value represents the abstract header value.
|
||||
type Value interface {
|
||||
Get() interface{}
|
||||
String() string
|
||||
valueType() valueType
|
||||
encode(io.Writer) error
|
||||
}
|
||||
|
||||
// An BoolValue provides eventstream encoding, and representation
|
||||
// of a Go bool value.
|
||||
type BoolValue bool
|
||||
|
||||
// Get returns the underlying type
|
||||
func (v BoolValue) Get() interface{} {
|
||||
return bool(v)
|
||||
}
|
||||
|
||||
// valueType returns the EventStream header value type value.
|
||||
func (v BoolValue) valueType() valueType {
|
||||
if v {
|
||||
return trueValueType
|
||||
}
|
||||
return falseValueType
|
||||
}
|
||||
|
||||
func (v BoolValue) String() string {
|
||||
return strconv.FormatBool(bool(v))
|
||||
}
|
||||
|
||||
// encode encodes the BoolValue into an eventstream binary value
|
||||
// representation.
|
||||
func (v BoolValue) encode(w io.Writer) error {
|
||||
return binary.Write(w, binary.BigEndian, v.valueType())
|
||||
}
|
||||
|
||||
// An Int8Value provides eventstream encoding, and representation of a Go
|
||||
// int8 value.
|
||||
type Int8Value int8
|
||||
|
||||
// Get returns the underlying value.
|
||||
func (v Int8Value) Get() interface{} {
|
||||
return int8(v)
|
||||
}
|
||||
|
||||
// valueType returns the EventStream header value type value.
|
||||
func (Int8Value) valueType() valueType {
|
||||
return int8ValueType
|
||||
}
|
||||
|
||||
func (v Int8Value) String() string {
|
||||
return fmt.Sprintf("0x%02x", int8(v))
|
||||
}
|
||||
|
||||
// encode encodes the Int8Value into an eventstream binary value
|
||||
// representation.
|
||||
func (v Int8Value) encode(w io.Writer) error {
|
||||
raw := rawValue{
|
||||
Type: v.valueType(),
|
||||
}
|
||||
|
||||
return raw.encodeScalar(w, v)
|
||||
}
|
||||
|
||||
func (v *Int8Value) decode(r io.Reader) error {
|
||||
n, err := decodeUint8(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*v = Int8Value(n)
|
||||
return nil
|
||||
}
|
||||
|
||||
// An Int16Value provides eventstream encoding, and representation of a Go
|
||||
// int16 value.
|
||||
type Int16Value int16
|
||||
|
||||
// Get returns the underlying value.
|
||||
func (v Int16Value) Get() interface{} {
|
||||
return int16(v)
|
||||
}
|
||||
|
||||
// valueType returns the EventStream header value type value.
|
||||
func (Int16Value) valueType() valueType {
|
||||
return int16ValueType
|
||||
}
|
||||
|
||||
func (v Int16Value) String() string {
|
||||
return fmt.Sprintf("0x%04x", int16(v))
|
||||
}
|
||||
|
||||
// encode encodes the Int16Value into an eventstream binary value
|
||||
// representation.
|
||||
func (v Int16Value) encode(w io.Writer) error {
|
||||
raw := rawValue{
|
||||
Type: v.valueType(),
|
||||
}
|
||||
return raw.encodeScalar(w, v)
|
||||
}
|
||||
|
||||
func (v *Int16Value) decode(r io.Reader) error {
|
||||
n, err := decodeUint16(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*v = Int16Value(n)
|
||||
return nil
|
||||
}
|
||||
|
||||
// An Int32Value provides eventstream encoding, and representation of a Go
|
||||
// int32 value.
|
||||
type Int32Value int32
|
||||
|
||||
// Get returns the underlying value.
|
||||
func (v Int32Value) Get() interface{} {
|
||||
return int32(v)
|
||||
}
|
||||
|
||||
// valueType returns the EventStream header value type value.
|
||||
func (Int32Value) valueType() valueType {
|
||||
return int32ValueType
|
||||
}
|
||||
|
||||
func (v Int32Value) String() string {
|
||||
return fmt.Sprintf("0x%08x", int32(v))
|
||||
}
|
||||
|
||||
// encode encodes the Int32Value into an eventstream binary value
|
||||
// representation.
|
||||
func (v Int32Value) encode(w io.Writer) error {
|
||||
raw := rawValue{
|
||||
Type: v.valueType(),
|
||||
}
|
||||
return raw.encodeScalar(w, v)
|
||||
}
|
||||
|
||||
func (v *Int32Value) decode(r io.Reader) error {
|
||||
n, err := decodeUint32(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*v = Int32Value(n)
|
||||
return nil
|
||||
}
|
||||
|
||||
// An Int64Value provides eventstream encoding, and representation of a Go
|
||||
// int64 value.
|
||||
type Int64Value int64
|
||||
|
||||
// Get returns the underlying value.
|
||||
func (v Int64Value) Get() interface{} {
|
||||
return int64(v)
|
||||
}
|
||||
|
||||
// valueType returns the EventStream header value type value.
|
||||
func (Int64Value) valueType() valueType {
|
||||
return int64ValueType
|
||||
}
|
||||
|
||||
func (v Int64Value) String() string {
|
||||
return fmt.Sprintf("0x%016x", int64(v))
|
||||
}
|
||||
|
||||
// encode encodes the Int64Value into an eventstream binary value
|
||||
// representation.
|
||||
func (v Int64Value) encode(w io.Writer) error {
|
||||
raw := rawValue{
|
||||
Type: v.valueType(),
|
||||
}
|
||||
return raw.encodeScalar(w, v)
|
||||
}
|
||||
|
||||
func (v *Int64Value) decode(r io.Reader) error {
|
||||
n, err := decodeUint64(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*v = Int64Value(n)
|
||||
return nil
|
||||
}
|
||||
|
||||
// An BytesValue provides eventstream encoding, and representation of a Go
|
||||
// byte slice.
|
||||
type BytesValue []byte
|
||||
|
||||
// Get returns the underlying value.
|
||||
func (v BytesValue) Get() interface{} {
|
||||
return []byte(v)
|
||||
}
|
||||
|
||||
// valueType returns the EventStream header value type value.
|
||||
func (BytesValue) valueType() valueType {
|
||||
return bytesValueType
|
||||
}
|
||||
|
||||
func (v BytesValue) String() string {
|
||||
return base64.StdEncoding.EncodeToString([]byte(v))
|
||||
}
|
||||
|
||||
// encode encodes the BytesValue into an eventstream binary value
|
||||
// representation.
|
||||
func (v BytesValue) encode(w io.Writer) error {
|
||||
raw := rawValue{
|
||||
Type: v.valueType(),
|
||||
}
|
||||
|
||||
return raw.encodeBytes(w, []byte(v))
|
||||
}
|
||||
|
||||
func (v *BytesValue) decode(r io.Reader) error {
|
||||
buf, err := decodeBytesValue(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*v = BytesValue(buf)
|
||||
return nil
|
||||
}
|
||||
|
||||
// An StringValue provides eventstream encoding, and representation of a Go
|
||||
// string.
|
||||
type StringValue string
|
||||
|
||||
// Get returns the underlying value.
|
||||
func (v StringValue) Get() interface{} {
|
||||
return string(v)
|
||||
}
|
||||
|
||||
// valueType returns the EventStream header value type value.
|
||||
func (StringValue) valueType() valueType {
|
||||
return stringValueType
|
||||
}
|
||||
|
||||
func (v StringValue) String() string {
|
||||
return string(v)
|
||||
}
|
||||
|
||||
// encode encodes the StringValue into an eventstream binary value
|
||||
// representation.
|
||||
func (v StringValue) encode(w io.Writer) error {
|
||||
raw := rawValue{
|
||||
Type: v.valueType(),
|
||||
}
|
||||
|
||||
return raw.encodeString(w, string(v))
|
||||
}
|
||||
|
||||
func (v *StringValue) decode(r io.Reader) error {
|
||||
s, err := decodeStringValue(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*v = StringValue(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// An TimestampValue provides eventstream encoding, and representation of a Go
|
||||
// timestamp.
|
||||
type TimestampValue time.Time
|
||||
|
||||
// Get returns the underlying value.
|
||||
func (v TimestampValue) Get() interface{} {
|
||||
return time.Time(v)
|
||||
}
|
||||
|
||||
// valueType returns the EventStream header value type value.
|
||||
func (TimestampValue) valueType() valueType {
|
||||
return timestampValueType
|
||||
}
|
||||
|
||||
func (v TimestampValue) epochMilli() int64 {
|
||||
nano := time.Time(v).UnixNano()
|
||||
msec := nano / int64(time.Millisecond)
|
||||
return msec
|
||||
}
|
||||
|
||||
func (v TimestampValue) String() string {
|
||||
msec := v.epochMilli()
|
||||
return strconv.FormatInt(msec, 10)
|
||||
}
|
||||
|
||||
// encode encodes the TimestampValue into an eventstream binary value
|
||||
// representation.
|
||||
func (v TimestampValue) encode(w io.Writer) error {
|
||||
raw := rawValue{
|
||||
Type: v.valueType(),
|
||||
}
|
||||
|
||||
msec := v.epochMilli()
|
||||
return raw.encodeScalar(w, msec)
|
||||
}
|
||||
|
||||
func (v *TimestampValue) decode(r io.Reader) error {
|
||||
n, err := decodeUint64(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*v = TimestampValue(timeFromEpochMilli(int64(n)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func timeFromEpochMilli(t int64) time.Time {
|
||||
secs := t / 1e3
|
||||
msec := t % 1e3
|
||||
return time.Unix(secs, msec*int64(time.Millisecond)).UTC()
|
||||
}
|
||||
|
||||
// An UUIDValue provides eventstream encoding, and representation of a UUID
|
||||
// value.
|
||||
type UUIDValue [16]byte
|
||||
|
||||
// Get returns the underlying value.
|
||||
func (v UUIDValue) Get() interface{} {
|
||||
return v[:]
|
||||
}
|
||||
|
||||
// valueType returns the EventStream header value type value.
|
||||
func (UUIDValue) valueType() valueType {
|
||||
return uuidValueType
|
||||
}
|
||||
|
||||
func (v UUIDValue) String() string {
|
||||
return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:])
|
||||
}
|
||||
|
||||
// encode encodes the UUIDValue into an eventstream binary value
|
||||
// representation.
|
||||
func (v UUIDValue) encode(w io.Writer) error {
|
||||
raw := rawValue{
|
||||
Type: v.valueType(),
|
||||
}
|
||||
|
||||
return raw.encodeFixedSlice(w, v[:])
|
||||
}
|
||||
|
||||
func (v *UUIDValue) decode(r io.Reader) error {
|
||||
tv := (*v)[:]
|
||||
return decodeFixedBytesValue(r, tv)
|
||||
}
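Each Value implementation above carries its own wire type, String form and decode method; timestamps travel as milliseconds since the Unix epoch. A small sketch of the exported value types:

package main

import (
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
    // TimestampValue stringifies as epoch milliseconds.
    ts := eventstream.TimestampValue(time.Unix(1546300800, 0)) // 2019-01-01T00:00:00Z
    fmt.Println(ts.String())                                   // 1546300800000

    // Integer values print as fixed-width hex, bytes as base64.
    fmt.Println(eventstream.Int32Value(255).String())          // 0x000000ff
    fmt.Println(eventstream.BytesValue([]byte{1, 2}).String()) // AQI=
}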
|
||||
103  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go generated vendored Normal file
|
|
@ -0,0 +1,103 @@
|
|||
package eventstream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"hash/crc32"
|
||||
)
|
||||
|
||||
const preludeLen = 8
|
||||
const preludeCRCLen = 4
|
||||
const msgCRCLen = 4
|
||||
const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen
|
||||
const maxPayloadLen = 1024 * 1024 * 16 // 16MB
|
||||
const maxHeadersLen = 1024 * 128 // 128KB
|
||||
const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen
|
||||
|
||||
var crc32IEEETable = crc32.MakeTable(crc32.IEEE)
|
||||
|
||||
// A Message provides the eventstream message representation.
|
||||
type Message struct {
|
||||
Headers Headers
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
func (m *Message) rawMessage() (rawMessage, error) {
|
||||
var raw rawMessage
|
||||
|
||||
if len(m.Headers) > 0 {
|
||||
var headers bytes.Buffer
|
||||
if err := encodeHeaders(&headers, m.Headers); err != nil {
|
||||
return rawMessage{}, err
|
||||
}
|
||||
raw.Headers = headers.Bytes()
|
||||
raw.HeadersLen = uint32(len(raw.Headers))
|
||||
}
|
||||
|
||||
raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen
|
||||
|
||||
hash := crc32.New(crc32IEEETable)
|
||||
binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen)
|
||||
raw.PreludeCRC = hash.Sum32()
|
||||
|
||||
binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC)
|
||||
|
||||
if raw.HeadersLen > 0 {
|
||||
hash.Write(raw.Headers)
|
||||
}
|
||||
|
||||
// Read payload bytes and update hash for it as well.
|
||||
if len(m.Payload) > 0 {
|
||||
raw.Payload = m.Payload
|
||||
hash.Write(raw.Payload)
|
||||
}
|
||||
|
||||
raw.CRC = hash.Sum32()
|
||||
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
type messagePrelude struct {
|
||||
Length uint32
|
||||
HeadersLen uint32
|
||||
PreludeCRC uint32
|
||||
}
|
||||
|
||||
func (p messagePrelude) PayloadLen() uint32 {
|
||||
return p.Length - p.HeadersLen - minMsgLen
|
||||
}
|
||||
|
||||
func (p messagePrelude) ValidateLens() error {
|
||||
if p.Length == 0 || p.Length > maxMsgLen {
|
||||
return LengthError{
|
||||
Part: "message prelude",
|
||||
Want: maxMsgLen,
|
||||
Have: int(p.Length),
|
||||
}
|
||||
}
|
||||
if p.HeadersLen > maxHeadersLen {
|
||||
return LengthError{
|
||||
Part: "message headers",
|
||||
Want: maxHeadersLen,
|
||||
Have: int(p.HeadersLen),
|
||||
}
|
||||
}
|
||||
if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen {
|
||||
return LengthError{
|
||||
Part: "message payload",
|
||||
Want: maxPayloadLen,
|
||||
Have: int(payloadLen),
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type rawMessage struct {
|
||||
messagePrelude
|
||||
|
||||
Headers []byte
|
||||
Payload []byte
|
||||
|
||||
CRC uint32
|
||||
}
|
||||
76  vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go generated vendored Normal file
|
|
@ -0,0 +1,76 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
)
|
||||
|
||||
// EscapeMode is the mode that should be use for escaping a value
|
||||
type EscapeMode uint
|
||||
|
||||
// The modes for escaping a value before it is marshaled, and unmarshaled.
|
||||
const (
|
||||
NoEscape EscapeMode = iota
|
||||
Base64Escape
|
||||
QuotedEscape
|
||||
)
|
||||
|
||||
// EncodeJSONValue marshals the value into a JSON string, and optionally base64
|
||||
// encodes the string before returning it.
|
||||
//
|
||||
// Will panic if the escape mode is unknown.
|
||||
func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
switch escape {
|
||||
case NoEscape:
|
||||
return string(b), nil
|
||||
case Base64Escape:
|
||||
return base64.StdEncoding.EncodeToString(b), nil
|
||||
case QuotedEscape:
|
||||
return strconv.Quote(string(b)), nil
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
|
||||
}
|
||||
|
||||
// DecodeJSONValue will attempt to decode the string input as a JSONValue.
|
||||
// Optionally decoding base64 the value first before JSON unmarshaling.
|
||||
//
|
||||
// Will panic if the escape mode is unknown.
|
||||
func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
|
||||
var b []byte
|
||||
var err error
|
||||
|
||||
switch escape {
|
||||
case NoEscape:
|
||||
b = []byte(v)
|
||||
case Base64Escape:
|
||||
b, err = base64.StdEncoding.DecodeString(v)
|
||||
case QuotedEscape:
|
||||
var u string
|
||||
u, err = strconv.Unquote(v)
|
||||
b = []byte(u)
|
||||
default:
|
||||
panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := aws.JSONValue{}
|
||||
err = json.Unmarshal(b, &m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
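EncodeJSONValue and DecodeJSONValue are inverses for a given EscapeMode; Base64Escape is the form used when a JSON value has to ride inside an HTTP header. A round-trip sketch:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
    v := aws.JSONValue{"context": "sample", "count": 2}

    s, err := protocol.EncodeJSONValue(v, protocol.Base64Escape)
    if err != nil {
        panic(err)
    }
    fmt.Println(s) // base64 of the JSON document

    out, err := protocol.DecodeJSONValue(s, protocol.Base64Escape)
    if err != nil {
        panic(err)
    }
    fmt.Println(out["context"], out["count"]) // sample 2
}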
|
||||
81  vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go generated vendored Normal file
|
|
@ -0,0 +1,81 @@
|
|||
package protocol

import (
	"io"
	"io/ioutil"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
)

// PayloadUnmarshaler provides the interface for unmarshaling a payload's
// reader into a SDK shape.
type PayloadUnmarshaler interface {
	UnmarshalPayload(io.Reader, interface{}) error
}

// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
// HandlerList. This provides the support for unmarshaling a payload reader to
// a shape without needing a SDK request first.
type HandlerPayloadUnmarshal struct {
	Unmarshalers request.HandlerList
}

// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
// fails.
func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
	req := &request.Request{
		HTTPRequest: &http.Request{},
		HTTPResponse: &http.Response{
			StatusCode: 200,
			Header:     http.Header{},
			Body:       ioutil.NopCloser(r),
		},
		Data: v,
	}

	h.Unmarshalers.Run(req)

	return req.Error
}

// PayloadMarshaler provides the interface for marshaling a SDK shape into an
// io.Writer.
type PayloadMarshaler interface {
	MarshalPayload(io.Writer, interface{}) error
}

// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
// This provides support for marshaling a SDK shape into an io.Writer without
// needing a SDK request first.
type HandlerPayloadMarshal struct {
	Marshalers request.HandlerList
}

// MarshalPayload marshals the SDK shape into the io.Writer using the
// Marshalers HandlerList provided. Returns an error if marshaling fails.
func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
	req := request.New(
		aws.Config{},
		metadata.ClientInfo{},
		request.Handlers{},
		nil,
		&request.Operation{HTTPMethod: "GET"},
		v,
		nil,
	)

	h.Marshalers.Run(req)

	if req.Error != nil {
		return req.Error
	}

	io.Copy(w, req.GetBody())

	return nil
}
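Editor's note: a minimal sketch, not from the diff, of what HandlerPayloadUnmarshal enables: decoding a raw payload reader into a struct without building a full SDK request. The inline JSON handler and the output type below are stand-ins; in the SDK the HandlerList would hold the real protocol unmarshalers.

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
)

type output struct {
	Name string `json:"name"`
}

func main() {
	var handlers request.HandlerList
	handlers.PushBack(func(r *request.Request) {
		// Decode the synthetic HTTP response body straight into r.Data.
		if err := json.NewDecoder(r.HTTPResponse.Body).Decode(r.Data); err != nil {
			r.Error = err
		}
	})

	u := protocol.HandlerPayloadUnmarshal{Unmarshalers: handlers}

	var out output
	if err := u.UnmarshalPayload(strings.NewReader(`{"name":"bucket-a"}`), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // bucket-a
}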
2	vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go generated vendored
@@ -25,7 +25,7 @@ func Build(r *request.Request) {
 		return
 	}
 
-	if r.ExpireTime == 0 {
+	if !r.IsPresigned() {
 		r.HTTPRequest.Method = "POST"
 		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
 		r.SetBufferBody([]byte(body.Encode()))
11	vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go generated vendored
@@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string
 		return nil
 	}
 
+	if _, ok := value.Interface().([]byte); ok {
+		return q.parseScalar(v, value, prefix, tag)
+	}
+
 	// check for unflattened list member
 	if !q.isEC2 && tag.Get("flattened") == "" {
 		if listName := tag.Get("locationNameList"); listName == "" {
@@ -229,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
 		v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
 	case time.Time:
 		const ISO8601UTC = "2006-01-02T15:04:05Z"
-		v.Set(name, value.UTC().Format(ISO8601UTC))
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.ISO8601TimeFormatName
+		}
+
+		v.Set(name, protocol.FormatTime(format, value))
 	default:
 		return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
 	}
35	vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go generated vendored
@@ -4,7 +4,6 @@ package rest
 	"bytes"
 	"encoding/base64"
-	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
@@ -18,11 +17,9 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
-// RFC822 returns an RFC822 formatted timestamp for AWS protocols
-const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
-
 // Whether the byte value can be sent without escaping in AWS URLs
 var noEscape [256]bool
@@ -252,13 +249,12 @@ func EscapePath(path string, encodeSep bool) string {
 	return buf.String()
 }
 
-func convertType(v reflect.Value, tag reflect.StructTag) (string, error) {
+func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
 	v = reflect.Indirect(v)
 	if !v.IsValid() {
 		return "", errValueNotSet
 	}
 
-	var str string
 	switch value := v.Interface().(type) {
 	case string:
 		str = value
@@ -271,19 +267,28 @@ func convertType(v reflect.Value, tag reflect.StructTag) (string, error) {
 	case float64:
 		str = strconv.FormatFloat(value, 'f', -1, 64)
 	case time.Time:
-		str = value.UTC().Format(RFC822)
-	case aws.JSONValue:
-		b, err := json.Marshal(value)
-		if err != nil {
-			return "", err
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.RFC822TimeFormatName
+			if tag.Get("location") == "querystring" {
+				format = protocol.ISO8601TimeFormatName
+			}
 		}
+		str = protocol.FormatTime(format, value)
+	case aws.JSONValue:
+		if len(value) == 0 {
+			return "", errValueNotSet
+		}
+		escaping := protocol.NoEscape
 		if tag.Get("location") == "header" {
-			str = base64.StdEncoding.EncodeToString(b)
-		} else {
-			str = string(b)
+			escaping = protocol.Base64Escape
 		}
+		str, err = protocol.EncodeJSONValue(value, escaping)
+		if err != nil {
+			return "", fmt.Errorf("unable to encode JSONValue, %v", err)
+		}
 	default:
-		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+		err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
 		return "", err
 	}
 	return str, nil
20	vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go generated vendored
@@ -3,7 +3,6 @@ package rest
 	"bytes"
 	"encoding/base64"
-	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -16,6 +15,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
 // UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
@@ -198,23 +198,21 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
 		}
 		v.Set(reflect.ValueOf(&f))
 	case *time.Time:
-		t, err := time.Parse(RFC822, header)
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.RFC822TimeFormatName
+		}
+		t, err := protocol.ParseTime(format, header)
 		if err != nil {
 			return err
 		}
 		v.Set(reflect.ValueOf(&t))
 	case aws.JSONValue:
-		b := []byte(header)
-		var err error
+		escaping := protocol.NoEscape
 		if tag.Get("location") == "header" {
-			b, err = base64.StdEncoding.DecodeString(header)
-			if err != nil {
-				return err
-			}
+			escaping = protocol.Base64Escape
 		}
-
-		m := aws.JSONValue{}
-		err = json.Unmarshal(b, &m)
+		m, err := protocol.DecodeJSONValue(header, escaping)
 		if err != nil {
 			return err
 		}
72	vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go generated vendored Normal file
@@ -0,0 +1,72 @@
package protocol

import (
	"strconv"
	"time"
)

// Names of time formats supported by the SDK
const (
	RFC822TimeFormatName  = "rfc822"
	ISO8601TimeFormatName = "iso8601"
	UnixTimeFormatName    = "unixTimestamp"
)

// Time formats supported by the SDK
const (
	// RFC 7231#section-7.1.1.1 timestamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
	RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"

	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
	ISO8601TimeFormat = "2006-01-02T15:04:05Z"
)

// IsKnownTimestampFormat returns if the timestamp format name
// is known to the SDK's protocols.
func IsKnownTimestampFormat(name string) bool {
	switch name {
	case RFC822TimeFormatName:
		fallthrough
	case ISO8601TimeFormatName:
		fallthrough
	case UnixTimeFormatName:
		return true
	default:
		return false
	}
}

// FormatTime returns a string value of the time.
func FormatTime(name string, t time.Time) string {
	t = t.UTC()

	switch name {
	case RFC822TimeFormatName:
		return t.Format(RFC822TimeFormat)
	case ISO8601TimeFormatName:
		return t.Format(ISO8601TimeFormat)
	case UnixTimeFormatName:
		return strconv.FormatInt(t.Unix(), 10)
	default:
		panic("unknown timestamp format name, " + name)
	}
}

// ParseTime attempts to parse the time given the format. Returns
// the time if it was able to be parsed, and fails otherwise.
func ParseTime(formatName, value string) (time.Time, error) {
	switch formatName {
	case RFC822TimeFormatName:
		return time.Parse(RFC822TimeFormat, value)
	case ISO8601TimeFormatName:
		return time.Parse(ISO8601TimeFormat, value)
	case UnixTimeFormatName:
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return time.Time{}, err
		}
		return time.Unix(int64(v), 0), nil
	default:
		panic("unknown timestamp format name, " + formatName)
	}
}
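Editor's note: the sketch below is not part of the diff; it simply round-trips a fixed timestamp through the new FormatTime/ParseTime helpers for each supported format name, which is the behavior the rest, query, and xmlutil changes elsewhere in this commit rely on.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	for _, name := range []string{
		protocol.RFC822TimeFormatName,  // "rfc822"
		protocol.ISO8601TimeFormatName, // "iso8601"
		protocol.UnixTimeFormatName,    // "unixTimestamp"
	} {
		s := protocol.FormatTime(name, t)
		parsed, err := protocol.ParseTime(name, s)
		if err != nil {
			panic(err)
		}
		// Each format should reproduce the original second-precision time.
		fmt.Printf("%-13s %-29s round-trips: %v\n", name, s, parsed.Equal(t))
	}
}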
18	vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go generated vendored
@@ -13,9 +13,13 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
-// BuildXML will serialize params into an xml.Encoder.
-// Error will be returned if the serialization of any of the params or nested values fails.
+// BuildXML will serialize params into an xml.Encoder. Error will be returned
+// if the serialization of any of the params or nested values fails.
 func BuildXML(params interface{}, e *xml.Encoder) error {
+	return buildXML(params, e, false)
+}
+
+func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
 	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
 	root := NewXMLElement(xml.Name{})
 	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
@@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error {
 	}
 	for _, c := range root.Children {
 		for _, v := range c {
-			return StructToXML(e, v, false)
+			return StructToXML(e, v, sorted)
 		}
 	}
 	return nil
@@ -278,8 +282,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
 	case float32:
 		str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
 	case time.Time:
-		const ISO8601UTC = "2006-01-02T15:04:05Z"
-		str = converted.UTC().Format(ISO8601UTC)
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.ISO8601TimeFormatName
+		}
+
+		str = protocol.FormatTime(format, converted)
 	default:
 		return fmt.Errorf("unsupported value for param %s: %v (%s)",
 			tag.Get("locationName"), value.Interface(), value.Type().Name())
20	vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go generated vendored
@@ -9,6 +9,8 @@ import (
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
 // UnmarshalXML deserializes an xml.Decoder into the container v. V
@@ -52,9 +54,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
 	if t == "" {
 		switch rtype.Kind() {
 		case reflect.Struct:
-			t = "structure"
+			// also it can't be a time object
+			if _, ok := r.Interface().(*time.Time); !ok {
+				t = "structure"
+			}
 		case reflect.Slice:
-			t = "list"
+			// also it can't be a byte slice
+			if _, ok := r.Interface().([]byte); !ok {
+				t = "list"
+			}
 		case reflect.Map:
 			t = "map"
 		}
@@ -247,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
 		}
 		r.Set(reflect.ValueOf(&v))
 	case *time.Time:
-		const ISO8601UTC = "2006-01-02T15:04:05Z"
-		t, err := time.Parse(ISO8601UTC, node.Text)
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.ISO8601TimeFormatName
+		}
+
+		t, err := protocol.ParseTime(format, node.Text)
 		if err != nil {
 			return err
 		}
1	vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go generated vendored
@@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode {
 
 // AddChild adds child to the XMLNode.
 func (n *XMLNode) AddChild(child *XMLNode) {
+	child.parent = n
 	if _, ok := n.Children[child.Name.Local]; !ok {
 		n.Children[child.Name.Local] = []*XMLNode{}
 	}
5185	vendor/github.com/aws/aws-sdk-go/service/s3/api.go generated vendored
File diff suppressed because it is too large.
249	vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go generated vendored Normal file
@@ -0,0 +1,249 @@
package s3

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"hash"
	"io"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/internal/sdkio"
)

const (
	contentMD5Header    = "Content-Md5"
	contentSha256Header = "X-Amz-Content-Sha256"
	amzTeHeader         = "X-Amz-Te"
	amzTxEncodingHeader = "X-Amz-Transfer-Encoding"

	appendMD5TxEncoding = "append-md5"
)

// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
// require it.
func contentMD5(r *request.Request) {
	h := md5.New()

	if !aws.IsReaderSeekable(r.Body) {
		if r.Config.Logger != nil {
			r.Config.Logger.Log(fmt.Sprintf(
				"Unable to compute Content-MD5 for unseekable body, S3.%s",
				r.Operation.Name))
		}
		return
	}

	if _, err := copySeekableBody(h, r.Body); err != nil {
		r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
		return
	}

	// encode the md5 checksum in base64 and set the request header.
	v := base64.StdEncoding.EncodeToString(h.Sum(nil))
	r.HTTPRequest.Header.Set(contentMD5Header, v)
}

// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
// request. If the body is not seekable or S3DisableContentMD5Validation set
// this handler will be ignored.
func computeBodyHashes(r *request.Request) {
	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
		return
	}
	if r.IsPresigned() {
		return
	}
	if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
		return
	}

	var md5Hash, sha256Hash hash.Hash
	hashers := make([]io.Writer, 0, 2)

	// Determine upfront which hashes can be set without overriding user
	// provided header data.
	if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
		md5Hash = md5.New()
		hashers = append(hashers, md5Hash)
	}

	if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
		sha256Hash = sha256.New()
		hashers = append(hashers, sha256Hash)
	}

	// Create the destination writer based on the hashes that are not already
	// provided by the user.
	var dst io.Writer
	switch len(hashers) {
	case 0:
		return
	case 1:
		dst = hashers[0]
	default:
		dst = io.MultiWriter(hashers...)
	}

	if _, err := copySeekableBody(dst, r.Body); err != nil {
		r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
		return
	}

	// For the hashes created, set the associated headers that the user did not
	// already provide.
	if md5Hash != nil {
		sum := make([]byte, md5.Size)
		encoded := make([]byte, md5Base64EncLen)

		base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
		r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
	}

	if sha256Hash != nil {
		encoded := make([]byte, sha256HexEncLen)
		sum := make([]byte, sha256.Size)

		hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
		r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
	}
}

const (
	md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
	sha256HexEncLen = sha256.Size * 2        // hex.EncodedLen
)

func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
	curPos, err := src.Seek(0, sdkio.SeekCurrent)
	if err != nil {
		return 0, err
	}

	// hash the body. seek back to the first position after reading to reset
	// the body for transmission. copy errors may be assumed to be from the
	// body.
	n, err := io.Copy(dst, src)
	if err != nil {
		return n, err
	}

	_, err = src.Seek(curPos, sdkio.SeekStart)
	if err != nil {
		return n, err
	}

	return n, nil
}

// Adds the x-amz-te: append_md5 header to the request. This requests the service
// responds with a trailing MD5 checksum.
//
// Will not ask for append MD5 if disabled, the request is presigned, or the
// API operation does not support content MD5 validation.
func askForTxEncodingAppendMD5(r *request.Request) {
	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
		return
	}
	if r.IsPresigned() {
		return
	}
	r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
}

func useMD5ValidationReader(r *request.Request) {
	if r.Error != nil {
		return
	}

	if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
		return
	}

	var bodyReader *io.ReadCloser
	var contentLen int64
	switch tv := r.Data.(type) {
	case *GetObjectOutput:
		bodyReader = &tv.Body
		contentLen = aws.Int64Value(tv.ContentLength)
		// Update ContentLength, hiding the trailing MD5 checksum.
		tv.ContentLength = aws.Int64(contentLen - md5.Size)
		tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
	default:
		r.Error = awserr.New("ChecksumValidationError",
			fmt.Sprintf("%s: %s header received on unsupported API, %s",
				amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
			), nil)
		return
	}

	if contentLen < md5.Size {
		r.Error = awserr.New("ChecksumValidationError",
			fmt.Sprintf("invalid Content-Length %d for %s %s",
				contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
			), nil)
		return
	}

	// Wrap and swap the response body reader with the validation reader.
	*bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
}

type md5ValidationReader struct {
	rawReader io.ReadCloser
	payload   io.Reader
	hash      hash.Hash

	payloadLen int64
	read       int64
}

func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
	h := md5.New()
	return &md5ValidationReader{
		rawReader:  reader,
		payload:    io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
		hash:       h,
		payloadLen: payloadLen,
	}
}

func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
	n, err = v.payload.Read(p)
	if err != nil && err != io.EOF {
		return n, err
	}

	v.read += int64(n)

	if err == io.EOF {
		if v.read != v.payloadLen {
			return n, io.ErrUnexpectedEOF
		}
		expectSum := make([]byte, md5.Size)
		actualSum := make([]byte, md5.Size)
		if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
			return n, sumReadErr
		}
		actualSum = v.hash.Sum(actualSum[0:0])
		if !bytes.Equal(expectSum, actualSum) {
			return n, awserr.New("InvalidChecksum",
				fmt.Sprintf("expected MD5 checksum %s, got %s",
					hex.EncodeToString(expectSum),
					hex.EncodeToString(actualSum),
				),
				nil)
		}
	}

	return n, err
}

func (v *md5ValidationReader) Close() error {
	return v.rawReader.Close()
}
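Editor's note: computeBodyHashes above hashes a seekable body once into several hashes via io.MultiWriter, rewinds it, and then fills in only the headers the caller did not set. The standalone sketch below, which is not part of the vendored diff, shows the same seek-hash-reset pattern with only the standard library; the header names simply mirror the constants defined above.

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := bytes.NewReader([]byte("example object payload"))

	md5Hash, sha256Hash := md5.New(), sha256.New()

	// Feed both hashes in a single pass over the body.
	if _, err := io.Copy(io.MultiWriter(md5Hash, sha256Hash), body); err != nil {
		panic(err)
	}
	// Rewind so the body could still be transmitted afterwards.
	if _, err := body.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}

	header := http.Header{}
	header.Set("Content-Md5", base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)))
	header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sha256Hash.Sum(nil)))
	fmt.Println(header)
}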
36	vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go generated vendored
@@ -1,36 +0,0 @@
package s3

import (
	"crypto/md5"
	"encoding/base64"
	"io"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
// require it.
func contentMD5(r *request.Request) {
	h := md5.New()

	// hash the body. seek back to the first position after reading to reset
	// the body for transmission. copy errors may be assumed to be from the
	// body.
	_, err := io.Copy(h, r.Body)
	if err != nil {
		r.Error = awserr.New("ContentMD5", "failed to read body", err)
		return
	}
	_, err = r.Body.Seek(0, 0)
	if err != nil {
		r.Error = awserr.New("ContentMD5", "failed to seek body", err)
		return
	}

	// encode the md5 checksum in base64 and set the request header.
	sum := h.Sum(nil)
	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
	base64.StdEncoding.Encode(sum64, sum)
	r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
}
24	vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go generated vendored
@@ -42,5 +42,29 @@ func defaultInitRequestFn(r *request.Request) {
 		r.Handlers.Validate.PushFront(populateLocationConstraint)
 	case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
 		r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
+	case opPutObject, opUploadPart:
+		r.Handlers.Build.PushBack(computeBodyHashes)
+		// Disabled until #1837 root issue is resolved.
+		// case opGetObject:
+		// 	r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
+		// 	r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
 	}
 }
+
+// bucketGetter is an accessor interface to grab the "Bucket" field from
+// an S3 type.
+type bucketGetter interface {
+	getBucket() string
+}
+
+// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
+// field from an S3 type.
+type sseCustomerKeyGetter interface {
+	getSSECustomerKey() string
+}
+
+// copySourceSSECustomerKeyGetter is an accessor interface to grab the
+// "CopySourceSSECustomerKey" field from an S3 type.
+type copySourceSSECustomerKeyGetter interface {
+	getCopySourceSSECustomerKey() string
+}
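Editor's note: the accessor interfaces added above let request handlers read fields from generated input structs with a cheap type assertion instead of reflection, which is the same shape bucketNameFromReqParams takes in the host_style_bucket.go hunk further down and validateSSERequiresSSL takes in sse.go. The sketch below is an illustration only; bucketGetter is re-declared locally and exampleInput is a hypothetical stand-in for a generated S3 input type.

package main

import "fmt"

// bucketGetter mirrors the unexported interface declared in customizations.go.
type bucketGetter interface {
	getBucket() string
}

// exampleInput is a hypothetical stand-in for a generated input such as
// *s3.PutObjectInput, which would provide getBucket() itself.
type exampleInput struct {
	Bucket string
}

func (i *exampleInput) getBucket() string { return i.Bucket }

// bucketNameFromParams follows the same pattern as the rewritten
// bucketNameFromReqParams: assert the interface, then reject empty values.
func bucketNameFromParams(params interface{}) (string, bool) {
	if iface, ok := params.(bucketGetter); ok {
		b := iface.getBucket()
		return b, len(b) > 0
	}
	return "", false
}

func main() {
	name, ok := bucketNameFromParams(&exampleInput{Bucket: "my-bucket"})
	fmt.Println(name, ok) // my-bucket true
}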
64	vendor/github.com/aws/aws-sdk-go/service/s3/doc.go generated vendored
@@ -10,69 +10,17 @@
 //
 // Using the Client
 //
-// To use the client for Amazon Simple Storage Service you will first need
-// to create a new instance of it.
+// To contact Amazon Simple Storage Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
 //
-// When creating a client for an AWS service you'll first need to have a Session
-// already created. The Session provides configuration that can be shared
-// between multiple service clients. Additional configuration can be applied to
-// the Session and service's client when they are constructed. The aws package's
-// Config type contains several fields such as Region for the AWS Region the
-// client should make API requests too. The optional Config value can be provided
-// as the variadic argument for Sessions and client creation.
-//
-// Once the service's client is created you can use it to make API requests the
-// AWS service. These clients are safe to use concurrently.
-//
-//    // Create a session to share configuration, and load external configuration.
-//    sess := session.Must(session.NewSession())
-//
-//    // Create the service's client with the session.
-//    svc := s3.New(sess)
-//
-// See the SDK's documentation for more information on how to use service clients.
+// See the SDK's documentation for more information on how to use the SDK.
 // https://docs.aws.amazon.com/sdk-for-go/api/
 //
-// See aws package's Config type for more information on configuration options.
+// See aws.Config documentation for more information on configuring SDK clients.
 // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
 //
 // See the Amazon Simple Storage Service client S3 for more
-// information on creating the service's client.
+// information on creating client for this service.
 // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
-//
-// Once the client is created you can make an API request to the service.
-// Each API method takes a input parameter, and returns the service response
-// and an error.
-//
-// The API method will document which error codes the service can be returned
-// by the operation if the service models the API operation's errors. These
-// errors will also be available as const strings prefixed with "ErrCode".
-//
-//    result, err := svc.AbortMultipartUpload(params)
-//    if err != nil {
-//        // Cast err to awserr.Error to handle specific error codes.
-//        aerr, ok := err.(awserr.Error)
-//        if ok && aerr.Code() == <error code to check for> {
-//            // Specific error code handling
-//        }
-//        return err
-//    }
-//
-//    fmt.Println("AbortMultipartUpload result:")
-//    fmt.Println(result)
-//
-// Using the Client with Context
-//
-// The service's client also provides methods to make API requests with a Context
-// value. This allows you to control the timeout, and cancellation of pending
-// requests. These methods also take request Option as variadic parameter to apply
-// additional configuration to the API request.
-//
-//    ctx := context.Background()
-//
-//    result, err := svc.AbortMultipartUploadWithContext(ctx, params)
-//
-// See the request package documentation for more information on using Context pattern
-// with the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
 package s3
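Editor's note: the trimmed doc.go text above points at the New function without showing the setup it replaces. The sketch below, not taken from the diff, shows the minimal client creation and a simple call; region and credentials are assumed to come from the environment or shared config.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Create a session to share configuration, and load external configuration.
	sess := session.Must(session.NewSession())

	// Create the service's client with the session.
	svc := s3.New(sess)

	result, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		panic(err)
	}
	for _, b := range result.Buckets {
		fmt.Println(*b.Name)
	}
}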
4	vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go generated vendored
@@ -35,7 +35,7 @@
 //
 // The s3manager package's Downloader provides concurrently downloading of Objects
 // from S3. The Downloader will write S3 Object content with an io.WriterAt.
-// Once the Downloader instance is created you can call Upload concurrently from
+// Once the Downloader instance is created you can call Download concurrently from
 // multiple goroutines safely.
 //
 //    // The session the S3 Downloader will use
@@ -56,7 +56,7 @@
 //        Key:    aws.String(myString),
 //    })
 //    if err != nil {
-//        return fmt.Errorf("failed to upload file, %v", err)
+//        return fmt.Errorf("failed to download file, %v", err)
 //    }
 //    fmt.Printf("file downloaded, %d bytes\n", n)
 //
13	vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go generated vendored
@@ -8,7 +8,6 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
@@ -113,15 +112,9 @@ func updateEndpointForAccelerate(r *request.Request) {
 // Attempts to retrieve the bucket name from the request input parameters.
 // If no bucket is found, or the field is empty "", false will be returned.
 func bucketNameFromReqParams(params interface{}) (string, bool) {
-	b, _ := awsutil.ValuesAtPath(params, "Bucket")
-	if len(b) == 0 {
-		return "", false
-	}
-
-	if bucket, ok := b[0].(*string); ok {
-		if bucketStr := aws.StringValue(bucket); bucketStr != "" {
-			return bucketStr, true
-		}
+	if iface, ok := params.(bucketGetter); ok {
+		b := iface.getBucket()
+		return b, len(b) > 0
 	}
 
 	return "", false
8	vendor/github.com/aws/aws-sdk-go/service/s3/service.go generated vendored
@@ -29,8 +29,9 @@ var initRequest func(*request.Request)
 
 // Service information constants
 const (
-	ServiceName = "s3"        // Service endpoint prefix API calls made to.
-	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+	ServiceName = "s3"        // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "S3"        // ServiceID is a unique identifier of a specific service.
 )
 
 // New creates a new instance of the S3 client with a session.
@@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
 		cfg,
 		metadata.ClientInfo{
 			ServiceName:   ServiceName,
+			ServiceID:     ServiceID,
 			SigningName:   signingName,
 			SigningRegion: signingRegion,
 			Endpoint:      endpoint,
@@ -71,6 +73,8 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
 	svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
 	svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
 
+	svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
+
 	// Run custom client initialization if present
 	if initClient != nil {
 		initClient(svc.Client)
18	vendor/github.com/aws/aws-sdk-go/service/s3/sse.go generated vendored
@@ -5,17 +5,27 @@ import (
 	"encoding/base64"
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
 var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
 
 func validateSSERequiresSSL(r *request.Request) {
-	if r.HTTPRequest.URL.Scheme != "https" {
-		p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey")
-		if len(p) > 0 {
+	if r.HTTPRequest.URL.Scheme == "https" {
+		return
+	}
+
+	if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+		if len(iface.getSSECustomerKey()) > 0 {
 			r.Error = errSSERequiresSSL
+			return
+		}
+	}
+
+	if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+		if len(iface.getCopySourceSSECustomerKey()) > 0 {
+			r.Error = errSSERequiresSSL
+			return
 		}
 	}
 }
3	vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go generated vendored
@@ -7,6 +7,7 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
 )
 
 func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
@@ -17,7 +18,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
 	}
 	body := bytes.NewReader(b)
 	r.HTTPResponse.Body = ioutil.NopCloser(body)
-	defer body.Seek(0, 0)
+	defer body.Seek(0, sdkio.SeekStart)
 
 	if body.Len() == 0 {
 		// If there is no body don't attempt to parse the body.
Some files were not shown because too many files have changed in this diff.